diff --git a/.ci-operator.yaml b/.ci-operator.yaml index e307e5af6..461415cbc 100644 --- a/.ci-operator.yaml +++ b/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: release namespace: openshift - tag: rhel-9-release-golang-1.24-openshift-4.21 + tag: rhel-9-release-golang-1.24-openshift-4.20 diff --git a/.github/ISSUE_TEMPLATE/add_new_machine_image.md b/.github/ISSUE_TEMPLATE/add_new_machine_image.md new file mode 100644 index 000000000..384c274a2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/add_new_machine_image.md @@ -0,0 +1,38 @@ +--- +name: Onboard new machine images for latest Kubernetes version +about: Create an issue to track tasks for onboarding new machine images of latest Kubernetes version +title: Onboard new machine images for Kubernetes version v<> + +--- + +/area provider/ibmcloud + +## Tasks + +- [ ] Build images using automation in [image-builder](https://github.com/kubernetes-sigs/image-builder) repository + - [ ] VPC + - [ ] PowerVS + - [ ] PowerVS with DHCP support + +- Test the images + - [ ] VPC + - [ ] PowerVS + - [ ] PowerVS with DHCP support + +- [ ] Update [documentation](https://cluster-api-ibmcloud.sigs.k8s.io/machine-images/) + +- [ ] Import the new images to VPC and PowerVS workspaces for CI + +- [ ] Update Kubernetes version in E2E [config files](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/tree/main/test/e2e/config) + +- [ ] Update [E2E script](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/scripts/ci-e2e.sh) with latest image names + + +**Notes**: +* The format of the new image name should be as follows: + * VPC: capibm-vpc-{os-distribution}- + {os-version}-kube-v{k8s-version} + * ex: capibm-vpc-ubuntu-2404-kube-v1-32-3 + * PowerVS: capibm-powervs-{os-distribution}- + {os-version}-{k8s-version} + * ex: capibm-powervs-centos-streams9-1-32-3 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/cluster_api_version_update.md 
b/.github/ISSUE_TEMPLATE/cluster_api_version_update.md index 7b6d7811a..5db8eaf87 100644 --- a/.github/ISSUE_TEMPLATE/cluster_api_version_update.md +++ b/.github/ISSUE_TEMPLATE/cluster_api_version_update.md @@ -20,7 +20,6 @@ Update cluster-api version Update Kubernetes version - [ ] [go.mod](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/go.mod) - [ ] [Kubebuilder version](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/Makefile#L84) -- [ ] [scripts](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/fetch_ext_bins.sh#L29) If Go version is bumped, update it in the following files diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md index 971af93c8..4601dab90 100644 --- a/.github/ISSUE_TEMPLATE/release.md +++ b/.github/ISSUE_TEMPLATE/release.md @@ -15,6 +15,7 @@ After every CAPIBM major version release: - [ ] [Update release support data in docs](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/docs/book/src/developer/release-support-guidelines.md) - [ ] [Update docs with reference to latest release](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/README.md#compatibility-with-cluster-api-and-kubernetes-versions) - [ ] Update and add documentation link for new release branch in Netlify +- [ ] Update capibmadm tool to the latest version after each new release - [ ] Add new presubmit job for latest release branch in [kubernetes/test-infra](https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-sigs/cluster-api-provider-ibmcloud) - [ ] Update kubekins-e2e image to relevent Kubernetes version - [ ] Add E2E CI jobs for latest release branch in [ppc64le-cloud/test-infra](https://github.com/ppc64le-cloud/test-infra/blob/master/config/jobs/periodic/cluster-api-provider-ibmcloud/test-e2e-capi-ibmcloud-periodics.yaml) diff --git a/.github/workflows/validate-yaml-lint.yaml 
b/.github/workflows/validate-yaml-lint.yaml deleted file mode 100644 index 13380adab..000000000 --- a/.github/workflows/validate-yaml-lint.yaml +++ /dev/null @@ -1,9 +0,0 @@ -name: YamlLint -on: [push, pull_request] -jobs: - yamllint: - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v4 - - name: Run yamllint make target - run: make yamllint diff --git a/.github/workflows/weekly-security-scan.yaml b/.github/workflows/weekly-security-scan.yaml index e3d0f8747..d69d9edcf 100644 --- a/.github/workflows/weekly-security-scan.yaml +++ b/.github/workflows/weekly-security-scan.yaml @@ -13,19 +13,19 @@ jobs: strategy: fail-fast: false matrix: - branch: [ main, release-0.10, release-0.9 ] + branch: [ main, release-0.11, release-0.10 ] name: Trivy runs-on: ubuntu-24.04 steps: - name: Check out code - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # tag=v4.1.7 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 with: ref: ${{ matrix.branch }} - name: Calculate go version id: vars run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT - name: Set up Go - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # tag=v5.0.2 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # tag=v5.5.0 with: go-version: ${{ steps.vars.outputs.go_version }} - name: Run verify security target diff --git a/.golangci.yml b/.golangci.yml index a13693d85..5a360bb4e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,215 +1,231 @@ +version: "2" +run: + go: "1.24" + build-tags: + - tools + - e2e + allow-parallel-runners: true linters: - disable-all: true + default: none enable: - - asasalint - - asciicheck - - bodyclose - - containedctx - - decorder - - dogsled - - errcheck - - errchkjson - - copyloopvar - - gci - - goconst - - gocritic - - gocyclo - - godot - - gofmt - - goimports - - goprintffuncname - - gosec - - gosimple - - govet - - importas - - ineffassign - - misspell - - nakedret - - nilerr - - noctx - - 
nolintlint - - nosprintfhostport - - prealloc - - predeclared - - reassign - - revive - - rowserrcheck - - staticcheck - - stylecheck - - thelper - - typecheck - - unconvert - - unparam - - unused - - usestdlibvars - - whitespace - -linters-settings: - gocyclo: - min-complexity: 20 - godot: - # declarations - for top level declaration comments (default); - # toplevel - for top level comments; - # all - for all comments. - scope: toplevel - exclude: - - '^ \+.*' - - '^ ANCHOR.*' - gci: - sections: - - standard - - default - - prefix(github.com/IBM) - - prefix(k8s.io) - - prefix(sigs.k8s.io) - - prefix(sigs.k8s.io/cluster-api) - - prefix(sigs.k8s.io/cluster-api-provider-ibmcloud) - - blank - - dot - importas: - no-unaliased: true - alias: - # Kubernetes - - pkg: k8s.io/api/core/v1 - alias: corev1 - - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 - alias: apiextensionsv1 - - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 - alias: metav1 - - pkg: k8s.io/apimachinery/pkg/api/errors - alias: apierrors - - pkg: k8s.io/apimachinery/pkg/util/errors - alias: kerrors - # Controller Runtime - - pkg: sigs.k8s.io/controller-runtime - alias: ctrl - # CAPI - - pkg: sigs.k8s.io/cluster-api/api/v1alpha3 - alias: capiv1alpha3 - - pkg: sigs.k8s.io/cluster-api/api/v1alpha4 - alias: capiv1alpha4 - - pkg: sigs.k8s.io/cluster-api/api/v1beta1 - alias: capiv1beta1 - # CAPI-IBMCLOUD - - pkg: sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta1 - alias: infrav1beta1 - - pkg: sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2 - alias: infrav1beta2 - nolintlint: - allow-unused: false - allow-leading-space: false - require-specific: true - gosec: - excludes: - - G307 # Deferring unsafe method "Close" on type "\*os.File" - - G108 # Profiling endpoint is automatically exposed on /debug/pprof - gocritic: - enabled-tags: - - experimental - disabled-checks: - - appendAssign - - dupImport # https://github.com/go-critic/go-critic/issues/845 - - evalOrder - - ifElseChain - - octalLiteral 
- - regexpSimplify - - sloppyReassign - - truncateCmp - - typeDefFirst - - unnamedResult - - unnecessaryDefer - - whyNoLint - - wrapperFunc - unused: - go: "1.23" -issues: - max-same-issues: 0 - max-issues-per-linter: 0 - # We are disabling default golangci exclusions because we want to help reviewers to focus on reviewing the most relevant - # changes in PRs and avoid nitpicking. - exclude-use-default: false - exclude-rules: - - linters: - - gci - path: _test\.go - - linters: - - revive - text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported" - - linters: + - asasalint + - asciicheck + - bodyclose + - containedctx + - copyloopvar + - decorder + - dogsled - errcheck - text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked - # Exclude some packages or code to require comments, for example test code, or fake clients. - - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - source: (func|type).*Fake.* - - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - path: fake_\.go - - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - path: "(framework|e2e)/.*.go" - # Disable unparam "always receives" which might not be really - # useful when building libraries. - - linters: - - unparam - text: always receives - # Dot imports for gomega or ginkgo are allowed - # within test files. - - path: _test\.go - text: should not use dot imports - - path: (framework|e2e)/.*.go - text: should not use dot imports - - path: _test\.go - text: cyclomatic complexity - # Append should be able to assign to a different var/slice. 
- - linters: + - errchkjson + - goconst - gocritic - text: "appendAssign: append result not assigned to the same slice" - # Disable linters for conversion - - linters: - - staticcheck - text: "SA1019: in.(.+) is deprecated" - path: .*(api|types)\/.*\/.*conversion.*\.go$ - - linters: + - gocyclo + - godot + - goprintffuncname + - gosec + - govet + - importas + - ineffassign + - misspell + - nakedret + - nilerr + - noctx + - nolintlint + - nosprintfhostport + - prealloc + - predeclared + - reassign - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - path: .*(api|types)\/.*\/.*conversion.*\.go$ - - linters: - - revive - text: "var-naming: don't use underscores in Go names;" - path: .*(api|types)\/.*\/.*conversion.*\.go$ - - linters: - - revive - text: "receiver-naming: receiver name" - path: .*(api|types)\/.*\/.*conversion.*\.go$ - - linters: - - stylecheck - text: "ST1003: should not use underscores in Go names;" - path: .*(api|types)\/.*\/.*conversion.*\.go$ - - linters: - - stylecheck - text: "ST1016: methods on the same type should have the same receiver name" - path: .*(api|types)\/.*\/.*conversion.*\.go$ - # hack/tools - - linters: - - typecheck - text: import (".+") is a program, not an importable package - path: ^tools\.go$ - # We don't care about defer in for loops in test files. 
- - linters: - - gocritic - text: "deferInLoop: Possible resource leak, 'defer' is called in the 'for' loop" - path: _test\.go - exclude-files: - - "zz_generated.*\\.go$" - -run: - go: "1.23" - timeout: 10m - build-tags: - - tools - - e2e - allow-parallel-runners: true + - rowserrcheck + - staticcheck + - thelper + - unconvert + - unparam + - unused + - usestdlibvars + - whitespace + settings: + gocritic: + disabled-checks: + - appendAssign + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - evalOrder + - ifElseChain + - octalLiteral + - regexpSimplify + - sloppyReassign + - truncateCmp + - typeDefFirst + - unnamedResult + - unnecessaryDefer + - whyNoLint + - wrapperFunc + enabled-tags: + - experimental + gocyclo: + min-complexity: 20 + godot: + scope: toplevel + exclude: + - ^ \+.* + - ^ ANCHOR.* + gosec: + excludes: + - G307 # Deferring unsafe method "Close" on type "\*os.File" + - G108 # Profiling endpoint is automatically exposed on /debug/pprof + importas: + alias: + # Kubernetes + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 + alias: apiextensionsv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 + - pkg: k8s.io/apimachinery/pkg/api/errors + alias: apierrors + - pkg: k8s.io/apimachinery/pkg/util/errors + alias: kerrors + # Controller Runtime + - pkg: sigs.k8s.io/controller-runtime + alias: ctrl + # CAPI + - pkg: sigs.k8s.io/cluster-api/api/v1alpha3 + alias: capiv1alpha3 + - pkg: sigs.k8s.io/cluster-api/api/v1alpha4 + alias: capiv1alpha4 + - pkg: sigs.k8s.io/cluster-api/api/v1beta1 + alias: clusterv1beta1 + # CAPI-IBMCLOUD + - pkg: sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta1 + alias: infrav1beta1 + - pkg: sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2 + alias: infrav1 + no-unaliased: true + nolintlint: + require-specific: true + allow-unused: false + staticcheck: + checks: + - all + # QF1006: could lift into loop condition + - -QF1006 + # QF1007: 
could merge conditional assignment into variable declaration + - -QF1007 + # QF1008 could remove embedded field from selector + - -QF1008 + exclusions: + generated: lax + rules: + # Exclude some packages or code to require comments, for example test code, or fake clients. + - linters: + - revive + text: 'exported: exported method .*\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported' + - linters: + - errcheck + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?(S|s)?etenv). is not checked + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + source: (func|type).*Fake.* + - linters: + - revive + path: fake_\.go + text: exported (method|function|type|const) (.+) should have comment or be unexported + - linters: + - revive + path: (framework|e2e)/.*.go + text: exported (method|function|type|const) (.+) should have comment or be unexported + # Disable unparam "always receives" which might not be really + # useful when building libraries. + - linters: + - unparam + text: always receives + # Dot imports for gomega or ginkgo are allowed + # within test files. + - path: _test\.go + text: should not use dot imports + - path: (framework|e2e)/.*.go + text: should not use dot imports + - path: _test\.go + text: cyclomatic complexity + # Append should be able to assign to a different var/slice. 
+ - linters: + - gocritic + text: 'appendAssign: append result not assigned to the same slice' + # Disable linters for conversion + - linters: + - staticcheck + path: .*(api|types)\/.*\/.*conversion.*\.go$ + text: 'SA1019: in.(.+) is deprecated' + - linters: + - revive + path: .*(api|types)\/.*\/.*conversion.*\.go$ + text: exported (method|function|type|const) (.+) should have comment or be unexported + - linters: + - revive + path: .*(api|types)\/.*\/.*conversion.*\.go$ + text: 'var-naming: don''t use underscores in Go names;' + - linters: + - revive + path: .*(api|types)\/.*\/.*conversion.*\.go$ + text: 'receiver-naming: receiver name' + - linters: + - staticcheck + text: "ST1003: should not use underscores in Go names;" + path: .*(api|types)\/.*\/.*conversion.*\.go$ + - linters: + - staticcheck + text: "ST1016: methods on the same type should have the same receiver name" + path: .*(api|types)\/.*\/.*conversion.*\.go$ + - linters: + - staticcheck + path: .*(api|types)\/.*\/.*conversion.*\.go$ + text: 'ST1003: should not use underscores in Go names;' + - linters: + - staticcheck + path: .*(api|types)\/.*\/.*conversion.*\.go$ + text: 'ST1016: methods on the same type should have the same receiver name' + # hack/tools + - linters: + - typecheck + text: import (".+") is a program, not an importable package + path: ^tools\.go$ + # We don't care about defer in for loops in test files. 
+ - linters: + - gocritic + path: _test\.go + text: 'deferInLoop: Possible resource leak, ''defer'' is called in the ''for'' loop' + paths: + - zz_generated.*\.go$ + - third_party$ + - builtin$ + - examples$ +issues: + max-issues-per-linter: 0 + max-same-issues: 0 +formatters: + enable: + - gci + - gofmt + - goimports + settings: + gci: + sections: + - standard + - default + - prefix(github.com/IBM) + - prefix(k8s.io) + - prefix(sigs.k8s.io) + - prefix(sigs.k8s.io/cluster-api) + - prefix(sigs.k8s.io/cluster-api-provider-ibmcloud) + - blank + - dot + exclusions: + generated: lax + paths: + - zz_generated.*\.go$ + - third_party$ + - builtin$ + - examples$ + - _test\.go diff --git a/Dockerfile b/Dockerfile index 5fb26fb67..43e5b7375 100644 --- a/Dockerfile +++ b/Dockerfile @@ -39,7 +39,6 @@ COPY api/ api/ COPY controllers/ controllers/ COPY cloud/ cloud/ COPY pkg/ pkg/ -COPY util/ util/ COPY internal/ internal/ # Build diff --git a/Makefile b/Makefile index b2fc8276e..880f09a80 100644 --- a/Makefile +++ b/Makefile @@ -84,7 +84,7 @@ RELEASE_NOTES_DIR := CHANGELOG OUTPUT_TYPE ?= type=registry # Go -GO_VERSION ?=1.23.8 +GO_VERSION ?=1.24.6 GO_CONTAINER_IMAGE ?= golang:$(GO_VERSION) # Trivy @@ -105,7 +105,7 @@ PULL_POLICY ?= Always # Set build time variables including version details LDFLAGS := $(shell ./hack/version.sh) -KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.32.0 +KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.33.0 # main controller CORE_IMAGE_NAME ?= cluster-api-ibmcloud-controller @@ -179,8 +179,8 @@ help: # Display this help # Generate code .PHONY: generate -generate: ## Run all generate-go generate-modules generate-manifests generate-go-deepcopy generate-go-conversions generate-templates - $(MAKE) generate-go generate-modules generate-manifests generate-go-deepcopy generate-go-conversions generate-templates +generate: ## Run all generate-go generate-modules generate-manifests generate-go-deepcopy generate-go-conversions generate-templates generate-e2e-templates + 
$(MAKE) generate-go generate-modules generate-manifests generate-go-deepcopy generate-go-conversions generate-templates generate-e2e-templates generate-go-deepcopy: $(CONTROLLER_GEN) ## Generate deepcopy go code $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." @@ -211,11 +211,8 @@ generate-templates: $(KUSTOMIZE) ## Generate cluster templates .PHONY: generate-e2e-templates generate-e2e-templates: $(KUSTOMIZE) ## Generate E2E cluster templates -ifeq ($(E2E_FLAVOR), powervs-md-remediation) $(KUSTOMIZE) build $(E2E_TEMPLATES)/cluster-template-powervs-md-remediation --load-restrictor LoadRestrictionsNone > $(E2E_TEMPLATES)/cluster-template-powervs-md-remediation.yaml -else $(KUSTOMIZE) build $(E2E_TEMPLATES)/cluster-template-vpc --load-restrictor LoadRestrictionsNone > $(E2E_TEMPLATES)/cluster-template-vpc.yaml -endif .PHONY: generate-modules generate-modules: ## Runs go mod to ensure modules are up to date @@ -498,7 +495,7 @@ docker-build-core-image: ensure-buildx ## Build the multiarch core docker image .PHONY: lint lint: $(GOLANGCI_LINT) ## Lint codebase - $(GOLANGCI_LINT) run -v --fast=false + $(GOLANGCI_LINT) run -v --fast-only=false .PHONY: lint-fix lint-fix: $(GOLANGCI_LINT) ## Lint the codebase and run auto-fixers if supported by the linter @@ -518,7 +515,7 @@ define checkdiff git --no-pager diff --name-only FETCH_HEAD endef -ALL_VERIFY_CHECKS = boilerplate shellcheck modules gen conversions go-version +ALL_VERIFY_CHECKS = boilerplate shellcheck modules gen conversions go-version yamllint linkcheck .PHONY: verify verify: $(addprefix verify-,$(ALL_VERIFY_CHECKS)) ## Run all verify-* targets @@ -586,9 +583,15 @@ else endif -.PHONY: yamllint -yamllint: - @docker run --rm $$(tty -s && echo "-it" || echo) -v $(PWD):/data cytopia/yamllint:latest /data --config-file /data/.yamllint --no-warnings +CURR_DIR := $(shell pwd) +.PHONY: verify-yamllint +verify-yamllint: + @docker run -v $(CURR_DIR):/data cytopia/yamllint:latest /data --config-file 
/data/.yamllint --no-warnings + +MD_FILES := $(shell find . -iname "*.md") +.PHONY: verify-linkcheck +verify-linkcheck: + @docker run --init -w /input -v $(CURR_DIR):/input ghcr.io/tcort/markdown-link-check:3.12 -q -p $(MD_FILES) ## -------------------------------------- ## Cleanup / Verification diff --git a/README.md b/README.md index 5b0e6b666..6ae05f0bc 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ This provider's versions are compatible with the following versions of Cluster A |:----------------------------------------|:---------------:|:--------------:| | CAPIBM v1alpha4 (v0.1.x) | ✓ | | | CAPIBM v1beta1 (v0.2.x, v0.3.x) | | ✓ | -| CAPIBM v1beta2 (v0.[4-10].x, main) | | ✓ | +| CAPIBM v1beta2 (v0.[4-11].x, main) | | ✓ | (See [Kubernetes support matrix][cluster-api-supported-v] of Cluster API versions). diff --git a/RELEASE.md b/RELEASE.md index 6395d3d02..0cd9d5b75 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -11,7 +11,7 @@ The Kubernetes cluster-api-provider-ibmcloud is released on an as-needed basis. The process is as follows: 1. An issue is proposing a new release with a changelog since the last release -1. All [OWNERS](OWNERS) must LGTM this release -1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` -1. The release issue is closed -1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] cluster-api-provider-ibmcloud $VERSION is released` +2. All [OWNERS](OWNERS) must LGTM this release +3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` +4. The release issue is closed +5. 
An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] cluster-api-provider-ibmcloud $VERSION is released` diff --git a/api/v1beta1/conditions_consts.go b/api/v1beta1/conditions_consts.go index cdc8fc595..041b6c059 100644 --- a/api/v1beta1/conditions_consts.go +++ b/api/v1beta1/conditions_consts.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) const ( @@ -42,7 +42,7 @@ const ( const ( // InstanceReadyCondition reports on current status of the instance. Ready indicates the instance is in a Running state. - InstanceReadyCondition capiv1beta1.ConditionType = "InstanceReady" + InstanceReadyCondition clusterv1beta1.ConditionType = "InstanceReady" ) const ( @@ -60,10 +60,10 @@ const ( const ( // ImageReadyCondition reports on current status of the image. Ready indicates the image is in a active state. - ImageReadyCondition capiv1beta1.ConditionType = "ImageReady" + ImageReadyCondition clusterv1beta1.ConditionType = "ImageReady" // ImageImportedCondition reports on current status of the image import job. Ready indicates the import job is finished. - ImageImportedCondition capiv1beta1.ConditionType = "ImageImported" + ImageImportedCondition clusterv1beta1.ConditionType = "ImageImported" ) const ( @@ -73,5 +73,5 @@ const ( const ( // LoadBalancerReadyCondition reports on current status of the load balancer. Ready indicates the load balancer is in a active state. 
- LoadBalancerReadyCondition capiv1beta1.ConditionType = "LoadBalancerReady" + LoadBalancerReadyCondition clusterv1beta1.ConditionType = "LoadBalancerReady" ) diff --git a/api/v1beta1/ibmpowervs_conversion.go b/api/v1beta1/ibmpowervs_conversion.go index 019ad5796..5cc85a66f 100644 --- a/api/v1beta1/ibmpowervs_conversion.go +++ b/api/v1beta1/ibmpowervs_conversion.go @@ -26,130 +26,130 @@ import ( "sigs.k8s.io/controller-runtime/pkg/conversion" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" ) func (src *IBMPowerVSCluster) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMPowerVSCluster) + dst := dstRaw.(*infrav1.IBMPowerVSCluster) return Convert_v1beta1_IBMPowerVSCluster_To_v1beta2_IBMPowerVSCluster(src, dst, nil) } func (dst *IBMPowerVSCluster) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMPowerVSCluster) + src := srcRaw.(*infrav1.IBMPowerVSCluster) return Convert_v1beta2_IBMPowerVSCluster_To_v1beta1_IBMPowerVSCluster(src, dst, nil) } func (src *IBMPowerVSClusterList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMPowerVSClusterList) + dst := dstRaw.(*infrav1.IBMPowerVSClusterList) return Convert_v1beta1_IBMPowerVSClusterList_To_v1beta2_IBMPowerVSClusterList(src, dst, nil) } func (dst *IBMPowerVSClusterList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMPowerVSClusterList) + src := srcRaw.(*infrav1.IBMPowerVSClusterList) return Convert_v1beta2_IBMPowerVSClusterList_To_v1beta1_IBMPowerVSClusterList(src, dst, nil) } func (src *IBMPowerVSClusterTemplate) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMPowerVSClusterTemplate) + dst := dstRaw.(*infrav1.IBMPowerVSClusterTemplate) return Convert_v1beta1_IBMPowerVSClusterTemplate_To_v1beta2_IBMPowerVSClusterTemplate(src, dst, nil) } func (dst *IBMPowerVSClusterTemplate) ConvertFrom(srcRaw 
conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMPowerVSClusterTemplate) + src := srcRaw.(*infrav1.IBMPowerVSClusterTemplate) return Convert_v1beta2_IBMPowerVSClusterTemplate_To_v1beta1_IBMPowerVSClusterTemplate(src, dst, nil) } func (src *IBMPowerVSClusterTemplateList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMPowerVSClusterTemplateList) + dst := dstRaw.(*infrav1.IBMPowerVSClusterTemplateList) return Convert_v1beta1_IBMPowerVSClusterTemplateList_To_v1beta2_IBMPowerVSClusterTemplateList(src, dst, nil) } func (dst *IBMPowerVSClusterTemplateList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMPowerVSClusterTemplateList) + src := srcRaw.(*infrav1.IBMPowerVSClusterTemplateList) return Convert_v1beta2_IBMPowerVSClusterTemplateList_To_v1beta1_IBMPowerVSClusterTemplateList(src, dst, nil) } func (src *IBMPowerVSMachine) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMPowerVSMachine) + dst := dstRaw.(*infrav1.IBMPowerVSMachine) return Convert_v1beta1_IBMPowerVSMachine_To_v1beta2_IBMPowerVSMachine(src, dst, nil) } func (dst *IBMPowerVSMachine) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMPowerVSMachine) + src := srcRaw.(*infrav1.IBMPowerVSMachine) return Convert_v1beta2_IBMPowerVSMachine_To_v1beta1_IBMPowerVSMachine(src, dst, nil) } func (src *IBMPowerVSMachineList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMPowerVSMachineList) + dst := dstRaw.(*infrav1.IBMPowerVSMachineList) return Convert_v1beta1_IBMPowerVSMachineList_To_v1beta2_IBMPowerVSMachineList(src, dst, nil) } func (dst *IBMPowerVSMachineList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMPowerVSMachineList) + src := srcRaw.(*infrav1.IBMPowerVSMachineList) return Convert_v1beta2_IBMPowerVSMachineList_To_v1beta1_IBMPowerVSMachineList(src, dst, nil) } func (src *IBMPowerVSMachineTemplate) ConvertTo(dstRaw conversion.Hub) error 
{ - dst := dstRaw.(*infrav1beta2.IBMPowerVSMachineTemplate) + dst := dstRaw.(*infrav1.IBMPowerVSMachineTemplate) return Convert_v1beta1_IBMPowerVSMachineTemplate_To_v1beta2_IBMPowerVSMachineTemplate(src, dst, nil) } func (dst *IBMPowerVSMachineTemplate) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMPowerVSMachineTemplate) + src := srcRaw.(*infrav1.IBMPowerVSMachineTemplate) return Convert_v1beta2_IBMPowerVSMachineTemplate_To_v1beta1_IBMPowerVSMachineTemplate(src, dst, nil) } func (src *IBMPowerVSMachineTemplateList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMPowerVSMachineTemplateList) + dst := dstRaw.(*infrav1.IBMPowerVSMachineTemplateList) return Convert_v1beta1_IBMPowerVSMachineTemplateList_To_v1beta2_IBMPowerVSMachineTemplateList(src, dst, nil) } func (dst *IBMPowerVSMachineTemplateList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMPowerVSMachineTemplateList) + src := srcRaw.(*infrav1.IBMPowerVSMachineTemplateList) return Convert_v1beta2_IBMPowerVSMachineTemplateList_To_v1beta1_IBMPowerVSMachineTemplateList(src, dst, nil) } func (src *IBMPowerVSImage) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMPowerVSImage) + dst := dstRaw.(*infrav1.IBMPowerVSImage) return Convert_v1beta1_IBMPowerVSImage_To_v1beta2_IBMPowerVSImage(src, dst, nil) } func (dst *IBMPowerVSImage) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMPowerVSImage) + src := srcRaw.(*infrav1.IBMPowerVSImage) return Convert_v1beta2_IBMPowerVSImage_To_v1beta1_IBMPowerVSImage(src, dst, nil) } func (src *IBMPowerVSImageList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMPowerVSImageList) + dst := dstRaw.(*infrav1.IBMPowerVSImageList) return Convert_v1beta1_IBMPowerVSImageList_To_v1beta2_IBMPowerVSImageList(src, dst, nil) } func (dst *IBMPowerVSImageList) ConvertFrom(srcRaw conversion.Hub) error { - src := 
srcRaw.(*infrav1beta2.IBMPowerVSImageList) + src := srcRaw.(*infrav1.IBMPowerVSImageList) return Convert_v1beta2_IBMPowerVSImageList_To_v1beta1_IBMPowerVSImageList(src, dst, nil) } -func Convert_v1beta1_IBMPowerVSMachineSpec_To_v1beta2_IBMPowerVSMachineSpec(in *IBMPowerVSMachineSpec, out *infrav1beta2.IBMPowerVSMachineSpec, s apiconversion.Scope) error { +func Convert_v1beta1_IBMPowerVSMachineSpec_To_v1beta2_IBMPowerVSMachineSpec(in *IBMPowerVSMachineSpec, out *infrav1.IBMPowerVSMachineSpec, s apiconversion.Scope) error { out.SystemType = in.SysType out.Processors = intstr.FromString(in.Processors) @@ -160,18 +160,18 @@ func Convert_v1beta1_IBMPowerVSMachineSpec_To_v1beta2_IBMPowerVSMachineSpec(in * out.MemoryGiB = int32(memory) switch in.ProcType { - case strings.ToLower(string(infrav1beta2.PowerVSProcessorTypeDedicated)): - out.ProcessorType = infrav1beta2.PowerVSProcessorTypeDedicated - case strings.ToLower(string(infrav1beta2.PowerVSProcessorTypeShared)): - out.ProcessorType = infrav1beta2.PowerVSProcessorTypeShared - case strings.ToLower(string(infrav1beta2.PowerVSProcessorTypeCapped)): - out.ProcessorType = infrav1beta2.PowerVSProcessorTypeCapped + case strings.ToLower(string(infrav1.PowerVSProcessorTypeDedicated)): + out.ProcessorType = infrav1.PowerVSProcessorTypeDedicated + case strings.ToLower(string(infrav1.PowerVSProcessorTypeShared)): + out.ProcessorType = infrav1.PowerVSProcessorTypeShared + case strings.ToLower(string(infrav1.PowerVSProcessorTypeCapped)): + out.ProcessorType = infrav1.PowerVSProcessorTypeCapped } return autoConvert_v1beta1_IBMPowerVSMachineSpec_To_v1beta2_IBMPowerVSMachineSpec(in, out, s) } -func Convert_v1beta2_IBMPowerVSMachineSpec_To_v1beta1_IBMPowerVSMachineSpec(in *infrav1beta2.IBMPowerVSMachineSpec, out *IBMPowerVSMachineSpec, s apiconversion.Scope) error { +func Convert_v1beta2_IBMPowerVSMachineSpec_To_v1beta1_IBMPowerVSMachineSpec(in *infrav1.IBMPowerVSMachineSpec, out *IBMPowerVSMachineSpec, s apiconversion.Scope) error { 
out.SysType = in.SystemType out.Memory = strconv.FormatInt(int64(in.MemoryGiB), 10) @@ -183,33 +183,37 @@ func Convert_v1beta2_IBMPowerVSMachineSpec_To_v1beta1_IBMPowerVSMachineSpec(in * } switch in.ProcessorType { - case infrav1beta2.PowerVSProcessorTypeDedicated: - out.ProcType = strings.ToLower(string(infrav1beta2.PowerVSProcessorTypeDedicated)) - case infrav1beta2.PowerVSProcessorTypeShared: - out.ProcType = strings.ToLower(string(infrav1beta2.PowerVSProcessorTypeShared)) - case infrav1beta2.PowerVSProcessorTypeCapped: - out.ProcType = strings.ToLower(string(infrav1beta2.PowerVSProcessorTypeCapped)) + case infrav1.PowerVSProcessorTypeDedicated: + out.ProcType = strings.ToLower(string(infrav1.PowerVSProcessorTypeDedicated)) + case infrav1.PowerVSProcessorTypeShared: + out.ProcType = strings.ToLower(string(infrav1.PowerVSProcessorTypeShared)) + case infrav1.PowerVSProcessorTypeCapped: + out.ProcType = strings.ToLower(string(infrav1.PowerVSProcessorTypeCapped)) } return autoConvert_v1beta2_IBMPowerVSMachineSpec_To_v1beta1_IBMPowerVSMachineSpec(in, out, s) } -func Convert_v1beta2_IBMPowerVSClusterSpec_To_v1beta1_IBMPowerVSClusterSpec(in *infrav1beta2.IBMPowerVSClusterSpec, out *IBMPowerVSClusterSpec, s apiconversion.Scope) error { +func Convert_v1beta2_IBMPowerVSClusterSpec_To_v1beta1_IBMPowerVSClusterSpec(in *infrav1.IBMPowerVSClusterSpec, out *IBMPowerVSClusterSpec, s apiconversion.Scope) error { if in.ServiceInstance != nil && in.ServiceInstance.ID != nil { out.ServiceInstanceID = *in.ServiceInstance.ID } return autoConvert_v1beta2_IBMPowerVSClusterSpec_To_v1beta1_IBMPowerVSClusterSpec(in, out, s) } -func Convert_v1beta2_IBMPowerVSClusterStatus_To_v1beta1_IBMPowerVSClusterStatus(in *infrav1beta2.IBMPowerVSClusterStatus, out *IBMPowerVSClusterStatus, s apiconversion.Scope) error { +func Convert_v1beta2_IBMPowerVSClusterStatus_To_v1beta1_IBMPowerVSClusterStatus(in *infrav1.IBMPowerVSClusterStatus, out *IBMPowerVSClusterStatus, s apiconversion.Scope) error { return 
autoConvert_v1beta2_IBMPowerVSClusterStatus_To_v1beta1_IBMPowerVSClusterStatus(in, out, s) } -func Convert_v1beta2_IBMPowerVSImageSpec_To_v1beta1_IBMPowerVSImageSpec(in *infrav1beta2.IBMPowerVSImageSpec, out *IBMPowerVSImageSpec, s apiconversion.Scope) error { +func Convert_v1beta2_IBMPowerVSImageSpec_To_v1beta1_IBMPowerVSImageSpec(in *infrav1.IBMPowerVSImageSpec, out *IBMPowerVSImageSpec, s apiconversion.Scope) error { return autoConvert_v1beta2_IBMPowerVSImageSpec_To_v1beta1_IBMPowerVSImageSpec(in, out, s) } -func Convert_v1beta2_IBMPowerVSMachineStatus_To_v1beta1_IBMPowerVSMachineStatus(in *infrav1beta2.IBMPowerVSMachineStatus, out *IBMPowerVSMachineStatus, s apiconversion.Scope) error { +func Convert_v1beta2_IBMPowerVSMachineStatus_To_v1beta1_IBMPowerVSMachineStatus(in *infrav1.IBMPowerVSMachineStatus, out *IBMPowerVSMachineStatus, s apiconversion.Scope) error { // CAPI V1Beta2 was added in CAPIBM v1beta2 return autoConvert_v1beta2_IBMPowerVSMachineStatus_To_v1beta1_IBMPowerVSMachineStatus(in, out, s) } + +func Convert_v1beta2_IBMPowerVSImageStatus_To_v1beta1_IBMPowerVSImageStatus(in *infrav1.IBMPowerVSImageStatus, out *IBMPowerVSImageStatus, s apiconversion.Scope) error { + return autoConvert_v1beta2_IBMPowerVSImageStatus_To_v1beta1_IBMPowerVSImageStatus(in, out, s) +} diff --git a/api/v1beta1/ibmpowervscluster_types.go b/api/v1beta1/ibmpowervscluster_types.go index 8b0bdbcf8..f616d8a31 100644 --- a/api/v1beta1/ibmpowervscluster_types.go +++ b/api/v1beta1/ibmpowervscluster_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -44,7 +44,7 @@ type IBMPowerVSClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. 
// +optional - ControlPlaneEndpoint capiv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` } // IBMPowerVSClusterStatus defines the observed state of IBMPowerVSCluster. diff --git a/api/v1beta1/ibmpowervsclustertemplate_types.go b/api/v1beta1/ibmpowervsclustertemplate_types.go index f83136e2c..0f99113c9 100644 --- a/api/v1beta1/ibmpowervsclustertemplate_types.go +++ b/api/v1beta1/ibmpowervsclustertemplate_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // IBMPowerVSClusterTemplateSpec defines the desired state of IBMPowerVSClusterTemplate. @@ -53,8 +53,8 @@ type IBMPowerVSClusterTemplateResource struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta capiv1beta1.ObjectMeta `json:"metadata,omitempty"` - Spec IBMPowerVSClusterSpec `json:"spec"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` + Spec IBMPowerVSClusterSpec `json:"spec"` } func init() { diff --git a/api/v1beta1/ibmpowervsimage_types.go b/api/v1beta1/ibmpowervsimage_types.go index 1c8386ada..535c0d7cb 100644 --- a/api/v1beta1/ibmpowervsimage_types.go +++ b/api/v1beta1/ibmpowervsimage_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -51,7 +51,7 @@ type IBMPowerVSImageSpec struct { // Type of storage, storage pool with the most available space will be selected. 
// +kubebuilder:default=tier1 - // +kubebuilder:validation:Enum=tier1;tier3 + // +kubebuilder:validation:Enum=tier0;tier1;tier3 // +optional StorageType string `json:"storageType,omitempty"` @@ -82,7 +82,7 @@ type IBMPowerVSImageStatus struct { // Conditions defines current service state of the IBMPowerVSImage. // +optional - Conditions capiv1beta1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } //+kubebuilder:object:root=true @@ -100,12 +100,12 @@ type IBMPowerVSImage struct { } // GetConditions returns the observations of the operational state of the IBMPowerVSImage resource. -func (r *IBMPowerVSImage) GetConditions() capiv1beta1.Conditions { +func (r *IBMPowerVSImage) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the IBMPowerVSImage to the predescribed clusterv1.Conditions. -func (r *IBMPowerVSImage) SetConditions(conditions capiv1beta1.Conditions) { +// SetConditions sets the underlying service state of the IBMPowerVSImage to the predescribed clusterv1beta1.Conditions. +func (r *IBMPowerVSImage) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta1/ibmpowervsmachine_types.go b/api/v1beta1/ibmpowervsmachine_types.go index 09c5eaa2e..1f09acb80 100644 --- a/api/v1beta1/ibmpowervsmachine_types.go +++ b/api/v1beta1/ibmpowervsmachine_types.go @@ -20,7 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -164,7 +164,7 @@ type IBMPowerVSMachineStatus struct { // Conditions defines current service state of the IBMPowerVSMachine. 
// +optional - Conditions capiv1beta1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // Region specifies the Power VS Service instance region. Region *string `json:"region,omitempty"` @@ -194,12 +194,12 @@ type IBMPowerVSMachine struct { } // GetConditions returns the observations of the operational state of the IBMPowerVSMachine resource. -func (r *IBMPowerVSMachine) GetConditions() capiv1beta1.Conditions { +func (r *IBMPowerVSMachine) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the IBMPowerVSMachine to the predescribed clusterv1.Conditions. -func (r *IBMPowerVSMachine) SetConditions(conditions capiv1beta1.Conditions) { +// SetConditions sets the underlying service state of the IBMPowerVSMachine to the predescribed clusterv1beta1.Conditions. +func (r *IBMPowerVSMachine) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta1/ibmvpc_conversion.go b/api/v1beta1/ibmvpc_conversion.go index a839f00ac..dda591423 100644 --- a/api/v1beta1/ibmvpc_conversion.go +++ b/api/v1beta1/ibmvpc_conversion.go @@ -23,54 +23,54 @@ import ( utilconversion "sigs.k8s.io/cluster-api/util/conversion" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" ) func (src *IBMVPCCluster) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMVPCCluster) + dst := dstRaw.(*infrav1.IBMVPCCluster) return Convert_v1beta1_IBMVPCCluster_To_v1beta2_IBMVPCCluster(src, dst, nil) } func (dst *IBMVPCCluster) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMVPCCluster) + src := srcRaw.(*infrav1.IBMVPCCluster) return Convert_v1beta2_IBMVPCCluster_To_v1beta1_IBMVPCCluster(src, dst, nil) } func (src *IBMVPCClusterList) ConvertTo(dstRaw conversion.Hub) error { - dst := 
dstRaw.(*infrav1beta2.IBMVPCClusterList) + dst := dstRaw.(*infrav1.IBMVPCClusterList) return Convert_v1beta1_IBMVPCClusterList_To_v1beta2_IBMVPCClusterList(src, dst, nil) } func (dst *IBMVPCClusterList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMVPCClusterList) + src := srcRaw.(*infrav1.IBMVPCClusterList) return Convert_v1beta2_IBMVPCClusterList_To_v1beta1_IBMVPCClusterList(src, dst, nil) } func (src *IBMVPCMachine) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMVPCMachine) + dst := dstRaw.(*infrav1.IBMVPCMachine) if err := Convert_v1beta1_IBMVPCMachine_To_v1beta2_IBMVPCMachine(src, dst, nil); err != nil { return err } if src.Spec.Image != "" { - dst.Spec.Image = &infrav1beta2.IBMVPCResourceReference{ + dst.Spec.Image = &infrav1.IBMVPCResourceReference{ ID: &src.Spec.Image, } } if src.Spec.ImageName != "" { - dst.Spec.Image = &infrav1beta2.IBMVPCResourceReference{ + dst.Spec.Image = &infrav1.IBMVPCResourceReference{ Name: &src.Spec.ImageName, } } for _, sshKey := range src.Spec.SSHKeyNames { - dst.Spec.SSHKeys = append(dst.Spec.SSHKeys, &infrav1beta2.IBMVPCResourceReference{ + dst.Spec.SSHKeys = append(dst.Spec.SSHKeys, &infrav1.IBMVPCResourceReference{ Name: sshKey, }) } @@ -79,7 +79,7 @@ func (src *IBMVPCMachine) ConvertTo(dstRaw conversion.Hub) error { } func (dst *IBMVPCMachine) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMVPCMachine) + src := srcRaw.(*infrav1.IBMVPCMachine) if err := Convert_v1beta2_IBMVPCMachine_To_v1beta1_IBMVPCMachine(src, dst, nil); err != nil { return err @@ -108,38 +108,38 @@ func (dst *IBMVPCMachine) ConvertFrom(srcRaw conversion.Hub) error { } func (src *IBMVPCMachineList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMVPCMachineList) + dst := dstRaw.(*infrav1.IBMVPCMachineList) return Convert_v1beta1_IBMVPCMachineList_To_v1beta2_IBMVPCMachineList(src, dst, nil) } func (dst *IBMVPCMachineList) ConvertFrom(srcRaw 
conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMVPCMachineList) + src := srcRaw.(*infrav1.IBMVPCMachineList) return Convert_v1beta2_IBMVPCMachineList_To_v1beta1_IBMVPCMachineList(src, dst, nil) } func (src *IBMVPCMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMVPCMachineTemplate) + dst := dstRaw.(*infrav1.IBMVPCMachineTemplate) if err := Convert_v1beta1_IBMVPCMachineTemplate_To_v1beta2_IBMVPCMachineTemplate(src, dst, nil); err != nil { return err } if src.Spec.Template.Spec.Image != "" { - dst.Spec.Template.Spec.Image = &infrav1beta2.IBMVPCResourceReference{ + dst.Spec.Template.Spec.Image = &infrav1.IBMVPCResourceReference{ ID: &src.Spec.Template.Spec.Image, } } if src.Spec.Template.Spec.ImageName != "" { - dst.Spec.Template.Spec.Image = &infrav1beta2.IBMVPCResourceReference{ + dst.Spec.Template.Spec.Image = &infrav1.IBMVPCResourceReference{ Name: &src.Spec.Template.Spec.ImageName, } } for _, sshKey := range src.Spec.Template.Spec.SSHKeyNames { - dst.Spec.Template.Spec.SSHKeys = append(dst.Spec.Template.Spec.SSHKeys, &infrav1beta2.IBMVPCResourceReference{ + dst.Spec.Template.Spec.SSHKeys = append(dst.Spec.Template.Spec.SSHKeys, &infrav1.IBMVPCResourceReference{ Name: sshKey, }) } @@ -148,7 +148,7 @@ func (src *IBMVPCMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { } func (dst *IBMVPCMachineTemplate) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMVPCMachineTemplate) + src := srcRaw.(*infrav1.IBMVPCMachineTemplate) if err := Convert_v1beta2_IBMVPCMachineTemplate_To_v1beta1_IBMVPCMachineTemplate(src, dst, nil); err != nil { return err @@ -177,39 +177,39 @@ func (dst *IBMVPCMachineTemplate) ConvertFrom(srcRaw conversion.Hub) error { } func (src *IBMVPCMachineTemplateList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*infrav1beta2.IBMVPCMachineTemplateList) + dst := dstRaw.(*infrav1.IBMVPCMachineTemplateList) return 
Convert_v1beta1_IBMVPCMachineTemplateList_To_v1beta2_IBMVPCMachineTemplateList(src, dst, nil) } func (dst *IBMVPCMachineTemplateList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*infrav1beta2.IBMVPCMachineTemplateList) + src := srcRaw.(*infrav1.IBMVPCMachineTemplateList) return Convert_v1beta2_IBMVPCMachineTemplateList_To_v1beta1_IBMVPCMachineTemplateList(src, dst, nil) } -func Convert_v1beta1_IBMVPCMachineSpec_To_v1beta2_IBMVPCMachineSpec(in *IBMVPCMachineSpec, out *infrav1beta2.IBMVPCMachineSpec, s apiconversion.Scope) error { +func Convert_v1beta1_IBMVPCMachineSpec_To_v1beta2_IBMVPCMachineSpec(in *IBMVPCMachineSpec, out *infrav1.IBMVPCMachineSpec, s apiconversion.Scope) error { return autoConvert_v1beta1_IBMVPCMachineSpec_To_v1beta2_IBMVPCMachineSpec(in, out, s) } -func Convert_v1beta2_IBMVPCMachineSpec_To_v1beta1_IBMVPCMachineSpec(in *infrav1beta2.IBMVPCMachineSpec, out *IBMVPCMachineSpec, s apiconversion.Scope) error { +func Convert_v1beta2_IBMVPCMachineSpec_To_v1beta1_IBMVPCMachineSpec(in *infrav1.IBMVPCMachineSpec, out *IBMVPCMachineSpec, s apiconversion.Scope) error { return autoConvert_v1beta2_IBMVPCMachineSpec_To_v1beta1_IBMVPCMachineSpec(in, out, s) } -func Convert_v1beta2_IBMVPCMachineTemplateStatus_To_v1beta1_IBMVPCMachineTemplateStatus(in *infrav1beta2.IBMVPCMachineTemplateStatus, out *IBMVPCMachineTemplateStatus, s apiconversion.Scope) error { +func Convert_v1beta2_IBMVPCMachineTemplateStatus_To_v1beta1_IBMVPCMachineTemplateStatus(in *infrav1.IBMVPCMachineTemplateStatus, out *IBMVPCMachineTemplateStatus, s apiconversion.Scope) error { return autoConvert_v1beta2_IBMVPCMachineTemplateStatus_To_v1beta1_IBMVPCMachineTemplateStatus(in, out, s) } -func Convert_Slice_Pointer_string_To_Slice_Pointer_v1beta2_IBMVPCResourceReference(in *[]*string, out *[]*infrav1beta2.IBMVPCResourceReference, _ apiconversion.Scope) error { +func Convert_Slice_Pointer_string_To_Slice_Pointer_v1beta2_IBMVPCResourceReference(in *[]*string, out 
*[]*infrav1.IBMVPCResourceReference, _ apiconversion.Scope) error { for _, sshKey := range *in { - *out = append(*out, &infrav1beta2.IBMVPCResourceReference{ + *out = append(*out, &infrav1.IBMVPCResourceReference{ ID: sshKey, }) } return nil } -func Convert_Slice_Pointer_v1beta2_IBMVPCResourceReference_To_Slice_Pointer_string(in *[]*infrav1beta2.IBMVPCResourceReference, out *[]*string, _ apiconversion.Scope) error { +func Convert_Slice_Pointer_v1beta2_IBMVPCResourceReference_To_Slice_Pointer_string(in *[]*infrav1.IBMVPCResourceReference, out *[]*string, _ apiconversion.Scope) error { if in != nil { for _, sshKey := range *in { if sshKey.ID != nil { @@ -220,22 +220,22 @@ func Convert_Slice_Pointer_v1beta2_IBMVPCResourceReference_To_Slice_Pointer_stri return nil } -func Convert_v1beta2_VPCLoadBalancerSpec_To_v1beta1_VPCLoadBalancerSpec(in *infrav1beta2.VPCLoadBalancerSpec, out *VPCLoadBalancerSpec, s apiconversion.Scope) error { +func Convert_v1beta2_VPCLoadBalancerSpec_To_v1beta1_VPCLoadBalancerSpec(in *infrav1.VPCLoadBalancerSpec, out *VPCLoadBalancerSpec, s apiconversion.Scope) error { return autoConvert_v1beta2_VPCLoadBalancerSpec_To_v1beta1_VPCLoadBalancerSpec(in, out, s) } -func Convert_v1beta2_IBMVPCClusterSpec_To_v1beta1_IBMVPCClusterSpec(in *infrav1beta2.IBMVPCClusterSpec, out *IBMVPCClusterSpec, s apiconversion.Scope) error { +func Convert_v1beta2_IBMVPCClusterSpec_To_v1beta1_IBMVPCClusterSpec(in *infrav1.IBMVPCClusterSpec, out *IBMVPCClusterSpec, s apiconversion.Scope) error { return autoConvert_v1beta2_IBMVPCClusterSpec_To_v1beta1_IBMVPCClusterSpec(in, out, s) } -func Convert_v1beta2_IBMVPCClusterStatus_To_v1beta1_IBMVPCClusterStatus(in *infrav1beta2.IBMVPCClusterStatus, out *IBMVPCClusterStatus, s apiconversion.Scope) error { +func Convert_v1beta2_IBMVPCClusterStatus_To_v1beta1_IBMVPCClusterStatus(in *infrav1.IBMVPCClusterStatus, out *IBMVPCClusterStatus, s apiconversion.Scope) error { return 
autoConvert_v1beta2_IBMVPCClusterStatus_To_v1beta1_IBMVPCClusterStatus(in, out, s) } -func Convert_v1beta2_IBMVPCMachineStatus_To_v1beta1_IBMVPCMachineStatus(in *infrav1beta2.IBMVPCMachineStatus, out *IBMVPCMachineStatus, s apiconversion.Scope) error { +func Convert_v1beta2_IBMVPCMachineStatus_To_v1beta1_IBMVPCMachineStatus(in *infrav1.IBMVPCMachineStatus, out *IBMVPCMachineStatus, s apiconversion.Scope) error { return autoConvert_v1beta2_IBMVPCMachineStatus_To_v1beta1_IBMVPCMachineStatus(in, out, s) } -func Convert_v1beta2_NetworkInterface_To_v1beta1_NetworkInterface(in *infrav1beta2.NetworkInterface, out *NetworkInterface, s apiconversion.Scope) error { +func Convert_v1beta2_NetworkInterface_To_v1beta1_NetworkInterface(in *infrav1.NetworkInterface, out *NetworkInterface, s apiconversion.Scope) error { return autoConvert_v1beta2_NetworkInterface_To_v1beta1_NetworkInterface(in, out, s) } diff --git a/api/v1beta1/ibmvpccluster_types.go b/api/v1beta1/ibmvpccluster_types.go index 813b3df7b..a0a3017f9 100644 --- a/api/v1beta1/ibmvpccluster_types.go +++ b/api/v1beta1/ibmvpccluster_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -49,7 +49,7 @@ type IBMVPCClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint capiv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior. // +optional @@ -83,7 +83,7 @@ type IBMVPCClusterStatus struct { // Conditions defines current service state of the load balancer. 
// +optional - Conditions capiv1beta1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // VPC holds the VPC information. @@ -121,11 +121,11 @@ func init() { } // GetConditions returns the observations of the operational state of the IBMVPCCluster resource. -func (r *IBMVPCCluster) GetConditions() capiv1beta1.Conditions { +func (r *IBMVPCCluster) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the IBMVPCCluster to the predescribed clusterv1.Conditions. -func (r *IBMVPCCluster) SetConditions(conditions capiv1beta1.Conditions) { +// SetConditions sets the underlying service state of the IBMVPCCluster to the predescribed clusterv1beta1.Conditions. +func (r *IBMVPCCluster) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index ffeb92e41..1752c066e 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -28,7 +28,7 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" v1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { @@ -138,11 +138,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.IBMPowerVSImageStatus)(nil), (*IBMPowerVSImageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_IBMPowerVSImageStatus_To_v1beta1_IBMPowerVSImageStatus(a.(*v1beta2.IBMPowerVSImageStatus), b.(*IBMPowerVSImageStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*IBMPowerVSMachine)(nil), (*v1beta2.IBMPowerVSMachine)(nil), func(a, b 
interface{}, scope conversion.Scope) error { return Convert_v1beta1_IBMPowerVSMachine_To_v1beta2_IBMPowerVSMachine(a.(*IBMPowerVSMachine), b.(*v1beta2.IBMPowerVSMachine), scope) }); err != nil { @@ -413,6 +408,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.IBMPowerVSImageStatus)(nil), (*IBMPowerVSImageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_IBMPowerVSImageStatus_To_v1beta1_IBMPowerVSImageStatus(a.(*v1beta2.IBMPowerVSImageStatus), b.(*IBMPowerVSImageStatus), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.IBMPowerVSMachineSpec)(nil), (*IBMPowerVSMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_IBMPowerVSMachineSpec_To_v1beta1_IBMPowerVSMachineSpec(a.(*v1beta2.IBMPowerVSMachineSpec), b.(*IBMPowerVSMachineSpec), scope) }); err != nil { @@ -821,7 +821,7 @@ func autoConvert_v1beta1_IBMPowerVSImageStatus_To_v1beta2_IBMPowerVSImageStatus( out.ImageID = in.ImageID out.ImageState = v1beta2.PowerVSImageState(in.ImageState) out.JobID = in.JobID - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -835,15 +835,11 @@ func autoConvert_v1beta2_IBMPowerVSImageStatus_To_v1beta1_IBMPowerVSImageStatus( out.ImageID = in.ImageID out.ImageState = PowerVSImageState(in.ImageState) out.JobID = in.JobID - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + // WARNING: in.V1Beta2 requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta2_IBMPowerVSImageStatus_To_v1beta1_IBMPowerVSImageStatus is an autogenerated conversion function. 
-func Convert_v1beta2_IBMPowerVSImageStatus_To_v1beta1_IBMPowerVSImageStatus(in *v1beta2.IBMPowerVSImageStatus, out *IBMPowerVSImageStatus, s conversion.Scope) error { - return autoConvert_v1beta2_IBMPowerVSImageStatus_To_v1beta1_IBMPowerVSImageStatus(in, out, s) -} - func autoConvert_v1beta1_IBMPowerVSMachine_To_v1beta2_IBMPowerVSMachine(in *IBMPowerVSMachine, out *v1beta2.IBMPowerVSMachine, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_IBMPowerVSMachineSpec_To_v1beta2_IBMPowerVSMachineSpec(&in.Spec, &out.Spec, s); err != nil { @@ -960,7 +956,7 @@ func autoConvert_v1beta1_IBMPowerVSMachineStatus_To_v1beta2_IBMPowerVSMachineSta out.Fault = in.Fault out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) out.Region = (*string)(unsafe.Pointer(in.Region)) out.Zone = (*string)(unsafe.Pointer(in.Zone)) return nil @@ -980,7 +976,7 @@ func autoConvert_v1beta2_IBMPowerVSMachineStatus_To_v1beta1_IBMPowerVSMachineSta out.Fault = in.Fault out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) out.Region = (*string)(unsafe.Pointer(in.Region)) out.Zone = (*string)(unsafe.Pointer(in.Zone)) // WARNING: in.V1Beta2 requires manual conversion: does not exist in peer-type @@ -1282,7 +1278,7 @@ func autoConvert_v1beta1_IBMVPCClusterStatus_To_v1beta2_IBMVPCClusterStatus(in * return err } out.ControlPlaneLoadBalancerState = v1beta2.VPCLoadBalancerState(in.ControlPlaneLoadBalancerState) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = 
*(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1306,7 +1302,8 @@ func autoConvert_v1beta2_IBMVPCClusterStatus_To_v1beta1_IBMVPCClusterStatus(in * return err } out.ControlPlaneLoadBalancerState = VPCLoadBalancerState(in.ControlPlaneLoadBalancerState) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + // WARNING: in.V1Beta2 requires manual conversion: does not exist in peer-type return nil } @@ -1443,6 +1440,7 @@ func autoConvert_v1beta2_IBMVPCMachineStatus_To_v1beta1_IBMVPCMachineStatus(in * // WARNING: in.FailureMessage requires manual conversion: does not exist in peer-type out.InstanceStatus = in.InstanceStatus // WARNING: in.LoadBalancerPoolMembers requires manual conversion: does not exist in peer-type + // WARNING: in.V1Beta2 requires manual conversion: does not exist in peer-type return nil } diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 95204009b..134be0c1d 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -23,7 +23,7 @@ package v1beta1 import ( "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -302,7 +302,7 @@ func (in *IBMPowerVSImageStatus) DeepCopyInto(out *IBMPowerVSImageStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -429,7 +429,7 @@ func (in *IBMPowerVSMachineStatus) DeepCopyInto(out *IBMPowerVSMachineStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -687,7 +687,7 @@ func (in *IBMVPCClusterStatus) DeepCopyInto(out *IBMVPCClusterStatus) { in.VPCEndpoint.DeepCopyInto(&out.VPCEndpoint) if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/api/v1beta2/conditions_consts.go b/api/v1beta2/conditions_consts.go index 1b8390e3c..7777739d0 100644 --- a/api/v1beta2/conditions_consts.go +++ b/api/v1beta2/conditions_consts.go @@ -17,26 +17,55 @@ limitations under the License. package v1beta2 import ( - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // IBMPowerVSMachine's InstanceReady condition and corresponding reasons that will be used in v1Beta2 API version. const ( // IBMPowerVSMachineReadyV1Beta2Condition is true if the IBMPowerVSMachine's deletionTimestamp is not set, IBMPowerVSMachine's // IBMPowerVSMachineInstanceReadyV1Beta2Condition is true. - IBMPowerVSMachineReadyV1Beta2Condition = capiv1beta1.ReadyV1Beta2Condition + IBMPowerVSMachineReadyV1Beta2Condition = clusterv1beta1.ReadyV1Beta2Condition // IBMPowerVSMachineReadyV1Beta2Reason surfaces when the IBMPowerVSMachine readiness criteria is met. 
- IBMPowerVSMachineReadyV1Beta2Reason = capiv1beta1.ReadyV1Beta2Reason + IBMPowerVSMachineReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // IBMPowerVSMachineNotReadyV1Beta2Reason surfaces when the IBMPowerVSMachine readiness criteria is not met. - IBMPowerVSMachineNotReadyV1Beta2Reason = capiv1beta1.NotReadyV1Beta2Reason + IBMPowerVSMachineNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // IBMPowerVSMachineReadyUnknownV1Beta2Reason surfaces when at least one IBMPowerVSMachine readiness criteria is unknown // and no IBMPowerVSMachine readiness criteria is not met. - IBMPowerVSMachineReadyUnknownV1Beta2Reason = capiv1beta1.ReadyUnknownV1Beta2Reason + IBMPowerVSMachineReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason ) +const ( + // IBMVPCMachineReadyV1Beta2Condition is true if the IBMVPCMachine's deletionTimestamp is not set, IBMVPCMachine's + // IBMVPCMachineInstanceReadyV1Beta2Condition is true. + IBMVPCMachineReadyV1Beta2Condition = clusterv1beta1.ReadyV1Beta2Condition + + // IBMVPCMachineReadyV1Beta2Reason surfaces when the IBMVPCMachine readiness criteria is met. + IBMVPCMachineReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason + + // IBMVPCMachineNotReadyV1Beta2Reason surfaces when the IBMVPCMachine readiness criteria is not met. + IBMVPCMachineNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason + + // IBMVPCMachineReadyUnknownV1Beta2Reason surfaces when at least one IBMVPCMachine readiness criteria is unknown. + IBMVPCMachineReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason +) + +// IBMVPCMachine's InstanceReady condition and corresponding reasons that will be used in v1Beta2 API version. +const ( + // IBMVPCMachineInstanceReadyV1Beta2Condition documents the status of the instance that is controlled + // by the IBMVPCMachine. 
+ IBMVPCMachineInstanceReadyV1Beta2Condition = "InstanceReady" + + // IBMVPCMachineInstanceReadyV1Beta2Reason surfaces when the instance that is controlled + // by the IBMVPCMachine is ready. + IBMVPCMachineInstanceReadyV1Beta2Reason = "InstanceReady" + + // IBMVPCMachineInstanceNotReadyV1Beta2Reason surfaces when the instance that is controlled + // by the IBMVPCMachine is not ready. + IBMVPCMachineInstanceNotReadyV1Beta2Reason = "InstanceNotReady" +) const ( // IBMPowerVSMachineInstanceReadyV1Beta2Condition documents the status of the instance that is controlled // by the IBMPowerVSMachine. @@ -52,21 +81,21 @@ const ( // IBMPowerVSMachineInstanceWaitingForClusterInfrastructureReadyV1Beta2Reason documents the virtual machine that is controller by // IBMPowerVSMachine waiting for the cluster infrastructure to be ready. - // TODO: Use when CAPI version is updated: IBMPowerVSMachineInstanceWaitingForClusterInfrastructureReadyV1Beta2Reason = capiv1beta1.WaitingForClusterInfrastructureReadyV1Beta2Reason. + // TODO: Use when CAPI version is updated: IBMPowerVSMachineInstanceWaitingForClusterInfrastructureReadyV1Beta2Reason = clusterv1beta1.WaitingForClusterInfrastructureReadyV1Beta2Reason. IBMPowerVSMachineInstanceWaitingForClusterInfrastructureReadyV1Beta2Reason = "WaitingForClusterInfrastructureReady" // IBMPowerVSMachineInstanceWaitingForControlPlaneInitializedV1Beta2Reason documents the virtual machine that is controller by IBMPowerVSMachine waiting // for the control plane to be initialized. - // TODO: Use when CAPI version is updated: IBMPowerVSMachineInstanceWaitingForControlPlaneInitializedV1Beta2Reason = capiv1beta1.WaitingForControlPlaneInitializedV1Beta2Reason. + // TODO: Use when CAPI version is updated: IBMPowerVSMachineInstanceWaitingForControlPlaneInitializedV1Beta2Reason = clusterv1beta1.WaitingForControlPlaneInitializedV1Beta2Reason. 
IBMPowerVSMachineInstanceWaitingForControlPlaneInitializedV1Beta2Reason = "WaitingForControlPlaneInitialized" // IBMPowerVSMachineInstanceWaitingForBootstrapDataV1Beta2Reason documents the virtual machine that is controller by IBMPowerVSMachine waiting for the bootstrap // data to be ready. - // TODO: Use when CAPI version is updated: IBMPowerVSMachineInstanceWaitingForBootstrapDataV1Beta2Reason = capiv1beta1.WaitingForBootstrapDataV1Beta2Reason. + // TODO: Use when CAPI version is updated: IBMPowerVSMachineInstanceWaitingForBootstrapDataV1Beta2Reason = clusterv1beta1.WaitingForBootstrapDataV1Beta2Reason. IBMPowerVSMachineInstanceWaitingForBootstrapDataV1Beta2Reason = "WaitingForBootstrapData" // IBMPowerVSMachineInstanceDeletingV1Beta2Reason surfaces when the virtual machine controller by IBMPowerVSMachine is deleting. - IBMPowerVSMachineInstanceDeletingV1Beta2Reason = capiv1beta1.DeletingV1Beta2Reason + IBMPowerVSMachineInstanceDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason // IBMPowerVSMachineInstanceLoadBalancerConfigurationFailedV1Beta2Reason surfaces when configuring the virtual machine IP to load balancer fails. IBMPowerVSMachineInstanceLoadBalancerConfigurationFailedV1Beta2Reason = "LoadBalancerConfigurationFailed" @@ -95,13 +124,16 @@ const ( // InstanceNotReadyReason used when the instance is in a not ready state. InstanceNotReadyReason = "InstanceNotReady" + // InstanceDeletingReason is used when the instance is in deleting state. + InstanceDeletingReason = "InstanceDeleting" + // InstanceStateUnknownReason used when the instance is in a unknown state. InstanceStateUnknownReason = "InstanceStateUnknown" ) const ( // InstanceReadyCondition reports on current status of the instance. Ready indicates the instance is in a Running state. 
- InstanceReadyCondition capiv1beta1.ConditionType = "InstanceReady" + InstanceReadyCondition clusterv1beta1.ConditionType = "InstanceReady" ) const ( @@ -110,7 +142,7 @@ const ( ) const ( - // ImageNotReadyReason used when the image is in a queued state. + // ImageNotReadyReason used when the image is not ready. ImageNotReadyReason = "ImageNotReady" // ImageImportFailedReason used when the image import is failed. @@ -118,14 +150,20 @@ const ( // ImageReconciliationFailedReason used when an error occurs during VPC Custom Image reconciliation. ImageReconciliationFailedReason = "ImageReconciliationFailed" + + // ImageQueuedReason used when the image is in queued state. + ImageQueuedReason = "ImageQueued" ) const ( // ImageReadyCondition reports on current status of the image. Ready indicates the image is in a active state. - ImageReadyCondition capiv1beta1.ConditionType = "ImageReady" + ImageReadyCondition clusterv1beta1.ConditionType = "ImageReady" // ImageImportedCondition reports on current status of the image import job. Ready indicates the import job is finished. - ImageImportedCondition capiv1beta1.ConditionType = "ImageImported" + ImageImportedCondition clusterv1beta1.ConditionType = "ImageImported" + + // IBMPowerVSImageDeletingV1Beta2Reason surfaces when the image is in deleting state. + IBMPowerVSImageDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) const ( @@ -135,42 +173,42 @@ const ( const ( // ServiceInstanceReadyCondition reports on the successful reconciliation of a Power VS workspace. - ServiceInstanceReadyCondition capiv1beta1.ConditionType = "ServiceInstanceReady" + ServiceInstanceReadyCondition clusterv1beta1.ConditionType = "ServiceInstanceReady" // ServiceInstanceReconciliationFailedReason used when an error occurs during workspace reconciliation. ServiceInstanceReconciliationFailedReason = "ServiceInstanceReconciliationFailed" // NetworkReadyCondition reports on the successful reconciliation of a Power VS network. 
- NetworkReadyCondition capiv1beta1.ConditionType = "NetworkReady" + NetworkReadyCondition clusterv1beta1.ConditionType = "NetworkReady" // NetworkReconciliationFailedReason used when an error occurs during network reconciliation. NetworkReconciliationFailedReason = "NetworkReconciliationFailed" // VPCSecurityGroupReadyCondition reports on the successful reconciliation of a VPC. - VPCSecurityGroupReadyCondition capiv1beta1.ConditionType = "VPCSecurityGroupReady" + VPCSecurityGroupReadyCondition clusterv1beta1.ConditionType = "VPCSecurityGroupReady" // VPCSecurityGroupReconciliationFailedReason used when an error occurs during VPC reconciliation. VPCSecurityGroupReconciliationFailedReason = "VPCSecurityGroupReconciliationFailed" // VPCReadyCondition reports on the successful reconciliation of a VPC. - VPCReadyCondition capiv1beta1.ConditionType = "VPCReady" + VPCReadyCondition clusterv1beta1.ConditionType = "VPCReady" // VPCReconciliationFailedReason used when an error occurs during VPC reconciliation. VPCReconciliationFailedReason = "VPCReconciliationFailed" // VPCSubnetReadyCondition reports on the successful reconciliation of a VPC subnet. - VPCSubnetReadyCondition capiv1beta1.ConditionType = "VPCSubnetReady" + VPCSubnetReadyCondition clusterv1beta1.ConditionType = "VPCSubnetReady" // VPCSubnetReconciliationFailedReason used when an error occurs during VPC subnet reconciliation. VPCSubnetReconciliationFailedReason = "VPCSubnetReconciliationFailed" // TransitGatewayReadyCondition reports on the successful reconciliation of a Power VS transit gateway. - TransitGatewayReadyCondition capiv1beta1.ConditionType = "TransitGatewayReady" + TransitGatewayReadyCondition clusterv1beta1.ConditionType = "TransitGatewayReady" // TransitGatewayReconciliationFailedReason used when an error occurs during transit gateway reconciliation. 
TransitGatewayReconciliationFailedReason = "TransitGatewayReconciliationFailed" // LoadBalancerReadyCondition reports on the successful reconciliation of a Power VS network. - LoadBalancerReadyCondition capiv1beta1.ConditionType = "LoadBalancerReady" + LoadBalancerReadyCondition clusterv1beta1.ConditionType = "LoadBalancerReady" // LoadBalancerReconciliationFailedReason used when an error occurs during loadbalancer reconciliation. LoadBalancerReconciliationFailedReason = "LoadBalancerReconciliationFailed" // COSInstanceReadyCondition reports on the successful reconciliation of a COS instance. - COSInstanceReadyCondition capiv1beta1.ConditionType = "COSInstanceCreated" + COSInstanceReadyCondition clusterv1beta1.ConditionType = "COSInstanceCreated" // COSInstanceReconciliationFailedReason used when an error occurs during COS instance reconciliation. COSInstanceReconciliationFailedReason = "COSInstanceCreationFailed" ) @@ -185,17 +223,34 @@ const ( const ( // IBMPowerVSClusterReadyV1Beta2Condition is true if the IBMPowerVSCluster's deletionTimestamp is not set, IBMPowerVSCluster's // FailureDomainsReady, VCenterAvailable and ClusterModulesReady conditions are true. - IBMPowerVSClusterReadyV1Beta2Condition = capiv1beta1.ReadyV1Beta2Condition + IBMPowerVSClusterReadyV1Beta2Condition = clusterv1beta1.ReadyV1Beta2Condition // IBMPowerVSClusterReadyV1Beta2Reason surfaces when the IBMPowerVSCluster readiness criteria is met. - IBMPowerVSClusterReadyV1Beta2Reason = capiv1beta1.ReadyV1Beta2Reason + IBMPowerVSClusterReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // IBMPowerVSClusterNotReadyV1Beta2Reason surfaces when the IBMPowerVSCluster readiness criteria is not met. 
- IBMPowerVSClusterNotReadyV1Beta2Reason = capiv1beta1.NotReadyV1Beta2Reason + IBMPowerVSClusterNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // IBMPowerVSClusterReadyUnknownV1Beta2Reason surfaces when at least one of the IBMPowerVSCluster readiness criteria is unknown // and none of the IBMPowerVSCluster readiness criteria is met. - IBMPowerVSClusterReadyUnknownV1Beta2Reason = capiv1beta1.ReadyUnknownV1Beta2Reason + IBMPowerVSClusterReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason +) + +// IBMVPCCluster's Ready condition and corresponding reasons that will be used in v1Beta2 API version. +const ( + // IBMVPCClusterReadyV1Beta2Condition is true if the IBMVPCCluster's deletionTimestamp is not set and IBMVPCCluster's + // conditions are true. + IBMVPCClusterReadyV1Beta2Condition = clusterv1beta1.ReadyV1Beta2Condition + + // IBMVPCClusterReadyV1Beta2Reason surfaces when the IBMVPCCluster readiness criteria is met. + IBMVPCClusterReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason + + // IBMVPCClusterNotReadyV1Beta2Reason surfaces when the IBMVPCCluster readiness criteria is not met. + IBMVPCClusterNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason + + // IBMVPCClusterReadyUnknownV1Beta2Reason surfaces when at least one of the IBMVPCCluster readiness criteria is unknown + // and none of the IBMVPCCluster readiness criteria is met. + IBMVPCClusterReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason ) const ( @@ -203,95 +258,123 @@ const ( WorkspaceReadyV1Beta2Condition = "WorkspaceReady" // WorkspaceReadyV1Beta2Reason surfaces when the PowerVS workspace is ready. - WorkspaceReadyV1Beta2Reason = capiv1beta1.ReadyV1Beta2Reason + WorkspaceReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // WorkspaceNotReadyV1Beta2Reason surfaces when PowerVS workspace is not ready. 
- WorkspaceNotReadyV1Beta2Reason = capiv1beta1.NotReadyV1Beta2Reason + WorkspaceNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // WorkspaceDeletingV1Beta2Reason surfaces when the PowerVS workspace is being deleted. - WorkspaceDeletingV1Beta2Reason = capiv1beta1.DeletingV1Beta2Reason + WorkspaceDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason // NetworkReadyV1Beta2Condition reports on the successful reconciliation of a PowerVS network. NetworkReadyV1Beta2Condition = "NetworkReady" // NetworkReadyV1Beta2Reason surfaces when PowerVS workspace is ready. - NetworkReadyV1Beta2Reason = capiv1beta1.ReadyV1Beta2Reason + NetworkReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // NetworkNotReadyV1Beta2Reason surfaces when the PowerVS network is not ready. - NetworkNotReadyV1Beta2Reason = capiv1beta1.NotReadyV1Beta2Reason + NetworkNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // NetworkDeletingV1Beta2Reason surfaces when the PowerVS network is being deleted. - NetworkDeletingV1Beta2Reason = capiv1beta1.DeletingV1Beta2Reason + NetworkDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason // VPCReadyV1Beta2Condition reports on the successful reconciliation of a VPC. VPCReadyV1Beta2Condition = "VPCReady" // VPCReadyV1Beta2Reason surfaces when the VPC is ready. - VPCReadyV1Beta2Reason = capiv1beta1.ReadyV1Beta2Reason + VPCReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VPCNotReadyV1Beta2Reason surfaces when VPC is not ready. - VPCNotReadyV1Beta2Reason = capiv1beta1.NotReadyV1Beta2Reason + VPCNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VPCDeletingV1Beta2Reason surfaces when the VPC is being deleted. - VPCDeletingV1Beta2Reason = capiv1beta1.DeletingV1Beta2Reason + VPCDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason // VPCSubnetReadyV1Beta2Condition reports on the successful reconciliation of a VPC subnet. 
VPCSubnetReadyV1Beta2Condition = "VPCSubnetReady" // VPCSubnetReadyV1Beta2Reason surfaces when the VPC subnet is ready. - VPCSubnetReadyV1Beta2Reason = capiv1beta1.ReadyV1Beta2Reason + VPCSubnetReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VPCSubnetNotReadyV1Beta2Reason surfaces when VPC subnet is not ready. - VPCSubnetNotReadyV1Beta2Reason = capiv1beta1.NotReadyV1Beta2Reason + VPCSubnetNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VPCSubnetDeletingV1Beta2Reason surfaces when the VPC subnet is being deleted. - VPCSubnetDeletingV1Beta2Reason = capiv1beta1.DeletingV1Beta2Reason + VPCSubnetDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason // VPCSecurityGroupReadyV1Beta2Condition reports on the successful reconciliation of a VPC Security Group. VPCSecurityGroupReadyV1Beta2Condition = "VPCSecurityGroupReady" // VPCSecurityGroupReadyV1Beta2Reason surfaces when the VPC security group is ready. - VPCSecurityGroupReadyV1Beta2Reason = capiv1beta1.ReadyV1Beta2Reason + VPCSecurityGroupReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VPCSecurityGroupNotReadyV1Beta2Reason surfaces when VPC security group is not ready. - VPCSecurityGroupNotReadyV1Beta2Reason = capiv1beta1.NotReadyV1Beta2Reason + VPCSecurityGroupNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VPCSecurityGroupDeletingV1Beta2Reason surfaces when the VPC security group is being deleted. - VPCSecurityGroupDeletingV1Beta2Reason = capiv1beta1.DeletingV1Beta2Reason + VPCSecurityGroupDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason // TransitGatewayReadyV1Beta2Condition reports on the successful reconciliation of a transit gateway. TransitGatewayReadyV1Beta2Condition = "TransitGatewayReady" // TransitGatewayReadyV1Beta2Reason surfaces when the transit gateway is ready. 
- TransitGatewayReadyV1Beta2Reason = capiv1beta1.ReadyV1Beta2Reason + TransitGatewayReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // TransitGatewayNotReadyV1Beta2Reason surfaces when the transit gateway is not ready. - TransitGatewayNotReadyV1Beta2Reason = capiv1beta1.NotReadyV1Beta2Reason + TransitGatewayNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // TransitGatewayDeletingV1Beta2Reason surfaces when the transit gateway is being deleted. - TransitGatewayDeletingV1Beta2Reason = capiv1beta1.DeletingV1Beta2Reason + TransitGatewayDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason // VPCLoadBalancerReadyV1Beta2Condition reports on the successful reconciliation of a VPC LoadBalancer. VPCLoadBalancerReadyV1Beta2Condition = "LoadBalancerReady" // VPCLoadBalancerReadyV1Beta2Reason surfaces when the VPC LoadBalancer is ready. - VPCLoadBalancerReadyV1Beta2Reason = capiv1beta1.ReadyV1Beta2Reason + VPCLoadBalancerReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VPCLoadBalancerNotReadyV1Beta2Reason surfaces when VPC LoadBalancer is not ready. - VPCLoadBalancerNotReadyV1Beta2Reason = capiv1beta1.NotReadyV1Beta2Reason + VPCLoadBalancerNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VPCLoadBalancerDeletingV1Beta2Reason surfaces when the VPC LoadBalancer is being deleted. - VPCLoadBalancerDeletingV1Beta2Reason = capiv1beta1.DeletingV1Beta2Reason + VPCLoadBalancerDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason + + // VPCImageReadyV1Beta2Condition reports on the successful reconciliation of a VPC custom image. + VPCImageReadyV1Beta2Condition = "VPCImageReady" + + // VPCImageReadyV1Beta2Reason surfaces when the VPC custom image is ready. + VPCImageReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason + + // VPCImageNotReadyV1Beta2Reason surfaces when the VPC custom image is not ready. 
+ VPCImageNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // COSInstanceReadyV1Beta2Condition reports on the successful reconciliation of a COS instance. COSInstanceReadyV1Beta2Condition = "COSInstanceReady" // COSInstanceReadyV1Beta2Reason surfaces when the COS instance is ready. - COSInstanceReadyV1Beta2Reason = capiv1beta1.ReadyV1Beta2Reason + COSInstanceReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // COSInstanceNotReadyV1Beta2Reason surfaces when the COS instance is not ready. - COSInstanceNotReadyV1Beta2Reason = capiv1beta1.NotReadyV1Beta2Reason + COSInstanceNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // COSInstanceDeletingV1Beta2Reason surfaces when the COS instance is being deleted. - COSInstanceDeletingV1Beta2Reason = capiv1beta1.DeletingV1Beta2Reason + COSInstanceDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason +) + +// IBMPowerVSImage's Ready condition and corresponding reasons that will be used in v1Beta2 API version. +const ( + // IBMPowerVSImageReadyCondition is true if the IBMPowerVSImage's deletionTimestamp is not set, IBMPowerVSImage's IBMPowerVSImageReadyV1Beta2Condition is true. + IBMPowerVSImageReadyCondition = clusterv1beta1.ReadyV1Beta2Condition + + // IBMPowerVSImageReadyV1Beta2Condition documents the Ready status of the image. + IBMPowerVSImageReadyV1Beta2Condition = "ImageReady" + + // IBMPowerVSImageReadyV1Beta2Reason surfaces when the IBMPowerVSImage readiness criteria is met. + IBMPowerVSImageReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason + + // IBMPowerVSImageNotReadyV1Beta2Reason surfaces when the IBMPowerVSImage readiness criteria is not met. + IBMPowerVSImageNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason + + // IBMPowerVSImageReadyUnknownV1Beta2Reason surfaces when at least one of the IBMPowerVSImage readiness criteria is unknown + // and none of the IBMPowerVSImage readiness criteria is met. 
+ IBMPowerVSImageReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason ) diff --git a/api/v1beta2/ibmpowervscluster_types.go b/api/v1beta2/ibmpowervscluster_types.go index 6d6b69acf..bebd4d516 100644 --- a/api/v1beta2/ibmpowervscluster_types.go +++ b/api/v1beta2/ibmpowervscluster_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -54,7 +54,7 @@ type IBMPowerVSClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint capiv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // serviceInstance is the reference to the Power VS server workspace on which the server instance(VM) will be created. // Power VS server workspace is a container for all Power VS instances at a specific geographic region. @@ -231,7 +231,7 @@ type IBMPowerVSClusterStatus struct { LoadBalancers map[string]VPCLoadBalancerStatus `json:"loadBalancers,omitempty"` // Conditions defines current service state of the IBMPowerVSCluster. - Conditions capiv1beta1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // v1beta2 groups all the fields that will be added or modified in IBMPowerVSCluster's status with the V1Beta2 version. // +optional @@ -335,12 +335,12 @@ type CosInstance struct { } // GetConditions returns the observations of the operational state of the IBMPowerVSCluster resource. 
-func (r *IBMPowerVSCluster) GetConditions() capiv1beta1.Conditions { +func (r *IBMPowerVSCluster) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the IBMPowerVSCluster to the predescribed clusterv1.Conditions. -func (r *IBMPowerVSCluster) SetConditions(conditions capiv1beta1.Conditions) { +// SetConditions sets the underlying service state of the IBMPowerVSCluster to the predescribed clusterv1beta1.Conditions. +func (r *IBMPowerVSCluster) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta2/ibmpowervsclustertemplate_types.go b/api/v1beta2/ibmpowervsclustertemplate_types.go index 202a6b188..b1b375a1f 100644 --- a/api/v1beta2/ibmpowervsclustertemplate_types.go +++ b/api/v1beta2/ibmpowervsclustertemplate_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // IBMPowerVSClusterTemplateSpec defines the desired state of IBMPowerVSClusterTemplate. @@ -54,8 +54,8 @@ type IBMPowerVSClusterTemplateResource struct { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta capiv1beta1.ObjectMeta `json:"metadata,omitempty"` - Spec IBMPowerVSClusterSpec `json:"spec"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` + Spec IBMPowerVSClusterSpec `json:"spec"` } func init() { diff --git a/api/v1beta2/ibmpowervsimage_types.go b/api/v1beta2/ibmpowervsimage_types.go index 4431e5fa0..2cd33c8e3 100644 --- a/api/v1beta2/ibmpowervsimage_types.go +++ b/api/v1beta2/ibmpowervsimage_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -62,7 +62,7 @@ type IBMPowerVSImageSpec struct { // Type of storage, storage pool with the most available space will be selected. // +kubebuilder:default=tier1 - // +kubebuilder:validation:Enum=tier1;tier3 + // +kubebuilder:validation:Enum=tier0;tier1;tier3 // +optional StorageType string `json:"storageType,omitempty"` @@ -93,7 +93,22 @@ type IBMPowerVSImageStatus struct { // Conditions defines current service state of the IBMPowerVSImage. // +optional - Conditions capiv1beta1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` + + // v1beta2 groups all the fields that will be added or modified in IBMPowerVSCluster's status with the V1Beta2 version. + // +optional + V1Beta2 *IBMPowerVSImageV1Beta2Status `json:"v1beta2,omitempty"` +} + +// IBMPowerVSImageV1Beta2Status groups all the fields that will be added or modified in IBMPowerVSCluster with the V1Beta2 version. +// See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more context. 
+type IBMPowerVSImageV1Beta2Status struct { + // conditions represents the observations of a DevCluster's current state. + // +optional + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=32 + Conditions []metav1.Condition `json:"conditions,omitempty"` } //+kubebuilder:object:root=true @@ -112,15 +127,31 @@ type IBMPowerVSImage struct { } // GetConditions returns the observations of the operational state of the IBMPowerVSImage resource. -func (r *IBMPowerVSImage) GetConditions() capiv1beta1.Conditions { +func (r *IBMPowerVSImage) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the IBMPowerVSImage to the predescribed clusterv1.Conditions. -func (r *IBMPowerVSImage) SetConditions(conditions capiv1beta1.Conditions) { +// SetConditions sets the underlying service state of the IBMPowerVSImage to the predescribed clusterv1beta1.Conditions. +func (r *IBMPowerVSImage) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } +// GetV1Beta2Conditions returns the set of conditions for this object. +func (r *IBMPowerVSImage) GetV1Beta2Conditions() []metav1.Condition { + if r.Status.V1Beta2 == nil { + return nil + } + return r.Status.V1Beta2.Conditions +} + +// SetV1Beta2Conditions sets conditions for an API object. +func (r *IBMPowerVSImage) SetV1Beta2Conditions(conditions []metav1.Condition) { + if r.Status.V1Beta2 == nil { + r.Status.V1Beta2 = &IBMPowerVSImageV1Beta2Status{} + } + r.Status.V1Beta2.Conditions = conditions +} + //+kubebuilder:object:root=true // IBMPowerVSImageList contains a list of IBMPowerVSImage. 
diff --git a/api/v1beta2/ibmpowervsmachine_types.go b/api/v1beta2/ibmpowervsmachine_types.go index 523e2b805..2e12ff208 100644 --- a/api/v1beta2/ibmpowervsmachine_types.go +++ b/api/v1beta2/ibmpowervsmachine_types.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -220,7 +220,7 @@ type IBMPowerVSMachineStatus struct { // Conditions defines current service state of the IBMPowerVSMachine. // +optional - Conditions capiv1beta1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // Region specifies the Power VS Service instance region. Region *string `json:"region,omitempty"` @@ -267,12 +267,12 @@ type IBMPowerVSMachine struct { } // GetConditions returns the observations of the operational state of the IBMPowerVSMachine resource. -func (r *IBMPowerVSMachine) GetConditions() capiv1beta1.Conditions { +func (r *IBMPowerVSMachine) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the IBMPowerVSMachine to the predescribed clusterv1.Conditions. -func (r *IBMPowerVSMachine) SetConditions(conditions capiv1beta1.Conditions) { +// SetConditions sets the underlying service state of the IBMPowerVSMachine to the predescribed clusterv1beta1.Conditions. 
+func (r *IBMPowerVSMachine) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta2/ibmvpccluster_types.go b/api/v1beta2/ibmvpccluster_types.go index 60caf354d..848f7f570 100644 --- a/api/v1beta2/ibmvpccluster_types.go +++ b/api/v1beta2/ibmvpccluster_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -49,7 +49,7 @@ type IBMVPCClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint capiv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior. // Use this for legacy support, use Network.LoadBalancers for the extended VPC support. @@ -127,6 +127,11 @@ type AdditionalListenerSpec struct { // Will default to TCP protocol if not specified. // +optional Protocol *VPCLoadBalancerListenerProtocol `json:"protocol,omitempty"` + + // The selector is used to find IBMPowerVSMachines with matching labels. + // If the label matches, the machine is then added to the load balancer listener configuration. + // +kubebuilder:validation:Optional + Selector metav1.LabelSelector `json:"selector,omitempty"` } // VPCLoadBalancerBackendPoolSpec defines the desired configuration of a VPC Load Balancer Backend Pool. @@ -314,7 +319,22 @@ type IBMVPCClusterStatus struct { // Conditions defines current service state of the load balancer. 
// +optional - Conditions capiv1beta1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` + + // V1beta2 groups all the fields that will be added or modified in IBMVPCCluster's status with the V1Beta2 version. + // +optional + V1Beta2 *IBMVPCClusterV1Beta2Status `json:"v1beta2,omitempty"` +} + +// IBMVPCClusterV1Beta2Status groups all the fields that will be added or modified in IBMVPCClusterStatus with the V1Beta2 version. +// See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more context. +type IBMVPCClusterV1Beta2Status struct { + // Conditions represents the observations of a IBMVPCCluster's current state. + // +optional + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=32 + Conditions []metav1.Condition `json:"conditions,omitempty"` } // VPCNetworkStatus provides details on the status of VPC network resources for extended VPC Infrastructure support. @@ -386,15 +406,31 @@ type IBMVPCClusterList struct { } // GetConditions returns the observations of the operational state of the IBMVPCCluster resource. -func (r *IBMVPCCluster) GetConditions() capiv1beta1.Conditions { +func (r *IBMVPCCluster) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the IBMVPCCluster to the predescribed clusterv1.Conditions. -func (r *IBMVPCCluster) SetConditions(conditions capiv1beta1.Conditions) { +// SetConditions sets the underlying service state of the IBMVPCCluster to the predescribed clusterv1beta1.Conditions. +func (r *IBMVPCCluster) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } +// GetV1Beta2Conditions returns the set of conditions for IBMVPCCluster object. 
+func (r *IBMVPCCluster) GetV1Beta2Conditions() []metav1.Condition { + if r.Status.V1Beta2 == nil { + return nil + } + return r.Status.V1Beta2.Conditions +} + +// SetV1Beta2Conditions sets conditions for IBMVPCCluster object. +func (r *IBMVPCCluster) SetV1Beta2Conditions(conditions []metav1.Condition) { + if r.Status.V1Beta2 == nil { + r.Status.V1Beta2 = &IBMVPCClusterV1Beta2Status{} + } + r.Status.V1Beta2.Conditions = conditions +} + func init() { objectTypes = append(objectTypes, &IBMVPCCluster{}, &IBMVPCClusterList{}) } diff --git a/api/v1beta2/ibmvpcclustertemplate_types.go b/api/v1beta2/ibmvpcclustertemplate_types.go index 855920c0c..e018d719f 100644 --- a/api/v1beta2/ibmvpcclustertemplate_types.go +++ b/api/v1beta2/ibmvpcclustertemplate_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // IBMVPCClusterTemplateSpec defines the desired state of IBMVPCClusterTemplate. @@ -53,8 +53,8 @@ type IBMVPCClusterTemplateResource struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta capiv1beta1.ObjectMeta `json:"metadata,omitempty"` - Spec IBMVPCClusterSpec `json:"spec"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` + Spec IBMVPCClusterSpec `json:"spec"` } func init() { diff --git a/api/v1beta2/ibmvpcmachine_types.go b/api/v1beta2/ibmvpcmachine_types.go index df3aae0db..065ed282d 100644 --- a/api/v1beta2/ibmvpcmachine_types.go +++ b/api/v1beta2/ibmvpcmachine_types.go @@ -20,7 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck ) // NOTE: json tags are required. 
Any new fields you add must have json tags for the fields to be serialized. @@ -154,7 +154,7 @@ type IBMVPCMachineStatus struct { // Conditions deefines current service state of the IBMVPCMachine. // +optional - Conditions capiv1beta1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // FailureReason will be set in the event that there is a terminal problem // reconciling the Machine and will contain a succinct value suitable @@ -175,6 +175,21 @@ type IBMVPCMachineStatus struct { // LoadBalancerPoolMembers is the status of IBM Cloud VPC Load Balancer Backend Pools the machine is a member. // +optional LoadBalancerPoolMembers []VPCLoadBalancerBackendPoolMember `json:"loadBalancerPoolMembers,omitempty"` + + // V1beta2 groups all the fields that will be added or modified in IBMVPCMachine's status with the V1Beta2 version. + // +optional + V1Beta2 *IBMVPCMachineV1Beta2Status `json:"v1beta2,omitempty"` +} + +// IBMVPCMachineV1Beta2Status groups all the fields that will be added or modified in IBMVPCMachineStatus with the V1Beta2 version. +// See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more context. +type IBMVPCMachineV1Beta2Status struct { + // Conditions represents the observations of a IBMVPCMachine's current state. + // +optional + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=32 + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -193,12 +208,12 @@ type IBMVPCMachine struct { } // GetConditions returns the observations of the operational state of the IBMVPCMachine resource. -func (r *IBMVPCMachine) GetConditions() capiv1beta1.Conditions { +func (r *IBMVPCMachine) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the IBMVPCMachine to the predescribed clusterv1.Conditions. 
-func (r *IBMVPCMachine) SetConditions(conditions capiv1beta1.Conditions) { +// SetConditions sets the underlying service state of the IBMVPCMachine to the predescribed clusterv1beta1.Conditions. +func (r *IBMVPCMachine) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } @@ -211,6 +226,22 @@ type IBMVPCMachineList struct { Items []IBMVPCMachine `json:"items"` } +// GetV1Beta2Conditions returns the set of conditions for IBMVPCMachine object. +func (r *IBMVPCMachine) GetV1Beta2Conditions() []metav1.Condition { + if r.Status.V1Beta2 == nil { + return nil + } + return r.Status.V1Beta2.Conditions +} + +// SetV1Beta2Conditions sets conditions for IBMVPCMachine object. +func (r *IBMVPCMachine) SetV1Beta2Conditions(conditions []metav1.Condition) { + if r.Status.V1Beta2 == nil { + r.Status.V1Beta2 = &IBMVPCMachineV1Beta2Status{} + } + r.Status.V1Beta2.Conditions = conditions +} + func init() { objectTypes = append(objectTypes, &IBMVPCMachine{}, &IBMVPCMachineList{}) } diff --git a/api/v1beta2/types.go b/api/v1beta2/types.go index 4ebd4da6a..237a7cf37 100644 --- a/api/v1beta2/types.go +++ b/api/v1beta2/types.go @@ -56,14 +56,17 @@ var ( // PowerVSImageStateACTIVE is the string representing an image in a active state. PowerVSImageStateACTIVE = PowerVSImageState("active") - // PowerVSImageStateQue is the string representing an image in a queued state. - PowerVSImageStateQue = PowerVSImageState("queued") + // PowerVSImageStateQueued is the string representing an image in a queued state. + PowerVSImageStateQueued = PowerVSImageState("queued") // PowerVSImageStateFailed is the string representing an image in a failed state. PowerVSImageStateFailed = PowerVSImageState("failed") // PowerVSImageStateImporting is the string representing an image in a failed state. PowerVSImageStateImporting = PowerVSImageState("importing") + + // PowerVSImageStateCompleted is the string representing an image in a completed state. 
+ PowerVSImageStateCompleted = PowerVSImageState("completed") ) // ServiceInstanceState describes the state of a service instance. diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index b0ba240ba..faad44717 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -40,6 +40,7 @@ func (in *AdditionalListenerSpec) DeepCopyInto(out *AdditionalListenerSpec) { *out = new(VPCLoadBalancerListenerProtocol) **out = **in } + in.Selector.DeepCopyInto(&out.Selector) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalListenerSpec. @@ -594,6 +595,11 @@ func (in *IBMPowerVSImageStatus) DeepCopyInto(out *IBMPowerVSImageStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.V1Beta2 != nil { + in, out := &in.V1Beta2, &out.V1Beta2 + *out = new(IBMPowerVSImageV1Beta2Status) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMPowerVSImageStatus. @@ -606,6 +612,28 @@ func (in *IBMPowerVSImageStatus) DeepCopy() *IBMPowerVSImageStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMPowerVSImageV1Beta2Status) DeepCopyInto(out *IBMPowerVSImageV1Beta2Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMPowerVSImageV1Beta2Status. 
+func (in *IBMPowerVSImageV1Beta2Status) DeepCopy() *IBMPowerVSImageV1Beta2Status { + if in == nil { + return nil + } + out := new(IBMPowerVSImageV1Beta2Status) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IBMPowerVSMachine) DeepCopyInto(out *IBMPowerVSMachine) { *out = *in @@ -1037,6 +1065,11 @@ func (in *IBMVPCClusterStatus) DeepCopyInto(out *IBMVPCClusterStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.V1Beta2 != nil { + in, out := &in.V1Beta2, &out.V1Beta2 + *out = new(IBMVPCClusterV1Beta2Status) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMVPCClusterStatus. @@ -1140,6 +1173,28 @@ func (in *IBMVPCClusterTemplateSpec) DeepCopy() *IBMVPCClusterTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMVPCClusterV1Beta2Status) DeepCopyInto(out *IBMVPCClusterV1Beta2Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMVPCClusterV1Beta2Status. +func (in *IBMVPCClusterV1Beta2Status) DeepCopy() *IBMVPCClusterV1Beta2Status { + if in == nil { + return nil + } + out := new(IBMVPCClusterV1Beta2Status) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *IBMVPCMachine) DeepCopyInto(out *IBMVPCMachine) { *out = *in @@ -1290,6 +1345,11 @@ func (in *IBMVPCMachineStatus) DeepCopyInto(out *IBMVPCMachineStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.V1Beta2 != nil { + in, out := &in.V1Beta2, &out.V1Beta2 + *out = new(IBMVPCMachineV1Beta2Status) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMVPCMachineStatus. @@ -1415,6 +1475,28 @@ func (in *IBMVPCMachineTemplateStatus) DeepCopy() *IBMVPCMachineTemplateStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMVPCMachineV1Beta2Status) DeepCopyInto(out *IBMVPCMachineV1Beta2Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMVPCMachineV1Beta2Status. +func (in *IBMVPCMachineV1Beta2Status) DeepCopy() *IBMVPCMachineV1Beta2Status { + if in == nil { + return nil + } + out := new(IBMVPCMachineV1Beta2Status) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *IBMVPCResourceReference) DeepCopyInto(out *IBMVPCResourceReference) { *out = *in diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go index 0ad2d0667..8556a5eea 100644 --- a/cloud/scope/cluster.go +++ b/cloud/scope/cluster.go @@ -28,15 +28,16 @@ import ( "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" - "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/utils" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/endpoints" + "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/pagingutils" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/record" ) @@ -47,8 +48,8 @@ type ClusterScopeParams struct { IBMVPCClient vpc.Vpc Client client.Client Logger logr.Logger - Cluster *capiv1beta1.Cluster - IBMVPCCluster *infrav1beta2.IBMVPCCluster + Cluster *clusterv1.Cluster + IBMVPCCluster *infrav1.IBMVPCCluster ServiceEndpoint []endpoints.ServiceEndpoint } @@ -56,11 +57,11 @@ type ClusterScopeParams struct { type ClusterScope struct { logr.Logger Client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper IBMVPCClient vpc.Vpc - Cluster *capiv1beta1.Cluster - IBMVPCCluster *infrav1beta2.IBMVPCCluster + Cluster *clusterv1.Cluster + IBMVPCCluster *infrav1.IBMVPCCluster ServiceEndpoint []endpoints.ServiceEndpoint } @@ -77,7 +78,7 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) { params.Logger = klog.Background() } - helper, err := patch.NewHelper(params.IBMVPCCluster, params.Client) + helper, err := 
v1beta1patch.NewHelper(params.IBMVPCCluster, params.Client) if err != nil { return nil, fmt.Errorf("failed to init patch helper: %w", err) } @@ -180,7 +181,7 @@ func (s *ClusterScope) ensureVPCUnique(vpcName string) (*vpcv1.VPC, error) { return true, "", nil } - if err := utils.PagingHelper(f); err != nil { + if err := pagingutils.PagingHelper(f); err != nil { return nil, err } @@ -285,7 +286,7 @@ func (s *ClusterScope) getSubnetAddrPrefix(vpcID, zone string) (string, error) { return true, "", nil } - if err := utils.PagingHelper(f); err != nil { + if err := pagingutils.PagingHelper(f); err != nil { return "", err } @@ -326,7 +327,7 @@ func (s *ClusterScope) ensureSubnetUnique(subnetName string) (*vpcv1.Subnet, err return true, "", nil } - if err := utils.PagingHelper(f); err != nil { + if err := pagingutils.PagingHelper(f); err != nil { return nil, err } @@ -334,7 +335,8 @@ func (s *ClusterScope) ensureSubnetUnique(subnetName string) (*vpcv1.Subnet, err } // DeleteSubnet deletes a subnet associated with subnet id. 
-func (s *ClusterScope) DeleteSubnet() error { +func (s *ClusterScope) DeleteSubnet(ctx context.Context) error { + log := ctrl.LoggerFrom(ctx) if s.IBMVPCCluster.Status.Subnet.ID == nil { return nil } @@ -372,12 +374,12 @@ func (s *ClusterScope) DeleteSubnet() error { return true, "", nil } - if err := utils.PagingHelper(f); err != nil { + if err := pagingutils.PagingHelper(f); err != nil { return err } if !found { - s.Logger.V(3).Info("No subnets found with ID", "Subnet ID", subnetID) + log.V(3).Info("No subnets found with ID", "subnetID", subnetID) return nil } @@ -554,7 +556,7 @@ func (s *ClusterScope) getLoadBalancerByHostname(loadBalancerHostname string) (* return true, "", nil } - if err := utils.PagingHelper(f); err != nil { + if err := pagingutils.PagingHelper(f); err != nil { return nil, err } @@ -592,7 +594,7 @@ func (s *ClusterScope) ensureLoadBalancerUnique(loadBalancerName string) (*vpcv1 return true, "", nil } - if err := utils.PagingHelper(f); err != nil { + if err := pagingutils.PagingHelper(f); err != nil { return nil, err } @@ -622,7 +624,7 @@ func (s *ClusterScope) DeleteLoadBalancer() (bool, error) { for _, lb := range loadBalancersList.LoadBalancers { if (*lb.ID) == lbipID { deleted = true - if *lb.ProvisioningStatus != string(infrav1beta2.VPCLoadBalancerStateDeletePending) { + if *lb.ProvisioningStatus != string(infrav1.VPCLoadBalancerStateDeletePending) { deleteLoadBalancerOption := &vpcv1.DeleteLoadBalancerOptions{} deleteLoadBalancerOption.SetID(lbipID) _, err := s.IBMVPCClient.DeleteLoadBalancer(deleteLoadBalancerOption) @@ -640,7 +642,7 @@ func (s *ClusterScope) DeleteLoadBalancer() (bool, error) { return true, "", nil } - if err := utils.PagingHelper(f); err != nil { + if err := pagingutils.PagingHelper(f); err != nil { return false, err } } @@ -664,11 +666,11 @@ func (s *ClusterScope) IsReady() bool { // SetLoadBalancerState will set the state for the load balancer. 
func (s *ClusterScope) SetLoadBalancerState(status string) { - s.IBMVPCCluster.Status.ControlPlaneLoadBalancerState = infrav1beta2.VPCLoadBalancerState(status) + s.IBMVPCCluster.Status.ControlPlaneLoadBalancerState = infrav1.VPCLoadBalancerState(status) } // GetLoadBalancerState will get the state for the load balancer. -func (s *ClusterScope) GetLoadBalancerState() infrav1beta2.VPCLoadBalancerState { +func (s *ClusterScope) GetLoadBalancerState() infrav1.VPCLoadBalancerState { return s.IBMVPCCluster.Status.ControlPlaneLoadBalancerState } @@ -712,8 +714,8 @@ func (s *ClusterScope) Close() error { // APIServerPort returns the APIServerPort to use when creating the ControlPlaneEndpoint. func (s *ClusterScope) APIServerPort() int32 { - if s.Cluster.Spec.ClusterNetwork != nil && s.Cluster.Spec.ClusterNetwork.APIServerPort != nil { - return *s.Cluster.Spec.ClusterNetwork.APIServerPort + if s.Cluster.Spec.ClusterNetwork.APIServerPort > 0 { + return s.Cluster.Spec.ClusterNetwork.APIServerPort } - return infrav1beta2.DefaultAPIServerPort + return infrav1.DefaultAPIServerPort } diff --git a/cloud/scope/cluster_test.go b/cloud/scope/cluster_test.go index 5344f7afb..70f34cb62 100644 --- a/cloud/scope/cluster_test.go +++ b/cloud/scope/cluster_test.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc/mock" . 
"github.com/onsi/gomega" @@ -98,8 +98,8 @@ func TestCreateVPC(t *testing.T) { return gomock.NewController(t), mock.NewMockVpc(gomock.NewController(t)) } - vpcCluster := infrav1beta2.IBMVPCCluster{ - Spec: infrav1beta2.IBMVPCClusterSpec{ + vpcCluster := infrav1.IBMVPCCluster{ + Spec: infrav1.IBMVPCClusterSpec{ Region: "foo-region", ResourceGroup: "foo-resource-group", VPC: "foo-vpc", @@ -140,8 +140,8 @@ func TestCreateVPC(t *testing.T) { mockController, mockvpc := setup(t) t.Cleanup(mockController.Finish) scope := setupClusterScope(clusterName, mockvpc) - vpcClusterCustom := infrav1beta2.IBMVPCCluster{ - Spec: infrav1beta2.IBMVPCClusterSpec{ + vpcClusterCustom := infrav1.IBMVPCCluster{ + Spec: infrav1.IBMVPCClusterSpec{ Region: "foo-region-1", ResourceGroup: "foo-resource-group-1", VPC: "foo-vpc-1", @@ -216,12 +216,12 @@ func TestDeleteVPC(t *testing.T) { return gomock.NewController(t), mock.NewMockVpc(gomock.NewController(t)) } - vpcCluster := infrav1beta2.IBMVPCCluster{ - Spec: infrav1beta2.IBMVPCClusterSpec{ + vpcCluster := infrav1.IBMVPCCluster{ + Spec: infrav1.IBMVPCClusterSpec{ VPC: "foo-vpc", }, - Status: infrav1beta2.IBMVPCClusterStatus{ - VPC: infrav1beta2.VPC{ + Status: infrav1.IBMVPCClusterStatus{ + VPC: infrav1.VPC{ ID: "foo-vpc", }, }, @@ -260,15 +260,15 @@ func TestCreateSubnet(t *testing.T) { return gomock.NewController(t), mock.NewMockVpc(gomock.NewController(t)) } - vpcCluster := infrav1beta2.IBMVPCCluster{ - Spec: infrav1beta2.IBMVPCClusterSpec{ + vpcCluster := infrav1.IBMVPCCluster{ + Spec: infrav1.IBMVPCClusterSpec{ Region: "foo-region", ResourceGroup: "foo-resource-group", VPC: "foo-vpc", Zone: "foo-zone", }, - Status: infrav1beta2.IBMVPCClusterStatus{ - VPC: infrav1beta2.VPC{ + Status: infrav1.IBMVPCClusterStatus{ + VPC: infrav1.VPC{ ID: *core.StringPtr("foo-vpc"), }, }, @@ -435,12 +435,12 @@ func TestDeleteSubnet(t *testing.T) { return gomock.NewController(t), mock.NewMockVpc(gomock.NewController(t)) } - vpcCluster := 
infrav1beta2.IBMVPCCluster{ - Spec: infrav1beta2.IBMVPCClusterSpec{ + vpcCluster := infrav1.IBMVPCCluster{ + Spec: infrav1.IBMVPCClusterSpec{ VPC: "foo-vpc", }, - Status: infrav1beta2.IBMVPCClusterStatus{ - Subnet: infrav1beta2.Subnet{ + Status: infrav1.IBMVPCClusterStatus{ + Subnet: infrav1.Subnet{ ID: core.StringPtr("foo-vpc-subnet-id"), }, }, @@ -468,7 +468,7 @@ func TestDeleteSubnet(t *testing.T) { mockvpc.EXPECT().UnsetSubnetPublicGateway(gomock.AssignableToTypeOf(&vpcv1.UnsetSubnetPublicGatewayOptions{})).Return(&core.DetailedResponse{}, nil) mockvpc.EXPECT().DeletePublicGateway(gomock.AssignableToTypeOf(&vpcv1.DeletePublicGatewayOptions{})).Return(&core.DetailedResponse{}, nil) mockvpc.EXPECT().DeleteSubnet(gomock.AssignableToTypeOf(&vpcv1.DeleteSubnetOptions{})).Return(&core.DetailedResponse{}, nil) - err := scope.DeleteSubnet() + err := scope.DeleteSubnet(ctx) g.Expect(err).To(BeNil()) }) @@ -482,7 +482,7 @@ func TestDeleteSubnet(t *testing.T) { mockvpc.EXPECT().ListSubnets(gomock.AssignableToTypeOf(&vpcv1.ListSubnetsOptions{})).Return(subnet, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetSubnetPublicGateway(gomock.AssignableToTypeOf(&vpcv1.GetSubnetPublicGatewayOptions{})).Return(publicGateway, &core.DetailedResponse{}, nil) mockvpc.EXPECT().UnsetSubnetPublicGateway(gomock.AssignableToTypeOf(&vpcv1.UnsetSubnetPublicGatewayOptions{})).Return(&core.DetailedResponse{}, errors.New("Error when unsetting publicgateway for subnet")) - err := scope.DeleteSubnet() + err := scope.DeleteSubnet(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -497,7 +497,7 @@ func TestDeleteSubnet(t *testing.T) { mockvpc.EXPECT().GetSubnetPublicGateway(gomock.AssignableToTypeOf(&vpcv1.GetSubnetPublicGatewayOptions{})).Return(publicGateway, &core.DetailedResponse{}, nil) mockvpc.EXPECT().UnsetSubnetPublicGateway(gomock.AssignableToTypeOf(&vpcv1.UnsetSubnetPublicGatewayOptions{})).Return(&core.DetailedResponse{}, nil) 
mockvpc.EXPECT().DeletePublicGateway(gomock.AssignableToTypeOf(&vpcv1.DeletePublicGatewayOptions{})).Return(&core.DetailedResponse{}, errors.New("Error when deleting publicgateway for subnet")) - err := scope.DeleteSubnet() + err := scope.DeleteSubnet(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -513,7 +513,7 @@ func TestDeleteSubnet(t *testing.T) { mockvpc.EXPECT().UnsetSubnetPublicGateway(gomock.AssignableToTypeOf(&vpcv1.UnsetSubnetPublicGatewayOptions{})).Return(&core.DetailedResponse{}, nil) mockvpc.EXPECT().DeletePublicGateway(gomock.AssignableToTypeOf(&vpcv1.DeletePublicGatewayOptions{})).Return(&core.DetailedResponse{}, nil) mockvpc.EXPECT().DeleteSubnet(gomock.AssignableToTypeOf(&vpcv1.DeleteSubnetOptions{})).Return(&core.DetailedResponse{}, errors.New("Error when deleting subnet")) - err := scope.DeleteSubnet() + err := scope.DeleteSubnet(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -525,7 +525,7 @@ func TestDeleteSubnet(t *testing.T) { scope.IBMVPCCluster.Spec = vpcCluster.Spec scope.IBMVPCCluster.Status = vpcCluster.Status mockvpc.EXPECT().ListSubnets(gomock.AssignableToTypeOf(&vpcv1.ListSubnetsOptions{})).Return(nil, &core.DetailedResponse{}, errors.New("Error listing subnets")) - err := scope.DeleteSubnet() + err := scope.DeleteSubnet(ctx) g.Expect(err).To(Not(BeNil())) }) t.Run("Subnet doesn't exist", func(t *testing.T) { @@ -536,7 +536,7 @@ func TestDeleteSubnet(t *testing.T) { scope.IBMVPCCluster.Spec = vpcCluster.Spec scope.IBMVPCCluster.Status = vpcCluster.Status mockvpc.EXPECT().ListSubnets(gomock.AssignableToTypeOf(&vpcv1.ListSubnetsOptions{})).Return(&vpcv1.SubnetCollection{Subnets: []vpcv1.Subnet{}}, &core.DetailedResponse{}, nil) - err := scope.DeleteSubnet() + err := scope.DeleteSubnet(ctx) g.Expect(err).To(BeNil()) }) }) @@ -548,14 +548,14 @@ func TestCreateLoadBalancer(t *testing.T) { return gomock.NewController(t), mock.NewMockVpc(gomock.NewController(t)) } - vpcCluster := infrav1beta2.IBMVPCCluster{ - Spec: infrav1beta2.IBMVPCClusterSpec{ - 
ControlPlaneLoadBalancer: &infrav1beta2.VPCLoadBalancerSpec{ + vpcCluster := infrav1.IBMVPCCluster{ + Spec: infrav1.IBMVPCClusterSpec{ + ControlPlaneLoadBalancer: &infrav1.VPCLoadBalancerSpec{ Name: "foo-load-balancer", }, }, - Status: infrav1beta2.IBMVPCClusterStatus{ - Subnet: infrav1beta2.Subnet{ + Status: infrav1.IBMVPCClusterStatus{ + Subnet: infrav1.Subnet{ ID: core.StringPtr("foo-subnet-id"), }, }, @@ -578,14 +578,14 @@ func TestCreateLoadBalancer(t *testing.T) { mockController, mockvpc := setup(t) t.Cleanup(mockController.Finish) scope := setupClusterScope(clusterName, mockvpc) - vpcClusterCustom := infrav1beta2.IBMVPCCluster{ - Spec: infrav1beta2.IBMVPCClusterSpec{ - ControlPlaneLoadBalancer: &infrav1beta2.VPCLoadBalancerSpec{ + vpcClusterCustom := infrav1.IBMVPCCluster{ + Spec: infrav1.IBMVPCClusterSpec{ + ControlPlaneLoadBalancer: &infrav1.VPCLoadBalancerSpec{ Name: "foo-load-balancer-1", }, }, - Status: infrav1beta2.IBMVPCClusterStatus{ - Subnet: infrav1beta2.Subnet{ + Status: infrav1.IBMVPCClusterStatus{ + Subnet: infrav1.Subnet{ ID: core.StringPtr("foo-subnet-id"), }, }, @@ -692,14 +692,14 @@ func TestDeleteLoadBalancer(t *testing.T) { return gomock.NewController(t), mock.NewMockVpc(gomock.NewController(t)) } - vpcCluster := infrav1beta2.IBMVPCCluster{ - Spec: infrav1beta2.IBMVPCClusterSpec{ - ControlPlaneLoadBalancer: &infrav1beta2.VPCLoadBalancerSpec{ + vpcCluster := infrav1.IBMVPCCluster{ + Spec: infrav1.IBMVPCClusterSpec{ + ControlPlaneLoadBalancer: &infrav1.VPCLoadBalancerSpec{ Name: "foo-load-balancer", }, }, - Status: infrav1beta2.IBMVPCClusterStatus{ - VPCEndpoint: infrav1beta2.VPCEndpoint{ + Status: infrav1.IBMVPCClusterStatus{ + VPCEndpoint: infrav1.VPCEndpoint{ LBID: core.StringPtr("foo-load-balancer-id"), }, }, diff --git a/cloud/scope/common_test.go b/cloud/scope/common_test.go index 8cbff8edb..e7c449c5e 100644 --- a/cloud/scope/common_test.go +++ b/cloud/scope/common_test.go @@ -22,9 +22,9 @@ import ( corev1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" ) const ( @@ -34,18 +34,18 @@ const ( pvsNetwork = "foo-network" ) -func newCluster(name string) *capiv1beta1.Cluster { - return &capiv1beta1.Cluster{ +func newCluster(name string) *clusterv1.Cluster { + return &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "default", }, - Spec: capiv1beta1.ClusterSpec{}, + Spec: clusterv1.ClusterSpec{}, } } -func newVPCCluster(name string) *infrav1beta2.IBMVPCCluster { - return &infrav1beta2.IBMVPCCluster{ +func newVPCCluster(name string) *infrav1.IBMVPCCluster { + return &infrav1.IBMVPCCluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "default", @@ -53,8 +53,8 @@ func newVPCCluster(name string) *infrav1beta2.IBMVPCCluster { } } -func newPowerVSCluster(name string) *infrav1beta2.IBMPowerVSCluster { - return &infrav1beta2.IBMPowerVSCluster{ +func newPowerVSCluster(name string) *infrav1.IBMPowerVSCluster { + return &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "default", @@ -62,14 +62,14 @@ func newPowerVSCluster(name string) *infrav1beta2.IBMPowerVSCluster { } } -func newMachine(machineName string) *capiv1beta1.Machine { - return &capiv1beta1.Machine{ +func newMachine(machineName string) *clusterv1.Machine { + return &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: machineName, Namespace: "default", }, - Spec: capiv1beta1.MachineSpec{ - Bootstrap: capiv1beta1.Bootstrap{ + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ DataSecretName: core.StringPtr(machineName), }, }, @@ -80,7 +80,7 @@ func newBootstrapSecret(clusterName, machineName string) *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ 
- capiv1beta1.ClusterNameLabel: clusterName, + clusterv1.ClusterNameLabel: clusterName, }, Name: machineName, Namespace: "default", diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go index ee5a36582..9e10883a2 100644 --- a/cloud/scope/machine.go +++ b/cloud/scope/machine.go @@ -33,18 +33,20 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/accounts" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/authenticator" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/globaltagging" - "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/utils" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/endpoints" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/options" + "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/pagingutils" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/record" ) @@ -53,25 +55,24 @@ type MachineScopeParams struct { IBMVPCClient vpc.Vpc Client client.Client Logger logr.Logger - Cluster *capiv1beta1.Cluster - Machine *capiv1beta1.Machine - IBMVPCCluster *infrav1beta2.IBMVPCCluster - IBMVPCMachine *infrav1beta2.IBMVPCMachine + Cluster *clusterv1.Cluster + Machine *clusterv1.Machine + IBMVPCCluster *infrav1.IBMVPCCluster + IBMVPCMachine *infrav1.IBMVPCMachine ServiceEndpoint []endpoints.ServiceEndpoint } // MachineScope defines a scope defined around a machine and its cluster. 
type MachineScope struct { - logr.Logger Client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper IBMVPCClient vpc.Vpc GlobalTaggingClient globaltagging.GlobalTagging - Cluster *capiv1beta1.Cluster - Machine *capiv1beta1.Machine - IBMVPCCluster *infrav1beta2.IBMVPCCluster - IBMVPCMachine *infrav1beta2.IBMVPCMachine + Cluster *clusterv1.Cluster + Machine *clusterv1.Machine + IBMVPCCluster *infrav1.IBMVPCCluster + IBMVPCMachine *infrav1.IBMVPCMachine ServiceEndpoint []endpoints.ServiceEndpoint } @@ -88,7 +89,7 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { params.Logger = klog.Background() } - helper, err := patch.NewHelper(params.IBMVPCMachine, params.Client) + helper, err := v1beta1patch.NewHelper(params.IBMVPCMachine, params.Client) if err != nil { return nil, fmt.Errorf("failed to init patch helper: %w", err) } @@ -127,7 +128,6 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { } return &MachineScope{ - Logger: params.Logger, Client: params.Client, IBMVPCClient: vpcClient, GlobalTaggingClient: globalTaggingClient, @@ -140,7 +140,8 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { } // CreateMachine creates a vpc machine. -func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocyclo +func (m *MachineScope) CreateMachine(ctx context.Context) (*vpcv1.Instance, error) { //nolint: gocyclo + log := ctrl.LoggerFrom(ctx) instanceReply, err := m.ensureInstanceUnique(m.IBMVPCMachine.Name) if err != nil { return nil, err @@ -270,7 +271,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy // Populate Placement target details, if provided. 
var placementTarget vpcv1.InstancePlacementTargetPrototypeIntf if m.IBMVPCMachine.Spec.PlacementTarget != nil { - placementTarget, err = m.configurePlacementTarget() + placementTarget, err = m.configurePlacementTarget(ctx) if err != nil { return nil, fmt.Errorf("error configuration machine placement target: %w", err) } @@ -280,7 +281,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy sshKeys := make([]vpcv1.KeyIdentityIntf, 0) if m.IBMVPCMachine.Spec.SSHKeys != nil { for _, sshKey := range m.IBMVPCMachine.Spec.SSHKeys { - keyID, err := fetchKeyID(sshKey, m) + keyID, err := fetchKeyID(ctx, sshKey, m) if err != nil { return nil, fmt.Errorf("error while fetching SSHKey: %v error: %v", sshKey, err) } @@ -294,7 +295,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy // Populate boot volume attachment, if provided. var bootVolumeAttachment *vpcv1.VolumeAttachmentPrototypeInstanceByImageContext if m.IBMVPCMachine.Spec.BootVolume != nil { - bootVolumeAttachment = m.volumeToVPCVolumeAttachment(m.IBMVPCMachine.Spec.BootVolume) + bootVolumeAttachment = m.volumeToVPCVolumeAttachment(ctx, m.IBMVPCMachine.Spec.BootVolume) } // Configure the Machine's Image or CatalogOffering based on provided fields. 
@@ -309,7 +310,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy VPC: vpcIdentity, Zone: zone, } - imageID, err := fetchImageID(m.IBMVPCMachine.Spec.Image, m) + imageID, err := fetchImageID(ctx, m.IBMVPCMachine.Spec.Image, m) if err != nil { record.Warnf(m.IBMVPCMachine, "FailedRetrieveImage", "Failed image retrieval - %w", err) return nil, fmt.Errorf("error while fetching image ID: %w", err) @@ -329,7 +330,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy imageInstancePrototype.BootVolumeAttachment = bootVolumeAttachment } - m.Logger.Info("machine creation configured with existing image", "machineName", m.IBMVPCMachine.Name, "imageID", *imageID) + log.Info("Machine creation configured with existing image", "imageID", *imageID) options.SetInstancePrototype(imageInstancePrototype) } else if m.IBMVPCMachine.Spec.CatalogOffering != nil { catalogInstancePrototype := &vpcv1.InstancePrototypeInstanceByCatalogOffering{ @@ -347,13 +348,13 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy catalogOfferingPrototype.Offering = &vpcv1.CatalogOfferingIdentityCatalogOfferingByCRN{ CRN: m.IBMVPCMachine.Spec.CatalogOffering.OfferingCRN, } - m.Logger.Info("machine creation configured with catalog offering", "machineName", m.IBMVPCMachine.Name, "offeringCRN", *m.IBMVPCMachine.Spec.CatalogOffering.OfferingCRN) + log.Info("Machine creation configured with catalog offering", "offeringCRN", *m.IBMVPCMachine.Spec.CatalogOffering.OfferingCRN) } else if m.IBMVPCMachine.Spec.CatalogOffering.VersionCRN != nil { // TODO(cjschaef): Perform lookup or use webhook validation to confirm Catalog Offering Version CRN. 
catalogOfferingPrototype.Version = &vpcv1.CatalogOfferingVersionIdentityCatalogOfferingVersionByCRN{ CRN: m.IBMVPCMachine.Spec.CatalogOffering.VersionCRN, } - m.Logger.Info("machine creation configured with catalog version", "machineName", m.IBMVPCMachine.Name, "versionCRN", *m.IBMVPCMachine.Spec.CatalogOffering.VersionCRN) + log.Info("Machine creation configured with catalog version", "versionCRN", *m.IBMVPCMachine.Spec.CatalogOffering.VersionCRN) } else { // TODO(cjschaef): Look to add webhook validation to ensure one is provided. return nil, fmt.Errorf("error catalog offering missing offering crn and version crn, one must be provided") @@ -363,7 +364,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy catalogOfferingPrototype.Plan = &vpcv1.CatalogOfferingVersionPlanIdentityCatalogOfferingVersionPlanByCRN{ CRN: m.IBMVPCMachine.Spec.CatalogOffering.PlanCRN, } - m.Logger.Info("machine creation configured with catalog plan", "machineName", m.IBMVPCMachine.Name, "planCRN", *m.IBMVPCMachine.Spec.CatalogOffering.PlanCRN) + log.Info("Machine creation configured with catalog plan", "planCRN", *m.IBMVPCMachine.Spec.CatalogOffering.PlanCRN) } // Configure additional fields if they were populated. 
@@ -384,7 +385,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy return nil, fmt.Errorf("error no machine image or catalog offering provided to build: %s", m.IBMVPCMachine.Spec.Name) } - m.Logger.Info("creating instance", "createOptions", options, "name", m.IBMVPCMachine.Name, "profile", *profile.Name, "resourceGroup", resourceGroupIdentity, "vpc", vpcIdentity, "zone", zone) + log.Info("Creating instance", "createOptions", options, "name", m.IBMVPCMachine.Name, "profile", *profile.Name, "resourceGroup", resourceGroupIdentity, "vpc", vpcIdentity, "zone", zone) instance, _, err := m.IBMVPCClient.CreateInstance(options) if err != nil { record.Warnf(m.IBMVPCMachine, "FailedCreateInstance", "Failed instance creation - %s, %v", options, err) @@ -395,7 +396,8 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy } // configurePlacementTarget will configure a Machine's Placement Target based on the Machine's provided configuration, if supplied. -func (m *MachineScope) configurePlacementTarget() (vpcv1.InstancePlacementTargetPrototypeIntf, error) { +func (m *MachineScope) configurePlacementTarget(ctx context.Context) (vpcv1.InstancePlacementTargetPrototypeIntf, error) { + log := ctrl.LoggerFrom(ctx) // TODO(cjschaef): We currently don't support the other placement target options (Dedicated Host Group, Placement Group), they need to be added. if m.IBMVPCMachine.Spec.PlacementTarget.DedicatedHost != nil { // Lookup Dedicated Host ID by Name if it was provided. 
@@ -412,7 +414,7 @@ func (m *MachineScope) configurePlacementTarget() (vpcv1.InstancePlacementTarget dedicatedHostID = dHost.ID } - m.Logger.Info("machine creation configured with dedicated host placement", "machineName", m.IBMVPCMachine.Name, "dedicatedHostID", *dedicatedHostID) + log.Info("Machine creation configured with dedicated host placement", "dedicatedHostID", *dedicatedHostID) return &vpcv1.InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID{ ID: dedicatedHostID, }, nil @@ -420,7 +422,8 @@ func (m *MachineScope) configurePlacementTarget() (vpcv1.InstancePlacementTarget return nil, nil } -func (m *MachineScope) volumeToVPCVolumeAttachment(volume *infrav1beta2.VPCVolume) *vpcv1.VolumeAttachmentPrototypeInstanceByImageContext { +func (m *MachineScope) volumeToVPCVolumeAttachment(ctx context.Context, volume *infrav1.VPCVolume) *vpcv1.VolumeAttachmentPrototypeInstanceByImageContext { + log := ctrl.LoggerFrom(ctx) bootVolume := &vpcv1.VolumeAttachmentPrototypeInstanceByImageContext{ DeleteVolumeOnInstanceDelete: core.BoolPtr(volume.DeleteVolumeOnInstanceDelete), Volume: &vpcv1.VolumePrototypeInstanceByImageContext{}, @@ -448,7 +451,7 @@ func (m *MachineScope) volumeToVPCVolumeAttachment(volume *infrav1beta2.VPCVolum bootVolume.Volume.EncryptionKey = &vpcv1.EncryptionKeyIdentity{ CRN: core.StringPtr(volume.EncryptionKeyCRN), } - m.Logger.Info("machine creation configured with volumn encryption key", "machineName", m.IBMVPCMachine.Name, "encryptionKeyCRN", volume.EncryptionKeyCRN) + log.Info("Machine creation configured with volumn encryption key", "encryptionKeyCRN", volume.EncryptionKeyCRN) } return bootVolume @@ -501,7 +504,7 @@ func (m *MachineScope) ensureInstanceUnique(instanceName string) (*vpcv1.Instanc return true, "", nil } - if err := utils.PagingHelper(f); err != nil { + if err := pagingutils.PagingHelper(f); err != nil { return nil, err } @@ -509,7 +512,7 @@ func (m *MachineScope) ensureInstanceUnique(instanceName 
string) (*vpcv1.Instanc } // getLoadBalancerID will return the ID of a Load Balancer. -func (m *MachineScope) getLoadBalancerID(loadBalancer *infrav1beta2.VPCResource) (*string, error) { +func (m *MachineScope) getLoadBalancerID(loadBalancer *infrav1.VPCResource) (*string, error) { // Lookup Load Balancer ID by Name if necessary if loadBalancer.ID != nil { return loadBalancer.ID, nil @@ -527,7 +530,7 @@ func (m *MachineScope) getLoadBalancerID(loadBalancer *infrav1beta2.VPCResource) } // getLoadBalancerPoolID will return the ID of a Load Balancer Pool. -func (m *MachineScope) getLoadBalancerPoolID(pool *infrav1beta2.VPCResource, loadBalancerID string) (*string, error) { +func (m *MachineScope) getLoadBalancerPoolID(pool *infrav1.VPCResource, loadBalancerID string) (*string, error) { // Lookup Load Balancer Pool ID by Name if necessary if pool.ID != nil { return pool.ID, nil @@ -545,7 +548,7 @@ func (m *MachineScope) getLoadBalancerPoolID(pool *infrav1beta2.VPCResource, loa } // ReconcileVPCLoadBalancerPoolMember reconciles a Machine's Load Balancer Pool membership. -func (m *MachineScope) ReconcileVPCLoadBalancerPoolMember(poolMember infrav1beta2.VPCLoadBalancerBackendPoolMember) (bool, error) { +func (m *MachineScope) ReconcileVPCLoadBalancerPoolMember(ctx context.Context, poolMember infrav1.VPCLoadBalancerBackendPoolMember) (bool, error) { // Collect the Machine's internal IP. internalIP := m.GetMachineInternalIP() if internalIP == nil { @@ -554,7 +557,7 @@ func (m *MachineScope) ReconcileVPCLoadBalancerPoolMember(poolMember infrav1beta } // Check if Instance is already a member of Load Balancer Backend Pool. 
- existingMember, err := m.checkVPCLoadBalancerPoolMemberExists(poolMember, internalIP) + existingMember, err := m.checkVPCLoadBalancerPoolMemberExists(ctx, poolMember, internalIP) if err != nil { return false, fmt.Errorf("error failed to check if member exists in pool") } else if existingMember != nil { @@ -567,11 +570,12 @@ func (m *MachineScope) ReconcileVPCLoadBalancerPoolMember(poolMember infrav1beta } // Otherwise, create VPC Load Balancer Backend Pool Member - return m.createVPCLoadBalancerPoolMember(poolMember, internalIP) + return m.createVPCLoadBalancerPoolMember(ctx, poolMember, internalIP) } // checkVPCLoadBalancerPoolMemberExists determines whether a Machine's Load Balancer Pool membership already exists. -func (m *MachineScope) checkVPCLoadBalancerPoolMemberExists(poolMember infrav1beta2.VPCLoadBalancerBackendPoolMember, internalIP *string) (*vpcv1.LoadBalancerPoolMember, error) { +func (m *MachineScope) checkVPCLoadBalancerPoolMemberExists(ctx context.Context, poolMember infrav1.VPCLoadBalancerBackendPoolMember, internalIP *string) (*vpcv1.LoadBalancerPoolMember, error) { + log := ctrl.LoggerFrom(ctx) loadBalancerID, err := m.getLoadBalancerID(&poolMember.LoadBalancer) if err != nil { return nil, fmt.Errorf("error checking if load balancer pool member exists: %w", err) @@ -600,7 +604,7 @@ func (m *MachineScope) checkVPCLoadBalancerPoolMemberExists(poolMember infrav1be if target, ok := member.Target.(*vpcv1.LoadBalancerPoolMemberTarget); ok { // Verify the target address matches the Machine's internal IP. 
if *target.Address == *internalIP { - m.Logger.Info("found existing load balancer pool member for machine", "machineName", m.IBMVPCMachine.Spec.Name, "internalIP", *internalIP, "poolID", *poolID, "loadBalancerID", *loadBalancerID) + log.Info("Found existing load balancer pool member for machine", "internalIP", *internalIP, "poolID", *poolID, "loadBalancerID", *loadBalancerID) return ptr.To(member), nil } } @@ -611,7 +615,8 @@ func (m *MachineScope) checkVPCLoadBalancerPoolMemberExists(poolMember infrav1be } // createVPCLoadBalancerPoolMember will create a new member within a Load Balancer Pool for the Machine's internal IP. -func (m *MachineScope) createVPCLoadBalancerPoolMember(poolMember infrav1beta2.VPCLoadBalancerBackendPoolMember, internalIP *string) (bool, error) { +func (m *MachineScope) createVPCLoadBalancerPoolMember(ctx context.Context, poolMember infrav1.VPCLoadBalancerBackendPoolMember, internalIP *string) (bool, error) { + log := ctrl.LoggerFrom(ctx) // Retrieve the Load Balancer ID. loadBalancerID, err := m.getLoadBalancerID(&poolMember.LoadBalancer) if err != nil { @@ -644,15 +649,15 @@ func (m *MachineScope) createVPCLoadBalancerPoolMember(poolMember infrav1beta2.V if err != nil { return false, fmt.Errorf("error failed creating load balancer backend pool member: %w", err) } - m.Logger.Info("created load balancer backend pool member", "instanceID", m.IBMVPCMachine.Status.InstanceID, "loadBalancerID", loadBalancerID, "loadBalancerBackendPoolID", loadBalancerBackendPoolID, "port", poolMember.Port, "loadBalancerBackendPoolMemberID", loadBalancerPoolMember.ID) + log.Info("Created load balancer backend pool member", "instanceID", m.IBMVPCMachine.Status.InstanceID, "loadBalancerID", loadBalancerID, "loadBalancerBackendPoolID", loadBalancerBackendPoolID, "port", poolMember.Port, "loadBalancerBackendPoolMemberID", loadBalancerPoolMember.ID) // Add the new pool member details to the Machine Status. 
// To prevent additional API calls, only use ID's and not Name's, as reconciliation does not rely on Name's for these resources in Status. - newMember := infrav1beta2.VPCLoadBalancerBackendPoolMember{ - LoadBalancer: infrav1beta2.VPCResource{ + newMember := infrav1.VPCLoadBalancerBackendPoolMember{ + LoadBalancer: infrav1.VPCResource{ ID: loadBalancerID, }, - Pool: infrav1beta2.VPCResource{ + Pool: infrav1.VPCResource{ ID: loadBalancerBackendPoolID, }, Port: poolMember.Port, @@ -670,7 +675,8 @@ func (m *MachineScope) createVPCLoadBalancerPoolMember(poolMember infrav1beta2.V } // CreateVPCLoadBalancerPoolMember creates a new pool member and adds it to the load balancer pool. -func (m *MachineScope) CreateVPCLoadBalancerPoolMember(internalIP *string, targetPort int64) (*vpcv1.LoadBalancerPoolMember, error) { +func (m *MachineScope) CreateVPCLoadBalancerPoolMember(ctx context.Context, internalIP *string, targetPort int64) (*vpcv1.LoadBalancerPoolMember, error) { + log := ctrl.LoggerFrom(ctx) loadBalancer, _, err := m.IBMVPCClient.GetLoadBalancer(&vpcv1.GetLoadBalancerOptions{ ID: m.IBMVPCCluster.Status.VPCEndpoint.LBID, }) @@ -678,7 +684,7 @@ func (m *MachineScope) CreateVPCLoadBalancerPoolMember(internalIP *string, targe return nil, err } - if *loadBalancer.ProvisioningStatus != string(infrav1beta2.VPCLoadBalancerStateActive) { + if *loadBalancer.ProvisioningStatus != string(infrav1.VPCLoadBalancerStateActive) { return nil, fmt.Errorf("error load balancer is not in active state") } @@ -706,7 +712,7 @@ func (m *MachineScope) CreateVPCLoadBalancerPoolMember(internalIP *string, targe if _, ok := member.Target.(*vpcv1.LoadBalancerPoolMemberTarget); ok { mtarget := member.Target.(*vpcv1.LoadBalancerPoolMemberTarget) if *mtarget.Address == *internalIP && *member.Port == targetPort { - m.Logger.V(3).Info("PoolMember already exist") + log.V(3).Info("PoolMember already exist") return nil, nil } } @@ -720,15 +726,16 @@ func (m *MachineScope) 
CreateVPCLoadBalancerPoolMember(internalIP *string, targe } // DeleteVPCLoadBalancerPoolMember deletes a pool member from the load balancer pool. -func (m *MachineScope) DeleteVPCLoadBalancerPoolMember() error { +func (m *MachineScope) DeleteVPCLoadBalancerPoolMember(ctx context.Context) error { + log := ctrl.LoggerFrom(ctx) if m.IBMVPCMachine.Status.InstanceID == "" { - m.Info("instance is not created, ignore deleting load balancer pool member") + log.Info("Instance is not created, ignore deleting load balancer pool member") return nil } // If the Machine has Load Balancer Pool Members defined in its Status (part of extended VPC Machine support), process the removal of those members versus the legacy single LB design. if len(m.IBMVPCMachine.Status.LoadBalancerPoolMembers) > 0 { - return m.deleteVPCLoadBalancerPoolMembers() + return m.deleteVPCLoadBalancerPoolMembers(ctx) } loadBalancer, _, err := m.IBMVPCClient.GetLoadBalancer(&vpcv1.GetLoadBalancerOptions{ @@ -761,7 +768,7 @@ func (m *MachineScope) DeleteVPCLoadBalancerPoolMember() error { if _, ok := member.Target.(*vpcv1.LoadBalancerPoolMemberTarget); ok { mtarget := member.Target.(*vpcv1.LoadBalancerPoolMemberTarget) if *mtarget.Address == *instance.PrimaryNetworkInterface.PrimaryIP.Address { - if *loadBalancer.ProvisioningStatus != string(infrav1beta2.VPCLoadBalancerStateActive) { + if *loadBalancer.ProvisioningStatus != string(infrav1.VPCLoadBalancerStateActive) { return fmt.Errorf("load balancer is not in active state") } @@ -782,7 +789,8 @@ func (m *MachineScope) DeleteVPCLoadBalancerPoolMember() error { // deleteVPCLoadBalancerPoolMembers provides support to delete Load Balancer Pools Members for a Machine that are tracked in the Machine's Status, which is part of the extended VPC Machine support. // This new support allows a Machine to have members in multiple Load Balancers, as defined by the Machine Spec, rather than defaulting (legacy) to the single Cluster Load Balancer. 
-func (m *MachineScope) deleteVPCLoadBalancerPoolMembers() error { +func (m *MachineScope) deleteVPCLoadBalancerPoolMembers(ctx context.Context) error { + log := ctrl.LoggerFrom(ctx) // Retrieve the Instance details immediately (without them the member cannot be safely deleted). instanceOptions := &vpcv1.GetInstanceOptions{ ID: ptr.To(m.IBMVPCMachine.Status.InstanceID), @@ -795,7 +803,7 @@ func (m *MachineScope) deleteVPCLoadBalancerPoolMembers() error { if instanceDetails.PrimaryNetworkInterface == nil || instanceDetails.PrimaryNetworkInterface.PrimaryIP == nil || instanceDetails.PrimaryNetworkInterface.PrimaryIP.Address == nil { return fmt.Errorf("error instance is missing the primary network interface IP address for load balancer pool member deletion for machine: %s", m.IBMVPCMachine.Name) } - m.Logger.V(5).Info("collected instance details for load balancer pool member deletion", "machienName", m.IBMVPCMachine.Name, "instanceID", *instanceDetails.ID, "instanceIP", *instanceDetails.PrimaryNetworkInterface.PrimaryIP.Address) + log.V(5).Info("collected instance details for load balancer pool member deletion", "machienName", m.IBMVPCMachine.Name, "instanceID", *instanceDetails.ID, "instanceIP", *instanceDetails.PrimaryNetworkInterface.PrimaryIP.Address) cleanupIncomplete := false for _, member := range m.IBMVPCMachine.Status.LoadBalancerPoolMembers { @@ -813,21 +821,21 @@ func (m *MachineScope) deleteVPCLoadBalancerPoolMembers() error { if err != nil { return fmt.Errorf("error retrieving load balancer for load balancer pool member deletion for machine %s: %w", m.IBMVPCMachine.Name, err) } - m.Logger.V(5).Info("collected load balancer for load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID) + log.V(5).Info("collected load balancer for load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID) // Lookup the Load Balancer Pool ID, if only name is 
available. loadBalancerPoolID, err := m.getLoadBalancerPoolID(ptr.To(member.Pool), *loadBalancerDetails.ID) if err != nil { return fmt.Errorf("error retrieving load balancer pool id for load balancer pool member deletion for machine %s: %w", m.IBMVPCMachine.Name, err) } - m.Logger.V(5).Info("collected load balancer pool id for load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerPoolID", *loadBalancerPoolID) + log.V(5).Info("collected load balancer pool id for load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerPoolID", *loadBalancerPoolID) listMembersOptions := &vpcv1.ListLoadBalancerPoolMembersOptions{ LoadBalancerID: loadBalancerDetails.ID, PoolID: loadBalancerPoolID, } - m.Logger.V(5).Info("list load balancer pool members options", "machineName", m.IBMVPCMachine.Name, "options", *listMembersOptions) + log.V(5).Info("list load balancer pool members options", "machineName", m.IBMVPCMachine.Name, "options", *listMembersOptions) poolMembers, _, err := m.IBMVPCClient.ListLoadBalancerPoolMembers(listMembersOptions) if err != nil { return fmt.Errorf("error retrieving load balancer pool members for load balancer pool member deletion for machine %s: %w", m.IBMVPCMachine.Name, err) @@ -840,10 +848,10 @@ func (m *MachineScope) deleteVPCLoadBalancerPoolMembers() error { continue } - m.Logger.V(3).Info("found load balancer pool member to delete", "machineName", m.IBMVPCMachine.Name, "poolMemberID", *poolMember.ID) + log.V(3).Info("found load balancer pool member to delete", "machineName", m.IBMVPCMachine.Name, "poolMemberID", *poolMember.ID) // Make LB status check now that it has been determined a change is required. 
- if *loadBalancerDetails.ProvisioningStatus != string(infrav1beta2.VPCLoadBalancerStateActive) { - m.Logger.V(5).Info("load balancer not in active status prior to load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID, "loadBalancerProvisioningStatus", *loadBalancerDetails.ProvisioningStatus) + if *loadBalancerDetails.ProvisioningStatus != string(infrav1.VPCLoadBalancerStateActive) { + log.V(5).Info("load balancer not in active status prior to load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID, "loadBalancerProvisioningStatus", *loadBalancerDetails.ProvisioningStatus) // Set flag that some cleanup was not completed, and break out of member target loop, to try next member from Machine Status. cleanupIncomplete = true break @@ -855,13 +863,13 @@ func (m *MachineScope) deleteVPCLoadBalancerPoolMembers() error { PoolID: loadBalancerPoolID, } - m.Logger.V(5).Info("delete load balancer pool member options", "machineName", m.IBMVPCMachine.Name, "options", *deleteOptions) + log.V(5).Info("delete load balancer pool member options", "machineName", m.IBMVPCMachine.Name, "options", *deleteOptions) // Delete the matching Load Balancer Pool Member. 
_, err := m.IBMVPCClient.DeleteLoadBalancerPoolMember(deleteOptions) if err != nil { return fmt.Errorf("error deleting load balancer pool member for machine: %s: %w", m.IBMVPCMachine.Name, err) } - m.Logger.V(3).Info("deleted load balancer pool member", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID, "loadBalancerPoolID", *loadBalancerPoolID, "loadBalancerPoolMemberID", *poolMember.ID) + log.V(3).Info("deleted load balancer pool member", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID, "loadBalancerPoolID", *loadBalancerPoolID, "loadBalancerPoolMemberID", *poolMember.ID) } } @@ -902,7 +910,8 @@ func (m *MachineScope) GetBootstrapData() (string, error) { return string(value), nil } -func fetchKeyID(key *infrav1beta2.IBMVPCResourceReference, m *MachineScope) (*string, error) { +func fetchKeyID(ctx context.Context, key *infrav1.IBMVPCResourceReference, m *MachineScope) (*string, error) { + log := ctrl.LoggerFrom(ctx) if key.ID == nil && key.Name == nil { return nil, fmt.Errorf("both ID and Name can't be nil") } @@ -921,8 +930,7 @@ func fetchKeyID(key *infrav1beta2.IBMVPCResourceReference, m *MachineScope) (*st keysList, _, err := m.IBMVPCClient.ListKeys(listKeysOptions) if err != nil { - m.Logger.Error(err, "Failed to get keys") - return false, "", err + return false, "", fmt.Errorf("failed to get keys: %w", err) } if keysList == nil { @@ -931,7 +939,7 @@ func fetchKeyID(key *infrav1beta2.IBMVPCResourceReference, m *MachineScope) (*st for i, ks := range keysList.Keys { if *ks.Name == *key.Name { - m.Logger.V(3).Info("Key found with ID", "Key", *ks.Name, "ID", *ks.ID) + log.V(3).Info("Key found with ID", "Key", *ks.Name, "ID", *ks.ID) k = &keysList.Keys[i] return true, "", nil } @@ -943,7 +951,7 @@ func fetchKeyID(key *infrav1beta2.IBMVPCResourceReference, m *MachineScope) (*st return true, "", nil } - if err := utils.PagingHelper(f); err != nil { + if err := pagingutils.PagingHelper(f); err != nil { return 
nil, err } @@ -954,7 +962,8 @@ func fetchKeyID(key *infrav1beta2.IBMVPCResourceReference, m *MachineScope) (*st return nil, fmt.Errorf("sshkey does not exist - failed to find Key ID") } -func fetchImageID(image *infrav1beta2.IBMVPCResourceReference, m *MachineScope) (*string, error) { +func fetchImageID(ctx context.Context, image *infrav1.IBMVPCResourceReference, m *MachineScope) (*string, error) { + log := ctrl.LoggerFrom(ctx) if image.ID == nil && image.Name == nil { return nil, fmt.Errorf("both ID and Name can't be nil") } @@ -979,8 +988,7 @@ func fetchImageID(image *infrav1beta2.IBMVPCResourceReference, m *MachineScope) imagesList, _, err := m.IBMVPCClient.ListImages(listImagesOptions) if err != nil { - m.Logger.Error(err, "Failed to get images") - return false, "", err + return false, "", fmt.Errorf("failed to get images: %w", err) } if imagesList == nil { @@ -989,7 +997,7 @@ func fetchImageID(image *infrav1beta2.IBMVPCResourceReference, m *MachineScope) for j, i := range imagesList.Images { if *image.Name == *i.Name { - m.Logger.Info("Image found with ID", "Image", *i.Name, "ID", *i.ID) + log.Info("Image found with ID", "Image", *i.Name, "ID", *i.ID) img = &imagesList.Images[j] return true, "", nil } @@ -1001,7 +1009,7 @@ func fetchImageID(image *infrav1beta2.IBMVPCResourceReference, m *MachineScope) return true, "", nil } - if err := utils.PagingHelper(f); err != nil { + if err := pagingutils.PagingHelper(f); err != nil { return nil, err } @@ -1087,10 +1095,9 @@ func (m *MachineScope) SetNotReady() { func (m *MachineScope) SetProviderID(id *string) error { // Based on the ProviderIDFormat version the providerID format will be decided. 
if options.ProviderIDFormatType(options.ProviderIDFormat) == options.ProviderIDFormatV2 { - accountID, err := utils.GetAccountIDWrapper() + accountID, err := accounts.GetAccountIDWrapper() if err != nil { - m.Logger.Error(err, "failed to get cloud account id", err.Error()) - return err + return fmt.Errorf("failed to get cloud account id: %w", err) } m.IBMVPCMachine.Spec.ProviderID = ptr.To(fmt.Sprintf("ibm://%s///%s/%s", accountID, m.Machine.Spec.ClusterName, *id)) } else { @@ -1150,8 +1157,8 @@ func (m *MachineScope) TagResource(tagName string, resourceCRN string) error { // APIServerPort returns the APIServerPort. func (m *MachineScope) APIServerPort() int32 { - if m.Cluster.Spec.ClusterNetwork != nil && m.Cluster.Spec.ClusterNetwork.APIServerPort != nil { - return *m.Cluster.Spec.ClusterNetwork.APIServerPort + if m.Cluster.Spec.ClusterNetwork.APIServerPort > 0 { + return m.Cluster.Spec.ClusterNetwork.APIServerPort } - return infrav1beta2.DefaultAPIServerPort + return infrav1.DefaultAPIServerPort } diff --git a/cloud/scope/machine_test.go b/cloud/scope/machine_test.go index 9ddf386ab..70aa9645a 100644 --- a/cloud/scope/machine_test.go +++ b/cloud/scope/machine_test.go @@ -29,25 +29,24 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog/v2" "k8s.io/utils/ptr" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" - "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/utils" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/accounts" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc/mock" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/options" . 
"github.com/onsi/gomega" ) -func newVPCMachine(clusterName, machineName string) *infrav1beta2.IBMVPCMachine { - return &infrav1beta2.IBMVPCMachine{ +func newVPCMachine(clusterName, machineName string) *infrav1.IBMVPCMachine { + return &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - capiv1beta1.ClusterNameLabel: clusterName, + clusterv1.ClusterNameLabel: clusterName, }, Name: machineName, Namespace: "default", @@ -61,13 +60,13 @@ func setupMachineScope(clusterName string, machineName string, mockvpc *mock.Moc secret := newBootstrapSecret(clusterName, machineName) vpcMachine := newVPCMachine(clusterName, machineName) vpcCluster := newVPCCluster(clusterName) - vpcCluster.Status = infrav1beta2.IBMVPCClusterStatus{ - Network: &infrav1beta2.VPCNetworkStatus{ - VPC: &infrav1beta2.ResourceStatus{ + vpcCluster.Status = infrav1.IBMVPCClusterStatus{ + Network: &infrav1.VPCNetworkStatus{ + VPC: &infrav1.ResourceStatus{ ID: "vpc-id", }, }, - ResourceGroup: &infrav1beta2.ResourceStatus{ + ResourceGroup: &infrav1.ResourceStatus{ ID: "resource-group-id", }, } @@ -79,7 +78,6 @@ func setupMachineScope(clusterName string, machineName string, mockvpc *mock.Moc client := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(initObjects...).Build() return &MachineScope{ Client: client, - Logger: klog.Background(), IBMVPCClient: mockvpc, Cluster: cluster, Machine: machine, @@ -142,7 +140,7 @@ func TestSetVPCProviderID(t *testing.T) { g := NewWithT(t) scope := setupMachineScope(clusterName, machineName, mock.NewMockVpc(gomock.NewController(t))) options.ProviderIDFormat = string("v2") - utils.GetAccountIDFunc = func() (string, error) { + accounts.GetAccountIDFunc = func() (string, error) { return "dummy-account-id", nil // Return dummy value } err := scope.SetProviderID(ptr.To(providerID)) @@ -153,7 +151,7 @@ func TestSetVPCProviderID(t *testing.T) { g := NewWithT(t) scope := setupMachineScope(clusterName, machineName, 
mock.NewMockVpc(gomock.NewController(t))) options.ProviderIDFormat = string("v2") - utils.GetAccountIDFunc = func() (string, error) { + accounts.GetAccountIDFunc = func() (string, error) { return "", errors.New("error getting accountID") // Return dummy error } err := scope.SetProviderID(ptr.To(providerID)) @@ -167,14 +165,14 @@ func TestCreateMachine(t *testing.T) { return gomock.NewController(t), mock.NewMockVpc(gomock.NewController(t)) } - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ - SSHKeys: []*infrav1beta2.IBMVPCResourceReference{ + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ + SSHKeys: []*infrav1.IBMVPCResourceReference{ { ID: core.StringPtr("foo-ssh-key-id"), }, }, - Image: &infrav1beta2.IBMVPCResourceReference{ + Image: &infrav1.IBMVPCResourceReference{ ID: core.StringPtr("foo-image-id"), }, Profile: "machine-profile", @@ -197,7 +195,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-name")}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -218,7 +216,7 @@ func TestCreateMachine(t *testing.T) { }, } mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(instanceCollection, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -229,7 +227,7 @@ func TestCreateMachine(t *testing.T) { t.Cleanup(mockController.Finish) scope 
:= setupMachineScope(clusterName, machineName, mockvpc) mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, errors.New("Error when listing instances")) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -240,7 +238,7 @@ func TestCreateMachine(t *testing.T) { scope := setupMachineScope(clusterName, machineName, mockvpc) scope.Machine.Spec.Bootstrap.DataSecretName = nil mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -251,7 +249,7 @@ func TestCreateMachine(t *testing.T) { scope := setupMachineScope(clusterName, machineName, mockvpc) scope.Machine.Spec.Bootstrap.DataSecretName = core.StringPtr("foo-secret-temp") mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -263,7 +261,7 @@ func TestCreateMachine(t *testing.T) { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - capiv1beta1.ClusterNameLabel: clusterName, + clusterv1.ClusterNameLabel: clusterName, }, Name: machineName, Namespace: "default", @@ -273,7 +271,7 @@ func TestCreateMachine(t *testing.T) { }} g.Expect(scope.Client.Update(context.Background(), secret)).To(Succeed()) mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -286,7 +284,7 @@ func TestCreateMachine(t *testing.T) { 
mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(nil, &core.DetailedResponse{}, errors.New("Failed when creating instance")) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -299,12 +297,12 @@ func TestCreateMachine(t *testing.T) { Name: core.StringPtr("foo-machine"), } scope.IBMVPCMachine.Spec = vpcMachine.Spec - scope.IBMVPCMachine.Spec.PrimaryNetworkInterface = infrav1beta2.NetworkInterface{ + scope.IBMVPCMachine.Spec.PrimaryNetworkInterface = infrav1.NetworkInterface{ Subnet: "subnet-name-1", } - scope.IBMVPCCluster.Status = infrav1beta2.IBMVPCClusterStatus{ - Network: &infrav1beta2.VPCNetworkStatus{ - ControlPlaneSubnets: map[string]*infrav1beta2.ResourceStatus{ + scope.IBMVPCCluster.Status = infrav1.IBMVPCClusterStatus{ + Network: &infrav1.VPCNetworkStatus{ + ControlPlaneSubnets: map[string]*infrav1.ResourceStatus{ "subnet-name-1": { ID: "subnet-id", }, @@ -319,7 +317,7 @@ func TestCreateMachine(t *testing.T) { // TODO(cjschaef): Enhance the mock Options parameter to validate the Network Status ControlPlaneSubnets ID was used. 
mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -333,22 +331,22 @@ func TestCreateMachine(t *testing.T) { Name: core.StringPtr("foo-machine"), } scope.IBMVPCMachine.Spec = vpcMachine.Spec - scope.IBMVPCMachine.Spec.PrimaryNetworkInterface = infrav1beta2.NetworkInterface{ - SecurityGroups: []infrav1beta2.VPCResource{ + scope.IBMVPCMachine.Spec.PrimaryNetworkInterface = infrav1.NetworkInterface{ + SecurityGroups: []infrav1.VPCResource{ { Name: core.StringPtr("security-group-1"), }, }, Subnet: "subnet-name", } - scope.IBMVPCCluster.Status = infrav1beta2.IBMVPCClusterStatus{ - Network: &infrav1beta2.VPCNetworkStatus{ - ControlPlaneSubnets: map[string]*infrav1beta2.ResourceStatus{ + scope.IBMVPCCluster.Status = infrav1.IBMVPCClusterStatus{ + Network: &infrav1.VPCNetworkStatus{ + ControlPlaneSubnets: map[string]*infrav1.ResourceStatus{ "subnet-name": { ID: "subnet-id", }, }, - SecurityGroups: map[string]*infrav1beta2.ResourceStatus{ + SecurityGroups: map[string]*infrav1.ResourceStatus{ "security-group-1": { ID: "security-group-id-1", }, @@ -363,7 +361,7 @@ func TestCreateMachine(t *testing.T) { // TODO(cjschaef): Enhance the mock Options parameter to validate the Network Status Security Group ID was used. 
mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -377,8 +375,8 @@ func TestCreateMachine(t *testing.T) { Name: core.StringPtr("foo-machine"), } scope.IBMVPCMachine.Spec = vpcMachine.Spec - scope.IBMVPCMachine.Spec.PrimaryNetworkInterface = infrav1beta2.NetworkInterface{ - SecurityGroups: []infrav1beta2.VPCResource{ + scope.IBMVPCMachine.Spec.PrimaryNetworkInterface = infrav1.NetworkInterface{ + SecurityGroups: []infrav1.VPCResource{ { Name: core.StringPtr("security-group-1"), }, @@ -394,7 +392,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().GetSecurityGroupByName("security-group-1").Return(&vpcv1.SecurityGroup{ID: core.StringPtr("security-group-id-1")}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -408,8 +406,8 @@ func TestCreateMachine(t *testing.T) { Name: core.StringPtr("foo-machine"), } scope.IBMVPCMachine.Spec = vpcMachine.Spec - scope.IBMVPCMachine.Spec.PrimaryNetworkInterface = infrav1beta2.NetworkInterface{ - SecurityGroups: []infrav1beta2.VPCResource{ + scope.IBMVPCMachine.Spec.PrimaryNetworkInterface = infrav1.NetworkInterface{ + SecurityGroups: []infrav1.VPCResource{ { ID: core.StringPtr("security-group-id-1"), }, @@ -425,7 +423,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().GetSecurityGroup(gomock.AssignableToTypeOf(&vpcv1.GetSecurityGroupOptions{})).Return(&vpcv1.SecurityGroup{ID: core.StringPtr("security-group-id-1")}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, 
&core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -439,9 +437,9 @@ func TestCreateMachine(t *testing.T) { Name: core.StringPtr("foo-machine"), } scope.IBMVPCMachine.Spec = vpcMachine.Spec - scope.IBMVPCCluster.Status = infrav1beta2.IBMVPCClusterStatus{ - Network: &infrav1beta2.VPCNetworkStatus{ - VPC: &infrav1beta2.ResourceStatus{ + scope.IBMVPCCluster.Status = infrav1.IBMVPCClusterStatus{ + Network: &infrav1.VPCNetworkStatus{ + VPC: &infrav1.ResourceStatus{ ID: "network-vpc-id", }, }, @@ -455,7 +453,7 @@ func TestCreateMachine(t *testing.T) { // TODO(cjschaef): Enhance the mock Options parameter to validate the Network Status VPC ID was used. mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -466,12 +464,12 @@ func TestCreateMachine(t *testing.T) { mockController, mockvpc := setup(t) t.Cleanup(mockController.Finish) scope := setupMachineScope(clusterName, machineName, mockvpc) - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{}, + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{}, } scope.IBMVPCMachine.Spec = vpcMachine.Spec mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -480,15 +478,15 @@ func TestCreateMachine(t *testing.T) { mockController, mockvpc := setup(t) t.Cleanup(mockController.Finish) scope := setupMachineScope(clusterName, machineName, mockvpc) - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ - 
SSHKeys: []*infrav1beta2.IBMVPCResourceReference{ + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ + SSHKeys: []*infrav1.IBMVPCResourceReference{ {}, }, - Image: &infrav1beta2.IBMVPCResourceReference{ + Image: &infrav1.IBMVPCResourceReference{ ID: core.StringPtr("foo-image-id"), }, - PrimaryNetworkInterface: infrav1beta2.NetworkInterface{ + PrimaryNetworkInterface: infrav1.NetworkInterface{ Subnet: "subnet-name", }, Profile: "machine-profile", @@ -497,7 +495,7 @@ func TestCreateMachine(t *testing.T) { scope.IBMVPCMachine.Spec = vpcMachine.Spec mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -506,17 +504,17 @@ func TestCreateMachine(t *testing.T) { mockController, mockvpc := setup(t) t.Cleanup(mockController.Finish) scope := setupMachineScope(clusterName, machineName, mockvpc) - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ - SSHKeys: []*infrav1beta2.IBMVPCResourceReference{ + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ + SSHKeys: []*infrav1.IBMVPCResourceReference{ { Name: core.StringPtr("foo-ssh-key"), }, }, - Image: &infrav1beta2.IBMVPCResourceReference{ + Image: &infrav1.IBMVPCResourceReference{ ID: core.StringPtr("foo-image-id"), }, - PrimaryNetworkInterface: infrav1beta2.NetworkInterface{ + PrimaryNetworkInterface: infrav1.NetworkInterface{ Subnet: "subnet-name", }, Profile: "machine-profile", @@ -526,7 +524,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) 
mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().ListKeys(gomock.AssignableToTypeOf(&vpcv1.ListKeysOptions{})).Return(nil, &core.DetailedResponse{}, errors.New("Failed when creating instance")) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -543,17 +541,17 @@ func TestCreateMachine(t *testing.T) { }, }, } - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ - SSHKeys: []*infrav1beta2.IBMVPCResourceReference{ + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ + SSHKeys: []*infrav1.IBMVPCResourceReference{ { Name: core.StringPtr("foo-ssh-key"), }, }, - Image: &infrav1beta2.IBMVPCResourceReference{ + Image: &infrav1.IBMVPCResourceReference{ ID: core.StringPtr("foo-image-id"), }, - PrimaryNetworkInterface: infrav1beta2.NetworkInterface{ + PrimaryNetworkInterface: infrav1.NetworkInterface{ Subnet: "subnet-name", }, Profile: "machine-profile", @@ -563,7 +561,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().ListKeys(gomock.AssignableToTypeOf(&vpcv1.ListKeysOptions{})).Return(keyCollection, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -591,17 +589,17 @@ func TestCreateMachine(t *testing.T) { }, }, } - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ - SSHKeys: []*infrav1beta2.IBMVPCResourceReference{ + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ + SSHKeys: []*infrav1.IBMVPCResourceReference{ 
{ Name: core.StringPtr("foo-ssh-key"), }, }, - Image: &infrav1beta2.IBMVPCResourceReference{ + Image: &infrav1.IBMVPCResourceReference{ Name: core.StringPtr("foo-image"), }, - PrimaryNetworkInterface: infrav1beta2.NetworkInterface{ + PrimaryNetworkInterface: infrav1.NetworkInterface{ Subnet: "subnet-name", }, Profile: "machine-profile", @@ -616,7 +614,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListImages(gomock.AssignableToTypeOf(&vpcv1.ListImagesOptions{})).Return(imageCollection, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListKeys(gomock.AssignableToTypeOf(&vpcv1.ListKeysOptions{})).Return(keyCollection, &core.DetailedResponse{}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -626,10 +624,10 @@ func TestCreateMachine(t *testing.T) { mockController, mockvpc := setup(t) t.Cleanup(mockController.Finish) scope := setupMachineScope(clusterName, machineName, mockvpc) - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ - Image: &infrav1beta2.IBMVPCResourceReference{}, - PrimaryNetworkInterface: infrav1beta2.NetworkInterface{ + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ + Image: &infrav1.IBMVPCResourceReference{}, + PrimaryNetworkInterface: infrav1.NetworkInterface{ Subnet: "subnet-name", }, Profile: "machine-profile", @@ -638,7 +636,7 @@ func TestCreateMachine(t *testing.T) { scope.IBMVPCMachine.Spec = vpcMachine.Spec mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) - _, err := scope.CreateMachine() + _, 
err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -647,12 +645,12 @@ func TestCreateMachine(t *testing.T) { mockController, mockvpc := setup(t) t.Cleanup(mockController.Finish) scope := setupMachineScope(clusterName, machineName, mockvpc) - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ - Image: &infrav1beta2.IBMVPCResourceReference{ + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ + Image: &infrav1.IBMVPCResourceReference{ Name: core.StringPtr("foo-image"), }, - PrimaryNetworkInterface: infrav1beta2.NetworkInterface{ + PrimaryNetworkInterface: infrav1.NetworkInterface{ Subnet: "subnet-name", }, Profile: "machine-profile", @@ -662,7 +660,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().ListImages(gomock.AssignableToTypeOf(&vpcv1.ListImagesOptions{})).Return(nil, &core.DetailedResponse{}, errors.New("Failed when listing Images")) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -679,12 +677,12 @@ func TestCreateMachine(t *testing.T) { }, }, } - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ - Image: &infrav1beta2.IBMVPCResourceReference{ + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ + Image: &infrav1.IBMVPCResourceReference{ Name: core.StringPtr("foo-image"), }, - PrimaryNetworkInterface: infrav1beta2.NetworkInterface{ + PrimaryNetworkInterface: infrav1.NetworkInterface{ Subnet: "subnet-name", }, Profile: "machine-profile", @@ -694,7 +692,7 @@ func TestCreateMachine(t *testing.T) { 
mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().ListImages(gomock.AssignableToTypeOf(&vpcv1.ListImagesOptions{})).Return(imageCollection, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -706,19 +704,19 @@ func TestCreateMachine(t *testing.T) { expectedOutput := &vpcv1.Instance{ Name: core.StringPtr("foo-machine"), } - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ - SSHKeys: []*infrav1beta2.IBMVPCResourceReference{ + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ + SSHKeys: []*infrav1.IBMVPCResourceReference{ { Name: core.StringPtr("foo-ssh-key"), ID: core.StringPtr("foo-ssh-key-id"), }, }, - Image: &infrav1beta2.IBMVPCResourceReference{ + Image: &infrav1.IBMVPCResourceReference{ Name: core.StringPtr("foo-image"), ID: core.StringPtr("foo-image-id"), }, - PrimaryNetworkInterface: infrav1beta2.NetworkInterface{ + PrimaryNetworkInterface: infrav1.NetworkInterface{ Subnet: "subnet-name", }, Profile: "machine-profile", @@ -731,7 +729,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -743,11 
+741,11 @@ func TestDeleteMachine(t *testing.T) { return gomock.NewController(t), mock.NewMockVpc(gomock.NewController(t)) } - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ Name: "foo-machine", }, - Status: infrav1beta2.IBMVPCMachineStatus{ + Status: infrav1.IBMVPCMachineStatus{ InstanceID: "foo-instance-id", }, } @@ -795,11 +793,11 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { return gomock.NewController(t), mock.NewMockVpc(gomock.NewController(t)) } - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ + vpcMachine := infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ Name: "foo-machine", }, - Status: infrav1beta2.IBMVPCMachineStatus{ + Status: infrav1.IBMVPCMachineStatus{ Addresses: []corev1.NodeAddress{ { Type: corev1.NodeInternalIP, @@ -828,7 +826,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { scope.IBMVPCMachine.Spec = vpcMachine.Spec scope.IBMVPCMachine.Status = vpcMachine.Status mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(&vpcv1.LoadBalancer{}, &core.DetailedResponse{}, errors.New("Could not fetch LoadBalancer")) - _, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1beta2.DefaultAPIServerPort)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(Not(BeNil())) }) t.Run("Error when LoadBalancer is not active", func(t *testing.T) { @@ -843,7 +841,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { ProvisioningStatus: core.StringPtr("pending"), } mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) - _, err := 
scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1beta2.DefaultAPIServerPort)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(Not(BeNil())) }) t.Run("Error when no pool exist", func(t *testing.T) { @@ -859,7 +857,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { Pools: []vpcv1.LoadBalancerPoolReference{}, } mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) - _, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1beta2.DefaultAPIServerPort)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(Not(BeNil())) }) t.Run("Error when listing LoadBalancerPoolMembers", func(t *testing.T) { @@ -871,7 +869,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { scope.IBMVPCMachine.Status = vpcMachine.Status mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, errors.New("Failed to list LoadBalancerPoolMembers")) - _, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1beta2.DefaultAPIServerPort)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(Not(BeNil())) }) t.Run("PoolMember already exist", func(t *testing.T) { @@ -884,7 +882,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { 
loadBalancerPoolMemberCollection := &vpcv1.LoadBalancerPoolMemberCollection{ Members: []vpcv1.LoadBalancerPoolMember{ { - Port: core.Int64Ptr(int64(infrav1beta2.DefaultAPIServerPort)), + Port: core.Int64Ptr(int64(infrav1.DefaultAPIServerPort)), Target: &vpcv1.LoadBalancerPoolMemberTarget{ Address: core.StringPtr("192.168.1.1"), }, @@ -893,7 +891,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { } mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(loadBalancerPoolMemberCollection, &core.DetailedResponse{}, nil) - _, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1beta2.DefaultAPIServerPort)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(BeNil()) }) t.Run("Error when creating LoadBalancerPoolMember", func(t *testing.T) { @@ -906,7 +904,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(&vpcv1.LoadBalancerPoolMember{}, &core.DetailedResponse{}, errors.New("Failed to create LoadBalancerPoolMember")) - _, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(64)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, 
&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(64)) g.Expect(err).To(Not(BeNil())) }) t.Run("Should create VPCLoadBalancerPoolMember", func(t *testing.T) { @@ -916,18 +914,18 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { scope := setupMachineScope(clusterName, machineName, mockvpc) expectedOutput := &vpcv1.LoadBalancerPoolMember{ ID: core.StringPtr("foo-load-balancer-pool-member-id"), - Port: core.Int64Ptr(int64(infrav1beta2.DefaultAPIServerPort)), + Port: core.Int64Ptr(int64(infrav1.DefaultAPIServerPort)), } scope.IBMVPCMachine.Spec = vpcMachine.Spec scope.IBMVPCMachine.Status = vpcMachine.Status loadBalancerPoolMember := &vpcv1.LoadBalancerPoolMember{ ID: core.StringPtr("foo-load-balancer-pool-member-id"), - Port: core.Int64Ptr(int64(infrav1beta2.DefaultAPIServerPort)), + Port: core.Int64Ptr(int64(infrav1.DefaultAPIServerPort)), } mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(loadBalancerPoolMember, &core.DetailedResponse{}, nil) - out, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1beta2.DefaultAPIServerPort)) + out, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -940,11 +938,11 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { return gomock.NewController(t), mock.NewMockVpc(gomock.NewController(t)) } - vpcMachine := infrav1beta2.IBMVPCMachine{ - Spec: infrav1beta2.IBMVPCMachineSpec{ + vpcMachine 
:= infrav1.IBMVPCMachine{ + Spec: infrav1.IBMVPCMachineSpec{ Name: "foo-machine", }, - Status: infrav1beta2.IBMVPCMachineStatus{ + Status: infrav1.IBMVPCMachineStatus{ InstanceID: "foo-instance-id", Addresses: []corev1.NodeAddress{ { @@ -976,7 +974,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { Members: []vpcv1.LoadBalancerPoolMember{ { ID: core.StringPtr("foo-lb-pool-member-id"), - Port: core.Int64Ptr(int64(infrav1beta2.DefaultAPIServerPort)), + Port: core.Int64Ptr(int64(infrav1.DefaultAPIServerPort)), Target: &vpcv1.LoadBalancerPoolMemberTarget{ Address: core.StringPtr("192.168.1.1"), }, @@ -992,7 +990,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { scope.IBMVPCMachine.Spec = vpcMachine.Spec scope.IBMVPCMachine.Status = vpcMachine.Status mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(&vpcv1.LoadBalancer{}, &core.DetailedResponse{}, errors.New("Could not fetch LoadBalancer")) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(Not(BeNil())) }) t.Run("No pools associated with load balancer", func(t *testing.T) { @@ -1003,7 +1001,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { scope.IBMVPCMachine.Spec = vpcMachine.Spec scope.IBMVPCMachine.Status = vpcMachine.Status mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(&vpcv1.LoadBalancer{}, &core.DetailedResponse{}, nil) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(BeNil()) }) t.Run("Error when fetching Instance", func(t *testing.T) { @@ -1015,7 +1013,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { scope.IBMVPCMachine.Status = vpcMachine.Status mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) 
mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(&vpcv1.Instance{}, &core.DetailedResponse{}, errors.New("Failed to fetch Instance")) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(Not(BeNil())) }) t.Run("Error when listing LoadBalancerPoolMembers", func(t *testing.T) { @@ -1028,7 +1026,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(&vpcv1.Instance{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, errors.New("Failed to list LoadBalancerPoolMembers")) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(Not(BeNil())) }) t.Run("No members in load balancer pool", func(t *testing.T) { @@ -1041,7 +1039,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(&vpcv1.Instance{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(BeNil()) }) t.Run("Error when load balancer is not in active state", func(t *testing.T) { @@ -1063,7 +1061,7 @@ func 
TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(loadBalancerPoolMemberCollection, &core.DetailedResponse{}, nil) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(Not(BeNil())) }) t.Run("Error when deleting load balancer pool member", func(t *testing.T) { @@ -1077,7 +1075,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(loadBalancerPoolMemberCollection, &core.DetailedResponse{}, nil) mockvpc.EXPECT().DeleteLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.DeleteLoadBalancerPoolMemberOptions{})).Return(&core.DetailedResponse{}, errors.New("Failed to delete LoadBalancerPoolMember")) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(Not(BeNil())) }) t.Run("Should delete load balancer pool", func(t *testing.T) { @@ -1091,7 +1089,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(loadBalancerPoolMemberCollection, &core.DetailedResponse{}, nil) 
mockvpc.EXPECT().DeleteLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.DeleteLoadBalancerPoolMemberOptions{})).Return(&core.DetailedResponse{}, nil) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(BeNil()) }) }) diff --git a/cloud/scope/powervs_cluster.go b/cloud/scope/powervs_cluster.go index 1fe0ed2c5..92678d4da 100644 --- a/cloud/scope/powervs_cluster.go +++ b/cloud/scope/powervs_cluster.go @@ -45,20 +45,20 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-ibmcloud/internal/genutil" + "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/accounts" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/authenticator" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/cos" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/resourcecontroller" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/resourcemanager" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/transitgateway" - "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/utils" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/endpoints" - genUtil "sigs.k8s.io/cluster-api-provider-ibmcloud/util" ) const ( @@ -86,8 +86,8 @@ const ( type PowerVSClusterScopeParams struct { Client client.Client Logger logr.Logger - Cluster *capiv1beta1.Cluster - IBMPowerVSCluster 
*infrav1beta2.IBMPowerVSCluster + Cluster *clusterv1.Cluster + IBMPowerVSCluster *infrav1.IBMPowerVSCluster ServiceEndpoint []endpoints.ServiceEndpoint // ClientFactory contains collection of functions to override actual client, which helps in testing. @@ -107,7 +107,7 @@ type ClientFactory struct { // PowerVSClusterScope defines a scope defined around a Power VS Cluster. type PowerVSClusterScope struct { Client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper IBMPowerVSClient powervs.PowerVS IBMVPCClient vpc.Vpc @@ -116,8 +116,8 @@ type PowerVSClusterScope struct { COSClient cos.Cos ResourceManagerClient resourcemanager.ResourceManager - Cluster *capiv1beta1.Cluster - IBMPowerVSCluster *infrav1beta2.IBMPowerVSCluster + Cluster *clusterv1.Cluster + IBMPowerVSCluster *infrav1.IBMPowerVSCluster ServiceEndpoint []endpoints.ServiceEndpoint } @@ -147,7 +147,7 @@ func NewPowerVSClusterScope(params PowerVSClusterScopeParams) (*PowerVSClusterSc params.Logger = klog.Background() } - helper, err := patch.NewHelper(params.IBMPowerVSCluster, params.Client) + helper, err := v1beta1patch.NewHelper(params.IBMPowerVSCluster, params.Client) if err != nil { err = fmt.Errorf("failed to init patch helper: %w", err) return nil, err @@ -369,7 +369,7 @@ func (s *PowerVSClusterScope) Zone() *string { } // ResourceGroup returns the cluster resource group. -func (s *PowerVSClusterScope) ResourceGroup() *infrav1beta2.IBMPowerVSResourceReference { +func (s *PowerVSClusterScope) ResourceGroup() *infrav1.IBMPowerVSResourceReference { return s.IBMPowerVSCluster.Spec.ResourceGroup } @@ -380,14 +380,14 @@ func (s *PowerVSClusterScope) InfraCluster() string { // APIServerPort returns the APIServerPort to use when creating the ControlPlaneEndpoint. 
func (s *PowerVSClusterScope) APIServerPort() int32 { - if s.Cluster.Spec.ClusterNetwork != nil && s.Cluster.Spec.ClusterNetwork.APIServerPort != nil { - return *s.Cluster.Spec.ClusterNetwork.APIServerPort + if s.Cluster.Spec.ClusterNetwork.APIServerPort > 0 { + return s.Cluster.Spec.ClusterNetwork.APIServerPort } - return infrav1beta2.DefaultAPIServerPort + return infrav1.DefaultAPIServerPort } // ServiceInstance returns the cluster ServiceInstance. -func (s *PowerVSClusterScope) ServiceInstance() *infrav1beta2.IBMPowerVSResourceReference { +func (s *PowerVSClusterScope) ServiceInstance() *infrav1.IBMPowerVSResourceReference { return s.IBMPowerVSCluster.Spec.ServiceInstance } @@ -400,7 +400,7 @@ func (s *PowerVSClusterScope) GetServiceInstanceID() string { } // SetTransitGatewayConnectionStatus sets the connection status of Transit gateway. -func (s *PowerVSClusterScope) SetTransitGatewayConnectionStatus(networkType networkConnectionType, resource *infrav1beta2.ResourceReference) { +func (s *PowerVSClusterScope) SetTransitGatewayConnectionStatus(networkType networkConnectionType, resource *infrav1.ResourceReference) { if s.IBMPowerVSCluster.Status.TransitGateway == nil || resource == nil { return } @@ -415,7 +415,7 @@ func (s *PowerVSClusterScope) SetTransitGatewayConnectionStatus(networkType netw // SetTransitGatewayStatus sets the status of Transit gateway. func (s *PowerVSClusterScope) SetTransitGatewayStatus(id *string, controllerCreated *bool) { - s.IBMPowerVSCluster.Status.TransitGateway = &infrav1beta2.TransitGatewayStatus{ + s.IBMPowerVSCluster.Status.TransitGateway = &infrav1.TransitGatewayStatus{ ID: id, ControllerCreated: controllerCreated, } @@ -424,41 +424,41 @@ func (s *PowerVSClusterScope) SetTransitGatewayStatus(id *string, controllerCrea // TODO: Can we use generic here. // SetStatus set the IBMPowerVSCluster status for provided ResourceType. 
-func (s *PowerVSClusterScope) SetStatus(ctx context.Context, resourceType infrav1beta2.ResourceType, resource infrav1beta2.ResourceReference) { +func (s *PowerVSClusterScope) SetStatus(ctx context.Context, resourceType infrav1.ResourceType, resource infrav1.ResourceReference) { log := ctrl.LoggerFrom(ctx) log.V(3).Info("Setting status", "resourceType", resourceType, "resource", resource) switch resourceType { - case infrav1beta2.ResourceTypeServiceInstance: + case infrav1.ResourceTypeServiceInstance: if s.IBMPowerVSCluster.Status.ServiceInstance == nil { s.IBMPowerVSCluster.Status.ServiceInstance = &resource return } s.IBMPowerVSCluster.Status.ServiceInstance.Set(resource) - case infrav1beta2.ResourceTypeNetwork: + case infrav1.ResourceTypeNetwork: if s.IBMPowerVSCluster.Status.Network == nil { s.IBMPowerVSCluster.Status.Network = &resource return } s.IBMPowerVSCluster.Status.Network.Set(resource) - case infrav1beta2.ResourceTypeVPC: + case infrav1.ResourceTypeVPC: if s.IBMPowerVSCluster.Status.VPC == nil { s.IBMPowerVSCluster.Status.VPC = &resource return } s.IBMPowerVSCluster.Status.VPC.Set(resource) - case infrav1beta2.ResourceTypeDHCPServer: + case infrav1.ResourceTypeDHCPServer: if s.IBMPowerVSCluster.Status.DHCPServer == nil { s.IBMPowerVSCluster.Status.DHCPServer = &resource return } s.IBMPowerVSCluster.Status.DHCPServer.Set(resource) - case infrav1beta2.ResourceTypeCOSInstance: + case infrav1.ResourceTypeCOSInstance: if s.IBMPowerVSCluster.Status.COSInstance == nil { s.IBMPowerVSCluster.Status.COSInstance = &resource return } s.IBMPowerVSCluster.Status.COSInstance.Set(resource) - case infrav1beta2.ResourceTypeResourceGroup: + case infrav1.ResourceTypeResourceGroup: if s.IBMPowerVSCluster.Status.ResourceGroup == nil { s.IBMPowerVSCluster.Status.ResourceGroup = &resource return @@ -476,7 +476,7 @@ func (s *PowerVSClusterScope) GetNetworkID() *string { } // Network returns the cluster Network. 
-func (s *PowerVSClusterScope) Network() *infrav1beta2.IBMPowerVSResourceReference { +func (s *PowerVSClusterScope) Network() *infrav1.IBMPowerVSResourceReference { return &s.IBMPowerVSCluster.Spec.Network } @@ -489,12 +489,12 @@ func (s *PowerVSClusterScope) GetDHCPServerID() *string { } // DHCPServer returns the DHCP server details. -func (s *PowerVSClusterScope) DHCPServer() *infrav1beta2.DHCPServer { +func (s *PowerVSClusterScope) DHCPServer() *infrav1.DHCPServer { return s.IBMPowerVSCluster.Spec.DHCPServer } // VPC returns the cluster VPC information. -func (s *PowerVSClusterScope) VPC() *infrav1beta2.VPCResourceReference { +func (s *PowerVSClusterScope) VPC() *infrav1.VPCResourceReference { return s.IBMPowerVSCluster.Spec.VPC } @@ -530,11 +530,11 @@ func (s *PowerVSClusterScope) GetVPCSubnetIDs() []*string { } // SetVPCSubnetStatus set the VPC subnet id. -func (s *PowerVSClusterScope) SetVPCSubnetStatus(ctx context.Context, name string, resource infrav1beta2.ResourceReference) { +func (s *PowerVSClusterScope) SetVPCSubnetStatus(ctx context.Context, name string, resource infrav1.ResourceReference) { log := ctrl.LoggerFrom(ctx) log.V(3).Info("Setting status", "name", name, "resource", resource) if s.IBMPowerVSCluster.Status.VPCSubnet == nil { - s.IBMPowerVSCluster.Status.VPCSubnet = make(map[string]infrav1beta2.ResourceReference) + s.IBMPowerVSCluster.Status.VPCSubnet = make(map[string]infrav1.ResourceReference) } if val, ok := s.IBMPowerVSCluster.Status.VPCSubnet[name]; ok { if val.ControllerCreated != nil && *val.ControllerCreated { @@ -569,11 +569,11 @@ func (s *PowerVSClusterScope) GetVPCSecurityGroupByID(securityGroupID string) (* } // SetVPCSecurityGroupStatus set the VPC security group id. 
-func (s *PowerVSClusterScope) SetVPCSecurityGroupStatus(ctx context.Context, name string, resource infrav1beta2.VPCSecurityGroupStatus) { +func (s *PowerVSClusterScope) SetVPCSecurityGroupStatus(ctx context.Context, name string, resource infrav1.VPCSecurityGroupStatus) { log := ctrl.LoggerFrom(ctx) log.V(3).Info("Setting VPC security group status", "name", name, "resource", resource) if s.IBMPowerVSCluster.Status.VPCSecurityGroups == nil { - s.IBMPowerVSCluster.Status.VPCSecurityGroups = make(map[string]infrav1beta2.VPCSecurityGroupStatus) + s.IBMPowerVSCluster.Status.VPCSecurityGroups = make(map[string]infrav1.VPCSecurityGroupStatus) } if val, ok := s.IBMPowerVSCluster.Status.VPCSecurityGroups[name]; ok { if val.ControllerCreated != nil && *val.ControllerCreated { @@ -584,7 +584,7 @@ func (s *PowerVSClusterScope) SetVPCSecurityGroupStatus(ctx context.Context, nam } // TransitGateway returns the cluster Transit Gateway information. -func (s *PowerVSClusterScope) TransitGateway() *infrav1beta2.TransitGateway { +func (s *PowerVSClusterScope) TransitGateway() *infrav1.TransitGateway { return s.IBMPowerVSCluster.Spec.TransitGateway } @@ -597,11 +597,11 @@ func (s *PowerVSClusterScope) GetTransitGatewayID() *string { } // SetLoadBalancerStatus set the loadBalancer id. 
-func (s *PowerVSClusterScope) SetLoadBalancerStatus(ctx context.Context, name string, loadBalancer infrav1beta2.VPCLoadBalancerStatus) { +func (s *PowerVSClusterScope) SetLoadBalancerStatus(ctx context.Context, name string, loadBalancer infrav1.VPCLoadBalancerStatus) { log := ctrl.LoggerFrom(ctx) log.V(3).Info("Setting status", "name", name, "status", loadBalancer) if s.IBMPowerVSCluster.Status.LoadBalancers == nil { - s.IBMPowerVSCluster.Status.LoadBalancers = make(map[string]infrav1beta2.VPCLoadBalancerStatus) + s.IBMPowerVSCluster.Status.LoadBalancers = make(map[string]infrav1.VPCLoadBalancerStatus) } if val, ok := s.IBMPowerVSCluster.Status.LoadBalancers[name]; ok { if val.ControllerCreated != nil && *val.ControllerCreated { @@ -623,7 +623,7 @@ func (s *PowerVSClusterScope) GetLoadBalancerID(loadBalancerName string) *string } // GetLoadBalancerState will return the state for the load balancer. -func (s *PowerVSClusterScope) GetLoadBalancerState(name string) *infrav1beta2.VPCLoadBalancerState { +func (s *PowerVSClusterScope) GetLoadBalancerState(name string) *infrav1.VPCLoadBalancerState { if s.IBMPowerVSCluster.Status.LoadBalancers == nil { return nil } @@ -641,7 +641,7 @@ func (s *PowerVSClusterScope) GetPublicLoadBalancerHostName() (*string, error) { var name string if len(s.IBMPowerVSCluster.Spec.LoadBalancers) == 0 { - name = *s.GetServiceName(infrav1beta2.ResourceTypeLoadBalancer) + name = *s.GetServiceName(infrav1.ResourceTypeLoadBalancer) } for _, lb := range s.IBMPowerVSCluster.Spec.LoadBalancers { @@ -719,7 +719,7 @@ func (s *PowerVSClusterScope) ReconcileResourceGroup(ctx context.Context) error } log.Info("Fetched resource group ID", "resourceGroupID", resourceGroupID) // Set the status of IBMPowerVSCluster object with resource group id. 
- s.SetStatus(ctx, infrav1beta2.ResourceTypeResourceGroup, infrav1beta2.ResourceReference{ID: &resourceGroupID, ControllerCreated: ptr.To(false)}) + s.SetStatus(ctx, infrav1.ResourceTypeResourceGroup, infrav1.ResourceReference{ID: &resourceGroupID, ControllerCreated: ptr.To(false)}) return nil } @@ -757,7 +757,7 @@ func (s *PowerVSClusterScope) ReconcilePowerVSServiceInstance(ctx context.Contex // Set the status of IBMPowerVSCluster object with serviceInstanceID and ControllerCreated to false as PowerVS service instance is already exist in cloud. if serviceInstanceID != "" { log.V(3).Info("Found PowerVS service instance in cloud", "serviceInstanceID", serviceInstanceID) - s.SetStatus(ctx, infrav1beta2.ResourceTypeServiceInstance, infrav1beta2.ResourceReference{ID: &serviceInstanceID, ControllerCreated: ptr.To(false)}) + s.SetStatus(ctx, infrav1.ResourceTypeServiceInstance, infrav1.ResourceReference{ID: &serviceInstanceID, ControllerCreated: ptr.To(false)}) return requeue, nil } @@ -772,7 +772,7 @@ func (s *PowerVSClusterScope) ReconcilePowerVSServiceInstance(ctx context.Contex log.Info("Created PowerVS service instance", "serviceInstanceID", serviceInstance.GUID) // Set the status of IBMPowerVSCluster object with serviceInstanceID and ControllerCreated to true as new PowerVS service instance is created. 
- s.SetStatus(ctx, infrav1beta2.ResourceTypeServiceInstance, infrav1beta2.ResourceReference{ID: serviceInstance.GUID, ControllerCreated: ptr.To(true)}) + s.SetStatus(ctx, infrav1.ResourceTypeServiceInstance, infrav1.ResourceReference{ID: serviceInstance.GUID, ControllerCreated: ptr.To(true)}) return true, nil } @@ -783,13 +783,13 @@ func (s *PowerVSClusterScope) checkServiceInstanceState(ctx context.Context, ins log := ctrl.LoggerFrom(ctx) log.V(3).Info("Checking the state of PowerVS service instance", "name", *instance.Name) switch *instance.State { - case string(infrav1beta2.ServiceInstanceStateActive): + case string(infrav1.ServiceInstanceStateActive): log.V(3).Info("PowerVS service instance is in active state") return false, nil - case string(infrav1beta2.ServiceInstanceStateProvisioning): + case string(infrav1.ServiceInstanceStateProvisioning): log.V(3).Info("PowerVS service instance is in provisioning state") return true, nil - case string(infrav1beta2.ServiceInstanceStateFailed): + case string(infrav1.ServiceInstanceStateFailed): return false, fmt.Errorf("PowerVS service instance is in failed state") } return false, fmt.Errorf("PowerVS service instance is in %s state", *instance.State) @@ -841,7 +841,7 @@ func (s *PowerVSClusterScope) isServiceInstanceExists(ctx context.Context) (stri // getServiceInstance return resource instance by name. func (s *PowerVSClusterScope) getServiceInstance() (*resourcecontrollerv2.ResourceInstance, error) { //TODO: Support regular expression - return s.ResourceClient.GetServiceInstance("", *s.GetServiceName(infrav1beta2.ResourceTypeServiceInstance), s.IBMPowerVSCluster.Spec.Zone) + return s.ResourceClient.GetServiceInstance("", *s.GetServiceName(infrav1.ResourceTypeServiceInstance), s.IBMPowerVSCluster.Spec.Zone) } // createServiceInstance creates the service instance. @@ -854,13 +854,13 @@ func (s *PowerVSClusterScope) createServiceInstance(ctx context.Context) (*resou } // create service instance. 
- log.V(3).Info("Creating new PowerVS service instance", "serviceInstanceName", s.GetServiceName(infrav1beta2.ResourceTypeServiceInstance)) + log.V(3).Info("Creating new PowerVS service instance", "serviceInstanceName", s.GetServiceName(infrav1.ResourceTypeServiceInstance)) zone := s.Zone() if zone == nil { return nil, fmt.Errorf("PowerVS zone is not set") } serviceInstance, _, err := s.ResourceClient.CreateResourceInstance(&resourcecontrollerv2.CreateResourceInstanceOptions{ - Name: s.GetServiceName(infrav1beta2.ResourceTypeServiceInstance), + Name: s.GetServiceName(infrav1.ResourceTypeServiceInstance), Target: zone, ResourceGroup: &resourceGroupID, ResourcePlanID: ptr.To(resourcecontroller.PowerVSResourcePlanID), @@ -909,7 +909,7 @@ func (s *PowerVSClusterScope) ReconcileNetwork(ctx context.Context) (bool, error } if networkID != nil { log.V(3).Info("Found PowerVS network in cloud", "networkID", networkID) - s.SetStatus(ctx, infrav1beta2.ResourceTypeNetwork, infrav1beta2.ResourceReference{ID: networkID, ControllerCreated: ptr.To(false)}) + s.SetStatus(ctx, infrav1.ResourceTypeNetwork, infrav1.ResourceReference{ID: networkID, ControllerCreated: ptr.To(false)}) } dhcpServerID, err := s.checkDHCPServer(ctx) if err != nil { @@ -917,7 +917,7 @@ func (s *PowerVSClusterScope) ReconcileNetwork(ctx context.Context) (bool, error } if dhcpServerID != nil { log.V(3).Info("Found DHCP server in cloud", "dhcpServerID", dhcpServerID) - s.SetStatus(ctx, infrav1beta2.ResourceTypeDHCPServer, infrav1beta2.ResourceReference{ID: dhcpServerID, ControllerCreated: ptr.To(false)}) + s.SetStatus(ctx, infrav1.ResourceTypeDHCPServer, infrav1.ResourceReference{ID: dhcpServerID, ControllerCreated: ptr.To(false)}) } if s.GetNetworkID() != nil { return true, nil @@ -929,7 +929,7 @@ func (s *PowerVSClusterScope) ReconcileNetwork(ctx context.Context) (bool, error } log.Info("Created DHCP Server", "dhcpServerID", *dhcpServerID) - s.SetStatus(ctx, infrav1beta2.ResourceTypeDHCPServer, 
infrav1beta2.ResourceReference{ID: dhcpServerID, ControllerCreated: ptr.To(true)}) + s.SetStatus(ctx, infrav1.ResourceTypeDHCPServer, infrav1.ResourceReference{ID: dhcpServerID, ControllerCreated: ptr.To(true)}) return false, nil } @@ -948,7 +948,7 @@ func (s *PowerVSClusterScope) checkDHCPServer(ctx context.Context) (*string, err if _, err := s.IBMPowerVSClient.GetNetworkByID(*dhcpServer.Network.ID); err != nil { return nil, fmt.Errorf("failed to fetch network by ID: %w", err) } - s.SetStatus(ctx, infrav1beta2.ResourceTypeNetwork, infrav1beta2.ResourceReference{ID: dhcpServer.Network.ID, ControllerCreated: ptr.To(false)}) + s.SetStatus(ctx, infrav1.ResourceTypeNetwork, infrav1.ResourceReference{ID: dhcpServer.Network.ID, ControllerCreated: ptr.To(false)}) } else { return nil, fmt.Errorf("found DHCP server with ID `%s`, but network is nil", *s.DHCPServer().ID) } @@ -959,7 +959,7 @@ func (s *PowerVSClusterScope) checkDHCPServer(ctx context.Context) (*string, err } // if user provides DHCP server name then we can use network name to match the existing DHCP server - networkName := dhcpNetworkName(*s.GetServiceName(infrav1beta2.ResourceTypeDHCPServer)) + networkName := dhcpNetworkName(*s.GetServiceName(infrav1.ResourceTypeDHCPServer)) log.V(3).Info("Checking DHCP server's network list by network name", "name", networkName) dhcpServers, err := s.IBMPowerVSClient.GetAllDHCPServers() @@ -972,7 +972,7 @@ func (s *PowerVSClusterScope) checkDHCPServer(ctx context.Context) (*string, err if _, err := s.IBMPowerVSClient.GetNetworkByID(*dhcpServer.Network.ID); err != nil { return nil, fmt.Errorf("failed to fetch network by ID: %w", err) } - s.SetStatus(ctx, infrav1beta2.ResourceTypeNetwork, infrav1beta2.ResourceReference{ID: dhcpServer.Network.ID, ControllerCreated: ptr.To(false)}) + s.SetStatus(ctx, infrav1.ResourceTypeNetwork, infrav1.ResourceReference{ID: dhcpServer.Network.ID, ControllerCreated: ptr.To(false)}) } else if *dhcpServer.Network.ID != *s.GetNetworkID() { return 
nil, fmt.Errorf("error network set via spec and DHCP server's networkID are not matching") } @@ -1031,13 +1031,13 @@ func (s *PowerVSClusterScope) checkDHCPServerStatus(ctx context.Context, dhcpSer log := ctrl.LoggerFrom(ctx) log.V(3).Info("Checking the status of DHCP server", "dhcpServerID", *dhcpServer.ID) switch *dhcpServer.Status { - case string(infrav1beta2.DHCPServerStateActive): + case string(infrav1.DHCPServerStateActive): log.V(3).Info("DHCP server is in active state") return true, nil - case string(infrav1beta2.DHCPServerStateBuild): + case string(infrav1.DHCPServerStateBuild): log.V(3).Info("DHCP server is in build state") return false, nil - case string(infrav1beta2.DHCPServerStateError): + case string(infrav1.DHCPServerStateError): return false, fmt.Errorf("DHCP server creation failed and is in error state") } return false, nil @@ -1049,10 +1049,10 @@ func (s *PowerVSClusterScope) createDHCPServer(ctx context.Context) (*string, er var dhcpServerCreateParams models.DHCPServerCreate dhcpServerDetails := s.DHCPServer() if dhcpServerDetails == nil { - dhcpServerDetails = &infrav1beta2.DHCPServer{} + dhcpServerDetails = &infrav1.DHCPServer{} } - dhcpServerCreateParams.Name = s.GetServiceName(infrav1beta2.ResourceTypeDHCPServer) + dhcpServerCreateParams.Name = s.GetServiceName(infrav1.ResourceTypeDHCPServer) log.V(3).Info("Creating a new DHCP server with name", "name", dhcpServerCreateParams.Name) if dhcpServerDetails.DNSServer != nil { dhcpServerCreateParams.DNSServer = dhcpServerDetails.DNSServer @@ -1076,7 +1076,7 @@ func (s *PowerVSClusterScope) createDHCPServer(ctx context.Context) (*string, er } log.Info("DHCP Server network details", "details", *dhcpServer.Network) - s.SetStatus(ctx, infrav1beta2.ResourceTypeNetwork, infrav1beta2.ResourceReference{ID: dhcpServer.Network.ID, ControllerCreated: ptr.To(true)}) + s.SetStatus(ctx, infrav1.ResourceTypeNetwork, infrav1.ResourceReference{ID: dhcpServer.Network.ID, ControllerCreated: ptr.To(true)}) return 
dhcpServer.ID, nil } @@ -1097,7 +1097,7 @@ func (s *PowerVSClusterScope) ReconcileVPC(ctx context.Context) (bool, error) { return false, fmt.Errorf("vpc with ID %s not found", *vpcID) } - if vpcDetails.Status != nil && *vpcDetails.Status == string(infrav1beta2.VPCStatePending) { + if vpcDetails.Status != nil && *vpcDetails.Status == string(infrav1.VPCStatePending) { log.V(3).Info("VPC creation is in pending state") return true, nil } @@ -1112,7 +1112,7 @@ func (s *PowerVSClusterScope) ReconcileVPC(ctx context.Context) (bool, error) { } if id != "" { log.V(3).Info("VPC found in cloud", "vpcID", id) - s.SetStatus(ctx, infrav1beta2.ResourceTypeVPC, infrav1beta2.ResourceReference{ID: &id, ControllerCreated: ptr.To(false)}) + s.SetStatus(ctx, infrav1.ResourceTypeVPC, infrav1.ResourceReference{ID: &id, ControllerCreated: ptr.To(false)}) return false, nil } @@ -1125,7 +1125,7 @@ func (s *PowerVSClusterScope) ReconcileVPC(ctx context.Context) (bool, error) { return false, fmt.Errorf("failed to create VPC: %w", err) } log.Info("Created VPC", "vpcID", *vpcID) - s.SetStatus(ctx, infrav1beta2.ResourceTypeVPC, infrav1beta2.ResourceReference{ID: vpcID, ControllerCreated: ptr.To(true)}) + s.SetStatus(ctx, infrav1.ResourceTypeVPC, infrav1.ResourceReference{ID: vpcID, ControllerCreated: ptr.To(true)}) return true, nil } @@ -1156,7 +1156,7 @@ func (s *PowerVSClusterScope) checkVPC(ctx context.Context) (string, error) { } func (s *PowerVSClusterScope) getVPCByName() (*vpcv1.VPC, error) { - vpcDetails, err := s.IBMVPCClient.GetVPCByName(*s.GetServiceName(infrav1beta2.ResourceTypeVPC)) + vpcDetails, err := s.IBMVPCClient.GetVPCByName(*s.GetServiceName(infrav1.ResourceTypeVPC)) if err != nil { return nil, fmt.Errorf("error fetching VPC details with name: %w", err) } @@ -1172,7 +1172,7 @@ func (s *PowerVSClusterScope) createVPC() (*string, error) { addressPrefixManagement := "auto" vpcOption := &vpcv1.CreateVPCOptions{ ResourceGroup: &vpcv1.ResourceGroupIdentity{ID: &resourceGroupID}, - 
Name: s.GetServiceName(infrav1beta2.ResourceTypeVPC), + Name: s.GetServiceName(infrav1.ResourceTypeVPC), AddressPrefixManagement: &addressPrefixManagement, } vpcDetails, _, err := s.IBMVPCClient.CreateVPC(vpcOption) @@ -1199,7 +1199,7 @@ func (s *PowerVSClusterScope) createVPC() (*string, error) { // ReconcileVPCSubnets reconciles VPC subnet. func (s *PowerVSClusterScope) ReconcileVPCSubnets(ctx context.Context) (bool, error) { log := ctrl.LoggerFrom(ctx) - subnets := make([]infrav1beta2.Subnet, 0) + subnets := make([]infrav1.Subnet, 0) vpcZones, err := regionUtil.VPCZonesForVPCRegion(*s.VPC().Region) if err != nil { return false, fmt.Errorf("error fetching VPC zones associated with VPC region: %w", err) @@ -1212,8 +1212,8 @@ func (s *PowerVSClusterScope) ReconcileVPCSubnets(ctx context.Context) (bool, er // if the user did not set any subnet, we try to create subnet in all the zones. log.V(3).Info("VPC subnets details are not set in spec, creating subnets in all zones in the region", "region", *s.VPC().Region) for _, zone := range vpcZones { - subnet := infrav1beta2.Subnet{ - Name: ptr.To(fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1beta2.ResourceTypeSubnet), zone)), + subnet := infrav1.Subnet{ + Name: ptr.To(fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1.ResourceTypeSubnet), zone)), Zone: ptr.To(zone), } subnets = append(subnets, subnet) @@ -1229,7 +1229,7 @@ func (s *PowerVSClusterScope) ReconcileVPCSubnets(ctx context.Context) (bool, er subnetID = subnet.ID } else { if subnet.Name == nil { - subnet.Name = ptr.To(fmt.Sprintf("%s-%d", *s.GetServiceName(infrav1beta2.ResourceTypeSubnet), index)) + subnet.Name = ptr.To(fmt.Sprintf("%s-%d", *s.GetServiceName(infrav1.ResourceTypeSubnet), index)) } subnetID = s.GetVPCSubnetID(*subnet.Name) } @@ -1246,7 +1246,7 @@ func (s *PowerVSClusterScope) ReconcileVPCSubnets(ctx context.Context) (bool, er return false, fmt.Errorf("failed to get VPC subnet with ID %s", *subnetID) } // check for next subnet - 
s.SetVPCSubnetStatus(ctx, *subnetDetails.Name, infrav1beta2.ResourceReference{ID: subnetDetails.ID}) + s.SetVPCSubnetStatus(ctx, *subnetDetails.Name, infrav1.ResourceReference{ID: subnetDetails.ID}) continue } @@ -1257,7 +1257,7 @@ func (s *PowerVSClusterScope) ReconcileVPCSubnets(ctx context.Context) (bool, er } if vpcSubnetID != "" { log.V(3).Info("Found VPC subnet in cloud", "subnetID", vpcSubnetID) - s.SetVPCSubnetStatus(ctx, *subnet.Name, infrav1beta2.ResourceReference{ID: &vpcSubnetID, ControllerCreated: ptr.To(false)}) + s.SetVPCSubnetStatus(ctx, *subnet.Name, infrav1.ResourceReference{ID: &vpcSubnetID, ControllerCreated: ptr.To(false)}) // check for next subnet continue } @@ -1271,7 +1271,7 @@ func (s *PowerVSClusterScope) ReconcileVPCSubnets(ctx context.Context) (bool, er return false, fmt.Errorf("error creating VPC subnet: %w", err) } log.Info("Created VPC subnet", "subnetID", subnetID) - s.SetVPCSubnetStatus(ctx, *subnet.Name, infrav1beta2.ResourceReference{ID: subnetID, ControllerCreated: ptr.To(true)}) + s.SetVPCSubnetStatus(ctx, *subnet.Name, infrav1.ResourceReference{ID: subnetID, ControllerCreated: ptr.To(true)}) // Requeue only when the creation of all subnets has been triggered. if index == len(subnets)-1 { return true, nil @@ -1295,7 +1295,7 @@ func (s *PowerVSClusterScope) checkVPCSubnet(ctx context.Context, subnetName str } // createVPCSubnet creates a VPC subnet. 
-func (s *PowerVSClusterScope) createVPCSubnet(subnet infrav1beta2.Subnet) (*string, error) { +func (s *PowerVSClusterScope) createVPCSubnet(subnet infrav1.Subnet) (*string, error) { // TODO(karthik-k-n): consider moving to clusterscope // fetch resource group id resourceGroupID := s.GetResourceGroupID() @@ -1373,7 +1373,7 @@ func (s *PowerVSClusterScope) ReconcileVPCSecurityGroups(ctx context.Context) er } if sg != nil { log.V(3).Info("VPC security group already exists", "name", *sg.Name) - s.SetVPCSecurityGroupStatus(ctx, *sg.Name, infrav1beta2.VPCSecurityGroupStatus{ + s.SetVPCSecurityGroupStatus(ctx, *sg.Name, infrav1.VPCSecurityGroupStatus{ ID: sg.ID, RuleIDs: ruleIDs, ControllerCreated: ptr.To(false), @@ -1386,7 +1386,7 @@ func (s *PowerVSClusterScope) ReconcileVPCSecurityGroups(ctx context.Context) er return fmt.Errorf("failed to create VPC security group: %w", err) } log.Info("VPC security group created", "securityGroupName", *securityGroup.Name) - s.SetVPCSecurityGroupStatus(ctx, *securityGroup.Name, infrav1beta2.VPCSecurityGroupStatus{ + s.SetVPCSecurityGroupStatus(ctx, *securityGroup.Name, infrav1.VPCSecurityGroupStatus{ ID: securityGroupID, ControllerCreated: ptr.To(true), }) @@ -1400,11 +1400,11 @@ func (s *PowerVSClusterScope) ReconcileVPCSecurityGroups(ctx context.Context) er } // createVPCSecurityGroupRule creates a specific rule for a existing security group. 
-func (s *PowerVSClusterScope) createVPCSecurityGroupRule(ctx context.Context, securityGroupID, direction, protocol *string, portMin, portMax *int64, remote infrav1beta2.VPCSecurityGroupRuleRemote) (*string, error) { +func (s *PowerVSClusterScope) createVPCSecurityGroupRule(ctx context.Context, securityGroupID, direction, protocol *string, portMin, portMax *int64, remote infrav1.VPCSecurityGroupRuleRemote) (*string, error) { log := ctrl.LoggerFrom(ctx) - setRemote := func(remote infrav1beta2.VPCSecurityGroupRuleRemote, remoteOption *vpcv1.SecurityGroupRuleRemotePrototype) error { + setRemote := func(remote infrav1.VPCSecurityGroupRuleRemote, remoteOption *vpcv1.SecurityGroupRuleRemotePrototype) error { switch remote.RemoteType { - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR: + case infrav1.VPCSecurityGroupRuleRemoteTypeCIDR: cidrSubnet, err := s.IBMVPCClient.GetVPCSubnetByName(*remote.CIDRSubnetName) if err != nil { return fmt.Errorf("failed to find VPC subnet by name '%s' for fetching CIDR block: %w", *remote.CIDRSubnetName, err) @@ -1414,10 +1414,10 @@ func (s *PowerVSClusterScope) createVPCSecurityGroupRule(ctx context.Context, se } log.V(3).Info("Creating VPC security group rule", "securityGroupID", *securityGroupID, "direction", *direction, "protocol", *protocol, "cidrBlockSubnet", *remote.CIDRSubnetName, "cidr", *cidrSubnet.Ipv4CIDRBlock) remoteOption.CIDRBlock = cidrSubnet.Ipv4CIDRBlock - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress: + case infrav1.VPCSecurityGroupRuleRemoteTypeAddress: log.V(3).Info("Creating VPC security group rule", "securityGroupID", *securityGroupID, "direction", *direction, "protocol", *protocol, "ip", *remote.Address) remoteOption.Address = remote.Address - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG: + case infrav1.VPCSecurityGroupRuleRemoteTypeSG: sg, err := s.IBMVPCClient.GetSecurityGroupByName(*remote.SecurityGroupName) if err != nil { return fmt.Errorf("failed to find VPC security group by name '%s', 
err: %w", *remote.SecurityGroupName, err) @@ -1474,7 +1474,7 @@ func (s *PowerVSClusterScope) createVPCSecurityGroupRule(ctx context.Context, se } // createVPCSecurityGroupRules creates rules for a security group. -func (s *PowerVSClusterScope) createVPCSecurityGroupRules(ctx context.Context, ogSecurityGroupRules []*infrav1beta2.VPCSecurityGroupRule, securityGroupID *string) ([]*string, error) { +func (s *PowerVSClusterScope) createVPCSecurityGroupRules(ctx context.Context, ogSecurityGroupRules []*infrav1.VPCSecurityGroupRule, securityGroupID *string) ([]*string, error) { log := ctrl.LoggerFrom(ctx) var ruleIDs []*string log.V(3).Info("Creating VPC security group rules") @@ -1485,7 +1485,7 @@ func (s *PowerVSClusterScope) createVPCSecurityGroupRules(ctx context.Context, o direction := ptr.To(string(rule.Direction)) switch rule.Direction { - case infrav1beta2.VPCSecurityGroupRuleDirectionInbound: + case infrav1.VPCSecurityGroupRuleDirectionInbound: protocol = ptr.To(string(rule.Source.Protocol)) if rule.Source.PortRange != nil { portMin = ptr.To(rule.Source.PortRange.MinimumPort) @@ -1499,7 +1499,7 @@ func (s *PowerVSClusterScope) createVPCSecurityGroupRules(ctx context.Context, o } ruleIDs = append(ruleIDs, id) } - case infrav1beta2.VPCSecurityGroupRuleDirectionOutbound: + case infrav1.VPCSecurityGroupRuleDirectionOutbound: protocol = ptr.To(string(rule.Destination.Protocol)) if rule.Destination.PortRange != nil { portMin = ptr.To(rule.Destination.PortRange.MinimumPort) @@ -1520,7 +1520,7 @@ func (s *PowerVSClusterScope) createVPCSecurityGroupRules(ctx context.Context, o } // createVPCSecurityGroupRulesAndSetStatus creates VPC security group rules and sets its status. 
-func (s *PowerVSClusterScope) createVPCSecurityGroupRulesAndSetStatus(ctx context.Context, ogSecurityGroupRules []*infrav1beta2.VPCSecurityGroupRule, securityGroupID, securityGroupName *string) error { +func (s *PowerVSClusterScope) createVPCSecurityGroupRulesAndSetStatus(ctx context.Context, ogSecurityGroupRules []*infrav1.VPCSecurityGroupRule, securityGroupID, securityGroupName *string) error { log := ctrl.LoggerFrom(ctx) ruleIDs, err := s.createVPCSecurityGroupRules(ctx, ogSecurityGroupRules, securityGroupID) if err != nil { @@ -1528,7 +1528,7 @@ func (s *PowerVSClusterScope) createVPCSecurityGroupRulesAndSetStatus(ctx contex } log.Info("VPC security group rules created", "securityGroupName", *securityGroupName) - s.SetVPCSecurityGroupStatus(ctx, *securityGroupName, infrav1beta2.VPCSecurityGroupStatus{ + s.SetVPCSecurityGroupStatus(ctx, *securityGroupName, infrav1.VPCSecurityGroupStatus{ ID: securityGroupID, RuleIDs: ruleIDs, ControllerCreated: ptr.To(true), @@ -1538,7 +1538,7 @@ func (s *PowerVSClusterScope) createVPCSecurityGroupRulesAndSetStatus(ctx contex } // createVPCSecurityGroup creates a VPC security group. -func (s *PowerVSClusterScope) createVPCSecurityGroup(ctx context.Context, spec infrav1beta2.VPCSecurityGroup) (*string, error) { +func (s *PowerVSClusterScope) createVPCSecurityGroup(ctx context.Context, spec infrav1.VPCSecurityGroup) (*string, error) { log := ctrl.LoggerFrom(ctx) log.V(3).Info("Creating VPC security group", "name", *spec.Name) @@ -1561,19 +1561,19 @@ func (s *PowerVSClusterScope) createVPCSecurityGroup(ctx context.Context, spec i } // validateVPCSecurityGroupRuleRemote compares a specific security group rule's remote with the spec and existing security group rule's remote. 
-func (s *PowerVSClusterScope) validateVPCSecurityGroupRuleRemote(originalSGRemote *vpcv1.SecurityGroupRuleRemote, expectedSGRemote infrav1beta2.VPCSecurityGroupRuleRemote) (bool, error) { +func (s *PowerVSClusterScope) validateVPCSecurityGroupRuleRemote(originalSGRemote *vpcv1.SecurityGroupRuleRemote, expectedSGRemote infrav1.VPCSecurityGroupRuleRemote) (bool, error) { var match bool switch expectedSGRemote.RemoteType { - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny: + case infrav1.VPCSecurityGroupRuleRemoteTypeAny: if originalSGRemote.CIDRBlock != nil && *originalSGRemote.CIDRBlock == "0.0.0.0/0" { match = true } - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress: + case infrav1.VPCSecurityGroupRuleRemoteTypeAddress: if originalSGRemote.Address != nil && *originalSGRemote.Address == *expectedSGRemote.Address { match = true } - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR: + case infrav1.VPCSecurityGroupRuleRemoteTypeCIDR: cidrSubnet, err := s.IBMVPCClient.GetVPCSubnetByName(*expectedSGRemote.CIDRSubnetName) if err != nil { return false, fmt.Errorf("failed to find VPC subnet by name '%s' for fetching CIDR block: %w", *expectedSGRemote.CIDRSubnetName, err) @@ -1582,7 +1582,7 @@ func (s *PowerVSClusterScope) validateVPCSecurityGroupRuleRemote(originalSGRemot if originalSGRemote.CIDRBlock != nil && cidrSubnet != nil && *originalSGRemote.CIDRBlock == *cidrSubnet.Ipv4CIDRBlock { match = true } - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG: + case infrav1.VPCSecurityGroupRuleRemoteTypeSG: securityGroup, err := s.IBMVPCClient.GetSecurityGroupByName(*expectedSGRemote.SecurityGroupName) if err != nil { return false, fmt.Errorf("failed to find ID for resource group '%s': %w", *expectedSGRemote.SecurityGroupName, err) @@ -1597,7 +1597,7 @@ func (s *PowerVSClusterScope) validateVPCSecurityGroupRuleRemote(originalSGRemot } // validateSecurityGroupRule compares a specific security group's rule with the spec and existing security group's rule. 
-func (s *PowerVSClusterScope) validateSecurityGroupRule(originalSecurityGroupRules []vpcv1.SecurityGroupRuleIntf, direction infrav1beta2.VPCSecurityGroupRuleDirection, rule *infrav1beta2.VPCSecurityGroupRulePrototype, remote infrav1beta2.VPCSecurityGroupRuleRemote) (ruleID *string, match bool, err error) { +func (s *PowerVSClusterScope) validateSecurityGroupRule(originalSecurityGroupRules []vpcv1.SecurityGroupRuleIntf, direction infrav1.VPCSecurityGroupRuleDirection, rule *infrav1.VPCSecurityGroupRulePrototype, remote infrav1.VPCSecurityGroupRuleRemote) (ruleID *string, match bool, err error) { updateError := func(e error) { err = fmt.Errorf("failed to validate VPC security group rule's remote: %w", e) } @@ -1656,13 +1656,13 @@ func (s *PowerVSClusterScope) validateSecurityGroupRule(originalSecurityGroupRul } // validateVPCSecurityGroupRules compares a specific security group rules spec with the existing security group's rules. -func (s *PowerVSClusterScope) validateVPCSecurityGroupRules(originalSecurityGroupRules []vpcv1.SecurityGroupRuleIntf, expectedSecurityGroupRules []*infrav1beta2.VPCSecurityGroupRule) ([]*string, bool, error) { +func (s *PowerVSClusterScope) validateVPCSecurityGroupRules(originalSecurityGroupRules []vpcv1.SecurityGroupRuleIntf, expectedSecurityGroupRules []*infrav1.VPCSecurityGroupRule) ([]*string, bool, error) { ruleIDs := []*string{} for _, expectedRule := range expectedSecurityGroupRules { direction := expectedRule.Direction switch direction { - case infrav1beta2.VPCSecurityGroupRuleDirectionInbound: + case infrav1.VPCSecurityGroupRuleDirectionInbound: for _, remote := range expectedRule.Source.Remotes { id, match, err := s.validateSecurityGroupRule(originalSecurityGroupRules, direction, expectedRule.Source, remote) if err != nil { @@ -1673,7 +1673,7 @@ func (s *PowerVSClusterScope) validateVPCSecurityGroupRules(originalSecurityGrou } ruleIDs = append(ruleIDs, id) } - case infrav1beta2.VPCSecurityGroupRuleDirectionOutbound: + case 
infrav1.VPCSecurityGroupRuleDirectionOutbound: for _, remote := range expectedRule.Destination.Remotes { id, match, err := s.validateSecurityGroupRule(originalSecurityGroupRules, direction, expectedRule.Destination, remote) if err != nil { @@ -1691,7 +1691,7 @@ func (s *PowerVSClusterScope) validateVPCSecurityGroupRules(originalSecurityGrou } // validateVPCSecurityGroup validates the security group and it's rules provided by user via spec. -func (s *PowerVSClusterScope) validateVPCSecurityGroup(ctx context.Context, securityGroup infrav1beta2.VPCSecurityGroup) (*vpcv1.SecurityGroup, []*string, error) { +func (s *PowerVSClusterScope) validateVPCSecurityGroup(ctx context.Context, securityGroup infrav1.VPCSecurityGroup) (*vpcv1.SecurityGroup, []*string, error) { var securityGroupDet *vpcv1.SecurityGroup var err error @@ -1788,7 +1788,7 @@ func (s *PowerVSClusterScope) isTransitGatewayExists(ctx context.Context) (*tgap ID: s.IBMPowerVSCluster.Spec.TransitGateway.ID, }) } else { - transitGateway, err = s.TransitGatewayClient.GetTransitGatewayByName(*s.GetServiceName(infrav1beta2.ResourceTypeTransitGateway)) + transitGateway, err = s.TransitGatewayClient.GetTransitGatewayByName(*s.GetServiceName(infrav1.ResourceTypeTransitGateway)) } if err != nil { @@ -1826,11 +1826,11 @@ func (s *PowerVSClusterScope) checkTransitGatewayStatus(ctx context.Context, tg log := ctrl.LoggerFrom(ctx) log.V(3).Info("Checking the status of transit gateway", "name", *tg.Name) switch *tg.Status { - case string(infrav1beta2.TransitGatewayStateAvailable): + case string(infrav1.TransitGatewayStateAvailable): log.V(3).Info("Transit gateway is in available state") - case string(infrav1beta2.TransitGatewayStateFailed): + case string(infrav1.TransitGatewayStateFailed): return false, fmt.Errorf("failed to create transit gateway, current status: %s", *tg.Status) - case string(infrav1beta2.TransitGatewayStatePending): + case string(infrav1.TransitGatewayStatePending): log.V(3).Info("Transit gateway is in 
pending state") return true, nil } @@ -1911,7 +1911,7 @@ func (s *PowerVSClusterScope) validateTransitGatewayConnections(ctx context.Cont } if s.IBMPowerVSCluster.Status.TransitGateway != nil && s.IBMPowerVSCluster.Status.TransitGateway.VPCConnection == nil { - s.SetTransitGatewayConnectionStatus(vpcNetworkConnectionType, &infrav1beta2.ResourceReference{ID: conn.ID, ControllerCreated: ptr.To(false)}) + s.SetTransitGatewayConnectionStatus(vpcNetworkConnectionType, &infrav1.ResourceReference{ID: conn.ID, ControllerCreated: ptr.To(false)}) } vpcConnStatus = true } @@ -1923,7 +1923,7 @@ func (s *PowerVSClusterScope) validateTransitGatewayConnections(ctx context.Cont } if s.IBMPowerVSCluster.Status.TransitGateway != nil && s.IBMPowerVSCluster.Status.TransitGateway.PowerVSConnection == nil { - s.SetTransitGatewayConnectionStatus(powervsNetworkConnectionType, &infrav1beta2.ResourceReference{ID: conn.ID, ControllerCreated: ptr.To(false)}) + s.SetTransitGatewayConnectionStatus(powervsNetworkConnectionType, &infrav1.ResourceReference{ID: conn.ID, ControllerCreated: ptr.To(false)}) } powerVSConnStatus = true } @@ -1939,11 +1939,11 @@ func (s *PowerVSClusterScope) checkTransitGatewayConnectionStatus(ctx context.Co log := ctrl.LoggerFrom(ctx) log.V(3).Info("Checking the status of transit gateway connection", "name", *con.Name) switch *con.Status { - case string(infrav1beta2.TransitGatewayConnectionStateAttached): + case string(infrav1.TransitGatewayConnectionStateAttached): return false, nil - case string(infrav1beta2.TransitGatewayConnectionStateFailed): + case string(infrav1.TransitGatewayConnectionStateFailed): return false, fmt.Errorf("failed to attach connection to transit gateway, current status: %s", *con.Status) - case string(infrav1beta2.TransitGatewayConnectionStatePending): + case string(infrav1.TransitGatewayConnectionStatePending): log.V(3).Info("Transit gateway connection is in pending state") return true, nil } @@ -1963,7 +1963,7 @@ func (s *PowerVSClusterScope) 
createTransitGatewayConnection(ctx context.Context if err != nil { return err } - s.SetTransitGatewayConnectionStatus(networkType, &infrav1beta2.ResourceReference{ID: conn.ID, ControllerCreated: ptr.To(true)}) + s.SetTransitGatewayConnectionStatus(networkType, &infrav1.ResourceReference{ID: conn.ID, ControllerCreated: ptr.To(true)}) return nil } @@ -1993,7 +1993,7 @@ func (s *PowerVSClusterScope) createTransitGateway(ctx context.Context) error { return fmt.Errorf("failed to proeceed with transit gateway creation as either one of VPC or PowerVS service instance reconciliation is not successful") } - location, globalRouting, err := genUtil.GetTransitGatewayLocationAndRouting(s.Zone(), s.VPC().Region) + location, globalRouting, err := genutil.GetTransitGatewayLocationAndRouting(s.Zone(), s.VPC().Region) if err != nil { return fmt.Errorf("failed to get transit gateway location and routing: %w", err) } @@ -2008,7 +2008,7 @@ func (s *PowerVSClusterScope) createTransitGateway(ctx context.Context) error { globalRouting = ptr.To(true) } - tgName := s.GetServiceName(infrav1beta2.ResourceTypeTransitGateway) + tgName := s.GetServiceName(infrav1.ResourceTypeTransitGateway) tg, _, err := s.TransitGatewayClient.CreateTransitGateway(&tgapiv1.CreateTransitGatewayOptions{ Location: location, Name: tgName, @@ -2040,11 +2040,11 @@ func (s *PowerVSClusterScope) createTransitGateway(ctx context.Context) error { // ReconcileLoadBalancers reconcile loadBalancer. 
func (s *PowerVSClusterScope) ReconcileLoadBalancers(ctx context.Context) (bool, error) { log := ctrl.LoggerFrom(ctx) - loadBalancers := make([]infrav1beta2.VPCLoadBalancerSpec, 0) + loadBalancers := make([]infrav1.VPCLoadBalancerSpec, 0) if len(s.IBMPowerVSCluster.Spec.LoadBalancers) == 0 { log.V(3).Info("VPC load balancer is not set, constructing one") - loadBalancer := infrav1beta2.VPCLoadBalancerSpec{ - Name: *s.GetServiceName(infrav1beta2.ResourceTypeLoadBalancer), + loadBalancer := infrav1.VPCLoadBalancerSpec{ + Name: *s.GetServiceName(infrav1.ResourceTypeLoadBalancer), Public: ptr.To(true), } loadBalancers = append(loadBalancers, loadBalancer) @@ -2060,7 +2060,7 @@ func (s *PowerVSClusterScope) ReconcileLoadBalancers(ctx context.Context) (bool, loadBalancerID = loadBalancer.ID } else { if loadBalancer.Name == "" { - loadBalancer.Name = fmt.Sprintf("%s-%d", *s.GetServiceName(infrav1beta2.ResourceTypeLoadBalancer), index) + loadBalancer.Name = fmt.Sprintf("%s-%d", *s.GetServiceName(infrav1.ResourceTypeLoadBalancer), index) } loadBalancerID = s.GetLoadBalancerID(loadBalancer.Name) } @@ -2078,9 +2078,9 @@ func (s *PowerVSClusterScope) ReconcileLoadBalancers(ctx context.Context) (bool, isAnyLoadBalancerNotReady = true } - loadBalancerStatus := infrav1beta2.VPCLoadBalancerStatus{ + loadBalancerStatus := infrav1.VPCLoadBalancerStatus{ ID: loadBalancer.ID, - State: infrav1beta2.VPCLoadBalancerState(*loadBalancer.ProvisioningStatus), + State: infrav1.VPCLoadBalancerState(*loadBalancer.ProvisioningStatus), Hostname: loadBalancer.Hostname, } s.SetLoadBalancerStatus(ctx, *loadBalancer.Name, loadBalancerStatus) @@ -2125,18 +2125,18 @@ func (s *PowerVSClusterScope) checkLoadBalancerStatus(ctx context.Context, lb vp log := ctrl.LoggerFrom(ctx) log.V(3).Info("Checking the status of VPC load balancer", "loadBalancerName", *lb.Name) switch *lb.ProvisioningStatus { - case string(infrav1beta2.VPCLoadBalancerStateActive): + case string(infrav1.VPCLoadBalancerStateActive): 
log.V(3).Info("Load balancer is in active state") return true - case string(infrav1beta2.VPCLoadBalancerStateCreatePending): + case string(infrav1.VPCLoadBalancerStateCreatePending): log.V(3).Info("Load balancer creation is in pending state") - case string(infrav1beta2.VPCLoadBalancerStateUpdatePending): + case string(infrav1.VPCLoadBalancerStateUpdatePending): log.V(3).Info("Load balancer is in updating state") } return false } -func (s *PowerVSClusterScope) checkLoadBalancerPort(lb infrav1beta2.VPCLoadBalancerSpec) error { +func (s *PowerVSClusterScope) checkLoadBalancerPort(lb infrav1.VPCLoadBalancerSpec) error { for _, listener := range lb.AdditionalListeners { if listener.Port == int64(s.APIServerPort()) { return fmt.Errorf("port %d for the %s load balancer cannot be used as an additional listener port, as it is already assigned to the API server", listener.Port, lb.Name) @@ -2146,7 +2146,7 @@ func (s *PowerVSClusterScope) checkLoadBalancerPort(lb infrav1beta2.VPCLoadBalan } // checkLoadBalancer checks if VPC load balancer by the given name exists in cloud. 
-func (s *PowerVSClusterScope) checkLoadBalancer(ctx context.Context, lb infrav1beta2.VPCLoadBalancerSpec) (*infrav1beta2.VPCLoadBalancerStatus, error) { +func (s *PowerVSClusterScope) checkLoadBalancer(ctx context.Context, lb infrav1.VPCLoadBalancerSpec) (*infrav1.VPCLoadBalancerStatus, error) { log := ctrl.LoggerFrom(ctx) loadBalancer, err := s.IBMVPCClient.GetLoadBalancerByName(lb.Name) if err != nil { @@ -2156,15 +2156,15 @@ func (s *PowerVSClusterScope) checkLoadBalancer(ctx context.Context, lb infrav1b log.V(3).Info("VPC load balancer not found in cloud") return nil, nil } - return &infrav1beta2.VPCLoadBalancerStatus{ + return &infrav1.VPCLoadBalancerStatus{ ID: loadBalancer.ID, - State: infrav1beta2.VPCLoadBalancerState(*loadBalancer.ProvisioningStatus), + State: infrav1.VPCLoadBalancerState(*loadBalancer.ProvisioningStatus), Hostname: loadBalancer.Hostname, }, nil } // createLoadBalancer creates loadBalancer. -func (s *PowerVSClusterScope) createLoadBalancer(ctx context.Context, lb infrav1beta2.VPCLoadBalancerSpec) (*infrav1beta2.VPCLoadBalancerStatus, error) { +func (s *PowerVSClusterScope) createLoadBalancer(ctx context.Context, lb infrav1.VPCLoadBalancerSpec) (*infrav1.VPCLoadBalancerStatus, error) { log := ctrl.LoggerFrom(ctx) options := &vpcv1.CreateLoadBalancerOptions{} // TODO(karthik-k-n): consider moving resource group id to clusterscope @@ -2239,8 +2239,8 @@ func (s *PowerVSClusterScope) createLoadBalancer(ctx context.Context, lb infrav1 if err != nil { return nil, fmt.Errorf("failed to create load balancer: %w", err) } - lbState := infrav1beta2.VPCLoadBalancerState(*loadBalancer.ProvisioningStatus) - return &infrav1beta2.VPCLoadBalancerStatus{ + lbState := infrav1.VPCLoadBalancerState(*loadBalancer.ProvisioningStatus) + return &infrav1.VPCLoadBalancerStatus{ ID: loadBalancer.ID, State: lbState, Hostname: loadBalancer.Hostname, @@ -2249,7 +2249,7 @@ func (s *PowerVSClusterScope) createLoadBalancer(ctx context.Context, lb infrav1 } // COSInstance 
returns the COS instance reference. -func (s *PowerVSClusterScope) COSInstance() *infrav1beta2.CosInstance { +func (s *PowerVSClusterScope) COSInstance() *infrav1.CosInstance { return s.IBMPowerVSCluster.Spec.CosInstance } @@ -2263,7 +2263,7 @@ func (s *PowerVSClusterScope) ReconcileCOSInstance(ctx context.Context) error { } if cosServiceInstanceStatus != nil { log.V(3).Info("COS service instance found in cloud") - s.SetStatus(ctx, infrav1beta2.ResourceTypeCOSInstance, infrav1beta2.ResourceReference{ID: cosServiceInstanceStatus.GUID, ControllerCreated: ptr.To(false)}) + s.SetStatus(ctx, infrav1.ResourceTypeCOSInstance, infrav1.ResourceReference{ID: cosServiceInstanceStatus.GUID, ControllerCreated: ptr.To(false)}) } else { // create COS service instance log.V(3).Info("Creating COS service instance") @@ -2272,7 +2272,7 @@ func (s *PowerVSClusterScope) ReconcileCOSInstance(ctx context.Context) error { return fmt.Errorf("failed to create COS service instance: %w", err) } log.Info("Created COS service instance", "cosID", cosServiceInstanceStatus.GUID) - s.SetStatus(ctx, infrav1beta2.ResourceTypeCOSInstance, infrav1beta2.ResourceReference{ID: cosServiceInstanceStatus.GUID, ControllerCreated: ptr.To(true)}) + s.SetStatus(ctx, infrav1.ResourceTypeCOSInstance, infrav1.ResourceReference{ID: cosServiceInstanceStatus.GUID, ControllerCreated: ptr.To(true)}) } props, err := authenticator.GetProperties() @@ -2329,7 +2329,7 @@ func (s *PowerVSClusterScope) ReconcileCOSInstance(ctx context.Context) error { } func (s *PowerVSClusterScope) checkCOSBucket() (bool, error) { - if _, err := s.COSClient.GetBucketByName(*s.GetServiceName(infrav1beta2.ResourceTypeCOSBucket)); err != nil { + if _, err := s.COSClient.GetBucketByName(*s.GetServiceName(infrav1.ResourceTypeCOSBucket)); err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case s3.ErrCodeNoSuchBucket, "Forbidden", "NotFound": @@ -2347,7 +2347,7 @@ func (s *PowerVSClusterScope) checkCOSBucket() (bool, error) { 
func (s *PowerVSClusterScope) createCOSBucket() error { input := &s3.CreateBucketInput{ - Bucket: ptr.To(*s.GetServiceName(infrav1beta2.ResourceTypeCOSBucket)), + Bucket: ptr.To(*s.GetServiceName(infrav1.ResourceTypeCOSBucket)), } _, err := s.COSClient.CreateBucket(input) if err == nil { @@ -2373,15 +2373,15 @@ func (s *PowerVSClusterScope) createCOSBucket() error { func (s *PowerVSClusterScope) checkCOSServiceInstance(ctx context.Context) (*resourcecontrollerv2.ResourceInstance, error) { log := ctrl.LoggerFrom(ctx) // check cos service instance - serviceInstance, err := s.ResourceClient.GetInstanceByName(*s.GetServiceName(infrav1beta2.ResourceTypeCOSInstance), resourcecontroller.CosResourceID, resourcecontroller.CosResourcePlanID) + serviceInstance, err := s.ResourceClient.GetInstanceByName(*s.GetServiceName(infrav1.ResourceTypeCOSInstance), resourcecontroller.CosResourceID, resourcecontroller.CosResourcePlanID) if err != nil { return nil, fmt.Errorf("failed to get COS service instance: %w", err) } if serviceInstance == nil { - log.V(3).Info("COS service instance is not found", "cosInstanceName", *s.GetServiceName(infrav1beta2.ResourceTypeCOSInstance)) + log.V(3).Info("COS service instance is not found", "cosInstanceName", *s.GetServiceName(infrav1.ResourceTypeCOSInstance)) return nil, nil } - if *serviceInstance.State != string(infrav1beta2.ServiceInstanceStateActive) { + if *serviceInstance.State != string(infrav1.ServiceInstanceStateActive) { return nil, fmt.Errorf("COS service instance is not in active state, current state: %s", *serviceInstance.State) } return serviceInstance, nil @@ -2397,7 +2397,7 @@ func (s *PowerVSClusterScope) createCOSServiceInstance() (*resourcecontrollerv2. 
target := "Global" // create service instance serviceInstance, _, err := s.ResourceClient.CreateResourceInstance(&resourcecontrollerv2.CreateResourceInstanceOptions{ - Name: s.GetServiceName(infrav1beta2.ResourceTypeCOSInstance), + Name: s.GetServiceName(infrav1.ResourceTypeCOSInstance), Target: &target, ResourceGroup: &resourceGroupID, ResourcePlanID: ptr.To(resourcecontroller.CosResourcePlanID), @@ -2419,7 +2419,7 @@ func (s *PowerVSClusterScope) fetchResourceGroupID() (string, error) { return "", err } - account, err := utils.GetAccount(auth) + account, err := accounts.GetAccount(auth) if err != nil { return "", err } @@ -2480,14 +2480,14 @@ func (s *PowerVSClusterScope) fetchPowerVSServiceInstanceCRN() (*string, error) // TODO(karthik-k-n): Decide on proper naming format for services. // GetServiceName returns name of given service type from spec or generate a name for it. -func (s *PowerVSClusterScope) GetServiceName(resourceType infrav1beta2.ResourceType) *string { //nolint:gocyclo +func (s *PowerVSClusterScope) GetServiceName(resourceType infrav1.ResourceType) *string { //nolint:gocyclo switch resourceType { - case infrav1beta2.ResourceTypeServiceInstance: + case infrav1.ResourceTypeServiceInstance: if s.ServiceInstance() == nil || s.ServiceInstance().Name == nil { return ptr.To(fmt.Sprintf("%s-serviceInstance", s.InfraCluster())) } return s.ServiceInstance().Name - case infrav1beta2.ResourceTypeDHCPServer: + case infrav1.ResourceTypeDHCPServer: if s.DHCPServer() != nil && s.DHCPServer().Name != nil { return s.DHCPServer().Name } @@ -2495,29 +2495,29 @@ func (s *PowerVSClusterScope) GetServiceName(resourceType infrav1beta2.ResourceT return s.Network().Name } return ptr.To(s.InfraCluster()) - case infrav1beta2.ResourceTypeVPC: + case infrav1.ResourceTypeVPC: if s.VPC() == nil || s.VPC().Name == nil { return ptr.To(fmt.Sprintf("%s-vpc", s.InfraCluster())) } return s.VPC().Name - case infrav1beta2.ResourceTypeTransitGateway: + case 
infrav1.ResourceTypeTransitGateway: if s.TransitGateway() == nil || s.TransitGateway().Name == nil { return ptr.To(fmt.Sprintf("%s-transitgateway", s.InfraCluster())) } return s.TransitGateway().Name - case infrav1beta2.ResourceTypeCOSInstance: + case infrav1.ResourceTypeCOSInstance: if s.COSInstance() == nil || s.COSInstance().Name == "" { return ptr.To(fmt.Sprintf("%s-cosinstance", s.InfraCluster())) } return &s.COSInstance().Name - case infrav1beta2.ResourceTypeCOSBucket: + case infrav1.ResourceTypeCOSBucket: if s.COSInstance() == nil || s.COSInstance().BucketName == "" { return ptr.To(fmt.Sprintf("%s-cosbucket", s.InfraCluster())) } return &s.COSInstance().BucketName - case infrav1beta2.ResourceTypeSubnet: + case infrav1.ResourceTypeSubnet: return ptr.To(fmt.Sprintf("%s-vpcsubnet", s.InfraCluster())) - case infrav1beta2.ResourceTypeLoadBalancer: + case infrav1.ResourceTypeLoadBalancer: return ptr.To(fmt.Sprintf("%s-loadbalancer", s.InfraCluster())) } return nil @@ -2547,7 +2547,7 @@ func (s *PowerVSClusterScope) DeleteLoadBalancer(ctx context.Context) (bool, err continue } - if lb != nil && lb.ProvisioningStatus != nil && *lb.ProvisioningStatus == string(infrav1beta2.VPCLoadBalancerStateDeletePending) { + if lb != nil && lb.ProvisioningStatus != nil && *lb.ProvisioningStatus == string(infrav1.VPCLoadBalancerStateDeletePending) { log.V(3).Info("Load balancer is currently being deleted") return true, nil } @@ -2620,7 +2620,7 @@ func (s *PowerVSClusterScope) DeleteVPCSubnet(ctx context.Context) (bool, error) continue } - if net != nil && net.Status != nil && *net.Status == string(infrav1beta2.VPCSubnetStateDeleting) { + if net != nil && net.Status != nil && *net.Status == string(infrav1.VPCSubnetStateDeleting) { return true, nil } @@ -2641,7 +2641,7 @@ func (s *PowerVSClusterScope) DeleteVPCSubnet(ctx context.Context) (bool, error) // DeleteVPC deletes VPC. 
func (s *PowerVSClusterScope) DeleteVPC(ctx context.Context) (bool, error) { log := ctrl.LoggerFrom(ctx) - if !s.isResourceCreatedByController(infrav1beta2.ResourceTypeVPC) { + if !s.isResourceCreatedByController(infrav1.ResourceTypeVPC) { log.Info("Skipping VPC deletion as resource is not created by controller") return false, nil } @@ -2662,7 +2662,7 @@ func (s *PowerVSClusterScope) DeleteVPC(ctx context.Context) (bool, error) { return false, fmt.Errorf("failed to fetch VPC: %w", err) } - if vpcDetails != nil && vpcDetails.Status != nil && *vpcDetails.Status == string(infrav1beta2.VPCStateDeleting) { + if vpcDetails != nil && vpcDetails.Status != nil && *vpcDetails.Status == string(infrav1.VPCStateDeleting) { return true, nil } @@ -2678,7 +2678,7 @@ func (s *PowerVSClusterScope) DeleteVPC(ctx context.Context) (bool, error) { func (s *PowerVSClusterScope) DeleteTransitGateway(ctx context.Context) (bool, error) { log := ctrl.LoggerFrom(ctx) skipTGDeletion := false - if !s.isResourceCreatedByController(infrav1beta2.ResourceTypeTransitGateway) { + if !s.isResourceCreatedByController(infrav1.ResourceTypeTransitGateway) { log.Info("Skipping transit gateway deletion as resource is not created by controller, but will check if connections are created by the controller") skipTGDeletion = true } @@ -2699,7 +2699,7 @@ func (s *PowerVSClusterScope) DeleteTransitGateway(ctx context.Context) (bool, e return false, fmt.Errorf("failed to fetch transit gateway: %w", err) } - if tg.Status != nil && *tg.Status == string(infrav1beta2.TransitGatewayStateDeletePending) { + if tg.Status != nil && *tg.Status == string(infrav1.TransitGatewayStateDeletePending) { log.V(3).Info("Transit gateway is being deleted") return true, nil } @@ -2737,7 +2737,7 @@ func (s *PowerVSClusterScope) deleteTransitGatewayConnections(ctx context.Contex if err != nil { return false, fmt.Errorf("failed to get transit gateway powervs connection: %w", err) } - if conn.Status != nil && *conn.Status == 
string(infrav1beta2.TransitGatewayConnectionStateDeleting) { + if conn.Status != nil && *conn.Status == string(infrav1.TransitGatewayConnectionStateDeleting) { log.V(3).Info("Transit gateway connection is in deleting state") return true, nil } @@ -2779,11 +2779,11 @@ func (s *PowerVSClusterScope) deleteTransitGatewayConnections(ctx context.Contex // DeleteDHCPServer deletes DHCP server. func (s *PowerVSClusterScope) DeleteDHCPServer(ctx context.Context) error { log := ctrl.LoggerFrom(ctx) - if !s.isResourceCreatedByController(infrav1beta2.ResourceTypeDHCPServer) { + if !s.isResourceCreatedByController(infrav1.ResourceTypeDHCPServer) { log.Info("Skipping DHCP server deletion as resource is not created by controller") return nil } - if s.isResourceCreatedByController(infrav1beta2.ResourceTypeServiceInstance) { + if s.isResourceCreatedByController(infrav1.ResourceTypeServiceInstance) { log.Info("Skipping DHCP server deletion as PowerVS service instance is created by controller, will directly delete the PowerVS service instance since it will delete the DHCP server internally") return nil } @@ -2810,7 +2810,7 @@ func (s *PowerVSClusterScope) DeleteDHCPServer(ctx context.Context) error { // DeleteServiceInstance deletes service instance. 
func (s *PowerVSClusterScope) DeleteServiceInstance(ctx context.Context) (bool, error) { log := ctrl.LoggerFrom(ctx) - if !s.isResourceCreatedByController(infrav1beta2.ResourceTypeServiceInstance) { + if !s.isResourceCreatedByController(infrav1.ResourceTypeServiceInstance) { log.Info("Skipping PowerVS service instance deletion as resource is not created by controller") return false, nil } @@ -2826,7 +2826,7 @@ func (s *PowerVSClusterScope) DeleteServiceInstance(ctx context.Context) (bool, return false, fmt.Errorf("failed to fetch PowerVS service instance: %w", err) } - if serviceInstance != nil && *serviceInstance.State == string(infrav1beta2.ServiceInstanceStateRemoved) { + if serviceInstance != nil && *serviceInstance.State == string(infrav1.ServiceInstanceStateRemoved) { log.Info("PowerVS service instance has been removed") return false, nil } @@ -2843,7 +2843,7 @@ func (s *PowerVSClusterScope) DeleteServiceInstance(ctx context.Context) (bool, // DeleteCOSInstance deletes COS instance. 
func (s *PowerVSClusterScope) DeleteCOSInstance(ctx context.Context) error { log := ctrl.LoggerFrom(ctx) - if !s.isResourceCreatedByController(infrav1beta2.ResourceTypeCOSInstance) { + if !s.isResourceCreatedByController(infrav1.ResourceTypeCOSInstance) { log.Info("Skipping COS instance deletion as resource is not created by controller") return nil } @@ -2862,7 +2862,7 @@ func (s *PowerVSClusterScope) DeleteCOSInstance(ctx context.Context) error { return fmt.Errorf("failed to fetch COS service instance: %w", err) } - if cosInstance != nil && (*cosInstance.State == "pending_reclamation" || *cosInstance.State == string(infrav1beta2.ServiceInstanceStateRemoved)) { + if cosInstance != nil && (*cosInstance.State == "pending_reclamation" || *cosInstance.State == string(infrav1.ServiceInstanceStateRemoved)) { log.Info("COS service instance has been removed") return nil } @@ -2879,33 +2879,33 @@ func (s *PowerVSClusterScope) DeleteCOSInstance(ctx context.Context) error { } // resourceCreatedByController helps to identify resource created by controller or not. 
-func (s *PowerVSClusterScope) isResourceCreatedByController(resourceType infrav1beta2.ResourceType) bool { //nolint:gocyclo +func (s *PowerVSClusterScope) isResourceCreatedByController(resourceType infrav1.ResourceType) bool { //nolint:gocyclo switch resourceType { - case infrav1beta2.ResourceTypeVPC: + case infrav1.ResourceTypeVPC: vpcStatus := s.IBMPowerVSCluster.Status.VPC if vpcStatus == nil || vpcStatus.ControllerCreated == nil || !*vpcStatus.ControllerCreated { return false } return true - case infrav1beta2.ResourceTypeServiceInstance: + case infrav1.ResourceTypeServiceInstance: serviceInstance := s.IBMPowerVSCluster.Status.ServiceInstance if serviceInstance == nil || serviceInstance.ControllerCreated == nil || !*serviceInstance.ControllerCreated { return false } return true - case infrav1beta2.ResourceTypeTransitGateway: + case infrav1.ResourceTypeTransitGateway: transitGateway := s.IBMPowerVSCluster.Status.TransitGateway if transitGateway == nil || transitGateway.ControllerCreated == nil || !*transitGateway.ControllerCreated { return false } return true - case infrav1beta2.ResourceTypeDHCPServer: + case infrav1.ResourceTypeDHCPServer: dhcpServer := s.IBMPowerVSCluster.Status.DHCPServer if dhcpServer == nil || dhcpServer.ControllerCreated == nil || !*dhcpServer.ControllerCreated { return false } return true - case infrav1beta2.ResourceTypeCOSInstance: + case infrav1.ResourceTypeCOSInstance: cosInstance := s.IBMPowerVSCluster.Status.COSInstance if cosInstance == nil || cosInstance.ControllerCreated == nil || !*cosInstance.ControllerCreated { return false diff --git a/cloud/scope/powervs_cluster_test.go b/cloud/scope/powervs_cluster_test.go index 44b774252..e509fd1de 100644 --- a/cloud/scope/powervs_cluster_test.go +++ b/cloud/scope/powervs_cluster_test.go @@ -36,10 +36,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - 
infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" - "sigs.k8s.io/cluster-api-provider-ibmcloud/cmd/capibmadm/utils" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-ibmcloud/cmd/capibmadm/pointer" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/cos" mockcos "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/cos/mock" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs" @@ -90,17 +90,17 @@ func TestNewPowerVSClusterScope(t *testing.T) { params: PowerVSClusterScopeParams{ Client: testEnv.Client, Cluster: newCluster(clusterName), - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "powervs-test-", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: "capi-test", UID: "1", }}}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{Zone: ptr.To("zone")}, + Spec: infrav1.IBMPowerVSClusterSpec{Zone: ptr.To("zone")}, }, ClientFactory: ClientFactory{ AuthenticatorFactory: func() (core.Authenticator, error) { @@ -118,20 +118,20 @@ func TestNewPowerVSClusterScope(t *testing.T) { params: PowerVSClusterScopeParams{ Client: testEnv.Client, Cluster: newCluster(clusterName), - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{"powervs.cluster.x-k8s.io/create-infra": "true"}, GenerateName: "powervs-test-", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: "capi-test", UID: "1", }}}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ Zone: ptr.To("zone"), - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-gb")}, + 
VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-gb")}, }, }, ClientFactory: ClientFactory{ @@ -182,15 +182,15 @@ func TestGetServiceInstanceID(t *testing.T) { { name: "Service Instance ID is not set", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "Service Instance ID is set in status.ServiceInstanceID", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("statusServiceInstanceID"), }, }, @@ -218,15 +218,15 @@ func TestGetDHCPServerID(t *testing.T) { { name: "DHCP server ID is not set", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "DHCP server ID is set in status", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - DHCPServer: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + DHCPServer: &infrav1.ResourceReference{ ID: ptr.To("dhcpserverid"), }, }, @@ -240,7 +240,7 @@ func TestGetDHCPServerID(t *testing.T) { g := NewWithT(t) t.Run(tc.name, func(_ *testing.T) { dhcpServerID := tc.clusterScope.GetDHCPServerID() - g.Expect(utils.DereferencePointer(dhcpServerID)).To(Equal(utils.DereferencePointer(tc.expectedID))) + g.Expect(pointer.Dereference(dhcpServerID)).To(Equal(pointer.Dereference(tc.expectedID))) }) } } @@ -254,15 +254,15 @@ func TestGetVPCID(t *testing.T) { { name: "VPC server ID is not set", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + 
IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "VPC ID is set in status", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -276,7 +276,7 @@ func TestGetVPCID(t *testing.T) { g := NewWithT(t) t.Run(tc.name, func(_ *testing.T) { vpcID := tc.clusterScope.GetVPCID() - g.Expect(utils.DereferencePointer(vpcID)).To(Equal(utils.DereferencePointer(tc.expectedID))) + g.Expect(pointer.Dereference(vpcID)).To(Equal(pointer.Dereference(tc.expectedID))) }) } } @@ -291,15 +291,15 @@ func TestGetVPCSubnetID(t *testing.T) { { name: "VPC subnet status is not set", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "VPC subnet status is empty", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: make(map[string]infrav1beta2.ResourceReference), + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: make(map[string]infrav1.ResourceReference), }, }, }, @@ -307,9 +307,9 @@ func TestGetVPCSubnetID(t *testing.T) { { name: "empty subnet name is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: map[string]infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: map[string]infrav1.ResourceReference{ "us-south": { ID: ptr.To("us-south-1"), }, @@ -321,9 +321,9 @@ func TestGetVPCSubnetID(t *testing.T) { { name: "invalid subnet name is passed", clusterScope: 
PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: map[string]infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: map[string]infrav1.ResourceReference{ "us-south": { ID: ptr.To("us-south-1"), }, @@ -336,9 +336,9 @@ func TestGetVPCSubnetID(t *testing.T) { { name: "valid subnet name is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: map[string]infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: map[string]infrav1.ResourceReference{ "us-south": { ID: ptr.To("us-south-1"), }, @@ -355,7 +355,7 @@ func TestGetVPCSubnetID(t *testing.T) { g := NewWithT(t) t.Run(tc.name, func(_ *testing.T) { subnetID := tc.clusterScope.GetVPCSubnetID(tc.subnetName) - g.Expect(utils.DereferencePointer(subnetID)).To(Equal(utils.DereferencePointer(tc.expectedID))) + g.Expect(pointer.Dereference(subnetID)).To(Equal(pointer.Dereference(tc.expectedID))) }) } } @@ -369,15 +369,15 @@ func TestGetVPCSubnetIDs(t *testing.T) { { name: "VPC subnet is not set", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "VPC subnet id is set in status", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: map[string]infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: map[string]infrav1.ResourceReference{ "us-south": {ID: ptr.To("subnet1")}, "us-south2": {ID: ptr.To("subnet2")}, }, @@ -411,15 +411,15 @@ func TestVPCSecurityGroupByName(t *testing.T) { { name: "VPC SG status 
is not set", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "VPC SG status is empty", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSecurityGroups: make(map[string]infrav1beta2.VPCSecurityGroupStatus), + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSecurityGroups: make(map[string]infrav1.VPCSecurityGroupStatus), }, }, }, @@ -427,9 +427,9 @@ func TestVPCSecurityGroupByName(t *testing.T) { { name: "empty SG name is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSecurityGroups: map[string]infrav1beta2.VPCSecurityGroupStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSecurityGroups: map[string]infrav1.VPCSecurityGroupStatus{ "sg": { ID: ptr.To("sg-1"), }, @@ -441,9 +441,9 @@ func TestVPCSecurityGroupByName(t *testing.T) { { name: "invalid SG name is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSecurityGroups: map[string]infrav1beta2.VPCSecurityGroupStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSecurityGroups: map[string]infrav1.VPCSecurityGroupStatus{ "sg": { ID: ptr.To("sg-1"), }, @@ -456,9 +456,9 @@ func TestVPCSecurityGroupByName(t *testing.T) { { name: "valid SG name is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSecurityGroups: map[string]infrav1beta2.VPCSecurityGroupStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSecurityGroups: 
map[string]infrav1.VPCSecurityGroupStatus{ "sg": { ID: ptr.To("sg-1"), }, @@ -475,7 +475,7 @@ func TestVPCSecurityGroupByName(t *testing.T) { g := NewWithT(t) t.Run(tc.name, func(_ *testing.T) { sgID, _, _ := tc.clusterScope.GetVPCSecurityGroupByName(tc.sgName) - g.Expect(utils.DereferencePointer(sgID)).To(Equal(utils.DereferencePointer(tc.expectedID))) + g.Expect(pointer.Dereference(sgID)).To(Equal(pointer.Dereference(tc.expectedID))) }) } } @@ -490,15 +490,15 @@ func TestVPCSecurityGroupByID(t *testing.T) { { name: "VPC SG status is not set", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "VPC SG status is empty", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSecurityGroups: make(map[string]infrav1beta2.VPCSecurityGroupStatus), + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSecurityGroups: make(map[string]infrav1.VPCSecurityGroupStatus), }, }, }, @@ -506,9 +506,9 @@ func TestVPCSecurityGroupByID(t *testing.T) { { name: "empty SG ID is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSecurityGroups: map[string]infrav1beta2.VPCSecurityGroupStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSecurityGroups: map[string]infrav1.VPCSecurityGroupStatus{ "sg": { ID: ptr.To("sg-1"), }, @@ -520,9 +520,9 @@ func TestVPCSecurityGroupByID(t *testing.T) { { name: "invalid SG ID is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSecurityGroups: map[string]infrav1beta2.VPCSecurityGroupStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: 
infrav1.IBMPowerVSClusterStatus{ + VPCSecurityGroups: map[string]infrav1.VPCSecurityGroupStatus{ "sg": { ID: ptr.To("sg-1"), }, @@ -535,9 +535,9 @@ func TestVPCSecurityGroupByID(t *testing.T) { { name: "valid SG ID is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSecurityGroups: map[string]infrav1beta2.VPCSecurityGroupStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSecurityGroups: map[string]infrav1.VPCSecurityGroupStatus{ "sg": { ID: ptr.To("sg-1"), }, @@ -554,7 +554,7 @@ func TestVPCSecurityGroupByID(t *testing.T) { g := NewWithT(t) t.Run(tc.name, func(_ *testing.T) { sgID, _, _ := tc.clusterScope.GetVPCSecurityGroupByID(tc.sgID) - g.Expect(utils.DereferencePointer(sgID)).To(Equal(utils.DereferencePointer(tc.expectedID))) + g.Expect(pointer.Dereference(sgID)).To(Equal(pointer.Dereference(tc.expectedID))) }) } } @@ -568,15 +568,15 @@ func TestGetTransitGatewayID(t *testing.T) { { name: "TransitGateway ID is not set", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "TransitGateway ID is set in spec", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - TransitGateway: &infrav1beta2.TransitGateway{ID: ptr.To("tgID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + TransitGateway: &infrav1.TransitGateway{ID: ptr.To("tgID")}, }, }, }, @@ -585,9 +585,9 @@ func TestGetTransitGatewayID(t *testing.T) { { name: "TransitGateway ID is set in status", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + 
Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ ID: ptr.To("tgID"), }, }, @@ -601,7 +601,7 @@ func TestGetTransitGatewayID(t *testing.T) { g := NewWithT(t) t.Run(tc.name, func(_ *testing.T) { tgID := tc.clusterScope.GetTransitGatewayID() - g.Expect(utils.DereferencePointer(tgID)).To(Equal(utils.DereferencePointer(tc.expectedID))) + g.Expect(pointer.Dereference(tgID)).To(Equal(pointer.Dereference(tc.expectedID))) }) } } @@ -616,15 +616,15 @@ func TestGetLoadBalancerID(t *testing.T) { { name: "LoadBalancer status is not set", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "LoadBalancer status is empty", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: make(map[string]infrav1beta2.VPCLoadBalancerStatus), + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: make(map[string]infrav1.VPCLoadBalancerStatus), }, }, }, @@ -632,9 +632,9 @@ func TestGetLoadBalancerID(t *testing.T) { { name: "empty LoadBalancer name is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "lb": { ID: ptr.To("lb-1"), }, @@ -646,9 +646,9 @@ func TestGetLoadBalancerID(t *testing.T) { { name: "invalid LoadBalancer name is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: 
infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "lb": { ID: ptr.To("lb-1"), }, @@ -661,9 +661,9 @@ func TestGetLoadBalancerID(t *testing.T) { { name: "valid LoadBalancer name is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "lb": { ID: ptr.To("lb-1"), }, @@ -680,7 +680,7 @@ func TestGetLoadBalancerID(t *testing.T) { g := NewWithT(t) t.Run(tc.name, func(_ *testing.T) { lbID := tc.clusterScope.GetLoadBalancerID(tc.lbName) - g.Expect(utils.DereferencePointer(lbID)).To(Equal(utils.DereferencePointer(tc.expectedID))) + g.Expect(pointer.Dereference(lbID)).To(Equal(pointer.Dereference(tc.expectedID))) }) } } @@ -689,21 +689,21 @@ func TestGetLoadBalancerState(t *testing.T) { testCases := []struct { name string lbName string - expectedState *infrav1beta2.VPCLoadBalancerState + expectedState *infrav1.VPCLoadBalancerState clusterScope PowerVSClusterScope }{ { name: "LoadBalancer status is not set", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "LoadBalancer status is empty", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: make(map[string]infrav1beta2.VPCLoadBalancerStatus), + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: make(map[string]infrav1.VPCLoadBalancerStatus), }, }, }, @@ -711,11 +711,11 @@ func TestGetLoadBalancerState(t *testing.T) { { name: "empty LoadBalancer name is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: 
&infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "lb": { - State: infrav1beta2.VPCLoadBalancerStateActive, + State: infrav1.VPCLoadBalancerStateActive, }, }, }, @@ -725,11 +725,11 @@ func TestGetLoadBalancerState(t *testing.T) { { name: "invalid LoadBalancer name is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "lb": { - State: infrav1beta2.VPCLoadBalancerStateActive, + State: infrav1.VPCLoadBalancerStateActive, }, }, }, @@ -740,18 +740,18 @@ func TestGetLoadBalancerState(t *testing.T) { { name: "valid LoadBalancer name is passed", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "lb": { - State: infrav1beta2.VPCLoadBalancerStateActive, + State: infrav1.VPCLoadBalancerStateActive, }, }, }, }, }, lbName: "lb", - expectedState: ptr.To(infrav1beta2.VPCLoadBalancerStateActive), + expectedState: ptr.To(infrav1.VPCLoadBalancerStateActive), }, } @@ -788,8 +788,8 @@ func TestGetLoadBalancerHostName(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + 
Status: infrav1.IBMPowerVSClusterStatus{}, }, } @@ -803,10 +803,10 @@ func TestGetLoadBalancerHostName(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{}, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{}, + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "-loadbalancer": { Hostname: ptr.To("lb-hostname"), }, @@ -825,17 +825,17 @@ func TestGetLoadBalancerHostName(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "lb", Public: core.BoolPtr(true), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "loadbalancer": { Hostname: ptr.To("lb-hostname"), }, @@ -854,17 +854,17 @@ func TestGetLoadBalancerHostName(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "loadbalancer", Public: core.BoolPtr(true), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ 
+ LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "loadbalancer": { Hostname: ptr.To("lb-hostname"), }, @@ -883,9 +883,9 @@ func TestGetLoadBalancerHostName(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "lb1", Public: core.BoolPtr(false), @@ -896,8 +896,8 @@ func TestGetLoadBalancerHostName(t *testing.T) { }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "lb1": { Hostname: ptr.To("lb1-hostname"), }, @@ -919,9 +919,9 @@ func TestGetLoadBalancerHostName(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "lb1", Public: core.BoolPtr(true), @@ -932,8 +932,8 @@ func TestGetLoadBalancerHostName(t *testing.T) { }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "lb1": { Hostname: ptr.To("lb1-hostname"), }, @@ -956,17 +956,17 @@ func TestGetLoadBalancerHostName(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + 
IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To("loadbalancer-id"), Public: core.BoolPtr(true), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "loadbalancer": { Hostname: ptr.To("lb-hostname"), }, @@ -990,17 +990,17 @@ func TestGetLoadBalancerHostName(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To("loadbalancer-id1"), Public: core.BoolPtr(true), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "loadbalancer": { Hostname: ptr.To("lb-hostname"), }, @@ -1022,9 +1022,9 @@ func TestGetLoadBalancerHostName(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To("lb1"), Public: core.BoolPtr(true), @@ -1035,8 +1035,8 @@ func TestGetLoadBalancerHostName(t *testing.T) { }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ 
"loadbalancer1": { Hostname: ptr.To("lb1-hostname"), }, @@ -1065,9 +1065,9 @@ func TestGetLoadBalancerHostName(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To("lb1"), Public: core.BoolPtr(false), @@ -1078,8 +1078,8 @@ func TestGetLoadBalancerHostName(t *testing.T) { }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "loadbalancer1": { Hostname: ptr.To("lb1-hostname"), }, @@ -1111,15 +1111,15 @@ func TestGetResourceGroupID(t *testing.T) { { name: "Resource group ID is not set", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "Resource group ID is set in spec", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("rgID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("rgID")}, }, }, }, @@ -1128,9 +1128,9 @@ func TestGetResourceGroupID(t *testing.T) { { name: "Resource group ID is set in status", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ResourceGroup: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ResourceGroup: &infrav1.ResourceReference{ ID: 
ptr.To("rgID"), }, }, @@ -1141,12 +1141,12 @@ func TestGetResourceGroupID(t *testing.T) { { name: "spec Resource group ID takes precedence over status Resource group ID", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("rgID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("rgID")}, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ResourceGroup: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ResourceGroup: &infrav1.ResourceReference{ ID: ptr.To("rgID1"), }, }, @@ -1188,9 +1188,9 @@ func TestReconcileLoadBalancers(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To("test-lb-instanceid"), }, @@ -1213,9 +1213,9 @@ func TestReconcileLoadBalancers(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To("test-lb-instanceid"), }, @@ -1242,9 +1242,9 @@ func TestReconcileLoadBalancers(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: 
&infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To("test-active-lb-instanceid"), }, @@ -1280,9 +1280,9 @@ func TestReconcileLoadBalancers(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To("test-lb-instanceid"), }, @@ -1305,7 +1305,7 @@ func TestReconcileLoadBalancers(t *testing.T) { loadBalancerStatus, ok := clusterScope.IBMPowerVSCluster.Status.LoadBalancers["test-lb"] g.Expect(ok).To(BeTrue()) g.Expect(loadBalancerStatus.ID).To(Equal(ptr.To("test-lb-instanceid"))) - g.Expect(loadBalancerStatus.State).To(BeEquivalentTo(infrav1beta2.VPCLoadBalancerStateActive)) + g.Expect(loadBalancerStatus.State).To(BeEquivalentTo(infrav1.VPCLoadBalancerStateActive)) g.Expect(loadBalancerStatus.Hostname).To(Equal(ptr.To("test-lb-hostname"))) }) @@ -1316,9 +1316,9 @@ func TestReconcileLoadBalancers(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: nil, }, @@ -1341,9 +1341,9 @@ func TestReconcileLoadBalancers(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: 
nil, }, @@ -1366,9 +1366,9 @@ func TestReconcileLoadBalancers(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "test-lb", ID: nil, @@ -1391,7 +1391,7 @@ func TestReconcileLoadBalancers(t *testing.T) { loadBalancerStatus, ok := clusterScope.IBMPowerVSCluster.Status.LoadBalancers["test-lb"] g.Expect(ok).To(BeTrue()) - g.Expect(loadBalancerStatus.State).To(BeEquivalentTo(infrav1beta2.VPCLoadBalancerStateActive)) + g.Expect(loadBalancerStatus.State).To(BeEquivalentTo(infrav1.VPCLoadBalancerStateActive)) g.Expect(loadBalancerStatus.ID).To(Equal(ptr.To("test-lb-instanceid"))) g.Expect(loadBalancerStatus.Hostname).To(Equal(ptr.To("test-lb-hostname"))) }) @@ -1404,13 +1404,13 @@ func TestReconcileLoadBalancers(t *testing.T) { clusterNetworkAPIServerPort := int32(9090) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "test-lb", ID: nil, - AdditionalListeners: []infrav1beta2.AdditionalListenerSpec{ + AdditionalListeners: []infrav1.AdditionalListenerSpec{ { Port: 9090, }, @@ -1419,10 +1419,10 @@ func TestReconcileLoadBalancers(t *testing.T) { }, }, }, - Cluster: &capiv1beta1.Cluster{ - Spec: capiv1beta1.ClusterSpec{ - ClusterNetwork: &capiv1beta1.ClusterNetwork{ - APIServerPort: &clusterNetworkAPIServerPort, + Cluster: &clusterv1.Cluster{ + Spec: clusterv1.ClusterSpec{ + ClusterNetwork: clusterv1.ClusterNetwork{ + APIServerPort: clusterNetworkAPIServerPort, }, 
}, }, @@ -1443,37 +1443,37 @@ func TestReconcileLoadBalancers(t *testing.T) { clusterAPIServerPort := int32(9090) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-gid"), }, - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "test-lb", ID: nil, }, }, - VPCSubnets: []infrav1beta2.Subnet{ + VPCSubnets: []infrav1.Subnet{ { Name: ptr.To("test-subnet"), ID: ptr.To("test-subnetid"), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: map[string]infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: map[string]infrav1.ResourceReference{ "test-subnet": { ID: ptr.To("test-resource-reference-id"), }, }, }, }, - Cluster: &capiv1beta1.Cluster{ - Spec: capiv1beta1.ClusterSpec{ - ClusterNetwork: &capiv1beta1.ClusterNetwork{ - APIServerPort: &clusterAPIServerPort, + Cluster: &clusterv1.Cluster{ + Spec: clusterv1.ClusterSpec{ + ClusterNetwork: clusterv1.ClusterNetwork{ + APIServerPort: clusterAPIServerPort, }, }, }, @@ -1494,37 +1494,37 @@ func TestReconcileLoadBalancers(t *testing.T) { clusterAPIServerPort := int32(9090) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-gid"), }, - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "test-lb", ID: nil, }, }, 
- VPCSubnets: []infrav1beta2.Subnet{ + VPCSubnets: []infrav1.Subnet{ { Name: ptr.To("test-subnet"), ID: ptr.To("test-subnetid"), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: map[string]infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: map[string]infrav1.ResourceReference{ "test-subnet": { ID: ptr.To("test-resource-reference-id"), }, }, }, }, - Cluster: &capiv1beta1.Cluster{ - Spec: capiv1beta1.ClusterSpec{ - ClusterNetwork: &capiv1beta1.ClusterNetwork{ - APIServerPort: &clusterAPIServerPort, + Cluster: &clusterv1.Cluster{ + Spec: clusterv1.ClusterSpec{ + ClusterNetwork: clusterv1.ClusterNetwork{ + APIServerPort: clusterAPIServerPort, }, }, }, @@ -1543,7 +1543,7 @@ func TestReconcileLoadBalancers(t *testing.T) { loadBalancer, ok := clusterScope.IBMPowerVSCluster.Status.LoadBalancers["test-lb"] g.Expect(ok).To(BeTrue()) - g.Expect(loadBalancer.State).To(BeEquivalentTo(infrav1beta2.VPCLoadBalancerStateActive)) + g.Expect(loadBalancer.State).To(BeEquivalentTo(infrav1.VPCLoadBalancerStateActive)) g.Expect(loadBalancer.ControllerCreated).To(Equal(ptr.To(true))) g.Expect(loadBalancer.Hostname).To(Equal(ptr.To("test-lb-hostname"))) }) @@ -1572,9 +1572,9 @@ func TestCreateLoadbalancer(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "test-lb", ID: nil, @@ -1584,9 +1584,9 @@ func TestCreateLoadbalancer(t *testing.T) { }, } - lb := infrav1beta2.VPCLoadBalancerSpec{ + lb := infrav1.VPCLoadBalancerSpec{ Name: "test-lb", - AdditionalListeners: []infrav1beta2.AdditionalListenerSpec{ + AdditionalListeners: []infrav1.AdditionalListenerSpec{ { Port: int64(9090), }, @@ -1605,12 +1605,12 @@ func 
TestCreateLoadbalancer(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-gid"), }, - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "test-lb", ID: nil, @@ -1620,9 +1620,9 @@ func TestCreateLoadbalancer(t *testing.T) { }, } - lb := infrav1beta2.VPCLoadBalancerSpec{ + lb := infrav1.VPCLoadBalancerSpec{ Name: "test-lb", - AdditionalListeners: []infrav1beta2.AdditionalListenerSpec{ + AdditionalListeners: []infrav1.AdditionalListenerSpec{ { Port: int64(9090), }, @@ -1642,45 +1642,45 @@ func TestCreateLoadbalancer(t *testing.T) { clusterAPIServerPort := int32(9090) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-gid"), }, - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "test-lb", ID: nil, }, }, - VPCSubnets: []infrav1beta2.Subnet{ + VPCSubnets: []infrav1.Subnet{ { Name: ptr.To("test-subnet"), ID: ptr.To("test-subnetid"), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: map[string]infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: map[string]infrav1.ResourceReference{ "test-subnet": { ID: ptr.To("test-resource-reference-id"), }, }, }, }, - Cluster: &capiv1beta1.Cluster{ - Spec: capiv1beta1.ClusterSpec{ - 
ClusterNetwork: &capiv1beta1.ClusterNetwork{ - APIServerPort: &clusterAPIServerPort, + Cluster: &clusterv1.Cluster{ + Spec: clusterv1.ClusterSpec{ + ClusterNetwork: clusterv1.ClusterNetwork{ + APIServerPort: clusterAPIServerPort, }, }, }, } - lb := infrav1beta2.VPCLoadBalancerSpec{ + lb := infrav1.VPCLoadBalancerSpec{ Name: "test-lb", - AdditionalListeners: []infrav1beta2.AdditionalListenerSpec{ + AdditionalListeners: []infrav1.AdditionalListenerSpec{ { Port: int64(9090), }, @@ -1701,45 +1701,45 @@ func TestCreateLoadbalancer(t *testing.T) { clusterAPIServerPort := int32(9090) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-gid"), }, - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "test-lb", ID: nil, }, }, - VPCSubnets: []infrav1beta2.Subnet{ + VPCSubnets: []infrav1.Subnet{ { Name: ptr.To("test-subnet"), ID: ptr.To("test-subnetid"), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: map[string]infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: map[string]infrav1.ResourceReference{ "test-subnet": { ID: ptr.To("test-resource-reference-id"), }, }, }, }, - Cluster: &capiv1beta1.Cluster{ - Spec: capiv1beta1.ClusterSpec{ - ClusterNetwork: &capiv1beta1.ClusterNetwork{ - APIServerPort: &clusterAPIServerPort, + Cluster: &clusterv1.Cluster{ + Spec: clusterv1.ClusterSpec{ + ClusterNetwork: clusterv1.ClusterNetwork{ + APIServerPort: clusterAPIServerPort, }, }, }, } - lb := infrav1beta2.VPCLoadBalancerSpec{ + lb := infrav1.VPCLoadBalancerSpec{ Name: "test-lb", - AdditionalListeners: []infrav1beta2.AdditionalListenerSpec{ + 
AdditionalListeners: []infrav1.AdditionalListenerSpec{ { Port: int64(9090), }, @@ -1754,7 +1754,7 @@ func TestCreateLoadbalancer(t *testing.T) { loadBalancerStatus, err := clusterScope.createLoadBalancer(ctx, lb) g.Expect(err).To(BeNil()) - g.Expect(loadBalancerStatus.State).To(BeEquivalentTo(infrav1beta2.VPCLoadBalancerStateActive)) + g.Expect(loadBalancerStatus.State).To(BeEquivalentTo(infrav1.VPCLoadBalancerStateActive)) g.Expect(loadBalancerStatus.ControllerCreated).To(Equal(ptr.To(true))) g.Expect(loadBalancerStatus.Hostname).To(Equal(ptr.To("test-lb-hostname"))) }) @@ -1768,16 +1768,16 @@ func TestCheckLoadBalancerPort(t *testing.T) { expectedErr := fmt.Errorf("port %d for the %s load balancer cannot be used as an additional listener port, as it is already assigned to the API server", port, lbName) clusterScope := PowerVSClusterScope{ - Cluster: &capiv1beta1.Cluster{ - Spec: capiv1beta1.ClusterSpec{ - ClusterNetwork: &capiv1beta1.ClusterNetwork{ - APIServerPort: ptr.To(int32(port)), + Cluster: &clusterv1.Cluster{ + Spec: clusterv1.ClusterSpec{ + ClusterNetwork: clusterv1.ClusterNetwork{ + APIServerPort: int32(port), }, }, }, } - loadBalancer := infrav1beta2.VPCLoadBalancerSpec{Name: lbName, AdditionalListeners: []infrav1beta2.AdditionalListenerSpec{ + loadBalancer := infrav1.VPCLoadBalancerSpec{Name: lbName, AdditionalListeners: []infrav1.AdditionalListenerSpec{ { Port: int64(port), }, @@ -1790,16 +1790,16 @@ func TestCheckLoadBalancerPort(t *testing.T) { t.Run("When load balancer listener port and powerVS API server port are different", func(t *testing.T) { g := NewWithT(t) clusterScope := PowerVSClusterScope{ - Cluster: &capiv1beta1.Cluster{ - Spec: capiv1beta1.ClusterSpec{ - ClusterNetwork: &capiv1beta1.ClusterNetwork{ - APIServerPort: ptr.To(int32(8080)), + Cluster: &clusterv1.Cluster{ + Spec: clusterv1.ClusterSpec{ + ClusterNetwork: clusterv1.ClusterNetwork{ + APIServerPort: int32(8080), }, }, }, } - loadBalancer := infrav1beta2.VPCLoadBalancerSpec{Name: 
"test-loadbalancer", AdditionalListeners: []infrav1beta2.AdditionalListenerSpec{ + loadBalancer := infrav1.VPCLoadBalancerSpec{Name: "test-loadbalancer", AdditionalListeners: []infrav1.AdditionalListenerSpec{ { Port: int64(9090), }, @@ -1832,9 +1832,9 @@ func TestCheckLoadBalancer(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: nil, }, @@ -1843,7 +1843,7 @@ func TestCheckLoadBalancer(t *testing.T) { }, } - lb := infrav1beta2.VPCLoadBalancerSpec{ + lb := infrav1.VPCLoadBalancerSpec{ Name: "test-lb", } @@ -1860,9 +1860,9 @@ func TestCheckLoadBalancer(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: nil, }, @@ -1871,7 +1871,7 @@ func TestCheckLoadBalancer(t *testing.T) { }, } - lb := infrav1beta2.VPCLoadBalancerSpec{ + lb := infrav1.VPCLoadBalancerSpec{ Name: "test-lb", } @@ -1889,9 +1889,9 @@ func TestCheckLoadBalancer(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVpc, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "test-lb", ID: nil, @@ -1908,14 +1908,14 @@ func TestCheckLoadBalancer(t *testing.T) { ID: ptr.To("test-lb-instanceid"), }, nil) - lb := 
infrav1beta2.VPCLoadBalancerSpec{ + lb := infrav1.VPCLoadBalancerSpec{ Name: "test-lb", } loadBalancerStatus, err := clusterScope.checkLoadBalancer(ctx, lb) g.Expect(err).To(BeNil()) g.Expect(loadBalancerStatus.ID).To(Equal(ptr.To("test-lb-instanceid"))) - g.Expect(loadBalancerStatus.State).To(Equal(infrav1beta2.VPCLoadBalancerStateActive)) + g.Expect(loadBalancerStatus.State).To(Equal(infrav1.VPCLoadBalancerStateActive)) g.Expect(loadBalancerStatus.Hostname).To(Equal(ptr.To("test-lb-hostname"))) }) } @@ -1928,17 +1928,17 @@ func TestCheckLoadBalancerStatus(t *testing.T) { }{ { name: "VPC load balancer is in active state", - loadbalancer: vpcv1.LoadBalancer{Name: ptr.To("loadbalancer-active"), ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateActive))}, + loadbalancer: vpcv1.LoadBalancer{Name: ptr.To("loadbalancer-active"), ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateActive))}, expectedStatus: true, }, { name: "VPC load balancer creation is in pending state", - loadbalancer: vpcv1.LoadBalancer{Name: ptr.To("loadbalancer-createPending"), ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateCreatePending))}, + loadbalancer: vpcv1.LoadBalancer{Name: ptr.To("loadbalancer-createPending"), ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateCreatePending))}, expectedStatus: false, }, { name: "VPC load balancer is in updating state", - loadbalancer: vpcv1.LoadBalancer{Name: ptr.To("loadbalancer-updatePending"), ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateUpdatePending))}, + loadbalancer: vpcv1.LoadBalancer{Name: ptr.To("loadbalancer-updatePending"), ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateUpdatePending))}, expectedStatus: false, }, } @@ -1973,9 +1973,9 @@ func TestReconcilePowerVSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: 
infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To(serviceInstanceID), }, }, @@ -1996,9 +1996,9 @@ func TestReconcilePowerVSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To(serviceInstanceID), }, }, @@ -2019,9 +2019,9 @@ func TestReconcilePowerVSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To(serviceInstanceID), }, }, @@ -2046,9 +2046,9 @@ func TestReconcilePowerVSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To(serviceInstanceID), }, }, @@ -2073,9 +2073,9 @@ func TestReconcilePowerVSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: 
&infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To(serviceInstanceID), }, }, @@ -2096,9 +2096,9 @@ func TestReconcilePowerVSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To(serviceInstanceID), }, }, @@ -2126,14 +2126,14 @@ func TestReconcilePowerVSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("serviceInstanceIDSpec"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceIDStatus"), }, }, @@ -2162,8 +2162,8 @@ func TestReconcilePowerVSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{}, }, } @@ -2181,9 +2181,9 @@ func TestReconcilePowerVSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: 
&infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("resource-group-id"), }, Zone: ptr.To("zone1"), @@ -2206,9 +2206,9 @@ func TestReconcilePowerVSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("resource-group-id"), }, Zone: ptr.To("zone1"), @@ -2296,8 +2296,8 @@ func TestIsServiceInstanceExists(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ ServiceInstanceID: "instance-id", }, }, @@ -2317,9 +2317,9 @@ func TestIsServiceInstanceExists(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("instance-id"), }, }, @@ -2340,9 +2340,9 @@ func TestIsServiceInstanceExists(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: 
&infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ Name: ptr.To("instance-name"), }, }, @@ -2364,9 +2364,9 @@ func TestIsServiceInstanceExists(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ Name: ptr.To("instance"), }, }, @@ -2388,9 +2388,9 @@ func TestIsServiceInstanceExists(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ Name: ptr.To("instance"), }, }, @@ -2426,8 +2426,8 @@ func TestCreateServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{}, }, } @@ -2442,9 +2442,9 @@ func TestCreateServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: 
ptr.To("resource-group-id"), }, }, @@ -2462,9 +2462,9 @@ func TestCreateServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("resource-group-id"), }, Zone: ptr.To("zone1"), @@ -2485,9 +2485,9 @@ func TestCreateServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("resource-group-id"), }, Zone: ptr.To("zone1"), @@ -2524,7 +2524,7 @@ func TestReconcileVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, } vpcOutput := &vpcv1.VPC{Name: ptr.To("VPCName"), ID: ptr.To("VPCID")} mockVPC.EXPECT().GetVPCByName(gomock.Any()).Return(vpcOutput, nil) @@ -2541,7 +2541,7 @@ func TestReconcileVPC(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, } mockVPC.EXPECT().GetVPCByName(gomock.Any()).Return(nil, fmt.Errorf("GetVPCByName error")) requeue, err := clusterScope.ReconcileVPC(ctx) @@ -2555,9 +2555,9 @@ func TestReconcileVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - Cluster: &capiv1beta1.Cluster{Spec: capiv1beta1.ClusterSpec{ClusterNetwork: 
nil}}, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}}, + Cluster: &clusterv1.Cluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}}, } vpcOutput := &vpcv1.VPC{Name: ptr.To("VPCName"), ID: ptr.To("vpcID"), DefaultSecurityGroup: &vpcv1.SecurityGroupReference{ID: ptr.To("DefaultSecurityGroupID")}} mockVPC.EXPECT().GetVPCByName(gomock.Any()).Return(nil, nil) @@ -2576,8 +2576,8 @@ func TestReconcileVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}}, } mockVPC.EXPECT().GetVPCByName(gomock.Any()).Return(nil, nil) mockVPC.EXPECT().CreateVPC(gomock.Any()).Return(nil, nil, fmt.Errorf("CreateVPC returns error")) @@ -2594,8 +2594,8 @@ func TestReconcileVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{ID: ptr.To("VPCID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{ID: ptr.To("VPCID")}, }}, } vpcOutput := &vpcv1.VPC{Name: ptr.To("VPCName"), ID: ptr.To("VPCID")} @@ -2615,8 +2615,8 @@ func TestReconcileVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{VPC: 
&infrav1beta2.ResourceReference{ID: ptr.To("VPCID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{VPC: &infrav1.ResourceReference{ID: ptr.To("VPCID")}}}, } mockVPC.EXPECT().GetVPC(gomock.Any()).Return(nil, nil, fmt.Errorf("GetVPC returns error")) requeue, err := clusterScope.ReconcileVPC(ctx) @@ -2631,8 +2631,8 @@ func TestReconcileVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{VPC: &infrav1beta2.ResourceReference{ID: ptr.To("VPCID")}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{VPC: &infrav1.ResourceReference{ID: ptr.To("VPCID")}}, }, } mockVPC.EXPECT().GetVPC(gomock.Any()).Return(nil, nil, nil) @@ -2646,11 +2646,11 @@ func TestReconcileVPC(t *testing.T) { setup(t) t.Cleanup(teardown) - vpcOutput := &vpcv1.VPC{Name: ptr.To("VPCName"), ID: ptr.To("VPCID"), Status: ptr.To(string(infrav1beta2.VPCStatePending))} + vpcOutput := &vpcv1.VPC{Name: ptr.To("VPCName"), ID: ptr.To("VPCID"), Status: ptr.To(string(infrav1.VPCStatePending))} clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{VPC: &infrav1beta2.ResourceReference{ID: ptr.To("VPCID")}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{VPC: &infrav1.ResourceReference{ID: ptr.To("VPCID")}}, }, } mockVPC.EXPECT().GetVPC(gomock.Any()).Return(vpcOutput, nil, nil) @@ -2667,8 +2667,8 @@ func TestReconcileVPC(t *testing.T) { vpcOutput := &vpcv1.VPC{Name: ptr.To("VPCName"), ID: ptr.To("VPCID")} clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{VPC: &infrav1beta2.ResourceReference{ID: ptr.To("VPCID")}}}, + IBMPowerVSCluster: 
&infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{VPC: &infrav1.ResourceReference{ID: ptr.To("VPCID")}}}, } mockVPC.EXPECT().GetVPC(gomock.Any()).Return(vpcOutput, nil, nil) requeue, err := clusterScope.ReconcileVPC(ctx) @@ -2698,7 +2698,7 @@ func TestPowerVSScopeCreateVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, } vpcID, err := clusterScope.createVPC() @@ -2712,9 +2712,9 @@ func TestPowerVSScopeCreateVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - Cluster: &capiv1beta1.Cluster{Spec: capiv1beta1.ClusterSpec{ClusterNetwork: nil}}, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}}, + Cluster: &clusterv1.Cluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}}, } vpcOutput := &vpcv1.VPC{Name: ptr.To("VPCName"), ID: ptr.To("vpcID"), DefaultSecurityGroup: &vpcv1.SecurityGroupReference{ID: ptr.To("DefaultSecurityGroupID")}} mockVPC.EXPECT().CreateVPC(gomock.Any()).Return(vpcOutput, nil, nil) @@ -2732,9 +2732,9 @@ func TestPowerVSScopeCreateVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - Cluster: &capiv1beta1.Cluster{Spec: capiv1beta1.ClusterSpec{ClusterNetwork: nil}}, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}}, + Cluster: &clusterv1.Cluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}}, } vpcOutput := &vpcv1.VPC{Name: 
ptr.To("VPCName"), ID: ptr.To("vpcID"), DefaultSecurityGroup: &vpcv1.SecurityGroupReference{ID: ptr.To("DefaultSecurityGroupID")}} mockVPC.EXPECT().CreateVPC(gomock.Any()).Return(vpcOutput, nil, nil) @@ -2748,134 +2748,134 @@ func TestPowerVSScopeCreateVPC(t *testing.T) { func TestGetServiceName(t *testing.T) { testCases := []struct { name string - resourceType infrav1beta2.ResourceType + resourceType infrav1.ResourceType expectedName *string clusterScope PowerVSClusterScope }{ { name: "Resource type is service instance and ServiceInstance is nil", - resourceType: infrav1beta2.ResourceTypeServiceInstance, + resourceType: infrav1.ResourceTypeServiceInstance, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, }, expectedName: ptr.To("ClusterName-serviceInstance"), }, { name: "Resource type is service instance and ServiceInstance is not nil", - resourceType: infrav1beta2.ResourceTypeServiceInstance, + resourceType: infrav1.ResourceTypeServiceInstance, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{Name: ptr.To("ServiceInstanceName")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ServiceInstance: &infrav1.IBMPowerVSResourceReference{Name: ptr.To("ServiceInstanceName")}}}, }, expectedName: ptr.To("ServiceInstanceName"), }, { name: "Resource type is vpc and VPC is nil", - resourceType: infrav1beta2.ResourceTypeVPC, + resourceType: infrav1.ResourceTypeVPC, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, }, 
expectedName: ptr.To("ClusterName-vpc"), }, { name: "Resource type is vpc and VPC is not nil", - resourceType: infrav1beta2.ResourceTypeVPC, + resourceType: infrav1.ResourceTypeVPC, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{VPC: &infrav1beta2.VPCResourceReference{Name: ptr.To("VPCName")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{VPC: &infrav1.VPCResourceReference{Name: ptr.To("VPCName")}}}, }, expectedName: ptr.To("VPCName"), }, { name: "Resource type is transit gateway and transitgateway is nil", - resourceType: infrav1beta2.ResourceTypeTransitGateway, + resourceType: infrav1.ResourceTypeTransitGateway, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, }, expectedName: ptr.To("ClusterName-transitgateway"), }, { name: "Resource type is transit gateway and transitgateway is not nil", - resourceType: infrav1beta2.ResourceTypeTransitGateway, + resourceType: infrav1.ResourceTypeTransitGateway, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{TransitGateway: &infrav1beta2.TransitGateway{Name: ptr.To("TransitGatewayName")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{TransitGateway: &infrav1.TransitGateway{Name: ptr.To("TransitGatewayName")}}}, }, expectedName: ptr.To("TransitGatewayName"), }, { name: "Resource type is dhcp server and dhcpserver is nil", - resourceType: infrav1beta2.ResourceTypeDHCPServer, + resourceType: infrav1.ResourceTypeDHCPServer, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: 
&infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, }, expectedName: ptr.To("ClusterName"), }, { name: "Resource type is dhcp server and dhcpserver is not nil", - resourceType: infrav1beta2.ResourceTypeDHCPServer, + resourceType: infrav1.ResourceTypeDHCPServer, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{DHCPServer: &infrav1beta2.DHCPServer{Name: ptr.To("DHCPServerName")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{DHCPServer: &infrav1.DHCPServer{Name: ptr.To("DHCPServerName")}}}, }, expectedName: ptr.To("DHCPServerName"), }, { name: "Resource type is dhcp server and dhcpserver is not nil and network is not nil", - resourceType: infrav1beta2.ResourceTypeDHCPServer, + resourceType: infrav1.ResourceTypeDHCPServer, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{Network: infrav1beta2.IBMPowerVSResourceReference{Name: ptr.To("NetworkName")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{Network: infrav1.IBMPowerVSResourceReference{Name: ptr.To("NetworkName")}}}, }, expectedName: ptr.To("NetworkName"), }, { name: "Resource type is cos instance and cos instance is nil", - resourceType: infrav1beta2.ResourceTypeCOSInstance, + resourceType: infrav1.ResourceTypeCOSInstance, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, }, expectedName: ptr.To("ClusterName-cosinstance"), }, { name: "Resource type is cos instance and cos instance is not nil", - resourceType: infrav1beta2.ResourceTypeCOSInstance, + resourceType: infrav1.ResourceTypeCOSInstance, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: 
&infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{CosInstance: &infrav1beta2.CosInstance{Name: "CosInstanceName"}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{CosInstance: &infrav1.CosInstance{Name: "CosInstanceName"}}}, }, expectedName: ptr.To("CosInstanceName"), }, { name: "Resource type is cos bucket and cos bucket is nil", - resourceType: infrav1beta2.ResourceTypeCOSBucket, + resourceType: infrav1.ResourceTypeCOSBucket, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, }, expectedName: ptr.To("ClusterName-cosbucket"), }, { name: "Resource type is cos bucket and cos bucket is not nil", - resourceType: infrav1beta2.ResourceTypeCOSBucket, + resourceType: infrav1.ResourceTypeCOSBucket, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{CosInstance: &infrav1beta2.CosInstance{BucketName: "CosBucketName"}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{CosInstance: &infrav1.CosInstance{BucketName: "CosBucketName"}}}, }, expectedName: ptr.To("CosBucketName"), }, { name: "Resource type is subnet", - resourceType: infrav1beta2.ResourceTypeSubnet, + resourceType: infrav1.ResourceTypeSubnet, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, }, expectedName: ptr.To("ClusterName-vpcsubnet"), }, { name: "Resource type is load balancer", - resourceType: infrav1beta2.ResourceTypeLoadBalancer, + resourceType: infrav1.ResourceTypeLoadBalancer, clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: 
&infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, }, expectedName: ptr.To("ClusterName-loadbalancer"), }, { name: "Resource type is invalid", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, expectedName: nil, }, @@ -2910,7 +2910,7 @@ func TestGetVPCByName(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, } mockVPC.EXPECT().GetVPCByName(gomock.Any()).Return(nil, fmt.Errorf("GetVPCByName returns error")) vpcResponse, err := clusterScope.getVPCByName() @@ -2924,7 +2924,7 @@ func TestGetVPCByName(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, } vpcOutput := &vpcv1.VPC{Name: ptr.To("VPCName"), ID: ptr.To("vpcID"), DefaultSecurityGroup: &vpcv1.SecurityGroupReference{ID: ptr.To("DefaultSecurityGroupID")}} mockVPC.EXPECT().GetVPCByName(gomock.Any()).Return(vpcOutput, nil) @@ -2956,7 +2956,7 @@ func TestCheckVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{VPC: &infrav1beta2.VPCResourceReference{ID: ptr.To("VPCID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{VPC: &infrav1.VPCResourceReference{ID: ptr.To("VPCID")}}}, } vpcOutput := &vpcv1.VPC{Name: ptr.To("VPCName"), ID: ptr.To("VPCID"), DefaultSecurityGroup: 
&vpcv1.SecurityGroupReference{ID: ptr.To("DefaultSecurityGroupID")}} mockVPC.EXPECT().GetVPC(gomock.Any()).Return(vpcOutput, nil, nil) @@ -2971,7 +2971,7 @@ func TestCheckVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, } vpcOutput := &vpcv1.VPC{Name: ptr.To("VPCName"), ID: ptr.To("vpcID"), DefaultSecurityGroup: &vpcv1.SecurityGroupReference{ID: ptr.To("DefaultSecurityGroupID")}} mockVPC.EXPECT().GetVPCByName(gomock.Any()).Return(vpcOutput, nil) @@ -2988,7 +2988,7 @@ func TestCheckVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, } mockVPC.EXPECT().GetVPCByName(gomock.Any()).Return(nil, nil) @@ -3003,7 +3003,7 @@ func TestCheckVPC(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}}, } mockVPC.EXPECT().GetVPCByName(gomock.Any()).Return(nil, fmt.Errorf("GetVPCByName returns error")) @@ -3034,7 +3034,7 @@ func TestIsDHCPServerActive(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Status: infrav1beta2.IBMPowerVSClusterStatus{DHCPServer: &infrav1beta2.ResourceReference{ID: ptr.To("dhcpID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Status: infrav1.IBMPowerVSClusterStatus{DHCPServer: &infrav1.ResourceReference{ID: ptr.To("dhcpID")}}}, } 
mockPowerVS.EXPECT().GetDHCPServer(gomock.Any()).Return(nil, fmt.Errorf("GetDHCPServer returns error")) isActive, err := clusterScope.isDHCPServerActive(ctx) @@ -3046,10 +3046,10 @@ func TestIsDHCPServerActive(t *testing.T) { setup(t) t.Cleanup(teardown) - dhcpServer := &models.DHCPServerDetail{ID: ptr.To("dhcpID"), Status: ptr.To(string(infrav1beta2.DHCPServerStateError))} + dhcpServer := &models.DHCPServerDetail{ID: ptr.To("dhcpID"), Status: ptr.To(string(infrav1.DHCPServerStateError))} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Status: infrav1beta2.IBMPowerVSClusterStatus{DHCPServer: &infrav1beta2.ResourceReference{ID: ptr.To("dhcpID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Status: infrav1.IBMPowerVSClusterStatus{DHCPServer: &infrav1.ResourceReference{ID: ptr.To("dhcpID")}}}, } mockPowerVS.EXPECT().GetDHCPServer(gomock.Any()).Return(dhcpServer, nil) @@ -3062,10 +3062,10 @@ func TestIsDHCPServerActive(t *testing.T) { setup(t) t.Cleanup(teardown) - dhcpServer := &models.DHCPServerDetail{ID: ptr.To("dhcpID"), Status: ptr.To(string(infrav1beta2.DHCPServerStateActive))} + dhcpServer := &models.DHCPServerDetail{ID: ptr.To("dhcpID"), Status: ptr.To(string(infrav1.DHCPServerStateActive))} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Status: infrav1beta2.IBMPowerVSClusterStatus{DHCPServer: &infrav1beta2.ResourceReference{ID: ptr.To("dhcpID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Status: infrav1.IBMPowerVSClusterStatus{DHCPServer: &infrav1.ResourceReference{ID: ptr.To("dhcpID")}}}, } mockPowerVS.EXPECT().GetDHCPServer(gomock.Any()).Return(dhcpServer, nil) @@ -3083,17 +3083,17 @@ func TestCheckDHCPServerStatus(t *testing.T) { }{ { name: "DHCP server is in build state", - dhcpServer: models.DHCPServerDetail{ID: ptr.To("dhcpIDBuild"), Status: ptr.To(string(infrav1beta2.DHCPServerStateBuild))}, + 
dhcpServer: models.DHCPServerDetail{ID: ptr.To("dhcpIDBuild"), Status: ptr.To(string(infrav1.DHCPServerStateBuild))}, expectedStatus: false, }, { name: "DHCP server is in active state", - dhcpServer: models.DHCPServerDetail{ID: ptr.To("dhcpIDActive"), Status: ptr.To(string(infrav1beta2.DHCPServerStateActive))}, + dhcpServer: models.DHCPServerDetail{ID: ptr.To("dhcpIDActive"), Status: ptr.To(string(infrav1.DHCPServerStateActive))}, expectedStatus: true, }, { name: "DHCP server is in error state", - dhcpServer: models.DHCPServerDetail{ID: ptr.To("dhcpIDError"), Status: ptr.To(string(infrav1beta2.DHCPServerStateError))}, + dhcpServer: models.DHCPServerDetail{ID: ptr.To("dhcpIDError"), Status: ptr.To(string(infrav1.DHCPServerStateError))}, expectedStatus: false, }, { @@ -3137,7 +3137,7 @@ func TestCreateDHCPServer(t *testing.T) { dhcpServer := &models.DHCPServer{ID: ptr.To("dhcpID"), Network: dhcpNetwork} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}, } mockPowerVS.EXPECT().CreateDHCPServer(gomock.Any()).Return(dhcpServer, nil) dhcpID, err := clusterScope.createDHCPServer(ctx) @@ -3154,9 +3154,9 @@ func TestCreateDHCPServer(t *testing.T) { dhcpServer := &models.DHCPServer{ID: ptr.To("dhcpID")} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: clusterName}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{DHCPServer: &infrav1beta2.DHCPServer{ + Spec: infrav1.IBMPowerVSClusterSpec{DHCPServer: &infrav1.DHCPServer{ ID: ptr.To("dhcpID"), DNSServer: ptr.To("DNSServer"), Cidr: ptr.To("10.10.1.10/24"), @@ -3179,7 +3179,7 @@ func TestCreateDHCPServer(t *testing.T) { dhcpServer := &models.DHCPServer{ID: 
ptr.To("dhcpID"), Network: dhcpNetwork} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}, } mockPowerVS.EXPECT().CreateDHCPServer(gomock.Any()).Return(dhcpServer, nil) dhcpID, err := clusterScope.createDHCPServer(ctx) @@ -3195,7 +3195,7 @@ func TestCreateDHCPServer(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}, } mockPowerVS.EXPECT().CreateDHCPServer(gomock.Any()).Return(nil, nil) dhcpID, err := clusterScope.createDHCPServer(ctx) @@ -3210,7 +3210,7 @@ func TestCreateDHCPServer(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}, } mockPowerVS.EXPECT().CreateDHCPServer(gomock.Any()).Return(nil, fmt.Errorf("CreateDHCPServer returns error")) dhcpID, err := clusterScope.createDHCPServer(ctx) @@ -3242,7 +3242,7 @@ func TestReconcileNetwork(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Status: infrav1beta2.IBMPowerVSClusterStatus{Network: &infrav1beta2.ResourceReference{ID: ptr.To("netID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Status: infrav1.IBMPowerVSClusterStatus{Network: &infrav1.ResourceReference{ID: ptr.To("netID")}}}, } network := &models.Network{NetworkID: ptr.To("netID")} @@ -3259,7 +3259,7 @@ func TestReconcileNetwork(t *testing.T) { clusterScope := PowerVSClusterScope{ 
IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Status: infrav1beta2.IBMPowerVSClusterStatus{Network: &infrav1beta2.ResourceReference{ID: ptr.To("netID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Status: infrav1.IBMPowerVSClusterStatus{Network: &infrav1.ResourceReference{ID: ptr.To("netID")}}}, } mockPowerVS.EXPECT().GetNetworkByID(gomock.Any()).Return(nil, fmt.Errorf("GetNetworkByID error")) @@ -3274,10 +3274,10 @@ func TestReconcileNetwork(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Status: infrav1beta2.IBMPowerVSClusterStatus{DHCPServer: &infrav1beta2.ResourceReference{ID: ptr.To("dhcpID")}, Network: &infrav1beta2.ResourceReference{ID: ptr.To("netID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Status: infrav1.IBMPowerVSClusterStatus{DHCPServer: &infrav1.ResourceReference{ID: ptr.To("dhcpID")}, Network: &infrav1.ResourceReference{ID: ptr.To("netID")}}}, } - dhcpServer := &models.DHCPServerDetail{ID: ptr.To("dhcpID"), Status: ptr.To(string(infrav1beta2.DHCPServerStateActive))} + dhcpServer := &models.DHCPServerDetail{ID: ptr.To("dhcpID"), Status: ptr.To(string(infrav1.DHCPServerStateActive))} mockPowerVS.EXPECT().GetDHCPServer(gomock.Any()).Return(dhcpServer, nil) network := &models.Network{NetworkID: ptr.To("netID")} mockPowerVS.EXPECT().GetNetworkByID(gomock.Any()).Return(network, nil) @@ -3293,7 +3293,7 @@ func TestReconcileNetwork(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Status: infrav1beta2.IBMPowerVSClusterStatus{DHCPServer: &infrav1beta2.ResourceReference{ID: ptr.To("dhcpID")}, Network: &infrav1beta2.ResourceReference{ID: ptr.To("netID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Status: infrav1.IBMPowerVSClusterStatus{DHCPServer: &infrav1.ResourceReference{ID: ptr.To("dhcpID")}, Network: 
&infrav1.ResourceReference{ID: ptr.To("netID")}}}, } mockPowerVS.EXPECT().GetDHCPServer(gomock.Any()).Return(nil, fmt.Errorf("GetDHCPServer error")) @@ -3311,8 +3311,8 @@ func TestReconcileNetwork(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - Network: infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("networkID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + Network: infrav1.IBMPowerVSResourceReference{ID: ptr.To("networkID")}}}, } network := &models.Network{} mockPowerVS.EXPECT().GetNetworkByID(gomock.Any()).Return(network, fmt.Errorf("GetNetworkByID error")) @@ -3329,8 +3329,8 @@ func TestReconcileNetwork(t *testing.T) { network := &models.Network{NetworkID: ptr.To(netID)} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - Network: infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To(netID)}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + Network: infrav1.IBMPowerVSResourceReference{ID: ptr.To(netID)}}}, } mockPowerVS.EXPECT().GetNetworkByID(gomock.Any()).Return(network, nil) mockPowerVS.EXPECT().GetAllDHCPServers().Return(nil, nil) @@ -3349,8 +3349,8 @@ func TestReconcileNetwork(t *testing.T) { network := &models.NetworkReference{Name: ptr.To(netName), NetworkID: ptr.To(netID)} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - Network: infrav1beta2.IBMPowerVSResourceReference{Name: ptr.To(netName)}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + Network: infrav1.IBMPowerVSResourceReference{Name: ptr.To(netName)}}}, } mockPowerVS.EXPECT().GetAllDHCPServers().Return(nil, nil) 
mockPowerVS.EXPECT().GetNetworkByName(gomock.Any()).Return(network, nil) @@ -3369,8 +3369,8 @@ func TestReconcileNetwork(t *testing.T) { dhcpServer := &models.DHCPServerDetail{ID: ptr.To(dhcpID), Network: &models.DHCPServerNetwork{ID: ptr.To(netID)}} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{Network: infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To(netID)}, DHCPServer: &infrav1beta2.DHCPServer{ID: ptr.To(dhcpID)}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{Network: infrav1.IBMPowerVSResourceReference{ID: ptr.To(netID)}, DHCPServer: &infrav1.DHCPServer{ID: ptr.To(dhcpID)}}, }, } mockPowerVS.EXPECT().GetNetworkByID(gomock.Any()).Return(network, nil) @@ -3392,8 +3392,8 @@ func TestReconcileNetwork(t *testing.T) { dhcpServer := &models.DHCPServerDetail{ID: ptr.To(dhcpID), Network: &models.DHCPServerNetwork{ID: ptr.To("netID2")}} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{Network: infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To(netID)}, DHCPServer: &infrav1beta2.DHCPServer{ID: ptr.To(dhcpID)}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{Network: infrav1.IBMPowerVSResourceReference{ID: ptr.To(netID)}, DHCPServer: &infrav1.DHCPServer{ID: ptr.To(dhcpID)}}, }, } mockPowerVS.EXPECT().GetNetworkByID(gomock.Any()).Return(network, nil) @@ -3411,8 +3411,8 @@ func TestReconcileNetwork(t *testing.T) { dhcpServer := &models.DHCPServerDetail{ID: ptr.To(dhcpID), Network: &models.DHCPServerNetwork{ID: ptr.To(netID)}} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - DHCPServer: &infrav1beta2.DHCPServer{ID: ptr.To(dhcpID)}}}, + 
IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + DHCPServer: &infrav1.DHCPServer{ID: ptr.To(dhcpID)}}}, } mockPowerVS.EXPECT().GetDHCPServer(gomock.Any()).Return(dhcpServer, nil) mockPowerVS.EXPECT().GetNetworkByID(gomock.Any()).Return(network, nil) @@ -3431,8 +3431,8 @@ func TestReconcileNetwork(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - DHCPServer: &infrav1beta2.DHCPServer{ID: ptr.To("dhcpID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + DHCPServer: &infrav1.DHCPServer{ID: ptr.To("dhcpID")}}}, } mockPowerVS.EXPECT().GetDHCPServer(gomock.Any()).Return(nil, fmt.Errorf("dhcp server by ID not found")) isNetworkAvailable, err := clusterScope.ReconcileNetwork(ctx) @@ -3450,8 +3450,8 @@ func TestReconcileNetwork(t *testing.T) { dhcpServers := models.DHCPServers{&models.DHCPServer{ID: ptr.To(dhcpID), Network: &models.DHCPServerNetwork{ID: ptr.To(netID), Name: ptr.To(netName)}}} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - DHCPServer: &infrav1beta2.DHCPServer{Name: ptr.To(dhcpServerName)}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + DHCPServer: &infrav1.DHCPServer{Name: ptr.To(dhcpServerName)}}}, } mockPowerVS.EXPECT().GetNetworkByID(gomock.Any()).Return(network, nil) mockPowerVS.EXPECT().GetAllDHCPServers().Return(dhcpServers, nil) @@ -3474,7 +3474,7 @@ func TestReconcileNetwork(t *testing.T) { dhcpServers := models.DHCPServers{&models.DHCPServer{ID: ptr.To(dhcpID), Network: &models.DHCPServerNetwork{ID: ptr.To(netID), Name: ptr.To(netName)}}} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ObjectMeta: 
metav1.ObjectMeta{Name: clusterName}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}, } mockPowerVS.EXPECT().GetAllDHCPServers().Return(dhcpServers, nil) mockPowerVS.EXPECT().GetNetworkByID(gomock.Any()).Return(network, nil) @@ -3495,8 +3495,8 @@ func TestReconcileNetwork(t *testing.T) { dhcpServer := &models.DHCPServer{ID: ptr.To("dhcpID"), Network: dhcpNetwork} clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - Network: infrav1beta2.IBMPowerVSResourceReference{Name: ptr.To("networkName")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + Network: infrav1.IBMPowerVSResourceReference{Name: ptr.To("networkName")}}}, } mockPowerVS.EXPECT().GetAllDHCPServers().Return(nil, nil) mockPowerVS.EXPECT().GetNetworkByName(gomock.Any()).Return(nil, nil) @@ -3516,8 +3516,8 @@ func TestReconcileNetwork(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMPowerVSClient: mockPowerVS, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{ - Network: infrav1beta2.IBMPowerVSResourceReference{Name: ptr.To("networkName")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{ + Network: infrav1.IBMPowerVSResourceReference{Name: ptr.To("networkName")}}}, } mockPowerVS.EXPECT().GetAllDHCPServers().Return(nil, nil) mockPowerVS.EXPECT().GetNetworkByName(gomock.Any()).Return(nil, nil) @@ -3548,11 +3548,11 @@ func TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-de")}, - VPCSubnets: []infrav1beta2.Subnet{{ID: 
ptr.To("subnet1ID"), Name: ptr.To("subnet1Name")}, {ID: ptr.To("subnet2ID"), Name: ptr.To("subnet2Name")}}}, + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-de")}, + VPCSubnets: []infrav1.Subnet{{ID: ptr.To("subnet1ID"), Name: ptr.To("subnet1Name")}, {ID: ptr.To("subnet2ID"), Name: ptr.To("subnet2Name")}}}, }, } subnet1Details := &vpcv1.Subnet{ID: ptr.To("subnet1ID"), Name: ptr.To("subnet1Name")} @@ -3578,15 +3578,15 @@ func TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ID: ptr.To("vpcID")}, + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ID: ptr.To("vpcID")}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-de")}, - VPCSubnets: []infrav1beta2.Subnet{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-de")}, + VPCSubnets: []infrav1.Subnet{ {Name: ptr.To("subnet1Name"), Zone: ptr.To("eu-de-2")}, {Name: ptr.To("subnet2Name")}, {Name: ptr.To("subnet3Name")}, @@ -3639,15 +3639,15 @@ func TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ID: ptr.To("vpcID")}, + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ID: 
ptr.To("vpcID")}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-de")}, - VPCSubnets: []infrav1beta2.Subnet{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-de")}, + VPCSubnets: []infrav1.Subnet{ {Name: ptr.To("subnet1Name"), Zone: ptr.To("eu-de-1")}, {Name: ptr.To("subnet2Name"), Zone: ptr.To("eu-de-2")}, {Name: ptr.To("subnet3Name"), Zone: ptr.To("eu-de-3")}, @@ -3690,14 +3690,14 @@ func TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ID: ptr.To("vpcID")}, + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ID: ptr.To("vpcID")}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-de")}}, + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-de")}}, }, } subnet1Details := &vpcv1.Subnet{ID: ptr.To("subnet1ID"), Name: ptr.To("ClusterName-vpcsubnet-eu-de-1")} @@ -3718,15 +3718,15 @@ func TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: 
&infrav1beta2.ResourceReference{ID: ptr.To("vpcID")}, + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ID: ptr.To("vpcID")}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-de")}, - VPCSubnets: []infrav1beta2.Subnet{{Name: ptr.To("subnet1Name")}}}, + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-de")}, + VPCSubnets: []infrav1.Subnet{{Name: ptr.To("subnet1Name")}}}, }, } subnet1Details := &vpcv1.Subnet{ID: ptr.To("subnet1ID"), Name: ptr.To("subnet1Name")} @@ -3746,9 +3746,9 @@ func TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("aa-dde")}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{Region: ptr.To("aa-dde")}}, }, } requeue, err := clusterScope.ReconcileVPCSubnets(ctx) @@ -3762,9 +3762,9 @@ func TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("")}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{Region: ptr.To("")}}, }, } requeue, err := clusterScope.ReconcileVPCSubnets(ctx) @@ -3778,14 +3778,14 @@ func TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: 
&infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-de")}, - VPCSubnets: []infrav1beta2.Subnet{{Zone: ptr.To("eu-de-1")}}, + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-de")}, + VPCSubnets: []infrav1.Subnet{{Zone: ptr.To("eu-de-1")}}, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: map[string]infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: map[string]infrav1.ResourceReference{ "subnet1Name": {ID: ptr.To("subnet1ID"), ControllerCreated: ptr.To(true)}, }}, }, @@ -3808,11 +3808,11 @@ func TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-de")}, - VPCSubnets: []infrav1beta2.Subnet{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-de")}, + VPCSubnets: []infrav1.Subnet{ { ID: ptr.To("subnet1ID"), Name: ptr.To("subnet1Name"), @@ -3834,11 +3834,11 @@ func TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-de")}, - VPCSubnets: []infrav1beta2.Subnet{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-de")}, + VPCSubnets: []infrav1.Subnet{ { ID: ptr.To("subnet1ID"), Name: ptr.To("subnet1Name"), @@ -3860,10 +3860,10 @@ func 
TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-de")}, + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-de")}, }, }, } @@ -3880,10 +3880,10 @@ func TestReconcileVPCSubnets(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "ClusterName"}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-de")}, + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-de")}, }, }, } @@ -3898,24 +3898,24 @@ func TestSetVPCSubnetStatus(t *testing.T) { testCases := []struct { name string subnetName string - resource infrav1beta2.ResourceReference + resource infrav1.ResourceReference clusterScope PowerVSClusterScope }{ { name: "VPC subnet status is nil", subnetName: "subnet1Name", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, - resource: infrav1beta2.ResourceReference{ID: ptr.To("ID1")}, + resource: infrav1.ResourceReference{ID: ptr.To("ID1")}, }, { name: "VPC subnet status is not nil", subnetName: "subnet1Name", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: map[string]infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: map[string]infrav1.ResourceReference{ "subnet1Name": { ControllerCreated: 
ptr.To(true), }, @@ -3923,7 +3923,7 @@ func TestSetVPCSubnetStatus(t *testing.T) { }, }, }, - resource: infrav1beta2.ResourceReference{ID: ptr.To("ID1"), ControllerCreated: ptr.To(true)}, + resource: infrav1.ResourceReference{ID: ptr.To("ID1"), ControllerCreated: ptr.To(true)}, }, } @@ -4012,14 +4012,14 @@ func TestCreateVPCSubnet(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{VPC: &infrav1beta2.ResourceReference{ID: ptr.To("vpcID")}}, + Status: infrav1.IBMPowerVSClusterStatus{VPC: &infrav1.ResourceReference{ID: ptr.To("vpcID")}}, }, } - subnet := infrav1beta2.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1"), Zone: ptr.To("eu-de-1")} + subnet := infrav1.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1"), Zone: ptr.To("eu-de-1")} subnet1Details := &vpcv1.Subnet{ID: ptr.To("subnet1ID"), Name: ptr.To("ClusterName-vpcsubnet-eu-de-1")} mockVPC.EXPECT().CreateSubnet(gomock.Any()).Return(subnet1Details, nil, nil) @@ -4034,15 +4034,15 @@ func TestCreateVPCSubnet(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("eu-de")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("eu-de")}, }, - Status: 
infrav1beta2.IBMPowerVSClusterStatus{VPC: &infrav1beta2.ResourceReference{ID: ptr.To("vpcID")}}, + Status: infrav1.IBMPowerVSClusterStatus{VPC: &infrav1.ResourceReference{ID: ptr.To("vpcID")}}, }, } - subnet := infrav1beta2.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1")} + subnet := infrav1.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1")} subnet1Details := &vpcv1.Subnet{ID: ptr.To("subnet1ID"), Name: ptr.To("ClusterName-vpcsubnet-eu-de-1")} mockVPC.EXPECT().CreateSubnet(gomock.Any()).Return(subnet1Details, nil, nil) @@ -4058,9 +4058,9 @@ func TestCreateVPCSubnet(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{Spec: infrav1beta2.IBMPowerVSClusterSpec{}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{Spec: infrav1.IBMPowerVSClusterSpec{}}, } - subnet := infrav1beta2.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1")} + subnet := infrav1.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1")} subnetID, err := clusterScope.createVPCSubnet(subnet) g.Expect(subnetID).To(BeNil()) g.Expect(err).ToNot(BeNil()) @@ -4073,10 +4073,10 @@ func TestCreateVPCSubnet(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}}, } - subnet := infrav1beta2.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1"), Zone: ptr.To("eu-de-1")} + subnet := infrav1.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1"), Zone: ptr.To("eu-de-1")} subnetID, err := clusterScope.createVPCSubnet(subnet) g.Expect(subnetID).To(BeNil()) g.Expect(err).ToNot(BeNil()) @@ -4089,11 +4089,11 @@ func TestCreateVPCSubnet(t *testing.T) { clusterScope := 
PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}, - Status: infrav1beta2.IBMPowerVSClusterStatus{VPC: &infrav1beta2.ResourceReference{ID: ptr.To("vpcID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}, + Status: infrav1.IBMPowerVSClusterStatus{VPC: &infrav1.ResourceReference{ID: ptr.To("vpcID")}}}, } - subnet := infrav1beta2.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1"), Zone: ptr.To("eu-de-1")} + subnet := infrav1.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1"), Zone: ptr.To("eu-de-1")} mockVPC.EXPECT().CreateSubnet(gomock.Any()).Return(nil, nil, fmt.Errorf("error creating subnet")) subnetID, err := clusterScope.createVPCSubnet(subnet) g.Expect(subnetID).To(BeNil()) @@ -4106,11 +4106,11 @@ func TestCreateVPCSubnet(t *testing.T) { clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}, - Status: infrav1beta2.IBMPowerVSClusterStatus{VPC: &infrav1beta2.ResourceReference{ID: ptr.To("vpcID")}}}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}}, + Status: infrav1.IBMPowerVSClusterStatus{VPC: &infrav1.ResourceReference{ID: ptr.To("vpcID")}}}, } - subnet := infrav1beta2.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1"), Zone: ptr.To("eu-de-1")} + subnet := infrav1.Subnet{Name: ptr.To("ClusterName-vpcsubnet-eu-de-1"), Zone: ptr.To("eu-de-1")} mockVPC.EXPECT().CreateSubnet(gomock.Any()).Return(nil, nil, nil) subnetID, err := 
clusterScope.createVPCSubnet(subnet) g.Expect(subnetID).To(BeNil()) @@ -4134,12 +4134,12 @@ func TestPowerVSDeleteLoadBalancer(t *testing.T) { } powervsClusterScope := func() *PowerVSClusterScope { return &PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "lb": { ID: ptr.To("lb-id"), ControllerCreated: ptr.To(true), @@ -4169,7 +4169,7 @@ func TestPowerVSDeleteLoadBalancer(t *testing.T) { mockVpc.EXPECT().GetLoadBalancer(gomock.Any()).Return(&vpcv1.LoadBalancer{ ID: ptr.To("lb-id"), Name: ptr.To("lb"), - ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateActive)), + ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateActive)), }, nil, nil) mockVpc.EXPECT().DeleteLoadBalancer(gomock.Any()).Return(&core.DetailedResponse{}, errors.New("failed to delete load balancer")) clusterScope.IBMVPCClient = mockVpc @@ -4186,7 +4186,7 @@ func TestPowerVSDeleteLoadBalancer(t *testing.T) { mockVpc.EXPECT().GetLoadBalancer(gomock.Any()).Return(&vpcv1.LoadBalancer{ ID: ptr.To("lb-id"), Name: ptr.To("lb"), - ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateDeletePending)), + ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateDeletePending)), }, nil, nil) clusterScope.IBMVPCClient = mockVpc requeue, err := clusterScope.DeleteLoadBalancer(ctx) @@ -4214,7 +4214,7 @@ func TestPowerVSDeleteLoadBalancer(t *testing.T) { mockVpc.EXPECT().GetLoadBalancer(gomock.Any()).Return(&vpcv1.LoadBalancer{ ID: ptr.To("lb-id"), Name: ptr.To("lb"), - ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateActive)), + 
ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateActive)), }, nil, nil) mockVpc.EXPECT().DeleteLoadBalancer(gomock.Any()).Return(&core.DetailedResponse{}, nil) clusterScope.IBMVPCClient = mockVpc @@ -4228,7 +4228,7 @@ func TestPowerVSDeleteLoadBalancer(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.LoadBalancers = map[string]infrav1beta2.VPCLoadBalancerStatus{ + clusterScope.IBMPowerVSCluster.Status.LoadBalancers = map[string]infrav1.VPCLoadBalancerStatus{ "lb": { ID: ptr.To("lb-id"), ControllerCreated: ptr.To(false), @@ -4245,7 +4245,7 @@ func TestPowerVSDeleteLoadBalancer(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.LoadBalancers = map[string]infrav1beta2.VPCLoadBalancerStatus{ + clusterScope.IBMPowerVSCluster.Status.LoadBalancers = map[string]infrav1.VPCLoadBalancerStatus{ "lb1": { ID: ptr.To("lb-id"), ControllerCreated: ptr.To(true), @@ -4258,7 +4258,7 @@ func TestPowerVSDeleteLoadBalancer(t *testing.T) { mockVpc.EXPECT().GetLoadBalancer(gomock.Any()).Return(&vpcv1.LoadBalancer{ ID: ptr.To("lb-id"), Name: ptr.To("lb"), - ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateActive)), + ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateActive)), }, nil, nil) mockVpc.EXPECT().DeleteLoadBalancer(gomock.Any()).Return(&core.DetailedResponse{}, nil) clusterScope.IBMVPCClient = mockVpc @@ -4272,7 +4272,7 @@ func TestPowerVSDeleteLoadBalancer(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.LoadBalancers = map[string]infrav1beta2.VPCLoadBalancerStatus{ + clusterScope.IBMPowerVSCluster.Status.LoadBalancers = map[string]infrav1.VPCLoadBalancerStatus{ "lb1": { ID: ptr.To("lb-id"), ControllerCreated: ptr.To(true), @@ -4289,7 +4289,7 @@ func TestPowerVSDeleteLoadBalancer(t *testing.T) { 
mockVpc.EXPECT().GetLoadBalancer(gomock.Any()).Return(&vpcv1.LoadBalancer{ ID: ptr.To("lb-id"), Name: ptr.To("lb"), - ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateActive)), + ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateActive)), }, nil, nil).Times(3) mockVpc.EXPECT().DeleteLoadBalancer(gomock.Any()).Return(&core.DetailedResponse{}, nil).Times(3) clusterScope.IBMVPCClient = mockVpc @@ -4316,9 +4316,9 @@ func TestDeleteVPCSecurityGroups(t *testing.T) { } powervsClusterScope := func() *PowerVSClusterScope { return &PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSecurityGroups: map[string]infrav1beta2.VPCSecurityGroupStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSecurityGroups: map[string]infrav1.VPCSecurityGroupStatus{ "sc": { ID: ptr.To("sc-id"), ControllerCreated: ptr.To(true), @@ -4386,7 +4386,7 @@ func TestDeleteVPCSecurityGroups(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPCSecurityGroups = map[string]infrav1beta2.VPCSecurityGroupStatus{ + clusterScope.IBMPowerVSCluster.Status.VPCSecurityGroups = map[string]infrav1.VPCSecurityGroupStatus{ "sc1": { ID: ptr.To("sc-id"), ControllerCreated: ptr.To(true), @@ -4415,7 +4415,7 @@ func TestDeleteVPCSecurityGroups(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPCSecurityGroups = map[string]infrav1beta2.VPCSecurityGroupStatus{ + clusterScope.IBMPowerVSCluster.Status.VPCSecurityGroups = map[string]infrav1.VPCSecurityGroupStatus{ "sc1": { ID: ptr.To("sc-id"), ControllerCreated: ptr.To(false), @@ -4440,7 +4440,7 @@ func TestDeleteVPCSecurityGroups(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPCSecurityGroups = 
map[string]infrav1beta2.VPCSecurityGroupStatus{ + clusterScope.IBMPowerVSCluster.Status.VPCSecurityGroups = map[string]infrav1.VPCSecurityGroupStatus{ "sc": { ID: ptr.To("sc-id"), ControllerCreated: ptr.To(false), @@ -4469,9 +4469,9 @@ func TestDeleteVPCSubnet(t *testing.T) { } powervsClusterScope := func() *PowerVSClusterScope { return &PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPCSubnet: map[string]infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPCSubnet: map[string]infrav1.ResourceReference{ "subent1": { ID: ptr.To("subent1"), ControllerCreated: ptr.To(true), @@ -4511,7 +4511,7 @@ func TestDeleteVPCSubnet(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - mockVpc.EXPECT().GetSubnet(gomock.Any()).Return(&vpcv1.Subnet{Name: ptr.To("subnet1"), Status: ptr.To(string(infrav1beta2.VPCSubnetStateDeleting))}, nil, nil) + mockVpc.EXPECT().GetSubnet(gomock.Any()).Return(&vpcv1.Subnet{Name: ptr.To("subnet1"), Status: ptr.To(string(infrav1.VPCSubnetStateDeleting))}, nil, nil) clusterScope.IBMVPCClient = mockVpc requeue, err := clusterScope.DeleteVPCSubnet(ctx) g.Expect(err).To(BeNil()) @@ -4548,7 +4548,7 @@ func TestDeleteVPCSubnet(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPCSubnet = map[string]infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status.VPCSubnet = map[string]infrav1.ResourceReference{ "subent1": { ID: ptr.To("subentid"), ControllerCreated: ptr.To(true), @@ -4575,7 +4575,7 @@ func TestDeleteVPCSubnet(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPCSubnet = map[string]infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status.VPCSubnet = map[string]infrav1.ResourceReference{ 
"subent1": { ID: ptr.To("subentid"), ControllerCreated: ptr.To(false), @@ -4598,7 +4598,7 @@ func TestDeleteVPCSubnet(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPCSubnet = map[string]infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status.VPCSubnet = map[string]infrav1.ResourceReference{ "subent1": { ID: ptr.To("subent1"), ControllerCreated: ptr.To(false), @@ -4628,9 +4628,9 @@ func TestPowerVSDeleteVPC(t *testing.T) { } powervsClusterScope := func() *PowerVSClusterScope { return &PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcid"), ControllerCreated: ptr.To(true), }, @@ -4682,7 +4682,7 @@ func TestPowerVSDeleteVPC(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - mockVpc.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{ID: ptr.To("vpcid"), Status: ptr.To(string(infrav1beta2.VPCStateDeleting))}, nil, nil) + mockVpc.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{ID: ptr.To("vpcid"), Status: ptr.To(string(infrav1.VPCStateDeleting))}, nil, nil) clusterScope.IBMVPCClient = mockVpc requeue, err := clusterScope.DeleteVPC(ctx) g.Expect(err).To(BeNil()) @@ -4719,7 +4719,7 @@ func TestPowerVSDeleteVPC(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPC = &infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status.VPC = &infrav1.ResourceReference{ ID: ptr.To("vpcid"), ControllerCreated: ptr.To(false), } @@ -4746,16 +4746,16 @@ func TestDeleteTransitGateway(t *testing.T) { } powervsClusterScope := func() *PowerVSClusterScope { return &PowerVSClusterScope{ - IBMPowerVSCluster: 
&infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ ID: ptr.To("transitgatewayID"), ControllerCreated: ptr.To(true), - PowerVSConnection: &infrav1beta2.ResourceReference{ + PowerVSConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), ID: ptr.To("connectionID"), }, - VPCConnection: &infrav1beta2.ResourceReference{ + VPCConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), ID: ptr.To("connectionID"), }, @@ -4770,7 +4770,7 @@ func TestDeleteTransitGateway(t *testing.T) { setup(t) t.Cleanup(teardown) clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status = infrav1beta2.IBMPowerVSClusterStatus{} + clusterScope.IBMPowerVSCluster.Status = infrav1.IBMPowerVSClusterStatus{} clusterScope.TransitGatewayClient = mockTG requeue, err := clusterScope.DeleteTransitGateway(ctx) g.Expect(err).To(BeNil()) @@ -4783,7 +4783,7 @@ func TestDeleteTransitGateway(t *testing.T) { tgw := &tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))} + Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))} clusterScope := powervsClusterScope() mockTG.EXPECT().GetTransitGateway(gomock.Any()).Return(tgw, nil, nil) mockTG.EXPECT().GetTransitGatewayConnection(gomock.Any()).Return(nil, &core.DetailedResponse{StatusCode: 404}, nil).Times(2) @@ -4801,7 +4801,7 @@ func TestDeleteTransitGateway(t *testing.T) { tgw := &tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))} + Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))} clusterScope := powervsClusterScope() 
mockTG.EXPECT().GetTransitGateway(gomock.Any()).Return(tgw, &core.DetailedResponse{StatusCode: 404}, errors.New("not found")) clusterScope.TransitGatewayClient = mockTG @@ -4817,7 +4817,7 @@ func TestDeleteTransitGateway(t *testing.T) { tgw := &tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))} + Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))} clusterScope := powervsClusterScope() mockTG.EXPECT().GetTransitGateway(gomock.Any()).Return(tgw, nil, errors.New("failed to get transit gateway")) clusterScope.TransitGatewayClient = mockTG @@ -4833,7 +4833,7 @@ func TestDeleteTransitGateway(t *testing.T) { tgw := &tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStateDeletePending))} + Status: ptr.To(string(infrav1.TransitGatewayStateDeletePending))} clusterScope := powervsClusterScope() mockTG.EXPECT().GetTransitGateway(gomock.Any()).Return(tgw, nil, nil) clusterScope.TransitGatewayClient = mockTG @@ -4851,7 +4851,7 @@ func TestDeleteTransitGateway(t *testing.T) { tgw := &tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))} + Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))} mockTG.EXPECT().GetTransitGateway(gomock.Any()).Return(tgw, nil, nil) mockTG.EXPECT().GetTransitGatewayConnection(gomock.Any()).Return(nil, &core.DetailedResponse{StatusCode: 404}, nil).Times(2) mockTG.EXPECT().DeleteTransitGateway(gomock.Any()).Return(&core.DetailedResponse{}, nil) @@ -4870,7 +4870,7 @@ func TestDeleteTransitGateway(t *testing.T) { tgw := &tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))} + Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))} 
mockTG.EXPECT().GetTransitGateway(gomock.Any()).Return(tgw, nil, nil) mockTG.EXPECT().GetTransitGatewayConnection(gomock.Any()).Return(nil, &core.DetailedResponse{}, errors.New("failed to get transit gateway connections")) clusterScope.TransitGatewayClient = mockTG @@ -4888,8 +4888,8 @@ func TestDeleteTransitGateway(t *testing.T) { tgw := &tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))} - tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateDeleting))} + Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))} + tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: ptr.To(string(infrav1.TransitGatewayConnectionStateDeleting))} mockTG.EXPECT().GetTransitGateway(gomock.Any()).Return(tgw, nil, nil) mockTG.EXPECT().GetTransitGatewayConnection(gomock.Any()).Return(tgResponse, &core.DetailedResponse{}, nil) clusterScope.TransitGatewayClient = mockTG @@ -4905,16 +4905,16 @@ func TestDeleteTransitGateway(t *testing.T) { tgw := &tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))} + Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))} clusterScope := powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.TransitGateway = &infrav1beta2.TransitGatewayStatus{ + clusterScope.IBMPowerVSCluster.Status.TransitGateway = &infrav1.TransitGatewayStatus{ ID: ptr.To("transitgatewayID"), ControllerCreated: ptr.To(false), - PowerVSConnection: &infrav1beta2.ResourceReference{ + PowerVSConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(false), ID: ptr.To("connectionID"), }, - VPCConnection: &infrav1beta2.ResourceReference{ + VPCConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(false), ID: ptr.To("connectionID"), }, @@ -4929,126 +4929,126 @@ func 
TestDeleteTransitGateway(t *testing.T) { func TestIsResourceCreatedByController(t *testing.T) { testCases := []struct { name string - resourceType infrav1beta2.ResourceType + resourceType infrav1.ResourceType clusterScope PowerVSClusterScope expectedResult bool }{ { name: "When resourceType is VPC and VPC status is nil", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, - resourceType: infrav1beta2.ResourceTypeVPC, + resourceType: infrav1.ResourceTypeVPC, expectedResult: false, }, { name: "When resourceType is VPC and VPC status is not nil", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), }, }, }, }, - resourceType: infrav1beta2.ResourceTypeVPC, + resourceType: infrav1.ResourceTypeVPC, expectedResult: true, }, { name: "When resourceType is ServiceInstance and ServiceInstance status is nil", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, - resourceType: infrav1beta2.ResourceTypeServiceInstance, + resourceType: infrav1.ResourceTypeServiceInstance, expectedResult: false, }, { name: "When resourceType is ServiceInstance and ServiceInstance status is not nil", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), }, }, }, }, - resourceType: infrav1beta2.ResourceTypeServiceInstance, + 
resourceType: infrav1.ResourceTypeServiceInstance, expectedResult: true, }, { name: "When resourceType is TransitGateway and TransitGateway status is nil", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, - resourceType: infrav1beta2.ResourceTypeTransitGateway, + resourceType: infrav1.ResourceTypeTransitGateway, expectedResult: false, }, { name: "When resourceType is TransitGateway and TransitGateway status is not nil", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ ControllerCreated: ptr.To(true), }, }, }, }, - resourceType: infrav1beta2.ResourceTypeTransitGateway, + resourceType: infrav1.ResourceTypeTransitGateway, expectedResult: true, }, { name: "When resourceType is DHCPServer and DHCPServer status is nil", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, - resourceType: infrav1beta2.ResourceTypeDHCPServer, + resourceType: infrav1.ResourceTypeDHCPServer, expectedResult: false, }, { name: "When resourceType is DHCPServer and DHCPServer status is not nil", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - DHCPServer: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + DHCPServer: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), }, }, }, }, - resourceType: infrav1beta2.ResourceTypeDHCPServer, + resourceType: infrav1.ResourceTypeDHCPServer, expectedResult: true, }, { name: "When resourceType is COSInstance and COSInstance 
status is nil", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, - resourceType: infrav1beta2.ResourceTypeCOSInstance, + resourceType: infrav1.ResourceTypeCOSInstance, expectedResult: false, }, { name: "When resourceType is COSInstance and COSInstance status is not nil", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - COSInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + COSInstance: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), }, }, }, }, - resourceType: infrav1beta2.ResourceTypeCOSInstance, + resourceType: infrav1.ResourceTypeCOSInstance, expectedResult: true, }, { name: "When resourceType is not valid", clusterScope: PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, - resourceType: infrav1beta2.ResourceTypePublicGateway, + resourceType: infrav1.ResourceTypePublicGateway, expectedResult: false, }, } @@ -5080,7 +5080,7 @@ func TestDeleteCOSInstance(t *testing.T) { setup(t) t.Cleanup(teardown) - clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}} + clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}} err := clusterScope.DeleteCOSInstance(ctx) g.Expect(err).To(BeNil()) }) @@ -5089,9 +5089,9 @@ func TestDeleteCOSInstance(t *testing.T) { setup(t) t.Cleanup(teardown) - clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - COSInstance: &infrav1beta2.ResourceReference{ + clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + COSInstance: &infrav1.ResourceReference{ ControllerCreated: 
ptr.To(true), }, }, @@ -5105,9 +5105,9 @@ func TestDeleteCOSInstance(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - COSInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + COSInstance: &infrav1.ResourceReference{ ID: ptr.To("cosInstanceID"), ControllerCreated: ptr.To(true), }, @@ -5126,9 +5126,9 @@ func TestDeleteCOSInstance(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - COSInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + COSInstance: &infrav1.ResourceReference{ ID: ptr.To("cosInstanceID"), ControllerCreated: ptr.To(true), }, @@ -5146,9 +5146,9 @@ func TestDeleteCOSInstance(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - COSInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + COSInstance: &infrav1.ResourceReference{ ID: ptr.To("cosInstanceID"), ControllerCreated: ptr.To(true), }, @@ -5166,9 +5166,9 @@ func TestDeleteCOSInstance(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - COSInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + COSInstance: &infrav1.ResourceReference{ ID: ptr.To("cosInstanceID"), ControllerCreated: ptr.To(true), }, @@ -5176,7 +5176,7 @@ func TestDeleteCOSInstance(t *testing.T) { }, ResourceClient: 
mockResourceController, } - cosInstance := &resourcecontrollerv2.ResourceInstance{ID: ptr.To("cosInstanceID"), State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive))} + cosInstance := &resourcecontrollerv2.ResourceInstance{ID: ptr.To("cosInstanceID"), State: ptr.To(string(infrav1.ServiceInstanceStateActive))} mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(cosInstance, nil, nil) mockResourceController.EXPECT().DeleteResourceInstance(gomock.Any()).Return(nil, nil) err := clusterScope.DeleteCOSInstance(ctx) @@ -5189,9 +5189,9 @@ func TestDeleteCOSInstance(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - COSInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + COSInstance: &infrav1.ResourceReference{ ID: ptr.To("cosInstanceID"), ControllerCreated: ptr.To(true), }, @@ -5199,7 +5199,7 @@ func TestDeleteCOSInstance(t *testing.T) { }, ResourceClient: mockResourceController, } - cosInstance := &resourcecontrollerv2.ResourceInstance{ID: ptr.To("cosInstanceID"), State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive))} + cosInstance := &resourcecontrollerv2.ResourceInstance{ID: ptr.To("cosInstanceID"), State: ptr.To(string(infrav1.ServiceInstanceStateActive))} mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(cosInstance, nil, nil) mockResourceController.EXPECT().DeleteResourceInstance(gomock.Any()).Return(nil, fmt.Errorf("error deleting resource instance")) err := clusterScope.DeleteCOSInstance(ctx) @@ -5225,7 +5225,7 @@ func TestDeleteServiceInstance(t *testing.T) { setup(t) t.Cleanup(teardown) - clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}} + clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}} requeue, err := 
clusterScope.DeleteServiceInstance(ctx) g.Expect(err).To(BeNil()) g.Expect(requeue).To(BeFalse()) @@ -5235,9 +5235,9 @@ func TestDeleteServiceInstance(t *testing.T) { setup(t) t.Cleanup(teardown) - clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), }, }, @@ -5252,9 +5252,9 @@ func TestDeleteServiceInstance(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), ControllerCreated: ptr.To(true), }, @@ -5262,7 +5262,7 @@ func TestDeleteServiceInstance(t *testing.T) { }, ResourceClient: mockResourceController, } - serviceInstance := &resourcecontrollerv2.ResourceInstance{ID: ptr.To("serviceInstanceID"), State: ptr.To(string(infrav1beta2.ServiceInstanceStateRemoved))} + serviceInstance := &resourcecontrollerv2.ResourceInstance{ID: ptr.To("serviceInstanceID"), State: ptr.To(string(infrav1.ServiceInstanceStateRemoved))} mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(serviceInstance, nil, nil) requeue, err := clusterScope.DeleteServiceInstance(ctx) g.Expect(err).To(BeNil()) @@ -5274,9 +5274,9 @@ func TestDeleteServiceInstance(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: 
&infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), ControllerCreated: ptr.To(true), }, @@ -5295,9 +5295,9 @@ func TestDeleteServiceInstance(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), ControllerCreated: ptr.To(true), }, @@ -5305,7 +5305,7 @@ func TestDeleteServiceInstance(t *testing.T) { }, ResourceClient: mockResourceController, } - serviceInstance := &resourcecontrollerv2.ResourceInstance{ID: ptr.To("serviceInstanceID"), State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive))} + serviceInstance := &resourcecontrollerv2.ResourceInstance{ID: ptr.To("serviceInstanceID"), State: ptr.To(string(infrav1.ServiceInstanceStateActive))} mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(serviceInstance, nil, nil) mockResourceController.EXPECT().DeleteResourceInstance(gomock.Any()).Return(nil, nil) requeue, err := clusterScope.DeleteServiceInstance(ctx) @@ -5319,9 +5319,9 @@ func TestDeleteServiceInstance(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), ControllerCreated: ptr.To(true), }, @@ -5329,7 +5329,7 @@ func TestDeleteServiceInstance(t *testing.T) { }, ResourceClient: mockResourceController, } - serviceInstance := 
&resourcecontrollerv2.ResourceInstance{ID: ptr.To("serviceInstanceID"), State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive))} + serviceInstance := &resourcecontrollerv2.ResourceInstance{ID: ptr.To("serviceInstanceID"), State: ptr.To(string(infrav1.ServiceInstanceStateActive))} mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(serviceInstance, nil, nil) mockResourceController.EXPECT().DeleteResourceInstance(gomock.Any()).Return(nil, fmt.Errorf("error deleting resource instance")) requeue, err := clusterScope.DeleteServiceInstance(ctx) @@ -5356,7 +5356,7 @@ func TestDeleteDHCPServer(t *testing.T) { setup(t) t.Cleanup(teardown) - clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}} + clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}} err := clusterScope.DeleteDHCPServer(ctx) g.Expect(err).To(BeNil()) }) @@ -5365,12 +5365,12 @@ func TestDeleteDHCPServer(t *testing.T) { setup(t) t.Cleanup(teardown) - clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - DHCPServer: &infrav1beta2.ResourceReference{ + clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + DHCPServer: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), }, - ServiceInstance: &infrav1beta2.ResourceReference{ + ServiceInstance: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), }, }, @@ -5384,12 +5384,12 @@ func TestDeleteDHCPServer(t *testing.T) { setup(t) t.Cleanup(teardown) - clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - DHCPServer: &infrav1beta2.ResourceReference{ + clusterScope := PowerVSClusterScope{IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + DHCPServer: &infrav1.ResourceReference{ 
ControllerCreated: ptr.To(true), }, - ServiceInstance: &infrav1beta2.ResourceReference{}, + ServiceInstance: &infrav1.ResourceReference{}, }, }} err := clusterScope.DeleteDHCPServer(ctx) @@ -5401,13 +5401,13 @@ func TestDeleteDHCPServer(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - DHCPServer: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + DHCPServer: &infrav1.ResourceReference{ ID: ptr.To("dhcpServerID"), ControllerCreated: ptr.To(true), }, - ServiceInstance: &infrav1beta2.ResourceReference{}, + ServiceInstance: &infrav1.ResourceReference{}, }, }, IBMPowerVSClient: mockPowerVS, @@ -5422,13 +5422,13 @@ func TestDeleteDHCPServer(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - DHCPServer: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + DHCPServer: &infrav1.ResourceReference{ ID: ptr.To("dhcpServerID"), ControllerCreated: ptr.To(true), }, - ServiceInstance: &infrav1beta2.ResourceReference{}, + ServiceInstance: &infrav1.ResourceReference{}, }, }, IBMPowerVSClient: mockPowerVS, @@ -5443,13 +5443,13 @@ func TestDeleteDHCPServer(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - DHCPServer: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + DHCPServer: &infrav1.ResourceReference{ ID: ptr.To("dhcpServerID"), ControllerCreated: ptr.To(true), }, - ServiceInstance: &infrav1beta2.ResourceReference{}, + ServiceInstance: &infrav1.ResourceReference{}, }, }, 
IBMPowerVSClient: mockPowerVS, @@ -5466,13 +5466,13 @@ func TestDeleteDHCPServer(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - DHCPServer: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + DHCPServer: &infrav1.ResourceReference{ ID: ptr.To("dhcpServerID"), ControllerCreated: ptr.To(true), }, - ServiceInstance: &infrav1beta2.ResourceReference{}, + ServiceInstance: &infrav1.ResourceReference{}, }, }, IBMPowerVSClient: mockPowerVS, @@ -5504,10 +5504,10 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ - PowerVSConnection: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ + PowerVSConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), }, }, @@ -5515,7 +5515,7 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { }, TransitGatewayClient: mockTransitGateway, } - tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateDeleting))} + tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: ptr.To(string(infrav1.TransitGatewayConnectionStateDeleting))} tg := &tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID")} mockTransitGateway.EXPECT().GetTransitGatewayConnection(gomock.Any()).Return(tgResponse, &core.DetailedResponse{StatusCode: 200}, nil) requeue, err := clusterScope.deleteTransitGatewayConnections(ctx, tg) @@ -5529,10 +5529,10 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := 
PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ - PowerVSConnection: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ + PowerVSConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), }, }, @@ -5540,7 +5540,7 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { }, TransitGatewayClient: mockTransitGateway, } - tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))} + tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))} tg := &tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID")} mockTransitGateway.EXPECT().GetTransitGatewayConnection(gomock.Any()).Return(tgResponse, &core.DetailedResponse{StatusCode: 200}, nil) mockTransitGateway.EXPECT().DeleteTransitGatewayConnection(gomock.Any()).Return(nil, fmt.Errorf("error deleting transit gateway connection")) @@ -5555,10 +5555,10 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ - PowerVSConnection: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ + PowerVSConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), }, }, @@ -5566,7 +5566,7 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { }, TransitGatewayClient: mockTransitGateway, } - tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: 
ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))} + tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))} tg := &tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID")} mockTransitGateway.EXPECT().GetTransitGatewayConnection(gomock.Any()).Return(tgResponse, &core.DetailedResponse{StatusCode: 200}, nil) mockTransitGateway.EXPECT().DeleteTransitGatewayConnection(gomock.Any()).Return(nil, nil) @@ -5581,10 +5581,10 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ - PowerVSConnection: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ + PowerVSConnection: &infrav1.ResourceReference{ ID: ptr.To("powerVStgID"), ControllerCreated: ptr.To(true), }, @@ -5605,14 +5605,14 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ - PowerVSConnection: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ + PowerVSConnection: &infrav1.ResourceReference{ ID: ptr.To("powerVStgID"), ControllerCreated: ptr.To(true), }, - VPCConnection: &infrav1beta2.ResourceReference{ + VPCConnection: &infrav1.ResourceReference{ ID: ptr.To("vpctgID"), ControllerCreated: ptr.To(true), }, @@ -5622,7 +5622,7 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { TransitGatewayClient: mockTransitGateway, } tg := &tgapiv1.TransitGateway{ID: 
ptr.To("transitGatewayID")} - tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))} + tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))} powerVSTGOptions := &tgapiv1.GetTransitGatewayConnectionOptions{TransitGatewayID: tg.ID, ID: ptr.To("powerVStgID")} mockTransitGateway.EXPECT().GetTransitGatewayConnection(powerVSTGOptions).Return(nil, &core.DetailedResponse{StatusCode: ResourceNotFoundCode}, nil) vpcTGOptions := &tgapiv1.GetTransitGatewayConnectionOptions{TransitGatewayID: tg.ID, ID: ptr.To("vpctgID")} @@ -5638,13 +5638,13 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ - PowerVSConnection: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ + PowerVSConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(false), }, - VPCConnection: &infrav1beta2.ResourceReference{ + VPCConnection: &infrav1.ResourceReference{ ID: ptr.To("vpctgID"), ControllerCreated: ptr.To(true), }, @@ -5667,13 +5667,13 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ - PowerVSConnection: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ + PowerVSConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(false), }, - VPCConnection: 
&infrav1beta2.ResourceReference{ + VPCConnection: &infrav1.ResourceReference{ ID: ptr.To("vpctgID"), ControllerCreated: ptr.To(true), }, @@ -5683,7 +5683,7 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { TransitGatewayClient: mockTransitGateway, } tg := &tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID")} - tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))} + tgResponse := &tgapiv1.TransitGatewayConnectionCust{Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))} vpcTGOptions := &tgapiv1.GetTransitGatewayConnectionOptions{TransitGatewayID: tg.ID, ID: ptr.To("vpctgID")} mockTransitGateway.EXPECT().GetTransitGatewayConnection(vpcTGOptions).Return(tgResponse, &core.DetailedResponse{StatusCode: 200}, nil) mockTransitGateway.EXPECT().DeleteTransitGatewayConnection(gomock.Any()).Return(nil, nil) @@ -5698,13 +5698,13 @@ func TestDeleteTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ - PowerVSConnection: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ + PowerVSConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(false), }, - VPCConnection: &infrav1beta2.ResourceReference{ + VPCConnection: &infrav1.ResourceReference{ ID: ptr.To("vpctgID"), ControllerCreated: ptr.To(true), }, @@ -5747,14 +5747,14 @@ func TestReconcileCOSInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - CosInstance: &infrav1beta2.CosInstance{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: 
infrav1.IBMPowerVSClusterSpec{ + CosInstance: &infrav1.CosInstance{ BucketRegion: "test-region", }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -5778,18 +5778,18 @@ func TestReconcileCOSInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - CosInstance: &infrav1beta2.CosInstance{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + CosInstance: &infrav1.CosInstance{ BucketRegion: "test-region", }, - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-group-id"), }, Zone: ptr.To("test-zone"), }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -5798,7 +5798,7 @@ func TestReconcileCOSInstance(t *testing.T) { mockResourceController.EXPECT().GetInstanceByName(gomock.Any(), gomock.Any(), gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{ Name: ptr.To("test-cos-resource-name"), - State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive)), + State: ptr.To(string(infrav1.ServiceInstanceStateActive)), GUID: ptr.To("test-cos-instance-guid"), }, nil) @@ -5818,18 +5818,18 @@ func TestReconcileCOSInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - CosInstance: &infrav1beta2.CosInstance{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ 
+ CosInstance: &infrav1.CosInstance{ BucketRegion: "test-region", }, - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-group-id"), }, Zone: ptr.To("test-zone"), }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -5859,18 +5859,18 @@ func TestReconcileCOSInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - CosInstance: &infrav1beta2.CosInstance{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + CosInstance: &infrav1.CosInstance{ BucketRegion: "test-region", }, - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-group-id"), }, Zone: ptr.To("test-zone"), }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -5892,14 +5892,14 @@ func TestReconcileCOSInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - CosInstance: &infrav1beta2.CosInstance{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + CosInstance: &infrav1.CosInstance{ BucketRegion: "test-region", }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: 
ptr.To("test-serviceinstance-id"), }, }, @@ -5908,7 +5908,7 @@ func TestReconcileCOSInstance(t *testing.T) { mockResourceController.EXPECT().GetInstanceByName(gomock.Any(), gomock.Any(), gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{ Name: ptr.To("test-cos-resource-name"), - State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive)), + State: ptr.To(string(infrav1.ServiceInstanceStateActive)), GUID: ptr.To("test-cos-instance-guid"), }, nil) @@ -5928,15 +5928,15 @@ func TestReconcileCOSInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - CosInstance: &infrav1beta2.CosInstance{}, - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + CosInstance: &infrav1.CosInstance{}, + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-group-id"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -5966,17 +5966,17 @@ func TestReconcileCOSInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - CosInstance: &infrav1beta2.CosInstance{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + CosInstance: &infrav1.CosInstance{ BucketRegion: "test-bucket-region", }, - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-group-id"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: 
infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6011,17 +6011,17 @@ func TestReconcileCOSInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - CosInstance: &infrav1beta2.CosInstance{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + CosInstance: &infrav1.CosInstance{ BucketRegion: "test-bucket-region", }, - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-group-id"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6058,17 +6058,17 @@ func TestReconcileCOSInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - CosInstance: &infrav1beta2.CosInstance{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + CosInstance: &infrav1.CosInstance{ BucketRegion: "test-bucket-region", }, - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resource-group-id"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6117,9 +6117,9 @@ func TestCheckCOSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: 
&infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6140,9 +6140,9 @@ func TestCheckCOSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6163,9 +6163,9 @@ func TestCheckCOSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6189,9 +6189,9 @@ func TestCheckCOSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6200,12 +6200,12 @@ func TestCheckCOSServiceInstance(t *testing.T) { mockResourceController.EXPECT().GetInstanceByName(gomock.Any(), gomock.Any(), gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{ Name: 
ptr.To("test-cos-resource-name"), - State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive)), + State: ptr.To(string(infrav1.ServiceInstanceStateActive)), }, nil) cosResourceInstance, err := clusterScope.checkCOSServiceInstance(ctx) g.Expect(cosResourceInstance.Name).To(Equal(ptr.To("test-cos-resource-name"))) - g.Expect(cosResourceInstance.State).To(Equal(ptr.To(string(infrav1beta2.ServiceInstanceStateActive)))) + g.Expect(cosResourceInstance.State).To(Equal(ptr.To(string(infrav1.ServiceInstanceStateActive)))) g.Expect(err).To(BeNil()) }) } @@ -6234,9 +6234,9 @@ func TestCreateCOSBucket(t *testing.T) { clusterScope := PowerVSClusterScope{ COSClient: mockCOSController, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6255,9 +6255,9 @@ func TestCreateCOSBucket(t *testing.T) { clusterScope := PowerVSClusterScope{ COSClient: mockCOSController, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6278,9 +6278,9 @@ func TestCreateCOSBucket(t *testing.T) { clusterScope := PowerVSClusterScope{ COSClient: mockCOSController, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + 
ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6299,9 +6299,9 @@ func TestCreateCOSBucket(t *testing.T) { clusterScope := PowerVSClusterScope{ COSClient: mockCOSController, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6323,9 +6323,9 @@ func TestCreateCOSBucket(t *testing.T) { clusterScope := PowerVSClusterScope{ COSClient: mockCOSController, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6362,9 +6362,9 @@ func TestCheckCOSBucket(t *testing.T) { clusterScope := PowerVSClusterScope{ COSClient: mockCOSController, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6445,9 +6445,9 @@ func TestCreateCOSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: 
infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6466,14 +6466,14 @@ func TestCreateCOSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resourcegroup-id"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6494,14 +6494,14 @@ func TestCreateCOSServiceInstance(t *testing.T) { clusterScope := PowerVSClusterScope{ ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("test-resourcegroup-id"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("test-serviceinstance-id"), }, }, @@ -6543,9 +6543,9 @@ func TestReconcileTransitGateway(t *testing.T) { clusterScope := PowerVSClusterScope{ TransitGatewayClient: mockTransitGateway, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + 
TransitGateway: &infrav1.TransitGatewayStatus{ ID: ptr.To("transitGatewayID"), }, }, @@ -6565,16 +6565,16 @@ func TestReconcileTransitGateway(t *testing.T) { clusterScope := PowerVSClusterScope{ TransitGatewayClient: mockTransitGateway, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ ID: ptr.To("transitGatewayID"), }, }, }, } - mockTransitGateway.EXPECT().GetTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))}, nil, nil) + mockTransitGateway.EXPECT().GetTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))}, nil, nil) mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(nil, nil, errors.New("failed to get transitGateway connections")) requeue, err := clusterScope.ReconcileTransitGateway(ctx) g.Expect(requeue).To(BeFalse()) @@ -6588,16 +6588,16 @@ func TestReconcileTransitGateway(t *testing.T) { clusterScope := PowerVSClusterScope{ TransitGatewayClient: mockTransitGateway, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ ID: ptr.To("transitGatewayID"), }, }, }, } - mockTransitGateway.EXPECT().GetTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1beta2.TransitGatewayStatePending))}, nil, nil) + 
mockTransitGateway.EXPECT().GetTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1.TransitGatewayStatePending))}, nil, nil) requeue, err := clusterScope.ReconcileTransitGateway(ctx) g.Expect(requeue).To(BeTrue()) g.Expect(err).To(BeNil()) @@ -6610,22 +6610,22 @@ func TestReconcileTransitGateway(t *testing.T) { TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - TransitGateway: &infrav1beta2.TransitGateway{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + TransitGateway: &infrav1.TransitGateway{ ID: ptr.To("transitGatewayID"), }, - VPC: &infrav1beta2.VPCResourceReference{ + VPC: &infrav1.VPCResourceReference{ ID: ptr.To("vpcID"), }, - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("serviceInstanceID"), }, }, }, } - mockTransitGateway.EXPECT().GetTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))}, nil, nil) + mockTransitGateway.EXPECT().GetTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))}, nil, nil) mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCollection{}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("crn")}, nil, nil) @@ -6648,9 +6648,9 @@ func TestReconcileTransitGateway(t *testing.T) { clusterScope := 
PowerVSClusterScope{ TransitGatewayClient: mockTransitGateway, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - TransitGateway: &infrav1beta2.TransitGateway{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + TransitGateway: &infrav1.TransitGateway{ ID: ptr.To("transitGatewayID"), }, }, @@ -6670,12 +6670,12 @@ func TestReconcileTransitGateway(t *testing.T) { clusterScope := PowerVSClusterScope{ TransitGatewayClient: mockTransitGateway, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{}, }, } - mockTransitGateway.EXPECT().GetTransitGatewayByName(gomock.Any()).Return(&tgapiv1.TransitGateway{Name: ptr.To("transitGatewayName"), ID: ptr.To("transitGatewayID"), Status: ptr.To(string(infrav1beta2.TransitGatewayStateFailed))}, nil) + mockTransitGateway.EXPECT().GetTransitGatewayByName(gomock.Any()).Return(&tgapiv1.TransitGateway{Name: ptr.To("transitGatewayName"), ID: ptr.To("transitGatewayID"), Status: ptr.To(string(infrav1.TransitGatewayStateFailed))}, nil) requeue, err := clusterScope.ReconcileTransitGateway(ctx) g.Expect(requeue).To(BeFalse()) g.Expect(err).ToNot(BeNil()) @@ -6688,12 +6688,12 @@ func TestReconcileTransitGateway(t *testing.T) { clusterScope := PowerVSClusterScope{ TransitGatewayClient: mockTransitGateway, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{}, }, } - mockTransitGateway.EXPECT().GetTransitGatewayByName(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1beta2.TransitGatewayStatePending))}, nil) + 
mockTransitGateway.EXPECT().GetTransitGatewayByName(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1.TransitGatewayStatePending))}, nil) requeue, err := clusterScope.ReconcileTransitGateway(ctx) g.Expect(requeue).To(BeTrue()) g.Expect(err).To(BeNil()) @@ -6708,17 +6708,17 @@ func TestReconcileTransitGateway(t *testing.T) { TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, Zone: ptr.To("us-east-1"), - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("region")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("region")}, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -6726,7 +6726,7 @@ func TestReconcileTransitGateway(t *testing.T) { } mockTransitGateway.EXPECT().GetTransitGatewayByName(gomock.Any()).Return(nil, nil) - mockTransitGateway.EXPECT().CreateTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))}, nil, nil) + mockTransitGateway.EXPECT().CreateTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: 
ptr.To(string(infrav1.TransitGatewayStateAvailable))}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("crn")}, nil, nil) mockTransitGateway.EXPECT().CreateTransitGatewayConnection(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCust{ID: ptr.To("pvs-connID")}, nil, nil) @@ -6751,7 +6751,7 @@ func TestReconcileTransitGateway(t *testing.T) { TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, } mockTransitGateway.EXPECT().GetTransitGatewayByName(gomock.Any()).Return(nil, nil) @@ -6847,8 +6847,8 @@ func TestCheckAndUpdateTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := makePowerVSClusterScope(mockTransitGateway, mockVPC, mockResourceController) - conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), ID: ptr.To("vpc-connID"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))}) - conn = append(conn, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("pvs"), ID: ptr.To("pvs-connID"), NetworkType: ptr.To("power_virtual_server"), NetworkID: ptr.To("pvs-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))}) + conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), ID: ptr.To("vpc-connID"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))}) + conn = append(conn, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("pvs"), ID: ptr.To("pvs-connID"), NetworkType: ptr.To("power_virtual_server"), NetworkID: 
ptr.To("pvs-crn"), Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))}) mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCollection{Connections: conn}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("vpc-crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("pvs-crn")}, nil, nil) @@ -6867,8 +6867,8 @@ func TestCheckAndUpdateTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := makePowerVSClusterScope(mockTransitGateway, mockVPC, mockResourceController) - conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))}) - conn = append(conn, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("pvs"), NetworkType: ptr.To("power_virtual_server"), NetworkID: ptr.To("pvs-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))}) + conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))}) + conn = append(conn, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("pvs"), NetworkType: ptr.To("power_virtual_server"), NetworkID: ptr.To("pvs-crn"), Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))}) mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCollection{Connections: conn}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("vpc-crn")}, nil, nil) 
mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("pvs-crn")}, nil, nil) @@ -6883,7 +6883,7 @@ func TestCheckAndUpdateTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := makePowerVSClusterScope(mockTransitGateway, mockVPC, mockResourceController) - conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStatePending))}) + conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1.TransitGatewayConnectionStatePending))}) mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCollection{Connections: conn}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("vpc-crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("pvs-crn")}, nil, nil) @@ -6898,7 +6898,7 @@ func TestCheckAndUpdateTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := makePowerVSClusterScope(mockTransitGateway, mockVPC, mockResourceController) - conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateFailed))}) + conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1.TransitGatewayConnectionStateFailed))}) 
mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCollection{Connections: conn}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("vpc-crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("pvs-crn")}, nil, nil) @@ -6913,8 +6913,8 @@ func TestCheckAndUpdateTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := makePowerVSClusterScope(mockTransitGateway, mockVPC, mockResourceController) - conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))}) - conn = append(conn, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("pvs"), NetworkType: ptr.To("power_virtual_server"), NetworkID: ptr.To("pvs-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateFailed))}) + conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))}) + conn = append(conn, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("pvs"), NetworkType: ptr.To("power_virtual_server"), NetworkID: ptr.To("pvs-crn"), Status: ptr.To(string(infrav1.TransitGatewayConnectionStateFailed))}) mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCollection{Connections: conn}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("vpc-crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("pvs-crn")}, nil, nil) @@ -6929,7 +6929,7 @@ func 
TestCheckAndUpdateTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := makePowerVSClusterScope(mockTransitGateway, mockVPC, mockResourceController) - conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))}) + conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))}) mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCollection{Connections: conn}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("vpc-crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("pvs-crn")}, nil, nil) @@ -6947,7 +6947,7 @@ func TestCheckAndUpdateTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := makePowerVSClusterScope(mockTransitGateway, mockVPC, mockResourceController) - conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))}) + conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("vpc"), NetworkType: ptr.To("vpc"), NetworkID: ptr.To("vpc-crn"), Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))}) mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCollection{Connections: conn}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("vpc-crn")}, 
nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("pvs-crn")}, nil, nil) @@ -6963,7 +6963,7 @@ func TestCheckAndUpdateTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := makePowerVSClusterScope(mockTransitGateway, mockVPC, mockResourceController) - conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("pvs"), NetworkType: ptr.To("power_virtual_server"), NetworkID: ptr.To("pvs-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))}) + conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("pvs"), NetworkType: ptr.To("power_virtual_server"), NetworkID: ptr.To("pvs-crn"), Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))}) mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCollection{Connections: conn}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("vpc-crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("pvs-crn")}, nil, nil) @@ -6981,7 +6981,7 @@ func TestCheckAndUpdateTransitGatewayConnections(t *testing.T) { t.Cleanup(teardown) clusterScope := makePowerVSClusterScope(mockTransitGateway, mockVPC, mockResourceController) - conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("pvs"), NetworkType: ptr.To("power_virtual_server"), NetworkID: ptr.To("pvs-crn"), Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached))}) + conn := append([]tgapiv1.TransitGatewayConnectionCust{}, tgapiv1.TransitGatewayConnectionCust{Name: ptr.To("pvs"), NetworkType: ptr.To("power_virtual_server"), NetworkID: ptr.To("pvs-crn"), Status: 
ptr.To(string(infrav1.TransitGatewayConnectionStateAttached))}) mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCollection{Connections: conn}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("vpc-crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("pvs-crn")}, nil, nil) @@ -7019,11 +7019,11 @@ func TestCreateTransitGateway(t *testing.T) { TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, Zone: ptr.To("us-east-1"), - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("region")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("region")}, }, }, } @@ -7042,17 +7042,17 @@ func TestCreateTransitGateway(t *testing.T) { TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, Zone: ptr.To("zone-ID"), - VPC: &infrav1beta2.VPCResourceReference{}, + VPC: &infrav1.VPCResourceReference{}, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: 
&infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -7073,17 +7073,17 @@ func TestCreateTransitGateway(t *testing.T) { TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, Zone: ptr.To("us-east-1"), - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("region")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("region")}, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -7105,17 +7105,17 @@ func TestCreateTransitGateway(t *testing.T) { TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, Zone: ptr.To("us-east-1"), - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("region")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("region")}, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + 
Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -7139,24 +7139,24 @@ func TestCreateTransitGateway(t *testing.T) { TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, Zone: ptr.To("us-east-1"), - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("region")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("region")}, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, }, } - mockTransitGateway.EXPECT().CreateTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))}, nil, nil) + mockTransitGateway.EXPECT().CreateTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(nil, nil, errors.New("failed to get power vs instance")) err := 
clusterScope.createTransitGateway(ctx) @@ -7174,24 +7174,24 @@ func TestCreateTransitGateway(t *testing.T) { TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, Zone: ptr.To("us-east-1"), - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("region")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("region")}, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, }, } - mockTransitGateway.EXPECT().CreateTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))}, nil, nil) + mockTransitGateway.EXPECT().CreateTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("crn")}, nil, nil) mockTransitGateway.EXPECT().CreateTransitGatewayConnection(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCust{ID: ptr.To("pvs-connID")}, nil, nil) @@ -7213,20 +7213,20 @@ func TestCreateTransitGateway(t 
*testing.T) { TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - TransitGateway: &infrav1beta2.TransitGateway{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + TransitGateway: &infrav1.TransitGateway{ GlobalRouting: ptr.To(false), }, - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, Zone: ptr.To("us-east-1"), - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("region")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("region")}, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -7247,27 +7247,27 @@ func TestCreateTransitGateway(t *testing.T) { TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - TransitGateway: &infrav1beta2.TransitGateway{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + TransitGateway: &infrav1.TransitGateway{ GlobalRouting: ptr.To(true), }, - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ID: ptr.To("resourceGroupID")}, Zone: ptr.To("zone-ID"), - VPC: &infrav1beta2.VPCResourceReference{Region: ptr.To("region")}, + VPC: &infrav1.VPCResourceReference{Region: ptr.To("region")}, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: 
&infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, }, } - mockTransitGateway.EXPECT().CreateTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))}, nil, nil) + mockTransitGateway.EXPECT().CreateTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ID: ptr.To("transitGatewayID"), Name: ptr.To("transitGatewayName"), Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))}, nil, nil) mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{CRN: ptr.To("crn")}, nil, nil) mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{CRN: ptr.To("crn")}, nil, nil) mockTransitGateway.EXPECT().CreateTransitGatewayConnection(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCust{ID: ptr.To("pvs-connID")}, nil, nil) @@ -7288,15 +7288,15 @@ func makePowerVSClusterScope(mockTransitGateway *tgmock.MockTransitGateway, mock TransitGatewayClient: mockTransitGateway, IBMVPCClient: mockVPC, ResourceClient: mockResourceController, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - TransitGateway: &infrav1beta2.TransitGatewayStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ ID: ptr.To("transitGatewayID"), }, - ServiceInstance: &infrav1beta2.ResourceReference{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -7329,9 +7329,9 @@ func TestReconcileVPCSecurityGroups(t *testing.T) { 
t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), }), }, @@ -7348,9 +7348,9 @@ func TestReconcileVPCSecurityGroups(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), }), }, @@ -7369,9 +7369,9 @@ func TestReconcileVPCSecurityGroups(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), }), }, @@ -7389,14 +7389,14 @@ func TestReconcileVPCSecurityGroups(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: 
&infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), }), }, @@ -7414,14 +7414,14 @@ func TestReconcileVPCSecurityGroups(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), }), }, @@ -7436,16 +7436,16 @@ func TestReconcileVPCSecurityGroups(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - vpcSecurityGroupStatus := make(map[string]infrav1beta2.VPCSecurityGroupStatus) - vpcSecurityGroupStatus[securityGroupName] = infrav1beta2.VPCSecurityGroupStatus{ID: ptr.To("securityGroupID"), RuleIDs: []*string{ptr.To("ruleID")}, ControllerCreated: ptr.To(true)} + vpcSecurityGroupStatus := make(map[string]infrav1.VPCSecurityGroupStatus) + vpcSecurityGroupStatus[securityGroupName] = infrav1.VPCSecurityGroupStatus{ID: ptr.To("securityGroupID"), RuleIDs: []*string{ptr.To("ruleID")}, ControllerCreated: ptr.To(true)} clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: 
infrav1.IBMPowerVSClusterStatus{ VPCSecurityGroups: vpcSecurityGroupStatus, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), }), }, @@ -7460,16 +7460,16 @@ func TestReconcileVPCSecurityGroups(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - vpcSecurityGroupStatus := make(map[string]infrav1beta2.VPCSecurityGroupStatus) - vpcSecurityGroupStatus[securityGroupName] = infrav1beta2.VPCSecurityGroupStatus{ID: &securityGroupID, RuleIDs: []*string{ptr.To("ruleID")}, ControllerCreated: ptr.To(true)} + vpcSecurityGroupStatus := make(map[string]infrav1.VPCSecurityGroupStatus) + vpcSecurityGroupStatus[securityGroupName] = infrav1.VPCSecurityGroupStatus{ID: &securityGroupID, RuleIDs: []*string{ptr.To("ruleID")}, ControllerCreated: ptr.To(true)} clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ VPCSecurityGroups: vpcSecurityGroupStatus, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), }), }, @@ -7486,29 +7486,29 @@ func TestReconcileVPCSecurityGroups(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionInbound, - Source: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ 
+ rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionInbound, + Source: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ Address: ptr.To("192.168.0.1/24"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAddress, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -7540,13 +7540,13 @@ func TestValidateVPCSecurityGroup(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, 
infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } vpcSecurityGroupRule := vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll{ @@ -7558,20 +7558,20 @@ func TestValidateVPCSecurityGroup(t *testing.T) { ID: ptr.To("ruleID"), } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -7588,13 +7588,13 @@ func TestValidateVPCSecurityGroup(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: 
append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } vpcSecurityGroupRule := vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll{ @@ -7606,20 +7606,20 @@ func TestValidateVPCSecurityGroup(t *testing.T) { ID: ptr.To("ruleID"), } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -7636,19 +7636,19 @@ func TestValidateVPCSecurityGroup(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupNamw"), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, 
}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -7664,19 +7664,19 @@ func TestValidateVPCSecurityGroup(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -7692,17 +7692,17 @@ func TestValidateVPCSecurityGroup(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, @@ -7721,19 +7721,19 @@ 
func TestValidateVPCSecurityGroup(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -7750,19 +7750,19 @@ func TestValidateVPCSecurityGroup(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -7779,15 +7779,15 @@ func TestValidateVPCSecurityGroup(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - 
Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ CIDRSubnetName: ptr.To("CIDRSubnetName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeCIDR, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, - PortRange: &infrav1beta2.VPCSecurityGroupPortRange{MaximumPort: 65535, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, + PortRange: &infrav1.VPCSecurityGroupPortRange{MaximumPort: 65535, MinimumPort: 1, }, }, @@ -7804,20 +7804,20 @@ func TestValidateVPCSecurityGroup(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -7834,31 +7834,31 @@ func TestValidateVPCSecurityGroup(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - 
Direction: infrav1beta2.VPCSecurityGroupRuleDirectionInbound, - Source: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionInbound, + Source: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ Address: ptr.To("192.168.1.1/24"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAddress, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -7876,13 +7876,13 @@ func TestValidateVPCSecurityGroup(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, 
infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } vpcSecurityGroupRule := vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll{ @@ -7895,23 +7895,23 @@ func TestValidateVPCSecurityGroup(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroupStatus := make(map[string]infrav1beta2.VPCSecurityGroupStatus) - vpcSecurityGroupStatus["securityGroupName"] = infrav1beta2.VPCSecurityGroupStatus{ID: ptr.To("securityGroupID"), RuleIDs: []*string{}, ControllerCreated: ptr.To(false)} - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroupStatus := make(map[string]infrav1.VPCSecurityGroupStatus) + vpcSecurityGroupStatus["securityGroupName"] = infrav1.VPCSecurityGroupStatus{ID: ptr.To("securityGroupID"), RuleIDs: []*string{}, ControllerCreated: ptr.To(false)} + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ VPCSecurityGroups: vpcSecurityGroupStatus, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: 
append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -7943,15 +7943,15 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + remote := infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, - PortRange: &infrav1beta2.VPCSecurityGroupPortRange{MaximumPort: 65535, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, + PortRange: &infrav1.VPCSecurityGroupPortRange{MaximumPort: 65535, MinimumPort: 1, }, }, @@ -7968,20 +7968,20 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: 
&infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -7995,15 +7995,15 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + remote := infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, - PortRange: &infrav1beta2.VPCSecurityGroupPortRange{MaximumPort: 65535, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, + PortRange: &infrav1.VPCSecurityGroupPortRange{MaximumPort: 65535, MinimumPort: 1, }, }, @@ -8020,20 +8020,20 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: 
&infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8047,16 +8047,16 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ CIDRSubnetName: ptr.To("CIDRSubnetName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR, - } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, - PortRange: &infrav1beta2.VPCSecurityGroupPortRange{MaximumPort: 65535, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeCIDR, + } + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, + PortRange: &infrav1.VPCSecurityGroupPortRange{MaximumPort: 65535, MinimumPort: 1, }, }, @@ -8073,20 +8073,20 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } 
clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8101,14 +8101,14 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + remote := infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } vpcSecurityGroupRule := vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll{ @@ -8122,20 +8122,20 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { var vpcSecurityGroupRules []vpcv1.SecurityGroupRuleIntf vpcSecurityGroupRules = append(vpcSecurityGroupRules, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: 
append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8149,15 +8149,15 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ Address: ptr.To("192.168.0.1/24"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAddress, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } vpcSecurityGroupRule := vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll{ @@ -8171,20 +8171,20 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { var vpcSecurityGroupRules []vpcv1.SecurityGroupRuleIntf vpcSecurityGroupRules = append(vpcSecurityGroupRules, &vpcSecurityGroupRule) - 
vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8198,15 +8198,15 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ CIDRSubnetName: ptr.To("CIDRSubnetName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeCIDR, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } vpcSecurityGroupRule := vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll{ @@ -8219,20 +8219,20 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { } 
vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8247,15 +8247,15 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ SecurityGroupName: ptr.To("securityGroupName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeSG, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, ICMPCode: ptr.To(int64(12)), ICMPType: ptr.To(int64(3)), }, @@ -8272,20 
+8272,20 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8300,15 +8300,15 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ SecurityGroupName: ptr.To("securityGroupName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeSG, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, ICMPCode: 
ptr.To(int64(12)), ICMPType: ptr.To(int64(3)), }, @@ -8325,20 +8325,20 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8353,15 +8353,15 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ SecurityGroupName: ptr.To("securityGroupName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeSG, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), 
+ Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, ICMPCode: ptr.To(int64(12)), ICMPType: ptr.To(int64(3)), }, @@ -8378,20 +8378,20 @@ func TestValidateVPCSecurityGroupRule(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8422,13 +8422,13 @@ func TestValidateVPCSecurityGroupRules(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionInbound, - Source: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionInbound, + Source: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: 
infrav1.VPCSecurityGroupRuleProtocolTCP, }, } vpcSecurityGroupRule := vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll{ @@ -8441,20 +8441,20 @@ func TestValidateVPCSecurityGroupRules(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8468,13 +8468,13 @@ func TestValidateVPCSecurityGroupRules(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionInbound, - Source: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionInbound, + Source: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: 
infrav1.VPCSecurityGroupRuleProtocolTCP, }, } vpcSecurityGroupRule := vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll{ @@ -8487,20 +8487,20 @@ func TestValidateVPCSecurityGroupRules(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("securityGroupID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8514,15 +8514,15 @@ func TestValidateVPCSecurityGroupRules(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ CIDRSubnetName: ptr.To("CIDRSubnetName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeCIDR, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionInbound, - Source: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionInbound, + Source: 
&infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } vpcSecurityGroupRule := vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll{ @@ -8534,21 +8534,21 @@ func TestValidateVPCSecurityGroupRules(t *testing.T) { ID: ptr.To("ruleID"), } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8563,13 +8563,13 @@ func TestValidateVPCSecurityGroupRules(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, 
infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } vpcSecurityGroupRule := vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll{ @@ -8582,20 +8582,20 @@ func TestValidateVPCSecurityGroupRules(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8609,13 +8609,13 @@ func TestValidateVPCSecurityGroupRules(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, 
infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } vpcSecurityGroupRule := vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll{ @@ -8628,20 +8628,20 @@ func TestValidateVPCSecurityGroupRules(t *testing.T) { } vpcSecurityGroupRules := append([]vpcv1.SecurityGroupRuleIntf{}, &vpcSecurityGroupRule) - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ ID: ptr.To("securityGroupID"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -8670,30 +8670,30 @@ func TestValidateVPCSecurityGroupRuleRemote(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ Address: ptr.To("192.168.0.1/24"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAddress, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: 
infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -8707,30 +8707,30 @@ func TestValidateVPCSecurityGroupRuleRemote(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ Address: ptr.To("192.168.0.1/24"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAddress, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: 
append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -8744,29 +8744,29 @@ func TestValidateVPCSecurityGroupRuleRemote(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + remote := infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - 
VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -8780,29 +8780,29 @@ func TestValidateVPCSecurityGroupRuleRemote(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + remote := infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: 
append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -8816,30 +8816,30 @@ func TestValidateVPCSecurityGroupRuleRemote(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ CIDRSubnetName: ptr.To("CIDRSubnetName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeCIDR, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - 
Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -8854,30 +8854,30 @@ func TestValidateVPCSecurityGroupRuleRemote(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ CIDRSubnetName: ptr.To("CIDRSubnetName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeCIDR, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -8892,30 +8892,30 @@ func TestValidateVPCSecurityGroupRuleRemote(t *testing.T) { g := NewWithT(t) 
setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ CIDRSubnetName: ptr.To("CIDRSubnetName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeCIDR, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -8930,30 +8930,30 @@ func TestValidateVPCSecurityGroupRuleRemote(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ SecurityGroupName: ptr.To("securityGroupName"), - RemoteType: 
infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeSG, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -8968,30 +8968,30 @@ func TestValidateVPCSecurityGroupRuleRemote(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ SecurityGroupName: ptr.To("securityGroupName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeSG, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: 
&infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -9006,30 +9006,30 @@ func TestValidateVPCSecurityGroupRuleRemote(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ SecurityGroupName: ptr.To("securityGroupName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeSG, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: 
infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -9064,30 +9064,30 @@ func TestCreateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ Address: ptr.To("192.168.0.1/24"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAddress, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := 
PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -9102,30 +9102,30 @@ func TestCreateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ CIDRSubnetName: ptr.To("CIDRSubnetName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeCIDR, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + 
Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -9141,30 +9141,30 @@ func TestCreateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ CIDRSubnetName: ptr.To("CIDRSubnetName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeCIDR, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + 
Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -9179,29 +9179,29 @@ func TestCreateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny, + remote := infrav1.VPCSecurityGroupRuleRemote{ + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAny, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), 
}, }, @@ -9216,30 +9216,30 @@ func TestCreateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ SecurityGroupName: ptr.To("securityGroupName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeSG, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionInbound, - Source: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionInbound, + Source: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -9255,30 +9255,30 @@ func TestCreateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ SecurityGroupName: 
ptr.To("securityGroupName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeSG, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionInbound, - Source: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionInbound, + Source: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -9293,30 +9293,30 @@ func TestCreateVPCSecurityGroupRule(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - remote := infrav1beta2.VPCSecurityGroupRuleRemote{ + remote := infrav1.VPCSecurityGroupRuleRemote{ SecurityGroupName: ptr.To("securityGroupName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeSG, } - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: 
infrav1beta2.VPCSecurityGroupRuleDirectionInbound, - Source: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, remote), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionInbound, + Source: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, remote), + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), }), }, }, @@ -9347,34 +9347,34 @@ func TestCreateVPCSecurityGroupRules(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ Address: ptr.To("192.168.0.1/24"), - RemoteType: 
infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAddress, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, - PortRange: &infrav1beta2.VPCSecurityGroupPortRange{MaximumPort: 65535, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, + PortRange: &infrav1.VPCSecurityGroupPortRange{MaximumPort: 65535, MinimumPort: 1, }, }, } - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -9388,31 +9388,31 @@ func TestCreateVPCSecurityGroupRules(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ CIDRSubnetName: ptr.To("CIDRSubnetName"), - RemoteType: 
infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeCIDR, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -9426,34 +9426,34 @@ func TestCreateVPCSecurityGroupRules(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionInbound, - Source: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionInbound, + Source: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ Address: ptr.To("192.168.0.1/24"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAddress, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, - PortRange: 
&infrav1beta2.VPCSecurityGroupPortRange{MaximumPort: 65535, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, + PortRange: &infrav1.VPCSecurityGroupPortRange{MaximumPort: 65535, MinimumPort: 1, }, }, } - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -9467,31 +9467,31 @@ func TestCreateVPCSecurityGroupRules(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionInbound, - Source: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionInbound, + Source: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ CIDRSubnetName: ptr.To("CIDRSubnetName"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeCIDR, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } - 
vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("VPCID"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, vpcSecurityGroup), + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, vpcSecurityGroup), }, }, } @@ -9520,28 +9520,28 @@ func TestCreateVPCSecurityGroupRulesAndSetStatus(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ Address: ptr.To("192.168.0.1/24"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAddress, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: 
append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), }), - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("resourceGroupID"), }, }, @@ -9556,28 +9556,28 @@ func TestCreateVPCSecurityGroupRulesAndSetStatus(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - rules := infrav1beta2.VPCSecurityGroupRule{ - Direction: infrav1beta2.VPCSecurityGroupRuleDirectionOutbound, - Destination: &infrav1beta2.VPCSecurityGroupRulePrototype{ - Remotes: append([]infrav1beta2.VPCSecurityGroupRuleRemote{}, infrav1beta2.VPCSecurityGroupRuleRemote{ + rules := infrav1.VPCSecurityGroupRule{ + Direction: infrav1.VPCSecurityGroupRuleDirectionOutbound, + Destination: &infrav1.VPCSecurityGroupRulePrototype{ + Remotes: append([]infrav1.VPCSecurityGroupRuleRemote{}, infrav1.VPCSecurityGroupRuleRemote{ Address: ptr.To("192.168.0.1/24"), - RemoteType: infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress, + RemoteType: infrav1.VPCSecurityGroupRuleRemoteTypeAddress, }), - Protocol: infrav1beta2.VPCSecurityGroupRuleProtocolTCP, + Protocol: infrav1.VPCSecurityGroupRuleProtocolTCP, }, } - vpcSecurityGroup := infrav1beta2.VPCSecurityGroup{ + vpcSecurityGroup := infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), - Rules: append([]*infrav1beta2.VPCSecurityGroupRule{}, &rules), + Rules: append([]*infrav1.VPCSecurityGroupRule{}, &rules), } clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - 
IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), }), - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("resourceGroupID"), }, }, @@ -9609,12 +9609,12 @@ func TestCreateVPCSecurityGroup(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), }), - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("resourceGroupID"), }, }, @@ -9632,12 +9632,12 @@ func TestCreateVPCSecurityGroup(t *testing.T) { t.Cleanup(teardown) clusterScope := PowerVSClusterScope{ IBMVPCClient: mockVPC, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPCSecurityGroups: append([]infrav1beta2.VPCSecurityGroup{}, infrav1beta2.VPCSecurityGroup{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPCSecurityGroups: append([]infrav1.VPCSecurityGroup{}, infrav1.VPCSecurityGroup{ Name: ptr.To("securityGroupName"), }), - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("resourceGroupID"), }, }, diff --git 
a/cloud/scope/powervs_image.go b/cloud/scope/powervs_image.go index 2fc28a67b..8bbca2066 100644 --- a/cloud/scope/powervs_image.go +++ b/cloud/scope/powervs_image.go @@ -21,20 +21,15 @@ import ( "errors" "fmt" - "github.com/go-logr/logr" - "github.com/IBM-Cloud/power-go-client/ibmpisession" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM/go-sdk-core/v5/core" "github.com/IBM/platform-services-go-sdk/resourcecontrollerv2" - "k8s.io/klog/v2" - + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/cluster-api/util/patch" - - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/resourcecontroller" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/endpoints" @@ -44,28 +39,30 @@ import ( // BucketAccess indicates if the bucket has public or private access public access. const BucketAccess = "public" +var ( + // ErrServiceInsanceNotInActiveState indicates error if serviceInstance is inactive. + ErrServiceInsanceNotInActiveState = errors.New("service instance is not in active state") +) + // PowerVSImageScopeParams defines the input parameters used to create a new PowerVSImageScope. type PowerVSImageScopeParams struct { Client client.Client - Logger logr.Logger - IBMPowerVSImage *infrav1beta2.IBMPowerVSImage + IBMPowerVSImage *infrav1.IBMPowerVSImage ServiceEndpoint []endpoints.ServiceEndpoint Zone *string } // PowerVSImageScope defines a scope defined around a Power VS Cluster. 
type PowerVSImageScope struct { - logr.Logger - Client client.Client - patchHelper *patch.Helper - + Client client.Client IBMPowerVSClient powervs.PowerVS - IBMPowerVSImage *infrav1beta2.IBMPowerVSImage + IBMPowerVSImage *infrav1.IBMPowerVSImage ServiceEndpoint []endpoints.ServiceEndpoint } // NewPowerVSImageScope creates a new PowerVSImageScope from the supplied parameters. -func NewPowerVSImageScope(params PowerVSImageScopeParams) (scope *PowerVSImageScope, err error) { +func NewPowerVSImageScope(ctx context.Context, params PowerVSImageScopeParams) (scope *PowerVSImageScope, err error) { + log := ctrl.LoggerFrom(ctx) scope = &PowerVSImageScope{} if params.Client == nil { @@ -80,25 +77,13 @@ func NewPowerVSImageScope(params PowerVSImageScopeParams) (scope *PowerVSImageSc } scope.IBMPowerVSImage = params.IBMPowerVSImage - if params.Logger == (logr.Logger{}) { - params.Logger = klog.Background() - } - scope.Logger = params.Logger - - helper, err := patch.NewHelper(params.IBMPowerVSImage, params.Client) - if err != nil { - err = fmt.Errorf("failed to init patch helper: %w", err) - return nil, err - } - scope.patchHelper = helper - // Create Resource Controller client. var serviceOption resourcecontroller.ServiceOptions // Fetch the resource controller endpoint. 
rcEndpoint := endpoints.FetchEndpoints(string(endpoints.RC), params.ServiceEndpoint) if rcEndpoint != "" { serviceOption.URL = rcEndpoint - params.Logger.V(3).Info("Overriding the default resource controller endpoint", "ResourceControllerEndpoint", rcEndpoint) + log.V(3).Info("Overriding the default resource controller endpoint", "ResourceControllerEndpoint", rcEndpoint) } rc, err := resourcecontroller.NewService(serviceOption) @@ -119,14 +104,14 @@ func NewPowerVSImageScope(params PowerVSImageScopeParams) (scope *PowerVSImageSc } serviceInstance, err := rc.GetServiceInstance("", name, params.Zone) if err != nil { - params.Logger.Error(err, "error failed to get service instance id from name", "name", name) + log.Error(err, "error failed to get service instance id from name", "name", name) return nil, err } if serviceInstance == nil { return nil, fmt.Errorf("service instance %s is not yet created", name) } - if *serviceInstance.State != string(infrav1beta2.ServiceInstanceStateActive) { - return nil, fmt.Errorf("service instance %s is not in active state", name) + if *serviceInstance.State != string(infrav1.ServiceInstanceStateActive) { + return scope, ErrServiceInsanceNotInActiveState } serviceInstanceID = *serviceInstance.GUID } @@ -142,7 +127,7 @@ func NewPowerVSImageScope(params PowerVSImageScopeParams) (scope *PowerVSImageSc options := powervs.ServiceOptions{ IBMPIOptions: &ibmpisession.IBMPIOptions{ - Debug: params.Logger.V(DEBUGLEVEL).Enabled(), + Debug: log.V(DEBUGLEVEL).Enabled(), Zone: *res.RegionID, }, } @@ -150,7 +135,7 @@ func NewPowerVSImageScope(params PowerVSImageScopeParams) (scope *PowerVSImageSc // Fetch the service endpoint. 
if svcEndpoint := endpoints.FetchPVSEndpoint(endpoints.ConstructRegionFromZone(*res.RegionID), params.ServiceEndpoint); svcEndpoint != "" { options.IBMPIOptions.URL = svcEndpoint - scope.Logger.V(3).Info("overriding the default powervs service endpoint") + log.V(3).Info("Overriding the default PowerVS service endpoint", "serviceEndpoint", svcEndpoint) } c, err := powervs.NewService(options) @@ -179,8 +164,9 @@ func (i *PowerVSImageScope) ensureImageUnique(imageName string) (*models.ImageRe } // CreateImageCOSBucket creates a power vs image. -func (i *PowerVSImageScope) CreateImageCOSBucket() (*models.ImageReference, *models.JobReference, error) { - s := i.IBMPowerVSImage.Spec +func (i *PowerVSImageScope) CreateImageCOSBucket(ctx context.Context) (*models.ImageReference, *models.JobReference, error) { + log := ctrl.LoggerFrom(ctx) + imageSpec := i.IBMPowerVSImage.Spec m := i.IBMPowerVSImage.ObjectMeta imageReply, err := i.ensureImageUnique(m.Name) @@ -188,47 +174,37 @@ func (i *PowerVSImageScope) CreateImageCOSBucket() (*models.ImageReference, *mod record.Warnf(i.IBMPowerVSImage, "FailedRetrieveImage", "Failed to retrieve image %q", m.Name) return nil, nil, err } else if imageReply != nil { - i.Info("Image already exists") + log.Info("Image already exists", "imageName", m.Name) return imageReply, nil, nil } if lastJob, _ := i.GetImportJob(); lastJob != nil { - if *lastJob.Status.State != "completed" && *lastJob.Status.State != "failed" { - i.Info("Previous import job not yet finished", "state", *lastJob.Status.State) + if *lastJob.Status.State != string(infrav1.PowerVSImageStateCompleted) && *lastJob.Status.State != string(infrav1.PowerVSImageStateFailed) { + log.Info("Previous import job not yet finished", "state", *lastJob.Status.State) return nil, nil, nil } } body := &models.CreateCosImageImportJob{ ImageName: &m.Name, - BucketName: s.Bucket, + BucketName: imageSpec.Bucket, BucketAccess: core.StringPtr(BucketAccess), - Region: s.Region, - ImageFilename: 
s.Object, - StorageType: s.StorageType, + Region: imageSpec.Region, + ImageFilename: imageSpec.Object, + StorageType: imageSpec.StorageType, } jobRef, err := i.IBMPowerVSClient.CreateCosImage(body) if err != nil { - i.Info("Unable to create new import job request") + log.Info("Unable to create new import job request") record.Warnf(i.IBMPowerVSImage, "FailedCreateImageImportJob", "Failed image import job creation - %v", err) return nil, nil, err } - i.Info("New import job request created") + log.Info("New import job request created") record.Eventf(i.IBMPowerVSImage, "SuccessfulCreateImageImportJob", "Created image import job %q", *jobRef.ID) return nil, jobRef, nil } -// PatchObject persists the cluster configuration and status. -func (i *PowerVSImageScope) PatchObject() error { - return i.patchHelper.Patch(context.TODO(), i.IBMPowerVSImage) -} - -// Close closes the current scope persisting the cluster configuration and status. -func (i *PowerVSImageScope) Close() error { - return i.PatchObject() -} - // DeleteImage will delete the image. func (i *PowerVSImageScope) DeleteImage() error { if err := i.IBMPowerVSClient.DeleteImage(i.IBMPowerVSImage.Status.ImageID); err != nil { @@ -283,11 +259,11 @@ func (i *PowerVSImageScope) GetImageID() string { // SetImageState will set the state for the image. func (i *PowerVSImageScope) SetImageState(status string) { - i.IBMPowerVSImage.Status.ImageState = infrav1beta2.PowerVSImageState(status) + i.IBMPowerVSImage.Status.ImageState = infrav1.PowerVSImageState(status) } // GetImageState will get the state for the image. 
-func (i *PowerVSImageScope) GetImageState() infrav1beta2.PowerVSImageState { +func (i *PowerVSImageScope) GetImageState() infrav1.PowerVSImageState { return i.IBMPowerVSImage.Status.ImageState } diff --git a/cloud/scope/powervs_image_test.go b/cloud/scope/powervs_image_test.go index b62691a5b..ee87b519b 100644 --- a/cloud/scope/powervs_image_test.go +++ b/cloud/scope/powervs_image_test.go @@ -27,11 +27,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs/mock" . "github.com/onsi/gomega" @@ -39,13 +38,13 @@ import ( const idSuffix = "-id" -func newPowervsImage(imageName string) *infrav1beta2.IBMPowerVSImage { - return &infrav1beta2.IBMPowerVSImage{ +func newPowervsImage(imageName string) *infrav1.IBMPowerVSImage { + return &infrav1.IBMPowerVSImage{ ObjectMeta: metav1.ObjectMeta{ Name: imageName, Namespace: "default", }, - Spec: infrav1beta2.IBMPowerVSImageSpec{ + Spec: infrav1.IBMPowerVSImageSpec{ ClusterName: "test-cluster", ServiceInstanceID: "test-service-ID", StorageType: "foo-tier", @@ -56,16 +55,15 @@ func newPowervsImage(imageName string) *infrav1beta2.IBMPowerVSImage { } } -func setupPowerVSImageScope(imageName string, mockpowervs *mock.MockPowerVS) *PowerVSImageScope { - powervsImage := newPowervsImage(imageName) - initObjects := []client.Object{powervsImage} +func setupPowerVSImageScope(imageName string, mockPowerVS *mock.MockPowerVS) *PowerVSImageScope { + powerVSImage := newPowervsImage(imageName) + initObjects := []client.Object{powerVSImage} client := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(initObjects...).Build() return &PowerVSImageScope{ Client: client, - Logger: 
klog.Background(), - IBMPowerVSClient: mockpowervs, - IBMPowerVSImage: powervsImage, + IBMPowerVSClient: mockPowerVS, + IBMPowerVSImage: powerVSImage, } } @@ -98,7 +96,7 @@ func TestNewPowerVSImageScope(t *testing.T) { for _, tc := range testCases { g := NewWithT(t) t.Run(tc.name, func(_ *testing.T) { - _, err := NewPowerVSImageScope(tc.params) + _, err := NewPowerVSImageScope(ctx, tc.params) // Note: only error/failure cases covered // TO-DO: cover success cases g.Expect(err).To(Not(BeNil())) @@ -148,7 +146,7 @@ func TestCreateImageCOSBucket(t *testing.T) { mockpowervs.EXPECT().GetAllImage().Return(images, nil) mockpowervs.EXPECT().GetCosImages(gomock.AssignableToTypeOf(serviceInstanceID)).Return(job, nil) mockpowervs.EXPECT().CreateCosImage(gomock.AssignableToTypeOf(body)).Return(jobReference, nil) - _, out, err := scope.CreateImageCOSBucket() + _, out, err := scope.CreateImageCOSBucket(ctx) g.Expect(err).To(BeNil()) require.Equal(t, jobReference, out) }) @@ -162,7 +160,7 @@ func TestCreateImageCOSBucket(t *testing.T) { } scope := setupPowerVSImageScope("foo-image-1", mockpowervs) mockpowervs.EXPECT().GetAllImage().Return(images, nil) - out, _, err := scope.CreateImageCOSBucket() + out, _, err := scope.CreateImageCOSBucket(ctx) g.Expect(err).To(BeNil()) require.Equal(t, imageReference, out) }) @@ -173,7 +171,7 @@ func TestCreateImageCOSBucket(t *testing.T) { t.Cleanup(teardown) scope := setupPowerVSImageScope(pvsImage, mockpowervs) mockpowervs.EXPECT().GetAllImage().Return(images, errors.New("Failed to list images")) - _, _, err := scope.CreateImageCOSBucket() + _, _, err := scope.CreateImageCOSBucket(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -189,7 +187,7 @@ func TestCreateImageCOSBucket(t *testing.T) { scope := setupPowerVSImageScope(pvsImage, mockpowervs) mockpowervs.EXPECT().GetAllImage().Return(images, nil) mockpowervs.EXPECT().GetCosImages(gomock.AssignableToTypeOf(serviceInstanceID)).Return(job, nil) - _, _, err := scope.CreateImageCOSBucket() + _, _, err 
:= scope.CreateImageCOSBucket(ctx) g.Expect(err).To(BeNil()) }) @@ -201,7 +199,7 @@ func TestCreateImageCOSBucket(t *testing.T) { mockpowervs.EXPECT().GetAllImage().Return(images, nil) mockpowervs.EXPECT().GetCosImages(gomock.AssignableToTypeOf(serviceInstanceID)).Return(job, nil) mockpowervs.EXPECT().CreateCosImage(gomock.AssignableToTypeOf(body)).Return(jobReference, errors.New("Failed to create image import job")) - _, _, err := scope.CreateImageCOSBucket() + _, _, err := scope.CreateImageCOSBucket(ctx) g.Expect(err).To((Not(BeNil()))) }) }) diff --git a/cloud/scope/powervs_machine.go b/cloud/scope/powervs_machine.go index 0ca986350..7b7c67458 100644 --- a/cloud/scope/powervs_machine.go +++ b/cloud/scope/powervs_machine.go @@ -44,6 +44,8 @@ import ( "github.com/IBM/vpc-go-sdk/vpcv1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/cache" @@ -53,10 +55,10 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/authenticator" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/cos" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs" @@ -74,11 +76,11 @@ const cosURLDomain = "cloud-object-storage.appdomain.cloud" type PowerVSMachineScopeParams struct { Logger logr.Logger Client client.Client - Cluster *capiv1beta1.Cluster - Machine *capiv1beta1.Machine - IBMPowerVSCluster *infrav1beta2.IBMPowerVSCluster - IBMPowerVSMachine *infrav1beta2.IBMPowerVSMachine - IBMPowerVSImage *infrav1beta2.IBMPowerVSImage + Cluster 
*clusterv1.Cluster + Machine *clusterv1.Machine + IBMPowerVSCluster *infrav1.IBMPowerVSCluster + IBMPowerVSMachine *infrav1.IBMPowerVSMachine + IBMPowerVSImage *infrav1.IBMPowerVSImage ServiceEndpoint []endpoints.ServiceEndpoint DHCPIPCacheStore cache.Store } @@ -90,11 +92,11 @@ type PowerVSMachineScope struct { IBMPowerVSClient powervs.PowerVS IBMVPCClient vpc.Vpc ResourceClient resourcecontroller.ResourceController - Cluster *capiv1beta1.Cluster - Machine *capiv1beta1.Machine - IBMPowerVSCluster *infrav1beta2.IBMPowerVSCluster - IBMPowerVSMachine *infrav1beta2.IBMPowerVSMachine - IBMPowerVSImage *infrav1beta2.IBMPowerVSImage + Cluster *clusterv1.Cluster + Machine *clusterv1.Machine + IBMPowerVSCluster *infrav1.IBMPowerVSCluster + IBMPowerVSMachine *infrav1.IBMPowerVSMachine + IBMPowerVSImage *infrav1.IBMPowerVSImage ServiceEndpoint []endpoints.ServiceEndpoint DHCPIPCacheStore cache.Store } @@ -171,7 +173,7 @@ func NewPowerVSMachineScope(params PowerVSMachineScopeParams) (scope *PowerVSMac if serviceInstance == nil { return nil, fmt.Errorf("PowerVS service instance %s is not yet created", serviceInstanceName) } - if *serviceInstance.State != string(infrav1beta2.ServiceInstanceStateActive) { + if *serviceInstance.State != string(infrav1.ServiceInstanceStateActive) { return nil, fmt.Errorf("PowerVS service instance name: %s id: %s is not in active state", serviceInstanceName, serviceInstanceID) } serviceInstanceID = *serviceInstance.GUID @@ -252,10 +254,10 @@ func (m *PowerVSMachineScope) CreateMachine(ctx context.Context) (*models.PVMIns // Check if create request has been already triggered. // If InstanceReadyCondition is Unknown then return and wait for it to get updated. 
for _, con := range m.IBMPowerVSMachine.Status.Conditions { - if con.Type == infrav1beta2.InstanceReadyCondition && con.Status == corev1.ConditionUnknown { + if con.Type == infrav1.InstanceReadyCondition && con.Status == corev1.ConditionUnknown { return nil, nil } - if con.Type == infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition && con.Status == corev1.ConditionUnknown { + if con.Type == infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition && con.Status == corev1.ConditionUnknown { return nil, nil } } @@ -353,10 +355,10 @@ func (m *PowerVSMachineScope) resolveUserData(ctx context.Context) (string, erro func getIgnitionVersion(scope *PowerVSMachineScope) string { if scope.IBMPowerVSCluster.Spec.Ignition == nil { - scope.IBMPowerVSCluster.Spec.Ignition = &infrav1beta2.Ignition{} + scope.IBMPowerVSCluster.Spec.Ignition = &infrav1.Ignition{} } if scope.IBMPowerVSCluster.Spec.Ignition.Version == "" { - scope.IBMPowerVSCluster.Spec.Ignition.Version = infrav1beta2.DefaultIgnitionVersion + scope.IBMPowerVSCluster.Spec.Ignition.Version = infrav1.DefaultIgnitionVersion } return scope.IBMPowerVSCluster.Spec.Ignition.Version } @@ -566,7 +568,7 @@ func (m *PowerVSMachineScope) createCOSClient(ctx context.Context) (cos.Cos, err log.V(3).Info("COS service instance is nil") return nil, errors.New("COS service instance is nil") } - if *serviceInstance.State != string(infrav1beta2.ServiceInstanceStateActive) { + if *serviceInstance.State != string(infrav1.ServiceInstanceStateActive) { return nil, fmt.Errorf("COS service instance is not in active state, current state: %s", *serviceInstance.State) } @@ -628,7 +630,7 @@ func (m *PowerVSMachineScope) GetRawBootstrapData() ([]byte, error) { return value, nil } -func getImageID(image *infrav1beta2.IBMPowerVSResourceReference, m *PowerVSMachineScope) (*string, error) { +func getImageID(image *infrav1.IBMPowerVSResourceReference, m *PowerVSMachineScope) (*string, error) { if image.ID != nil { return image.ID, nil } else if 
image.Name != nil { @@ -652,7 +654,7 @@ func (m *PowerVSMachineScope) GetImages() (*models.Images, error) { return m.IBMPowerVSClient.GetAllImage() } -func getNetworkID(network infrav1beta2.IBMPowerVSResourceReference, m *PowerVSMachineScope) (*string, error) { +func getNetworkID(network infrav1.IBMPowerVSResourceReference, m *PowerVSMachineScope) (*string, error) { if network.ID != nil { return network.ID, nil } else if network.Name != nil { @@ -872,11 +874,11 @@ func (m *PowerVSMachineScope) SetAddresses(ctx context.Context, instance *models // SetInstanceState will set the state for the machine. func (m *PowerVSMachineScope) SetInstanceState(status *string) { - m.IBMPowerVSMachine.Status.InstanceState = infrav1beta2.PowerVSInstanceState(*status) + m.IBMPowerVSMachine.Status.InstanceState = infrav1.PowerVSInstanceState(*status) } // GetInstanceState will get the state for the machine. -func (m *PowerVSMachineScope) GetInstanceState() infrav1beta2.PowerVSInstanceState { +func (m *PowerVSMachineScope) GetInstanceState() infrav1.PowerVSInstanceState { return m.IBMPowerVSMachine.Status.InstanceState } @@ -956,9 +958,9 @@ func (m *PowerVSMachineScope) GetMachineInternalIP() string { // CreateVPCLoadBalancerPoolMember creates a member in load balancer pool. 
func (m *PowerVSMachineScope) CreateVPCLoadBalancerPoolMember(ctx context.Context) (*vpcv1.LoadBalancerPoolMember, error) { //nolint:gocyclo log := ctrl.LoggerFrom(ctx) - loadBalancers := make([]infrav1beta2.VPCLoadBalancerSpec, 0) + loadBalancers := make([]infrav1.VPCLoadBalancerSpec, 0) if len(m.IBMPowerVSCluster.Spec.LoadBalancers) == 0 { - loadBalancer := infrav1beta2.VPCLoadBalancerSpec{ + loadBalancer := infrav1.VPCLoadBalancerSpec{ Name: fmt.Sprintf("%s-loadbalancer", m.IBMPowerVSCluster.Name), Public: ptr.To(true), } @@ -987,7 +989,7 @@ func (m *PowerVSMachineScope) CreateVPCLoadBalancerPoolMember(ctx context.Contex if err != nil { return nil, fmt.Errorf("failed to find VPC load balancer details: %w", err) } - if *loadBalancer.ProvisioningStatus != string(infrav1beta2.VPCLoadBalancerStateActive) { + if *loadBalancer.ProvisioningStatus != string(infrav1.VPCLoadBalancerStateActive) { return nil, fmt.Errorf("VPC load balancer is not in active state, current state %s", *loadBalancer.ProvisioningStatus) } if len(loadBalancer.Pools) == 0 { @@ -996,7 +998,49 @@ func (m *PowerVSMachineScope) CreateVPCLoadBalancerPoolMember(ctx context.Contex internalIP := m.GetMachineInternalIP() + // lbAdditionalListeners is a mapping of additionalListener's port-protocol to the additionalListener as defined in the specification + // It will be used later to get the default pool associated with the listener + lbAdditionalListeners := map[string]infrav1.AdditionalListenerSpec{} + for _, additionalListener := range lb.AdditionalListeners { + if additionalListener.Protocol == nil { + additionalListener.Protocol = &infrav1.VPCLoadBalancerListenerProtocolTCP + } + lbAdditionalListeners[fmt.Sprintf("%d-%s", additionalListener.Port, *additionalListener.Protocol)] = additionalListener + } + + // loadBalancerListeners is a mapping of the loadBalancer listener's defaultPoolName to the additionalListener + // as the default pool name might be empty in spec and should be fetched from the 
cloud's listener + loadBalancerListeners := map[string]infrav1.AdditionalListenerSpec{} + for _, listener := range loadBalancer.Listeners { + listenerOptions := &vpcv1.GetLoadBalancerListenerOptions{} + listenerOptions.SetLoadBalancerID(*loadBalancer.ID) + listenerOptions.SetID(*listener.ID) + loadBalancerListener, _, err := m.IBMVPCClient.GetLoadBalancerListener(listenerOptions) + if err != nil { + return nil, fmt.Errorf("failed to list %s load balancer listener: %w", *listener.ID, err) + } + if additionalListener, ok := lbAdditionalListeners[fmt.Sprintf("%d-%s", *loadBalancerListener.Port, *loadBalancerListener.Protocol)]; ok { + if loadBalancerListener.DefaultPool != nil { + loadBalancerListeners[*loadBalancerListener.DefaultPool.Name] = additionalListener + } + // loadBalancerListeners map is created only with the listeners provided in the spec, + // and targetPort is populated only if there is an entry in the map. + // In order for the default pool 6443 to be added to all control plane machines, creating an entry in the map for the same. + } else if loadBalancerListener.Port != nil && *loadBalancerListener.Port == int64(6443) { + protocol := infrav1.VPCLoadBalancerListenerProtocol(*loadBalancerListener.Protocol) + listener := infrav1.AdditionalListenerSpec{ + Port: *loadBalancerListener.Port, + Protocol: &protocol, + } + if loadBalancerListener.DefaultPool != nil { + loadBalancerListeners[*loadBalancerListener.DefaultPool.Name] = listener + } else { + log.V(3).Error(fmt.Errorf("unable to get the default pool details"), "default pool is nil", "port", loadBalancerListener.Port) + } + } + } // Update each LoadBalancer pool + // For each pool, get the additionalListener associated with the pool from the loadBalancerListeners map. 
for _, pool := range loadBalancer.Pools { log.V(3).Info("Updating LoadBalancer pool member", "pool", *pool.Name, "loadBalancerName", *loadBalancer.Name, "IP", internalIP) listOptions := &vpcv1.ListLoadBalancerPoolMembersOptions{} @@ -1009,32 +1053,35 @@ func (m *PowerVSMachineScope) CreateVPCLoadBalancerPoolMember(ctx context.Contex var targetPort int64 var alreadyRegistered bool - if len(listLoadBalancerPoolMembers.Members) == 0 { - // For adding the first member to the pool we depend on the pool name to get the target port - // pool name will have port number appended at the end - lbNameSplit := strings.Split(*pool.Name, "-") - if len(lbNameSplit) == 0 { - // user might have created additional pool - log.V(3).Info("Not updating pool as it might be created externally", "poolName", *pool.Name) + if loadBalancerListener, ok := loadBalancerListeners[*pool.Name]; ok { + targetPort = loadBalancerListener.Port + log.V(3).Info("Checking if machine label matches with the label selector in listener", "machineLabel", m.IBMPowerVSMachine.Labels, "labelSelector", loadBalancerListener.Selector) + selector, err := metav1.LabelSelectorAsSelector(&loadBalancerListener.Selector) + if err != nil { + log.V(5).Error(err, "Skipping listener addition, failed to get label selector from spec selector") continue } - targetPort, err = strconv.ParseInt(lbNameSplit[len(lbNameSplit)-1], 10, 64) - if err != nil { - // user might have created additional pool - log.Error(err, "unable to fetch target port from pool name", "poolName", *pool.Name) + + if selector.Empty() && !util.IsControlPlaneMachine(m.Machine) { + log.V(3).Info("Skipping listener addition as the selector is empty and not a control plane machine") continue } - } else { - for _, member := range listLoadBalancerPoolMembers.Members { - if target, ok := member.Target.(*vpcv1.LoadBalancerPoolMemberTarget); ok { - targetPort = *member.Port - if *target.Address == internalIP { - alreadyRegistered = true - log.V(3).Info("Target IP already 
configured for pool", "IP", internalIP, "poolName", *pool.Name) - } + // Skip adding the listener if the selector does not match + if !selector.Empty() && !selector.Matches(labels.Set(m.IBMPowerVSMachine.Labels)) { + log.V(3).Info("Skip adding listener, machine label doesn't match with the listener label selector", "pool", *pool.Name, "IP", internalIP) + continue + } + } + + for _, member := range listLoadBalancerPoolMembers.Members { + if target, ok := member.Target.(*vpcv1.LoadBalancerPoolMemberTarget); ok { + if *target.Address == internalIP { + alreadyRegistered = true + log.V(3).Info("Target IP already configured for pool", "IP", internalIP, "poolName", *pool.Name) } } } + if alreadyRegistered { log.V(3).Info("PoolMember already exist", "poolName", *pool.Name, "IP", internalIP, "targetPort", targetPort) continue @@ -1047,7 +1094,7 @@ func (m *PowerVSMachineScope) CreateVPCLoadBalancerPoolMember(ctx context.Contex if err != nil { return nil, fmt.Errorf("failed to fetch VPC load balancer details with ID: %s error: %v", *lbID, err) } - if *loadBalancer.ProvisioningStatus != string(infrav1beta2.VPCLoadBalancerStateActive) { + if *loadBalancer.ProvisioningStatus != string(infrav1.VPCLoadBalancerStateActive) { log.V(3).Info("Unable to update pool for VPC load balancer as it is not in active state", "loadBalancerName", *loadBalancer.Name, "loadBalancerState", *loadBalancer.ProvisioningStatus) return nil, fmt.Errorf("VPC load balancer %s not in active state to update pool member", *loadBalancer.Name) } @@ -1073,10 +1120,10 @@ func (m *PowerVSMachineScope) CreateVPCLoadBalancerPoolMember(ctx context.Contex // APIServerPort returns the APIServerPort. 
func (m *PowerVSMachineScope) APIServerPort() int32 { - if m.Cluster.Spec.ClusterNetwork != nil && m.Cluster.Spec.ClusterNetwork.APIServerPort != nil { - return *m.Cluster.Spec.ClusterNetwork.APIServerPort + if m.Cluster.Spec.ClusterNetwork.APIServerPort > 0 { + return m.Cluster.Spec.ClusterNetwork.APIServerPort } - return infrav1beta2.DefaultAPIServerPort + return infrav1.DefaultAPIServerPort } // TODO: reuse getServiceName function instead. diff --git a/cloud/scope/powervs_machine_test.go b/cloud/scope/powervs_machine_test.go index dbe385f8d..2bd25a5c3 100644 --- a/cloud/scope/powervs_machine_test.go +++ b/cloud/scope/powervs_machine_test.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "path" + "testing" "time" @@ -36,16 +37,20 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/cache" "k8s.io/utils/ptr" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + resourcecontrollermock "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/resourcecontroller/mock" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs/mock" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/resourcecontroller" - resourcecontrollermock "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/resourcecontroller/mock" vpcmock "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc/mock" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/options" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" . 
"github.com/onsi/gomega" ) @@ -54,9 +59,9 @@ const ( region = "us-south" ) -func newPowerVSMachine(clusterName, machineName string, imageRef *string, networkRef *string, isID bool) *infrav1beta2.IBMPowerVSMachine { - image := &infrav1beta2.IBMPowerVSResourceReference{} - network := infrav1beta2.IBMPowerVSResourceReference{} +func newPowerVSMachine(clusterName, machineName string, imageRef *string, networkRef *string, isID bool) *infrav1.IBMPowerVSMachine { + image := &infrav1.IBMPowerVSResourceReference{} + network := infrav1.IBMPowerVSResourceReference{} if !isID { image.Name = imageRef @@ -66,15 +71,15 @@ func newPowerVSMachine(clusterName, machineName string, imageRef *string, networ network.ID = networkRef } - return &infrav1beta2.IBMPowerVSMachine{ + return &infrav1.IBMPowerVSMachine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - capiv1beta1.ClusterNameLabel: clusterName, + clusterv1.ClusterNameLabel: clusterName, }, Name: machineName, Namespace: "default", }, - Spec: infrav1beta2.IBMPowerVSMachineSpec{ + Spec: infrav1.IBMPowerVSMachineSpec{ MemoryGiB: 8, Processors: intstr.FromInt(1), Image: image, @@ -139,19 +144,19 @@ func TestAPIServerPort(t *testing.T) { name: "Returns assigned port number", expectedPortNumber: int32(6445), machineScope: PowerVSMachineScope{ - Cluster: &capiv1beta1.Cluster{ - Spec: capiv1beta1.ClusterSpec{ - ClusterNetwork: &capiv1beta1.ClusterNetwork{ - APIServerPort: ptr.To(int32(6445)), + Cluster: &clusterv1.Cluster{ + Spec: clusterv1.ClusterSpec{ + ClusterNetwork: clusterv1.ClusterNetwork{ + APIServerPort: int32(6445), }, }, }, }, }, { name: "Returns DefaultAPIServerPort when machineScope.Cluster.Spec.ClusterNetwork is nil", - expectedPortNumber: infrav1beta2.DefaultAPIServerPort, + expectedPortNumber: infrav1.DefaultAPIServerPort, machineScope: PowerVSMachineScope{ - Cluster: &capiv1beta1.Cluster{}, + Cluster: &clusterv1.Cluster{}, }, }, } @@ -175,9 +180,9 @@ func TestBucketName(t *testing.T) { name: "Bucket exists in 
COS instance", expectedBucketName: "foo-bucket", machineScope: PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - CosInstance: &infrav1beta2.CosInstance{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + CosInstance: &infrav1.CosInstance{ BucketName: "foo-bucket", }, }, @@ -187,7 +192,7 @@ func TestBucketName(t *testing.T) { name: "Deriving COS bucket name from PowerVS cluster name", expectedBucketName: fmt.Sprintf("%s-%s", "foo-cluster", "cosbucket"), machineScope: PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "foo-cluster", }, @@ -215,9 +220,9 @@ func TestBucketRegion(t *testing.T) { name: "Get bucket region from COS instance", expectedBucketRegion: region, machineScope: PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - CosInstance: &infrav1beta2.CosInstance{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + CosInstance: &infrav1.CosInstance{ BucketRegion: region, }, }, @@ -227,9 +232,9 @@ func TestBucketRegion(t *testing.T) { name: "Get bucket region from VPC region set in spec", expectedBucketRegion: region, machineScope: PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{ Region: ptr.To(region), }, }, @@ -238,8 +243,8 @@ func TestBucketRegion(t *testing.T) { }, { name: "Returns empty region when both COS instance and VPC source spec are empty", machineScope: PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{}, + IBMPowerVSCluster: 
&infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{}, }, }, }, @@ -321,9 +326,9 @@ func TestGetServiceInstanceIDForMachineScope(t *testing.T) { name: "Returns service instance ID set in IBMPowerVSCluster.Status.ServiceInstance.ID", expectedServiceInstanceID: "service-instance-0", machineScope: PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("service-instance-0"), }, }, @@ -333,8 +338,8 @@ func TestGetServiceInstanceIDForMachineScope(t *testing.T) { name: "get service instance ID from powervsClusterSpec", expectedServiceInstanceID: "service-instance-1", machineScope: PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ ServiceInstanceID: "service-instance-1", }, }, @@ -343,9 +348,9 @@ func TestGetServiceInstanceIDForMachineScope(t *testing.T) { name: "get service instance ID from powervsClusterSpec's serviceInstance", expectedServiceInstanceID: "service-instance-2", machineScope: PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("service-instance-2"), }, }, @@ -355,13 +360,13 @@ func TestGetServiceInstanceIDForMachineScope(t *testing.T) { name: "get service instance ID with serviceInstanceID present in both IBMPowerVSCluster Status and Spec ", expectedServiceInstanceID: "service-instance-in-status", machineScope: PowerVSMachineScope{ - 
IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("service-instance-in-status"), }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ ServiceInstanceID: "service-instance-in-spec", }, }, @@ -369,9 +374,9 @@ func TestGetServiceInstanceIDForMachineScope(t *testing.T) { }, { name: "Failed to find service instance id", machineScope: PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{}, }, }, }, @@ -409,15 +414,15 @@ func TestGetServiceInstanceIDForMachineScope(t *testing.T) { setup(t) t.Cleanup(teardown) scope := PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ Name: ptr.To("foo-cluster"), }, }, }, - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{ Zone: ptr.To("us-south-1"), }, }, @@ -434,15 +439,15 @@ func TestGetServiceInstanceIDForMachineScope(t *testing.T) { setup(t) t.Cleanup(teardown) scope := PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: 
&infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ Name: ptr.To("foo-cluster"), }, }, }, - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{ Zone: ptr.To("us-south-1"), }, }, @@ -459,8 +464,8 @@ func TestSetReady(t *testing.T) { t.Run("Set Machine status to ready", func(t *testing.T) { g := NewWithT(t) machineScope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, } machineScope.SetReady() @@ -472,8 +477,8 @@ func TestSetNotReady(t *testing.T) { t.Run("Set status of machine as not ready", func(t *testing.T) { g := NewWithT(t) machineScope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{ Ready: true, }, }, @@ -492,8 +497,8 @@ func TestGetRegion(t *testing.T) { { name: "Returns region set in spec", scope: PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{ Region: ptr.To(region), }, }, @@ -502,8 +507,8 @@ func TestGetRegion(t *testing.T) { }, { name: "Return empty string when region is not set in spec", scope: PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{ Region: nil, }, }, @@ -528,16 +533,16 @@ func TestSetRegion(t *testing.T) { { name: "Set region to us-east in IBMPowerVSMachine status", 
scope: PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, }, expectedRegion: "us-east", }, { name: "Set region to empty value in IBMPowerVSMachine status", scope: PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, }, }, @@ -561,8 +566,8 @@ func TestGetZone(t *testing.T) { { name: "Machine's zone is set", scope: PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{ Zone: ptr.To("us-south-1"), }, }, @@ -571,8 +576,8 @@ func TestGetZone(t *testing.T) { }, { name: "Machine's zone is nil", scope: PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{ Zone: nil, }, }, @@ -597,16 +602,16 @@ func TestSetZone(t *testing.T) { { name: "Set machine's zone to us-east-1", scope: PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, }, expectedZone: "us-east-1", }, { name: "Set machine's zone to an empty value", scope: PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, }, }, @@ -625,12 +630,12 @@ func TestGetInstanceState(t *testing.T) { t.Run("Set PowerVS instance state to ready", func(t 
*testing.T) { g := NewWithT(t) machineScope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, } machineScope.SetInstanceState(ptr.To("ready")) - g.Expect(machineScope.GetInstanceState()).To(Equal(infrav1beta2.PowerVSInstanceState("ready"))) + g.Expect(machineScope.GetInstanceState()).To(Equal(infrav1.PowerVSInstanceState("ready"))) }) } @@ -642,17 +647,17 @@ func TestGetIgnitionVersion(t *testing.T) { }{ { name: "Ignition version is nil", - expectedIgnitionVersion: infrav1beta2.DefaultIgnitionVersion, + expectedIgnitionVersion: infrav1.DefaultIgnitionVersion, scope: PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, }, }, { name: "Custom Ignition Version is set", expectedIgnitionVersion: "3.4", scope: PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - Ignition: &infrav1beta2.Ignition{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + Ignition: &infrav1.Ignition{ Version: "3.4", }, }, @@ -678,7 +683,7 @@ func TestBootstrapDataKey(t *testing.T) { }{ { name: "Returns BootstrapDataKey for a machine in control plane", - machineLabel: capiv1beta1.MachineControlPlaneLabel, + machineLabel: clusterv1.MachineControlPlaneLabel, machineName: "foo-machine-0", expectedBootstrapDataKey: path.Join("control-plane", "foo-machine-0"), }, @@ -694,12 +699,12 @@ func TestBootstrapDataKey(t *testing.T) { t.Run(tc.name, func(_ *testing.T) { g := NewWithT(t) machineScope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ ObjectMeta: metav1.ObjectMeta{ Name: tc.machineName, }, }, - Machine: &capiv1beta1.Machine{ + Machine: &clusterv1.Machine{ ObjectMeta: 
metav1.ObjectMeta{ Labels: map[string]string{ tc.machineLabel: "", @@ -733,7 +738,7 @@ func TestGetNetworkID(t *testing.T) { g := NewWithT(t) scope := PowerVSMachineScope{} expectedNetworkID := networkID - networkResource := infrav1beta2.IBMPowerVSResourceReference{ + networkResource := infrav1.IBMPowerVSResourceReference{ ID: ptr.To(expectedNetworkID), } networkID, err := getNetworkID(networkResource, &scope) @@ -754,7 +759,7 @@ func TestGetNetworkID(t *testing.T) { }, }, } - networkResource := infrav1beta2.IBMPowerVSResourceReference{ + networkResource := infrav1.IBMPowerVSResourceReference{ Name: ptr.To(networkName), } @@ -781,7 +786,7 @@ func TestGetNetworkID(t *testing.T) { }, }, } - networkResource := infrav1beta2.IBMPowerVSResourceReference{ + networkResource := infrav1.IBMPowerVSResourceReference{ Name: ptr.To(expectedNetworkIName), } @@ -808,7 +813,7 @@ func TestGetNetworkID(t *testing.T) { }, }, } - networkResource := infrav1beta2.IBMPowerVSResourceReference{ + networkResource := infrav1.IBMPowerVSResourceReference{ RegEx: ptr.To("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"), } @@ -834,7 +839,7 @@ func TestGetNetworkID(t *testing.T) { }, }, } - networkResource := infrav1beta2.IBMPowerVSResourceReference{ + networkResource := infrav1.IBMPowerVSResourceReference{ RegEx: ptr.To(regex), } @@ -849,7 +854,7 @@ func TestGetNetworkID(t *testing.T) { t.Run("When ID, name and regex are all nil", func(t *testing.T) { g := NewWithT(t) - networkResource := infrav1beta2.IBMPowerVSResourceReference{} + networkResource := infrav1.IBMPowerVSResourceReference{} scope := PowerVSMachineScope{} networkID, err := getNetworkID(networkResource, &scope) g.Expect(networkID).To(BeNil()) @@ -864,8 +869,8 @@ func TestGetMachineInternalIP(t *testing.T) { g := NewWithT(t) expectedAddress := "10.0.0.1" scope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{ + IBMPowerVSMachine: 
&infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{ Addresses: []corev1.NodeAddress{ { Type: corev1.NodeInternalIP, @@ -881,8 +886,8 @@ func TestGetMachineInternalIP(t *testing.T) { t.Run("Returns empty IP for address type - node external IP", func(t *testing.T) { g := NewWithT(t) scope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{ Addresses: []corev1.NodeAddress{ { Type: corev1.NodeExternalIP, @@ -898,7 +903,7 @@ func TestGetMachineInternalIP(t *testing.T) { t.Run("Returns empty IP if powervsmachineStatus in nil", func(t *testing.T) { g := NewWithT(t) scope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{}, } g.Expect("").To(Equal(scope.GetMachineInternalIP())) }) @@ -919,12 +924,12 @@ func TestSetProviderID(t *testing.T) { t.Run("failed to get service instance ID", func(t *testing.T) { g := NewWithT(t) scope := PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{}, + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{}, }, }, } @@ -935,14 +940,14 @@ func TestSetProviderID(t *testing.T) { t.Run("Set Provider ID in v2 format", func(t *testing.T) { g := NewWithT(t) scope := PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: 
&infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("foo-service-instance-id"), }, }, }, - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{}, } options.ProviderIDFormat = string(options.ProviderIDFormatV2) scope.SetZone("us-south-1") @@ -1003,13 +1008,13 @@ func TestCreateCOSClient(t *testing.T) { t.Cleanup(teardown) scope := setupPowerVSMachineScope(clusterName, machineName, ptr.To(pvsImage), ptr.To(pvsNetwork), true, mockpowervs) serviceInstance := &resourcecontrollerv2.ResourceInstance{ - State: ptr.To(string(infrav1beta2.ServiceInstanceStateProvisioning)), + State: ptr.To(string(infrav1.ServiceInstanceStateProvisioning)), } cosInstanceName := fmt.Sprintf("%s-%s", scope.IBMPowerVSCluster.GetName(), "cosinstance") mockResourceController.EXPECT().GetInstanceByName(cosInstanceName, resourcecontroller.CosResourceID, resourcecontroller.CosResourcePlanID).Return(serviceInstance, nil) scope.ResourceClient = mockResourceController result, err := scope.createCOSClient(ctx) - expectedError := fmt.Sprintf("COS service instance is not in active state, current state: %s", infrav1beta2.ServiceInstanceStateProvisioning) + expectedError := fmt.Sprintf("COS service instance is not in active state, current state: %s", infrav1.ServiceInstanceStateProvisioning) g.Expect(result).To(BeNil()) g.Expect(err.Error()).To(ContainSubstring(expectedError)) }) @@ -1020,7 +1025,7 @@ func TestCreateCOSClient(t *testing.T) { t.Cleanup(teardown) scope := setupPowerVSMachineScope(clusterName, machineName, ptr.To(pvsImage), ptr.To(pvsNetwork), true, mockpowervs) serviceInstance := &resourcecontrollerv2.ResourceInstance{ - State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive)), + State: ptr.To(string(infrav1.ServiceInstanceStateActive)), } scope.SetRegion(region) cosInstanceName := fmt.Sprintf("%s-%s", scope.IBMPowerVSCluster.GetName(), "cosinstance") @@ -1037,7 
+1042,7 @@ func TestCreateCOSClient(t *testing.T) { t.Cleanup(teardown) scope := setupPowerVSMachineScope(clusterName, machineName, ptr.To(pvsImage), ptr.To(pvsNetwork), true, mockpowervs) serviceInstance := &resourcecontrollerv2.ResourceInstance{ - State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive)), + State: ptr.To(string(infrav1.ServiceInstanceStateActive)), GUID: ptr.To("foo-guid"), } scope.SetRegion(region) @@ -1045,7 +1050,7 @@ func TestCreateCOSClient(t *testing.T) { mockResourceController.EXPECT().GetInstanceByName(cosInstanceName, resourcecontroller.CosResourceID, resourcecontroller.CosResourcePlanID).Return(serviceInstance, nil) scope.ResourceClient = mockResourceController expectedBucketRegion := region - scope.IBMPowerVSCluster.Spec.CosInstance = &infrav1beta2.CosInstance{BucketRegion: expectedBucketRegion} + scope.IBMPowerVSCluster.Spec.CosInstance = &infrav1.CosInstance{BucketRegion: expectedBucketRegion} _, err := scope.createCOSClient(ctx) g.Expect(err).To(BeNil()) }) @@ -1072,8 +1077,8 @@ func TestSetInstanceID(t *testing.T) { g := NewWithT(t) t.Run(tc.name, func(_ *testing.T) { scope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, } scope.SetInstanceID(tc.instanceID) @@ -1086,13 +1091,13 @@ func TestSetFailureReason(t *testing.T) { t.Run("Set failure reason to InvalidConfiguration", func(t *testing.T) { g := NewWithT(t) scope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, } - scope.SetFailureReason(infrav1beta2.UpdateMachineError) + scope.SetFailureReason(infrav1.UpdateMachineError) //nolint:staticcheck - 
g.Expect(*scope.IBMPowerVSMachine.Status.FailureReason).To(Equal(infrav1beta2.UpdateMachineError)) + g.Expect(*scope.IBMPowerVSMachine.Status.FailureReason).To(Equal(infrav1.UpdateMachineError)) }) } @@ -1101,8 +1106,8 @@ func TestSetHealth(t *testing.T) { t.Run("Set PVMInstance status to healthy", func(t *testing.T) { g := NewWithT(t) scope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, } healthStatus := &models.PVMInstanceHealth{ @@ -1114,8 +1119,8 @@ func TestSetHealth(t *testing.T) { t.Run("Set PVMInstance status to nil", func(t *testing.T) { g := NewWithT(t) scope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, } scope.SetHealth(nil) @@ -1128,8 +1133,8 @@ func TestSetFailureMessage(t *testing.T) { t.Run("Set failure message for PowerVSMachine status", func(t *testing.T) { g := NewWithT(t) scope := PowerVSMachineScope{ - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, } failureMessage := "invalid configuration provided" @@ -1142,9 +1147,9 @@ func TestDeleteMachineIgnition(t *testing.T) { t.Run("Fails to retrieve bootstrap data: linked Machine's bootstrap.dataSecretName is nil", func(t *testing.T) { g := NewWithT(t) scope := PowerVSMachineScope{ - Machine: &capiv1beta1.Machine{ - Spec: capiv1beta1.MachineSpec{ - Bootstrap: capiv1beta1.Bootstrap{ + Machine: &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ DataSecretName: nil, }, }, @@ -1162,12 +1167,12 @@ func TestDeleteMachineIgnition(t *testing.T) { client := 
fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(initObjects...).Build() scope := PowerVSMachineScope{ Client: client, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{}, }, - Machine: &capiv1beta1.Machine{ - Spec: capiv1beta1.MachineSpec{ - Bootstrap: capiv1beta1.Bootstrap{ + Machine: &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To(machineName), }, }, @@ -1192,20 +1197,20 @@ func TestDeleteMachineIgnition(t *testing.T) { mockResourceController.EXPECT().GetInstanceByName(cosInstanceName, resourcecontroller.CosResourceID, resourcecontroller.CosResourcePlanID).Return(nil, errors.New("error listing cos instances")) scope := PowerVSMachineScope{ Client: client, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ Name: clusterName, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - Ignition: &infrav1beta2.Ignition{ + Spec: infrav1.IBMPowerVSClusterSpec{ + Ignition: &infrav1.Ignition{ Version: "3.1", }, }, }, ResourceClient: mockResourceController, - Machine: &capiv1beta1.Machine{ - Spec: capiv1beta1.MachineSpec{ - Bootstrap: capiv1beta1.Bootstrap{ + Machine: &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To(machineName), }, }, @@ -1228,7 +1233,7 @@ func TestDeleteMachineIgnition(t *testing.T) { mockResourceController := resourcecontrollermock.NewMockResourceController(gomock.NewController(t)) cosInstanceName := fmt.Sprintf("%s-%s", clusterName, "cosinstance") serviceInstance := new(resourcecontrollerv2.ResourceInstance) - state := string(infrav1beta2.ServiceInstanceStateActive) + state := string(infrav1.ServiceInstanceStateActive) serviceInstance.State = &state guid := "foo-guid" serviceInstance.GUID = &guid @@ -1236,26 
+1241,26 @@ func TestDeleteMachineIgnition(t *testing.T) { mockResourceController.EXPECT().GetInstanceByName(cosInstanceName, resourcecontroller.CosResourceID, resourcecontroller.CosResourcePlanID).Return(serviceInstance, nil) scope := PowerVSMachineScope{ Client: client, - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{}, }, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ Name: clusterName, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - Ignition: &infrav1beta2.Ignition{ + Spec: infrav1.IBMPowerVSClusterSpec{ + Ignition: &infrav1.Ignition{ Version: "3.1", }, - CosInstance: &infrav1beta2.CosInstance{ + CosInstance: &infrav1.CosInstance{ BucketRegion: expectedBucketRegion, }, }, }, ResourceClient: mockResourceController, - Machine: &capiv1beta1.Machine{ - Spec: capiv1beta1.MachineSpec{ - Bootstrap: capiv1beta1.Bootstrap{ + Machine: &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To(machineName), }, }, @@ -1344,8 +1349,8 @@ func TestCreateMachinePVS(t *testing.T) { t.Cleanup(teardown) expectedOutput := (*models.PVMInstanceReference)(nil) scope := setupPowerVSMachineScope(clusterName, "foo-machine-2", ptr.To(pvsImage), ptr.To(pvsNetwork), true, mockpowervs) - scope.IBMPowerVSMachine.Status.Conditions = append(scope.IBMPowerVSMachine.Status.Conditions, capiv1beta1.Condition{ - Type: infrav1beta2.InstanceReadyCondition, + scope.IBMPowerVSMachine.Status.Conditions = append(scope.IBMPowerVSMachine.Status.Conditions, clusterv1beta1.Condition{ + Type: infrav1.InstanceReadyCondition, Status: corev1.ConditionUnknown, }) mockpowervs.EXPECT().GetAllInstance().Return(pvmInstances, nil) @@ -1394,7 +1399,7 @@ func TestCreateMachinePVS(t *testing.T) { secret := &corev1.Secret{ ObjectMeta: 
metav1.ObjectMeta{ Labels: map[string]string{ - capiv1beta1.ClusterNameLabel: clusterName, + clusterv1.ClusterNameLabel: clusterName, }, Name: machineName, Namespace: "default", @@ -1424,8 +1429,8 @@ func TestCreateMachinePVS(t *testing.T) { setup(t) t.Cleanup(teardown) scope := setupPowerVSMachineScope(clusterName, machineName, nil, ptr.To(pvsNetwork), true, mockpowervs) - scope.IBMPowerVSImage = &infrav1beta2.IBMPowerVSImage{ - Status: infrav1beta2.IBMPowerVSImageStatus{ + scope.IBMPowerVSImage = &infrav1.IBMPowerVSImage{ + Status: infrav1.IBMPowerVSImageStatus{ ImageID: "foo-image", }, } @@ -1509,14 +1514,364 @@ func TestCreateVPCLoadBalancerPoolMemberPowerVSMachine(t *testing.T) { nodeAddress := "10.0.0.1" loadBalancerID := "xyz-xyz-xyz" + loadBalancerName := "load-balancer-0" + t.Run("Skip adding listener if the machine label and listener label doesnot match", func(t *testing.T) { + g := NewWithT(t) + setup(t) + t.Cleanup(teardown) + loadBalancerName := loadBalancerName + loadBalancers := &vpcv1.LoadBalancer{ + ID: ptr.To(loadBalancerID), + Name: ptr.To(loadBalancerName), + ProvisioningStatus: (*string)(&infrav1.VPCLoadBalancerStateActive), + Pools: []vpcv1.LoadBalancerPoolReference{ + { + ID: ptr.To("pool-id-23"), + Name: ptr.To("pool-23"), + }, + }, + Listeners: []vpcv1.LoadBalancerListenerReference{ + { + ID: ptr.To("pool-id-23"), + }, + }, + } + loadBalancerListener := &vpcv1.LoadBalancerListener{ + DefaultPool: &vpcv1.LoadBalancerPoolReference{ + Name: ptr.To("pool-23"), + }, + ID: ptr.To("pool-id-23"), + Port: ptr.To(int64(23)), + Protocol: ptr.To("tcp"), + } + mockClient := vpcmock.NewMockVpc(mockCtrl) + + scope := PowerVSMachineScope{ + Machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{}, + }, + IBMVPCClient: mockClient, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "listener-selector": "port-22", + }, + }, + }, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: 
infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ + { + Name: loadBalancerName, + ID: ptr.To(loadBalancerID), + AdditionalListeners: []infrav1.AdditionalListenerSpec{ + { + Port: 23, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "listener-selector": "port-23", + }, + }, + }, + }, + }, + }, + }, + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ + loadBalancerName: { + ID: ptr.To(loadBalancerID), + }, + }, + }, + }, + } + + mockClient.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancers, nil, nil).AnyTimes() + mockClient.EXPECT().GetLoadBalancerListener(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerListenerOptions{})).Return(loadBalancerListener, nil, nil).AnyTimes() + mockClient.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, nil, nil).AnyTimes() + result, err := scope.CreateVPCLoadBalancerPoolMember(ctx) + + g.Expect(err).To(BeNil()) + g.Expect(result).To(BeNil()) + }) + + t.Run("Add listener if the machine label and listener label matches", func(t *testing.T) { + g := NewWithT(t) + setup(t) + t.Cleanup(teardown) + loadBalancerName := loadBalancerName + loadBalancers := &vpcv1.LoadBalancer{ + ID: ptr.To(loadBalancerID), + Name: ptr.To(loadBalancerName), + ProvisioningStatus: (*string)(&infrav1.VPCLoadBalancerStateActive), + Pools: []vpcv1.LoadBalancerPoolReference{ + { + ID: ptr.To("pool-id-22"), + Name: ptr.To("pool-22"), + }, + }, + Listeners: []vpcv1.LoadBalancerListenerReference{ + { + ID: ptr.To("pool-id-22"), + }, + { + ID: ptr.To("pool-id-23"), + }, + }, + } + loadBalancerListener := &vpcv1.LoadBalancerListener{ + DefaultPool: &vpcv1.LoadBalancerPoolReference{ + Name: ptr.To("pool-22"), + }, + ID: ptr.To("pool-id-22"), + Port: ptr.To(int64(22)), + Protocol: ptr.To("tcp"), + } + mockClient := 
vpcmock.NewMockVpc(mockCtrl) + + scope := PowerVSMachineScope{ + Machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{}, + }, + IBMVPCClient: mockClient, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "listener-selector": "port-22", + }, + }, + }, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ + { + Name: loadBalancerName, + ID: ptr.To(loadBalancerID), + AdditionalListeners: []infrav1.AdditionalListenerSpec{ + { + Port: 22, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "listener-selector": "port-22", + }, + }, + }, + }, + }, + }, + }, + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ + loadBalancerName: { + ID: ptr.To(loadBalancerID), + }, + }, + }, + }, + } + + mockClient.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancers, nil, nil).AnyTimes() + mockClient.EXPECT().GetLoadBalancerListener(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerListenerOptions{})).Return(loadBalancerListener, nil, nil).AnyTimes() + mockClient.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, nil, nil).AnyTimes() + expectedLoadBalancerPoolMemberID := "pool-member-3" + expectedLoadBalancerPoolMember := &vpcv1.LoadBalancerPoolMember{ID: ptr.To(expectedLoadBalancerPoolMemberID)} + mockClient.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(expectedLoadBalancerPoolMember, nil, nil).AnyTimes() + result, err := scope.CreateVPCLoadBalancerPoolMember(ctx) + + g.Expect(err).To(BeNil()) + g.Expect(*result.ID).To(Equal(expectedLoadBalancerPoolMemberID)) + }) + + t.Run("Skip adding non control plane nodes if there is no selector", func(t 
*testing.T) { + g := NewWithT(t) + setup(t) + t.Cleanup(teardown) + loadBalancerName := loadBalancerName + loadBalancers := &vpcv1.LoadBalancer{ + ID: ptr.To(loadBalancerID), + Name: ptr.To(loadBalancerName), + ProvisioningStatus: (*string)(&infrav1.VPCLoadBalancerStateActive), + Pools: []vpcv1.LoadBalancerPoolReference{ + { + ID: ptr.To("pool-id-6443"), + Name: ptr.To("pool-6443"), + }, + }, + Listeners: []vpcv1.LoadBalancerListenerReference{ + { + ID: ptr.To("pool-id-6443"), + }, + { + ID: ptr.To("pool-id-1"), + }, + }, + } + loadBalancerListener := &vpcv1.LoadBalancerListener{ + DefaultPool: &vpcv1.LoadBalancerPoolReference{ + Name: ptr.To("pool-6443"), + }, + ID: ptr.To("pool-id-6443"), + Port: ptr.To(int64(6443)), + Protocol: ptr.To("tcp"), + } + mockClient := vpcmock.NewMockVpc(mockCtrl) + + scope := PowerVSMachineScope{ + Machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{}, + }, + IBMVPCClient: mockClient, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "listener-selector": "port-6443", + }, + }, + }, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ + { + Name: loadBalancerName, + ID: ptr.To(loadBalancerID), + AdditionalListeners: []infrav1.AdditionalListenerSpec{ + { + Port: 6443, + }, + }, + }, + }, + }, + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ + loadBalancerName: { + ID: ptr.To(loadBalancerID), + }, + }, + }, + }, + } + + mockClient.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancers, nil, nil).AnyTimes() + mockClient.EXPECT().GetLoadBalancerListener(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerListenerOptions{})).Return(loadBalancerListener, nil, nil).AnyTimes() + 
mockClient.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, nil, nil).AnyTimes() + result, err := scope.CreateVPCLoadBalancerPoolMember(ctx) + + g.Expect(err).To(BeNil()) + g.Expect(result).To(BeNil()) + }) + t.Run("Adding control plane nodes even if there is no selector", func(t *testing.T) { + g := NewWithT(t) + setup(t) + t.Cleanup(teardown) + loadBalancerName := loadBalancerName + loadBalancers := &vpcv1.LoadBalancer{ + ID: ptr.To(loadBalancerID), + Name: ptr.To(loadBalancerName), + ProvisioningStatus: (*string)(&infrav1.VPCLoadBalancerStateActive), + Pools: []vpcv1.LoadBalancerPoolReference{ + { + ID: ptr.To("pool-id-6443"), + Name: ptr.To("pool-6443"), + }, + { + ID: ptr.To("pool-id-24"), + Name: ptr.To("pool-24"), + }, + }, + Listeners: []vpcv1.LoadBalancerListenerReference{ + { + ID: ptr.To("pool-id-6443"), + }, + { + ID: ptr.To("pool-id-24"), + }, + }, + } + loadBalancerListener6443 := &vpcv1.LoadBalancerListener{ + DefaultPool: &vpcv1.LoadBalancerPoolReference{ + Name: ptr.To("pool-6443"), + }, + ID: ptr.To("pool-id-6443"), + Port: ptr.To(int64(6443)), + Protocol: ptr.To("tcp"), + } + loadBalancerListener24 := &vpcv1.LoadBalancerListener{ + DefaultPool: &vpcv1.LoadBalancerPoolReference{ + Name: ptr.To("pool-24"), + }, + ID: ptr.To("pool-id-24"), + Port: ptr.To(int64(24)), + Protocol: ptr.To("tcp"), + } + mockClient := vpcmock.NewMockVpc(mockCtrl) + + scope := PowerVSMachineScope{ + Machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "cluster.x-k8s.io/control-plane": "true", + }, + }, + }, + IBMVPCClient: mockClient, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ + { + Name: loadBalancerName, + ID: ptr.To(loadBalancerID), + AdditionalListeners: 
[]infrav1.AdditionalListenerSpec{ + { + Port: 6443, + }, + { + Port: 24, + }, + }, + }, + }, + }, + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ + loadBalancerName: { + ID: ptr.To(loadBalancerID), + }, + }, + }, + }, + } + + mockClient.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancers, nil, nil).AnyTimes() + mockClient.EXPECT().GetLoadBalancerListener(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerListenerOptions{LoadBalancerID: ptr.To(loadBalancerID), ID: ptr.To("pool-id-6443")})).Return(loadBalancerListener6443, nil, nil).AnyTimes() + mockClient.EXPECT().GetLoadBalancerListener(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerListenerOptions{LoadBalancerID: ptr.To(loadBalancerID), ID: ptr.To("pool-id-24")})).Return(loadBalancerListener24, nil, nil).AnyTimes() + mockClient.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, nil, nil).AnyTimes() + expectedLoadBalancerPoolMemberID6443 := "pool-member-6443" + expectedLoadBalancerPoolMember6443 := &vpcv1.LoadBalancerPoolMember{ID: ptr.To(expectedLoadBalancerPoolMemberID6443)} + mockClient.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(expectedLoadBalancerPoolMember6443, nil, nil).Times(1) + result, err := scope.CreateVPCLoadBalancerPoolMember(ctx) + + g.Expect(err).To(BeNil()) + g.Expect(*result.ID).To(Equal(expectedLoadBalancerPoolMemberID6443)) + + expectedLoadBalancerPoolMemberID24 := "pool-member-24" + expectedLoadBalancerPoolMember24 := &vpcv1.LoadBalancerPoolMember{ID: ptr.To(expectedLoadBalancerPoolMemberID24)} + mockClient.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(expectedLoadBalancerPoolMember24, nil, nil).Times(1) + result1, err1 := 
scope.CreateVPCLoadBalancerPoolMember(ctx) + + g.Expect(err1).To(BeNil()) + g.Expect(*result1.ID).To(Equal(expectedLoadBalancerPoolMemberID24)) + }) t.Run("Create VPC Load Balancer Pool Member", func(t *testing.T) { t.Run("No load balancers present in status", func(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) scope := PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ LoadBalancers: nil, }, }, @@ -1535,18 +1890,18 @@ func TestCreateVPCLoadBalancerPoolMemberPowerVSMachine(t *testing.T) { mockClient.EXPECT().GetLoadBalancer(&vpcv1.GetLoadBalancerOptions{ID: ptr.To(loadBalancerID)}).Return(nil, nil, errors.New("error getting load balancer")) scope := PowerVSMachineScope{ IBMVPCClient: mockClient, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { - Name: "load-balancer-0", + Name: loadBalancerName, ID: ptr.To(loadBalancerID), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ - "load-balancer-0": { + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ + loadBalancerName: { ID: ptr.To(loadBalancerID), }, }, @@ -1564,25 +1919,25 @@ func TestCreateVPCLoadBalancerPoolMemberPowerVSMachine(t *testing.T) { setup(t) t.Cleanup(teardown) loadBalancers := &vpcv1.LoadBalancer{ - ProvisioningStatus: (*string)(&infrav1beta2.VPCLoadBalancerStateCreatePending), + ProvisioningStatus: (*string)(&infrav1.VPCLoadBalancerStateCreatePending), } mockClient := vpcmock.NewMockVpc(mockCtrl) mockClient.EXPECT().GetLoadBalancer(&vpcv1.GetLoadBalancerOptions{ID: 
ptr.To(loadBalancerID)}).Return(loadBalancers, nil, nil) scope := PowerVSMachineScope{ IBMVPCClient: mockClient, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { - Name: "load-balancer-0", + Name: loadBalancerName, ID: ptr.To(loadBalancerID), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ - "load-balancer-0": { + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ + loadBalancerName: { ID: ptr.To(loadBalancerID), }, }, @@ -1600,24 +1955,24 @@ func TestCreateVPCLoadBalancerPoolMemberPowerVSMachine(t *testing.T) { setup(t) t.Cleanup(teardown) loadBalancers := &vpcv1.LoadBalancer{ - ProvisioningStatus: (*string)(&infrav1beta2.VPCLoadBalancerStateActive), + ProvisioningStatus: (*string)(&infrav1.VPCLoadBalancerStateActive), } mockClient := vpcmock.NewMockVpc(mockCtrl) mockClient.EXPECT().GetLoadBalancer(&vpcv1.GetLoadBalancerOptions{ID: ptr.To(loadBalancerID)}).Return(loadBalancers, nil, nil) scope := PowerVSMachineScope{ IBMVPCClient: mockClient, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { - Name: "load-balancer-0", + Name: loadBalancerName, ID: ptr.To(loadBalancerID), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ - "load-balancer-0": { + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ + loadBalancerName: { ID: ptr.To(loadBalancerID), }, }, @@ 
-1634,12 +1989,11 @@ func TestCreateVPCLoadBalancerPoolMemberPowerVSMachine(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - loadBalancerName := "load-balancer-0" targetPort := 3430 loadBalancers := &vpcv1.LoadBalancer{ ID: ptr.To(loadBalancerID), Name: ptr.To(loadBalancerName), - ProvisioningStatus: (*string)(&infrav1beta2.VPCLoadBalancerStateActive), + ProvisioningStatus: (*string)(&infrav1.VPCLoadBalancerStateActive), Pools: []vpcv1.LoadBalancerPoolReference{ { ID: ptr.To("pool-id-0"), @@ -1657,8 +2011,8 @@ func TestCreateVPCLoadBalancerPoolMemberPowerVSMachine(t *testing.T) { scope := PowerVSMachineScope{ IBMVPCClient: mockClient, - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{ Addresses: []corev1.NodeAddress{ { Address: nodeAddress, @@ -1667,17 +2021,17 @@ func TestCreateVPCLoadBalancerPoolMemberPowerVSMachine(t *testing.T) { }, }, }, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: loadBalancerName, ID: ptr.To(loadBalancerID), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ loadBalancerName: { ID: ptr.To(loadBalancerID), }, @@ -1699,16 +2053,16 @@ func TestCreateVPCLoadBalancerPoolMemberPowerVSMachine(t *testing.T) { t.Run("Failed to find VPC load balancer ID", func(t *testing.T) { g := NewWithT(t) scope := PowerVSMachineScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + 
IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To(loadBalancerID), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{}, + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{}, }, }, } @@ -1721,13 +2075,12 @@ func TestCreateVPCLoadBalancerPoolMemberPowerVSMachine(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - loadBalancerName := "load-balancer-0" targetPort := 3430 loadBalancers := &vpcv1.LoadBalancer{ ID: ptr.To(loadBalancerID), Name: ptr.To(loadBalancerName), - ProvisioningStatus: (*string)(&infrav1beta2.VPCLoadBalancerStateActive), + ProvisioningStatus: (*string)(&infrav1.VPCLoadBalancerStateActive), Pools: []vpcv1.LoadBalancerPoolReference{ { ID: ptr.To("pool-id-2"), @@ -1739,8 +2092,8 @@ func TestCreateVPCLoadBalancerPoolMemberPowerVSMachine(t *testing.T) { scope := PowerVSMachineScope{ IBMVPCClient: mockClient, - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ - Status: infrav1beta2.IBMPowerVSMachineStatus{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ + Status: infrav1.IBMPowerVSMachineStatus{ Addresses: []corev1.NodeAddress{ { Address: nodeAddress, @@ -1749,17 +2102,17 @@ func TestCreateVPCLoadBalancerPoolMemberPowerVSMachine(t *testing.T) { }, }, }, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: loadBalancerName, ID: ptr.To(loadBalancerID), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ loadBalancerName: { 
ID: ptr.To(loadBalancerID), }, diff --git a/cloud/scope/suite_test.go b/cloud/scope/suite_test.go index e0d0bfe97..d071f6842 100644 --- a/cloud/scope/suite_test.go +++ b/cloud/scope/suite_test.go @@ -24,10 +24,10 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ctrl "sigs.k8s.io/controller-runtime" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/internal/webhooks" "sigs.k8s.io/cluster-api-provider-ibmcloud/test/helpers" ) @@ -46,8 +46,8 @@ func TestMain(m *testing.M) { // Setting up the test environment. func setup() { - utilruntime.Must(infrav1beta2.AddToScheme(scheme.Scheme)) - utilruntime.Must(capiv1beta1.AddToScheme(scheme.Scheme)) + utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) + utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), }, diff --git a/cloud/scope/util.go b/cloud/scope/util.go index 6ce84d754..8a7db04ad 100644 --- a/cloud/scope/util.go +++ b/cloud/scope/util.go @@ -24,12 +24,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" ) // GetClusterByName finds and return a Cluster object using the specified params. 
-func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*infrav1beta2.IBMPowerVSCluster, error) { - cluster := &infrav1beta2.IBMPowerVSCluster{} +func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*infrav1.IBMPowerVSCluster, error) { + cluster := &infrav1.IBMPowerVSCluster{} key := client.ObjectKey{ Namespace: namespace, Name: name, @@ -43,12 +43,12 @@ func GetClusterByName(ctx context.Context, c client.Client, namespace, name stri } // CheckCreateInfraAnnotation checks for annotations set on IBMPowerVSCluster object to determine cluster creation workflow. -func CheckCreateInfraAnnotation(cluster infrav1beta2.IBMPowerVSCluster) bool { +func CheckCreateInfraAnnotation(cluster infrav1.IBMPowerVSCluster) bool { annotations := cluster.GetAnnotations() if len(annotations) == 0 { return false } - value, found := annotations[infrav1beta2.CreateInfrastructureAnnotation] + value, found := annotations[infrav1.CreateInfrastructureAnnotation] if !found { return false } diff --git a/cloud/scope/vpc_cluster.go b/cloud/scope/vpc_cluster.go index a07a70feb..2d8b3eaa8 100644 --- a/cloud/scope/vpc_cluster.go +++ b/cloud/scope/vpc_cluster.go @@ -34,12 +34,13 @@ import ( "k8s.io/klog/v2/textlogger" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/authenticator" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/cos" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/globaltagging" @@ -65,8 +66,8 @@ const ( // 
VPCClusterScopeParams defines the input parameters used to create a new VPCClusterScope. type VPCClusterScopeParams struct { Client client.Client - Cluster *capiv1beta1.Cluster - IBMVPCCluster *infrav1beta2.IBMVPCCluster + Cluster *clusterv1.Cluster + IBMVPCCluster *infrav1.IBMVPCCluster Logger logr.Logger ServiceEndpoint []endpoints.ServiceEndpoint @@ -77,7 +78,7 @@ type VPCClusterScopeParams struct { type VPCClusterScope struct { logr.Logger Client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper COSClient cos.Cos GlobalTaggingClient globaltagging.GlobalTagging @@ -85,8 +86,8 @@ type VPCClusterScope struct { ResourceManagerClient resourcemanager.ResourceManager VPCClient vpc.Vpc - Cluster *capiv1beta1.Cluster - IBMVPCCluster *infrav1beta2.IBMVPCCluster + Cluster *clusterv1.Cluster + IBMVPCCluster *infrav1.IBMVPCCluster ServiceEndpoint []endpoints.ServiceEndpoint } @@ -108,7 +109,7 @@ func NewVPCClusterScope(params VPCClusterScopeParams) (*VPCClusterScope, error) params.Logger = textlogger.NewLogger(textlogger.NewConfig()) } - helper, err := patch.NewHelper(params.IBMVPCCluster, params.Client) + helper, err := v1beta1patch.NewHelper(params.IBMVPCCluster, params.Client) if err != nil { return nil, fmt.Errorf("error failed to init patch helper: %w", err) } @@ -209,12 +210,12 @@ func (s *VPCClusterScope) Name() string { } // NetworkSpec returns the VPCClusterScope's Network spec. -func (s *VPCClusterScope) NetworkSpec() *infrav1beta2.VPCNetworkSpec { +func (s *VPCClusterScope) NetworkSpec() *infrav1.VPCNetworkSpec { return s.IBMVPCCluster.Spec.Network } // NetworkStatus returns the VPCClusterScope's Network status. 
-func (s *VPCClusterScope) NetworkStatus() *infrav1beta2.VPCNetworkStatus { +func (s *VPCClusterScope) NetworkStatus() *infrav1.VPCNetworkStatus { return s.IBMVPCCluster.Status.Network } @@ -230,7 +231,7 @@ func (s *VPCClusterScope) CheckTagExists(tagName string) (bool, error) { // GetAPIServerPort will return the API Server's port. func (s *VPCClusterScope) GetAPIServerPort() int32 { // TODO(cjschaef): Add logic to handle cases not default. - return infrav1beta2.DefaultAPIServerPort + return infrav1.DefaultAPIServerPort } // GetControlPlaneSubnetIDs returns all of the Control Plane subnet Id's. @@ -297,7 +298,7 @@ func (s *VPCClusterScope) GetLoadBalancerHostName() (*string, error) { if loadBalancer.Public != nil && !*loadBalancer.Public { lbSuffix = privateLBSuffix } - name = fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1beta2.ResourceTypeLoadBalancer), lbSuffix) + name = fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1.ResourceTypeLoadBalancer), lbSuffix) } // Retrieve the Load Balancer hostname from API. @@ -353,7 +354,7 @@ func (s *VPCClusterScope) GetNetworkResourceGroupID() (string, error) { } // Populate the Network Status' Resource Group to shortcut future lookups. - s.SetResourceStatus(infrav1beta2.ResourceTypeResourceGroup, &infrav1beta2.ResourceStatus{ + s.SetResourceStatus(infrav1.ResourceTypeResourceGroup, &infrav1.ResourceStatus{ ID: resourceGroupID, Name: resourceGroupName, Ready: true, @@ -384,7 +385,7 @@ func (s *VPCClusterScope) GetResourceGroupID() (string, error) { } // Populate the Stauts Resource Group to shortcut future lookups. 
- s.SetResourceStatus(infrav1beta2.ResourceTypeResourceGroup, &infrav1beta2.ResourceStatus{ + s.SetResourceStatus(infrav1.ResourceTypeResourceGroup, &infrav1.ResourceStatus{ ID: *resourceGroup.ID, Name: ptr.To(resourceGroupName), Ready: true, @@ -424,9 +425,9 @@ func (s *VPCClusterScope) getSecurityGroupIDFromStatus(name string) *string { } // GetServiceName returns the name of a given service type from Spec or generates a name for it. -func (s *VPCClusterScope) GetServiceName(resourceType infrav1beta2.ResourceType) *string { +func (s *VPCClusterScope) GetServiceName(resourceType infrav1.ResourceType) *string { switch resourceType { - case infrav1beta2.ResourceTypeVPC: + case infrav1.ResourceTypeVPC: // Generate a name based off cluster name if no VPC defined in Spec, or no VPC name nor ID. if s.NetworkSpec().VPC == nil || (s.NetworkSpec().VPC.Name == nil && s.NetworkSpec().VPC.ID == nil) { return ptr.To(fmt.Sprintf("%s-vpc", s.Name())) @@ -434,16 +435,16 @@ func (s *VPCClusterScope) GetServiceName(resourceType infrav1beta2.ResourceType) if s.NetworkSpec().VPC.Name != nil { return s.NetworkSpec().VPC.Name } - case infrav1beta2.ResourceTypeSubnet: + case infrav1.ResourceTypeSubnet: // Generate a generic subnet name based off the cluster name, which can be extended as necessary (for Zones). return ptr.To(fmt.Sprintf("%s-subnet", s.IBMVPCCluster.Name)) - case infrav1beta2.ResourceTypePublicGateway: + case infrav1.ResourceTypePublicGateway: // Generate a generic public gateway name based off the cluster name, which can be extedned as necessary (for Zone). return ptr.To(fmt.Sprintf("%s-pgateway", s.IBMVPCCluster.Name)) - case infrav1beta2.ResourceTypeLoadBalancer: + case infrav1.ResourceTypeLoadBalancer: // Generate a generic load balancer name based off the cluster name, which can be extended as necessary (for public vs private). 
return ptr.To(fmt.Sprintf("%s-lb", s.IBMVPCCluster.Name)) - case infrav1beta2.ResourceTypeLoadBalancerPool: + case infrav1.ResourceTypeLoadBalancerPool: // Generate a generic load balancer pool name based off the cluster name, which can be extended as necessary (for LB). return ptr.To(fmt.Sprintf("%s-lbpool", s.IBMVPCCluster.Name)) default: @@ -497,7 +498,7 @@ func (s *VPCClusterScope) GetVPCID() (*string, error) { // Check if the VPC was found and has an ID if vpcDetails != nil && vpcDetails.ID != nil { // Set VPC ID in Status to shortcut future lookups, prior to returning the ID. - s.SetResourceStatus(infrav1beta2.ResourceTypeVPC, &infrav1beta2.ResourceStatus{ + s.SetResourceStatus(infrav1.ResourceTypeVPC, &infrav1.ResourceStatus{ ID: *vpcDetails.ID, Name: s.NetworkSpec().VPC.Name, Ready: true, @@ -510,13 +511,13 @@ func (s *VPCClusterScope) GetVPCID() (*string, error) { } // setLoadBalancerStatus sets the status for a Load Balancer. -func (s *VPCClusterScope) setLoadBalancerStatus(loadBalancer *infrav1beta2.VPCLoadBalancerStatus) { +func (s *VPCClusterScope) setLoadBalancerStatus(loadBalancer *infrav1.VPCLoadBalancerStatus) { s.V(3).Info("Setting status for Load Balancer", "loadBalancer", loadBalancer) if s.NetworkStatus() == nil { - s.IBMVPCCluster.Status.Network = &infrav1beta2.VPCNetworkStatus{} + s.IBMVPCCluster.Status.Network = &infrav1.VPCNetworkStatus{} } if s.NetworkStatus().LoadBalancers == nil { - s.IBMVPCCluster.Status.Network.LoadBalancers = make(map[string]*infrav1beta2.VPCLoadBalancerStatus) + s.IBMVPCCluster.Status.Network.LoadBalancers = make(map[string]*infrav1.VPCLoadBalancerStatus) } if lb, ok := s.NetworkStatus().LoadBalancers[*loadBalancer.ID]; ok { // ID should not change, update remaining fields. @@ -529,22 +530,22 @@ func (s *VPCClusterScope) setLoadBalancerStatus(loadBalancer *infrav1beta2.VPCLo } // SetResourceStatus sets the status for the provided ResourceType. 
-func (s *VPCClusterScope) SetResourceStatus(resourceType infrav1beta2.ResourceType, resource *infrav1beta2.ResourceStatus) { //nolint:gocyclo +func (s *VPCClusterScope) SetResourceStatus(resourceType infrav1.ResourceType, resource *infrav1.ResourceStatus) { //nolint:gocyclo // Ignore attempts to set status without resource. if resource == nil { return } s.V(3).Info("Setting status", "resourceType", resourceType, "resource", resource) switch resourceType { - case infrav1beta2.ResourceTypeResourceGroup: + case infrav1.ResourceTypeResourceGroup: if s.IBMVPCCluster.Status.ResourceGroup == nil { s.IBMVPCCluster.Status.ResourceGroup = resource return } s.IBMVPCCluster.Status.ResourceGroup.Set(*resource) - case infrav1beta2.ResourceTypeVPC: + case infrav1.ResourceTypeVPC: if s.NetworkStatus() == nil { - s.IBMVPCCluster.Status.Network = &infrav1beta2.VPCNetworkStatus{ + s.IBMVPCCluster.Status.Network = &infrav1.VPCNetworkStatus{ VPC: resource, } return @@ -552,9 +553,9 @@ func (s *VPCClusterScope) SetResourceStatus(resourceType infrav1beta2.ResourceTy s.IBMVPCCluster.Status.Network.VPC = resource } s.NetworkStatus().VPC.Set(*resource) - case infrav1beta2.ResourceTypeCustomImage: + case infrav1.ResourceTypeCustomImage: if s.IBMVPCCluster.Status.Image == nil { - s.IBMVPCCluster.Status.Image = &infrav1beta2.ResourceStatus{ + s.IBMVPCCluster.Status.Image = &infrav1.ResourceStatus{ ID: resource.ID, Name: resource.Name, Ready: resource.Ready, @@ -562,36 +563,36 @@ func (s *VPCClusterScope) SetResourceStatus(resourceType infrav1beta2.ResourceTy return } s.IBMVPCCluster.Status.Image.Set(*resource) - case infrav1beta2.ResourceTypeControlPlaneSubnet: + case infrav1.ResourceTypeControlPlaneSubnet: if s.NetworkStatus() == nil { - s.IBMVPCCluster.Status.Network = &infrav1beta2.VPCNetworkStatus{} + s.IBMVPCCluster.Status.Network = &infrav1.VPCNetworkStatus{} } if s.NetworkStatus().ControlPlaneSubnets == nil { - s.IBMVPCCluster.Status.Network.ControlPlaneSubnets = 
make(map[string]*infrav1beta2.ResourceStatus) + s.IBMVPCCluster.Status.Network.ControlPlaneSubnets = make(map[string]*infrav1.ResourceStatus) } if subnet, ok := s.NetworkStatus().ControlPlaneSubnets[*resource.Name]; ok { subnet.Set(*resource) } else { s.IBMVPCCluster.Status.Network.ControlPlaneSubnets[*resource.Name] = resource } - case infrav1beta2.ResourceTypeWorkerSubnet: + case infrav1.ResourceTypeWorkerSubnet: if s.NetworkStatus() == nil { - s.IBMVPCCluster.Status.Network = &infrav1beta2.VPCNetworkStatus{} + s.IBMVPCCluster.Status.Network = &infrav1.VPCNetworkStatus{} } if s.NetworkStatus().WorkerSubnets == nil { - s.IBMVPCCluster.Status.Network.WorkerSubnets = make(map[string]*infrav1beta2.ResourceStatus) + s.IBMVPCCluster.Status.Network.WorkerSubnets = make(map[string]*infrav1.ResourceStatus) } if subnet, ok := s.NetworkStatus().WorkerSubnets[*resource.Name]; ok { subnet.Set(*resource) } else { s.IBMVPCCluster.Status.Network.WorkerSubnets[*resource.Name] = resource } - case infrav1beta2.ResourceTypeSecurityGroup: + case infrav1.ResourceTypeSecurityGroup: if s.NetworkStatus() == nil { - s.IBMVPCCluster.Status.Network = &infrav1beta2.VPCNetworkStatus{} + s.IBMVPCCluster.Status.Network = &infrav1.VPCNetworkStatus{} } if s.IBMVPCCluster.Status.Network.SecurityGroups == nil { - s.IBMVPCCluster.Status.Network.SecurityGroups = make(map[string]*infrav1beta2.ResourceStatus) + s.IBMVPCCluster.Status.Network.SecurityGroups = make(map[string]*infrav1.ResourceStatus) } if securityGroup, ok := s.IBMVPCCluster.Status.Network.SecurityGroups[*resource.Name]; ok { securityGroup.Set(*resource) @@ -638,14 +639,15 @@ func (s *VPCClusterScope) TagResource(tagName string, resourceCRN string) error } // ReconcileVPC reconciles the cluster's VPC. -func (s *VPCClusterScope) ReconcileVPC() (bool, error) { +func (s *VPCClusterScope) ReconcileVPC(ctx context.Context) (bool, error) { + log := ctrl.LoggerFrom(ctx) // If VPC id is set, that indicates the VPC already exists. 
vpcID, err := s.GetVPCID() if err != nil { return false, fmt.Errorf("failed to retrieve vpc id: %w", err) } if vpcID != nil { - s.V(3).Info("VPC id is set", "id", vpcID) + log.V(3).Info("VPC id is set", "id", vpcID) vpcDetails, _, err := s.VPCClient.GetVPC(&vpcv1.GetVPCOptions{ ID: vpcID, }) @@ -654,13 +656,13 @@ func (s *VPCClusterScope) ReconcileVPC() (bool, error) { } else if vpcDetails == nil { return false, fmt.Errorf("failed to retrieve vpc with id: %s", *vpcID) } - s.V(3).Info("Found VPC with provided id", "id", vpcID) + log.V(3).Info("Found VPC with provided id", "id", vpcID) requeue := true if vpcDetails.Status != nil && *vpcDetails.Status == string(vpcv1.VPCStatusAvailableConst) { requeue = false } - s.SetResourceStatus(infrav1beta2.ResourceTypeVPC, &infrav1beta2.ResourceStatus{ + s.SetResourceStatus(infrav1.ResourceTypeVPC, &infrav1.ResourceStatus{ ID: *vpcID, Name: vpcDetails.Name, // Ready status will be invert of the need to requeue. @@ -672,13 +674,13 @@ func (s *VPCClusterScope) ReconcileVPC() (bool, error) { } // If no VPC id was found, we need to create a new VPC. - s.V(3).Info("Creating a VPC") + log.V(3).Info("Creating a VPC") err = s.createVPC() if err != nil { return false, fmt.Errorf("failed to create vpc: %w", err) } - s.V(3).Info("Successfully created VPC") + log.V(3).Info("Successfully created VPC") return true, nil } @@ -690,7 +692,7 @@ func (s *VPCClusterScope) createVPC() error { } else if resourceGroupID == "" { return fmt.Errorf("resource group id is empty cannot create vpc") } - vpcName := s.GetServiceName(infrav1beta2.ResourceTypeVPC) + vpcName := s.GetServiceName(infrav1.ResourceTypeVPC) if s.NetworkSpec() != nil && s.NetworkSpec().VPC != nil && s.NetworkSpec().VPC.Name != nil { vpcName = s.NetworkSpec().VPC.Name } @@ -710,7 +712,7 @@ func (s *VPCClusterScope) createVPC() error { } // Set the VPC status. 
- s.SetResourceStatus(infrav1beta2.ResourceTypeVPC, &infrav1beta2.ResourceStatus{ + s.SetResourceStatus(infrav1.ResourceTypeVPC, &infrav1.ResourceStatus{ ID: *vpcDetails.ID, Name: vpcDetails.Name, // We wait for a followup reconcile loop to set as Ready, to confirm the VPC can be found. @@ -726,7 +728,8 @@ func (s *VPCClusterScope) createVPC() error { } // ReconcileVPCCustomImage reconciles the VPC Custom Image. -func (s *VPCClusterScope) ReconcileVPCCustomImage() (bool, error) { +func (s *VPCClusterScope) ReconcileVPCCustomImage(ctx context.Context) (bool, error) { + log := ctrl.LoggerFrom(ctx) // VPC Custom Image reconciliation is based on the following possibilities. // 1. Check Status for ID or Name, from previous lookup in reconciliation loop. // 2. If no Image spec is provided, assume the image is managed externally, thus no reconciliation required. @@ -741,7 +744,7 @@ func (s *VPCClusterScope) ReconcileVPCCustomImage() (bool, error) { } } else if s.IBMVPCCluster.Spec.Image == nil { // If no Image spec was defined, we expect it is maintained externally and continue without reconciling. For example, using a Catalog Offering Custom Image, which may be in another account, which means it cannot be looked up, but can be used when creating Instances. 
- s.V(3).Info("No VPC Custom Image defined, skipping reconciliation") + log.V(3).Info("No VPC Custom Image defined, skipping reconciliation") return false, nil } else if s.IBMVPCCluster.Spec.Image.Name != nil { // Attempt to retrieve the image details via the name, if it already exists @@ -754,7 +757,7 @@ func (s *VPCClusterScope) ReconcileVPCCustomImage() (bool, error) { if imageDetails.Status != nil && *imageDetails.Status == string(vpcv1.ImageStatusAvailableConst) { requeue = false } - s.SetResourceStatus(infrav1beta2.ResourceTypeCustomImage, &infrav1beta2.ResourceStatus{ + s.SetResourceStatus(infrav1.ResourceTypeCustomImage, &infrav1.ResourceStatus{ ID: *imageDetails.ID, Name: s.IBMVPCCluster.Spec.Image.Name, // Ready status will be invert of the need to requeue. @@ -787,13 +790,13 @@ func (s *VPCClusterScope) ReconcileVPCCustomImage() (bool, error) { if image == nil { return false, fmt.Errorf("error failed to retrieve vpc custom image with id %s", *imageID) } - s.V(3).Info("Found VPC Custom Image with provided id", "imageID", imageID) + log.V(3).Info("Found VPC Custom Image with provided id", "imageID", imageID) requeue := true if image.Status != nil && *image.Status == string(vpcv1.ImageStatusAvailableConst) { requeue = false } - s.SetResourceStatus(infrav1beta2.ResourceTypeCustomImage, &infrav1beta2.ResourceStatus{ + s.SetResourceStatus(infrav1.ResourceTypeCustomImage, &infrav1.ResourceStatus{ ID: *imageID, Name: image.Name, // Ready status will be invert of the need to requeue. @@ -803,18 +806,18 @@ func (s *VPCClusterScope) ReconcileVPCCustomImage() (bool, error) { } // No VPC Custom Image exists or was found, so create the Custom Image. 
- s.V(3).Info("Creating a VPC Custom Image") - err := s.createCustomImage() + log.V(3).Info("Creating a VPC Custom Image") + err := s.createCustomImage(ctx) if err != nil { return false, fmt.Errorf("error failure trying to create vpc custom image: %w", err) } - s.V(3).Info("Successfully created VPC Custom Image") + log.V(3).Info("Successfully created VPC Custom Image") return true, nil } // createCustomImage will create a new VPC Custom Image. -func (s *VPCClusterScope) createCustomImage() error { +func (s *VPCClusterScope) createCustomImage(ctx context.Context) error { // TODO(cjschaef): Remove in favor of webhook validation. if s.IBMVPCCluster.Spec.Image.OperatingSystem == nil { return fmt.Errorf("error failed to create vpc custom image due to missing operatingSystem") @@ -843,7 +846,7 @@ func (s *VPCClusterScope) createCustomImage() error { } // Build the COS Object URL using the ImageSpec - fileHRef, err := s.buildCOSObjectHRef() + fileHRef, err := s.buildCOSObjectHRef(ctx) if err != nil { return fmt.Errorf("error building vpc custom image file href: %w", err) } @@ -872,7 +875,7 @@ func (s *VPCClusterScope) createCustomImage() error { } // Initially populate the Image's status. - s.SetResourceStatus(infrav1beta2.ResourceTypeCustomImage, &infrav1beta2.ResourceStatus{ + s.SetResourceStatus(infrav1.ResourceTypeCustomImage, &infrav1.ResourceStatus{ ID: *imageDetails.ID, Name: imageDetails.Name, // We must wait for the image to be ready, on followup reconciliation loops. @@ -887,7 +890,8 @@ func (s *VPCClusterScope) createCustomImage() error { } // buildCOSObjectHRef will build the HRef path to a COS Object that can be used for VPC Custom Image creation. -func (s *VPCClusterScope) buildCOSObjectHRef() (*string, error) { +func (s *VPCClusterScope) buildCOSObjectHRef(ctx context.Context) (*string, error) { + log := ctrl.LoggerFrom(ctx) // TODO(cjschaef): Remove in favor of webhook validation. // We need COS details in order to create the Custom Image from. 
if s.IBMVPCCluster.Spec.Image.COSInstance == nil || s.IBMVPCCluster.Spec.Image.COSBucket == nil || s.IBMVPCCluster.Spec.Image.COSObject == nil { @@ -903,14 +907,14 @@ func (s *VPCClusterScope) buildCOSObjectHRef() (*string, error) { // Expected HRef format: // cos://// href := fmt.Sprintf("cos://%s/%s/%s", bucketRegion, *s.IBMVPCCluster.Spec.Image.COSBucket, *s.IBMVPCCluster.Spec.Image.COSObject) - s.V(3).Info("building image ref", "href", href) + log.V(3).Info("building image ref", "href", href) return ptr.To(href), nil } // ReconcileSubnets reconciles the VPC Subnet(s). // For Subnets, we collect all of the required subnets, for each Plane, and reconcile them individually. Requeing if one is missing or just created. Reconciliation is attempted on all subnets each loop, to prevent single subnet creation per reconciliation loop. -func (s *VPCClusterScope) ReconcileSubnets() (bool, error) { - var subnets []infrav1beta2.Subnet +func (s *VPCClusterScope) ReconcileSubnets(ctx context.Context) (bool, error) { + var subnets []infrav1.Subnet var err error // If no ControlPlane Subnets were supplied, we default to create one in each availability zone of the region. if len(s.IBMVPCCluster.Spec.Network.ControlPlaneSubnets) == 0 { @@ -925,7 +929,7 @@ func (s *VPCClusterScope) ReconcileSubnets() (bool, error) { // Reconcile Control Plane subnets. requeue := false for _, subnet := range subnets { - if requiresRequeue, err := s.reconcileSubnet(subnet, true); err != nil { + if requiresRequeue, err := s.reconcileSubnet(ctx, subnet, true); err != nil { return false, fmt.Errorf("error failed reconciling control plane subnet: %w", err) } else if requiresRequeue { // If the reconcile of the subnet requires further reconciliation, plan to requeue entire ReconcileSubnets call, but attempt to further reconcile additional Subnets (attempt all subnet reconciliation). @@ -949,7 +953,7 @@ func (s *VPCClusterScope) ReconcileSubnets() (bool, error) { // Reconcile Worker subnets. 
for _, subnet := range subnets { - if requiresRequeue, err := s.reconcileSubnet(subnet, false); err != nil { + if requiresRequeue, err := s.reconcileSubnet(ctx, subnet, false); err != nil { return false, fmt.Errorf("error failed reconciling worker subnet: %w", err) } else if requiresRequeue { // If the reconcile of the subnet requires further reconciliation, plan to requeue entire ReconcileSubnets call, but attempt to further reconcile additional Subnets (attempt all subnet reconciliation). @@ -963,7 +967,8 @@ func (s *VPCClusterScope) ReconcileSubnets() (bool, error) { // reconcileSubnet will attempt to find the existing subnet, or create it if necessary. // The logic can handle either Control Plane or Worker subnets, but must distinguish between them for Status updates. -func (s *VPCClusterScope) reconcileSubnet(subnet infrav1beta2.Subnet, isControlPlane bool) (bool, error) { //nolint: gocyclo +func (s *VPCClusterScope) reconcileSubnet(ctx context.Context, subnet infrav1.Subnet, isControlPlane bool) (bool, error) { //nolint: gocyclo + log := ctrl.LoggerFrom(ctx) // If no ID or name was provided, that is an error to be raised. One or the other must be specified when subnets are supplied. if subnet.ID == nil && subnet.Name == nil { return false, fmt.Errorf("error subnet has no defined id or name, one is required") @@ -971,7 +976,7 @@ func (s *VPCClusterScope) reconcileSubnet(subnet infrav1beta2.Subnet, isControlP // Check Status first and update as necessary. if s.NetworkStatus() != nil { - var subnetMap map[string]*infrav1beta2.ResourceStatus + var subnetMap map[string]*infrav1.ResourceStatus var subnetID, subnetName *string if isControlPlane && s.NetworkStatus().ControlPlaneSubnets != nil { subnetMap = s.NetworkStatus().ControlPlaneSubnets @@ -1040,20 +1045,20 @@ func (s *VPCClusterScope) reconcileSubnet(subnet infrav1beta2.Subnet, isControlP } // If the subnet has not yet been at this point, assume it needs to be created. 
- s.V(3).Info("creating subnet", "subnetName", subnet.Name) - err := s.createSubnet(subnet, isControlPlane) + log.V(3).Info("creating subnet", "subnetName", subnet.Name) + err := s.createSubnet(ctx, subnet, isControlPlane) if err != nil { return false, err } - s.V(3).Info("Successfully created subnet", "subnetName", subnet.Name) + log.V(3).Info("Successfully created subnet", "subnetName", subnet.Name) // Recommend we requeue reconciliation after subnet was successfully created return true, nil } // buildSubnetsForZones will create a set of Subnets, using default names, for each availability zone within a Region. This is typically used when no subnets were provided, so a set of default subnets gets created. -func (s *VPCClusterScope) buildSubnetsForZones() ([]infrav1beta2.Subnet, error) { - subnets := make([]infrav1beta2.Subnet, 0) +func (s *VPCClusterScope) buildSubnetsForZones() ([]infrav1.Subnet, error) { + subnets := make([]infrav1.Subnet, 0) zones, err := s.VPCClient.GetVPCZonesByRegion(s.IBMVPCCluster.Spec.Region) if err != nil { return subnets, fmt.Errorf("error unknown failure retrieving zones for region %s: %w", s.IBMVPCCluster.Spec.Region, err) @@ -1062,8 +1067,8 @@ func (s *VPCClusterScope) buildSubnetsForZones() ([]infrav1beta2.Subnet, error) return subnets, fmt.Errorf("error retrieving subnet zones, no zones found in %s", s.IBMVPCCluster.Spec.Region) } for _, zone := range zones { - name := fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1beta2.ResourceTypeSubnet), zone) - subnets = append(subnets, infrav1beta2.Subnet{ + name := fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1.ResourceTypeSubnet), zone) + subnets = append(subnets, infrav1.Subnet{ Name: ptr.To(name), Zone: ptr.To(zone), }) @@ -1078,22 +1083,22 @@ func (s *VPCClusterScope) updateSubnetStatus(subnetDetails *vpcv1.Subnet, isCont requeue = false } - resourceStatus := &infrav1beta2.ResourceStatus{ + resourceStatus := &infrav1.ResourceStatus{ ID: *subnetDetails.ID, Name: subnetDetails.Name, // 
Ready status will be invert of the need to requeue Ready: !requeue, } if isControlPlane { - s.SetResourceStatus(infrav1beta2.ResourceTypeControlPlaneSubnet, resourceStatus) + s.SetResourceStatus(infrav1.ResourceTypeControlPlaneSubnet, resourceStatus) } else { - s.SetResourceStatus(infrav1beta2.ResourceTypeWorkerSubnet, resourceStatus) + s.SetResourceStatus(infrav1.ResourceTypeWorkerSubnet, resourceStatus) } return requeue, nil } // createSubnet creates a new VPC subnet. -func (s *VPCClusterScope) createSubnet(subnet infrav1beta2.Subnet, isControlPlane bool) error { +func (s *VPCClusterScope) createSubnet(ctx context.Context, subnet infrav1.Subnet, isControlPlane bool) error { // TODO(cjschaef): Move to webhook validation. if subnet.Zone == nil { return fmt.Errorf("error subnet zone must be defined for subnet %s", *subnet.Name) @@ -1120,7 +1125,7 @@ func (s *VPCClusterScope) createSubnet(subnet infrav1beta2.Subnet, isControlPlan // Find or create a Public Gateway in this zone for the subnet, only one Public Gateway is required for each zone, for this cluster. // NOTE(cjschaef): We may need to add support to not attach Public Gateways to subnets. - publicGateway, err := s.findOrCreatePublicGateway(*subnet.Zone) + publicGateway, err := s.findOrCreatePublicGateway(ctx, *subnet.Zone) if err != nil { return fmt.Errorf("error failed to find or create public gateway for subnet %s: %w", *subnet.Name, err) } @@ -1154,15 +1159,15 @@ func (s *VPCClusterScope) createSubnet(subnet infrav1beta2.Subnet, isControlPlan } // Initially populate subnet's status. 
- resourceStatus := &infrav1beta2.ResourceStatus{ + resourceStatus := &infrav1.ResourceStatus{ ID: *subnetDetails.ID, Name: subnetDetails.Name, Ready: false, } if isControlPlane { - s.SetResourceStatus(infrav1beta2.ResourceTypeControlPlaneSubnet, resourceStatus) + s.SetResourceStatus(infrav1.ResourceTypeControlPlaneSubnet, resourceStatus) } else { - s.SetResourceStatus(infrav1beta2.ResourceTypeWorkerSubnet, resourceStatus) + s.SetResourceStatus(infrav1.ResourceTypeWorkerSubnet, resourceStatus) } // Add a tag to the subnet for the cluster. @@ -1175,8 +1180,9 @@ func (s *VPCClusterScope) createSubnet(subnet infrav1beta2.Subnet, isControlPlan } // findOrCreatePublicGateway will attempt to find if there is an existing Public Gateway for a specific zone, for the cluster (in cluster's Resource Group and VPC), or create a new one. Only one Public Gateway is required in each zone, for any subnets in that zone. -func (s *VPCClusterScope) findOrCreatePublicGateway(zone string) (*vpcv1.PublicGateway, error) { - publicGatewayName := fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1beta2.ResourceTypePublicGateway), zone) +func (s *VPCClusterScope) findOrCreatePublicGateway(ctx context.Context, zone string) (*vpcv1.PublicGateway, error) { + log := ctrl.LoggerFrom(ctx) + publicGatewayName := fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1.ResourceTypePublicGateway), zone) // We will use the cluster Resource Group ID, as we expect to create all resources (Public Gateways and Subnets) in that Resource Group. 
resourceGroupID, err := s.GetResourceGroupID() if err != nil { @@ -1221,7 +1227,7 @@ func (s *VPCClusterScope) findOrCreatePublicGateway(zone string) (*vpcv1.PublicG return nil, fmt.Errorf("error failed creating public gateway for zone %s", zone) } - s.V(3).Info("created public gateway", "id", publicGatewayDetails.ID) + log.V(3).Info("created public gateway", "id", publicGatewayDetails.ID) // Add a tag to the public gateway for the cluster err = s.TagResource(s.IBMVPCCluster.Name, *publicGatewayDetails.CRN) @@ -1233,7 +1239,8 @@ func (s *VPCClusterScope) findOrCreatePublicGateway(zone string) (*vpcv1.PublicG } // ReconcileSecurityGroups will attempt to reconcile the defined SecurityGroups and their SecurityGroupRules. Our best option is to perform a first set of passes, creating all the SecurityGroups first, then reconcile the SecurityGroupRules after that, as the SecuirtyGroupRules could be dependent on an IBM Cloud Security Group that must be created first. -func (s *VPCClusterScope) ReconcileSecurityGroups() (bool, error) { +func (s *VPCClusterScope) ReconcileSecurityGroups(ctx context.Context) (bool, error) { + log := ctrl.LoggerFrom(ctx) // If no Security Groups were supplied, we have nothing to do. if len(s.IBMVPCCluster.Spec.Network.SecurityGroups) == 0 { return false, nil @@ -1241,7 +1248,7 @@ func (s *VPCClusterScope) ReconcileSecurityGroups() (bool, error) { // Reconcile each Security Group first, process rules later. for _, securityGroup := range s.IBMVPCCluster.Spec.Network.SecurityGroups { - if err := s.reconcileSecurityGroup(securityGroup); err != nil { + if err := s.reconcileSecurityGroup(ctx, securityGroup); err != nil { return false, fmt.Errorf("error failed reonciling security groups: %w", err) } } @@ -1249,10 +1256,10 @@ func (s *VPCClusterScope) ReconcileSecurityGroups() (bool, error) { // Reconcile each Security Groups's Rules. 
requeue := false for _, securityGroup := range s.IBMVPCCluster.Spec.Network.SecurityGroups { - if requiresRequeue, err := s.reconcileSecurityGroupRules(securityGroup); err != nil { + if requiresRequeue, err := s.reconcileSecurityGroupRules(ctx, securityGroup); err != nil { return false, fmt.Errorf("error failed reconciling security group rules: %w", err) } else if requiresRequeue { - s.V(3).Info("requeuing for security group rules") + log.V(3).Info("requeuing for security group rules") requeue = true } } @@ -1261,7 +1268,8 @@ func (s *VPCClusterScope) ReconcileSecurityGroups() (bool, error) { } // reconcileSecurityGroup will attempt to reconcile a defined SecurityGroup. By design, we confirm the IBM Cloud Security Group exists first, before attempting to reconcile the defined SecurityGroupRules. -func (s *VPCClusterScope) reconcileSecurityGroup(securityGroup infrav1beta2.VPCSecurityGroup) error { +func (s *VPCClusterScope) reconcileSecurityGroup(ctx context.Context, securityGroup infrav1.VPCSecurityGroup) error { + log := ctrl.LoggerFrom(ctx) var securityGroupID *string // If Security Group already has an ID defined, use that for lookup. if securityGroup.ID != nil { @@ -1283,7 +1291,7 @@ func (s *VPCClusterScope) reconcileSecurityGroup(securityGroup infrav1beta2.VPCS } else if securityGroupDetails != nil { // If the Security Group was found, update Status with current details. // Security Groups do not have a status, so we assume if it exists, it is ready. - s.SetResourceStatus(infrav1beta2.ResourceTypeSecurityGroup, &infrav1beta2.ResourceStatus{ + s.SetResourceStatus(infrav1.ResourceTypeSecurityGroup, &infrav1.ResourceStatus{ ID: *securityGroupDetails.ID, Name: securityGroupDetails.Name, Ready: true, @@ -1295,7 +1303,7 @@ func (s *VPCClusterScope) reconcileSecurityGroup(securityGroup infrav1beta2.VPCS // If we have an ID for the SecurityGroup, we can check the status. 
if securityGroupID != nil { - s.V(3).Info("checking security group status", "securityGroupName", securityGroup.Name, "securityGroupID", securityGroupID) + log.V(3).Info("checking security group status", "securityGroupName", securityGroup.Name, "securityGroupID", securityGroupID) securityGroupDetails, _, err := s.VPCClient.GetSecurityGroup(&vpcv1.GetSecurityGroupOptions{ ID: securityGroupID, }) @@ -1308,7 +1316,7 @@ func (s *VPCClusterScope) reconcileSecurityGroup(securityGroup infrav1beta2.VPCS } // Security Groups do not have a status, so we assume if it exists, it is ready. - s.SetResourceStatus(infrav1beta2.ResourceTypeSecurityGroup, &infrav1beta2.ResourceStatus{ + s.SetResourceStatus(infrav1.ResourceTypeSecurityGroup, &infrav1.ResourceStatus{ ID: *securityGroupID, Name: securityGroupDetails.Name, Ready: true, @@ -1336,16 +1344,16 @@ func (s *VPCClusterScope) reconcileSecurityGroup(securityGroup infrav1beta2.VPCS } securityGroupDetails, _, err := s.VPCClient.CreateSecurityGroup(createOptions) if err != nil { - s.V(3).Error(err, "error creating security group", "securityGroupName", securityGroup.Name) + log.V(3).Error(err, "error creating security group", "securityGroupName", securityGroup.Name) return fmt.Errorf("error failed to create security group: %w", err) } if securityGroupDetails == nil { - s.V(3).Info("error failed creating security group", "securityGroupName", securityGroup.Name) + log.V(3).Info("error failed creating security group", "securityGroupName", securityGroup.Name) return fmt.Errorf("error failed creating security group") } // Security Groups do not have a status, so just assume they are ready immediately after creation. 
- s.SetResourceStatus(infrav1beta2.ResourceTypeSecurityGroup, &infrav1beta2.ResourceStatus{ + s.SetResourceStatus(infrav1.ResourceTypeSecurityGroup, &infrav1.ResourceStatus{ ID: *securityGroupDetails.ID, Name: securityGroupDetails.Name, Ready: true, @@ -1362,7 +1370,8 @@ func (s *VPCClusterScope) reconcileSecurityGroup(securityGroup infrav1beta2.VPCS } // reconcile SecurityGroupRules will attempt to reconcile the set of defined SecurityGroupRules for a SecurityGroup, one Rule at a time. Each defined Rule can contain multiple remotes, requiring a unique IBM Cloud Security Group Rule, based on the expected traffic direction, inbound (Source) or outbound (Destination). -func (s *VPCClusterScope) reconcileSecurityGroupRules(securityGroup infrav1beta2.VPCSecurityGroup) (bool, error) { +func (s *VPCClusterScope) reconcileSecurityGroupRules(ctx context.Context, securityGroup infrav1.VPCSecurityGroup) (bool, error) { + log := ctrl.LoggerFrom(ctx) // If the SecurityGroup has no rules, we have nothing more to do for this Security Group. if len(securityGroup.Rules) == 0 { return false, nil @@ -1378,14 +1387,14 @@ func (s *VPCClusterScope) reconcileSecurityGroupRules(securityGroup infrav1beta2 } if securityGroupID == nil { - s.V(3).Info("security group not found, requeue", "securityGroup", securityGroup) + log.V(3).Info("security group not found, requeue", "securityGroup", securityGroup) return true, nil } // Reconcile each SecurityGroupRule in the SecurityGroup. 
for _, securityGroupRule := range securityGroup.Rules { - s.V(3).Info("reconcile security group rule", "securityGroupID", securityGroupID) - if err := s.reconcileSecurityGroupRule(*securityGroupID, *securityGroupRule); err != nil { + log.V(3).Info("reconcile security group rule", "securityGroupID", securityGroupID) + if err := s.reconcileSecurityGroupRule(ctx, *securityGroupID, *securityGroupRule); err != nil { return false, fmt.Errorf("error failed to reconcile security group rule: %w", err) } } @@ -1395,7 +1404,8 @@ func (s *VPCClusterScope) reconcileSecurityGroupRules(securityGroup infrav1beta2 } // reconcileSecurityGroupRule will attempt to reconcile a defined SecurityGroupRule, with one or more Remotes, for a SecurityGroup. If the IBM Cloud Security Group contains no Rules, simply attempt to create the defined Rule (via the Remote(s) provided). -func (s *VPCClusterScope) reconcileSecurityGroupRule(securityGroupID string, securityGroupRule infrav1beta2.VPCSecurityGroupRule) error { +func (s *VPCClusterScope) reconcileSecurityGroupRule(ctx context.Context, securityGroupID string, securityGroupRule infrav1.VPCSecurityGroupRule) error { + log := ctrl.LoggerFrom(ctx) existingSecurityGroupRuleIntfs, _, err := s.VPCClient.ListSecurityGroupRules(&vpcv1.ListSecurityGroupRulesOptions{ SecurityGroupID: ptr.To(securityGroupID), }) @@ -1405,38 +1415,39 @@ func (s *VPCClusterScope) reconcileSecurityGroupRule(securityGroupID string, sec // If the Security Group has no Rules at all, we simply create all the Rules if existingSecurityGroupRuleIntfs == nil || len(existingSecurityGroupRuleIntfs.Rules) == 0 { - s.V(3).Info("Creating security group rules for security group", "securityGroupID", securityGroupID) - err := s.createSecurityGroupRuleAllRemotes(securityGroupID, securityGroupRule) + log.V(3).Info("Creating security group rules for security group", "securityGroupID", securityGroupID) + err := s.createSecurityGroupRuleAllRemotes(ctx, securityGroupID, securityGroupRule) if 
err != nil { return fmt.Errorf("error failed creating all security group rule remotes: %w", err) } - s.V(3).Info("Created security group rules", "securityGroupID", securityGroupID, "securityGroupRule", securityGroupRule) + log.V(3).Info("Created security group rules", "securityGroupID", securityGroupID, "securityGroupRule", securityGroupRule) // Security Group Rules do not have a Status, so assume they are ready immediately. return nil } // Validate the Security Group Rule(s) exist or were created. - if err := s.findOrCreateSecurityGroupRule(securityGroupID, securityGroupRule, existingSecurityGroupRuleIntfs); err != nil { + if err := s.findOrCreateSecurityGroupRule(ctx, securityGroupID, securityGroupRule, existingSecurityGroupRuleIntfs); err != nil { return fmt.Errorf("error failed to find or create security group rule: %w", err) } return nil } // findOrCreateSecurityGroupRule will attempt to match up the SecurityGroupRule's Remote(s) (multiple Remotes can be supplied per Rule definition), and will create any missing IBM Cloud Security Group Rules based on the SecurityGroupRule and Remote(s). Remotes are defined either by a Destination (outbound) or a Source (inbound), which defines the type of IBM Cloud Security Group Rule that should exist or be created. 
-func (s *VPCClusterScope) findOrCreateSecurityGroupRule(securityGroupID string, securityGroupRule infrav1beta2.VPCSecurityGroupRule, existingSecurityGroupRules *vpcv1.SecurityGroupRuleCollection) error { //nolint: gocyclo +func (s *VPCClusterScope) findOrCreateSecurityGroupRule(ctx context.Context, securityGroupID string, securityGroupRule infrav1.VPCSecurityGroupRule, existingSecurityGroupRules *vpcv1.SecurityGroupRuleCollection) error { //nolint: gocyclo + log := ctrl.LoggerFrom(ctx) // Use either the SecurityGroupRule.Destination or SecurityGroupRule.Source for further details based on SecurityGroupRule.Direction - var securityGroupRulePrototype infrav1beta2.VPCSecurityGroupRulePrototype + var securityGroupRulePrototype infrav1.VPCSecurityGroupRulePrototype switch securityGroupRule.Direction { - case infrav1beta2.VPCSecurityGroupRuleDirectionInbound: + case infrav1.VPCSecurityGroupRuleDirectionInbound: securityGroupRulePrototype = *securityGroupRule.Source - case infrav1beta2.VPCSecurityGroupRuleDirectionOutbound: + case infrav1.VPCSecurityGroupRuleDirectionOutbound: securityGroupRulePrototype = *securityGroupRule.Destination default: return fmt.Errorf("error unsupported SecurityGroupRuleDirection defined") } - s.V(3).Info("checking security group rules for security group", "securityGroupID", securityGroupID) + log.V(3).Info("checking security group rules for security group", "securityGroupID", securityGroupID) // Each defined SecurityGroupRule can have multiple Remotes specified, each signifying a separate Security Group Rule (with the same Action, Direction, etc.) 
for _, remote := range securityGroupRulePrototype.Remotes { @@ -1444,70 +1455,70 @@ func (s *VPCClusterScope) findOrCreateSecurityGroupRule(securityGroupID string, for _, existingRuleIntf := range existingSecurityGroupRules.Rules { // Perform analysis of the existingRuleIntf, based on its Protocol type, further analysis is performed based on remaining attributes to find if the specific Rule and Remote match switch reflect.TypeOf(existingRuleIntf).String() { - case infrav1beta2.VPCSecurityGroupRuleProtocolAllType: + case infrav1.VPCSecurityGroupRuleProtocolAllType: // If our Remote doesn't define all Protocols, we don't need further checks, move on to next Rule - if securityGroupRulePrototype.Protocol != infrav1beta2.VPCSecurityGroupRuleProtocolAll { + if securityGroupRulePrototype.Protocol != infrav1.VPCSecurityGroupRuleProtocolAll { continue } existingRule := existingRuleIntf.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) // If the Remote doesn't have the same Direction as the Rule, no further checks are necessary - if securityGroupRule.Direction != infrav1beta2.VPCSecurityGroupRuleDirection(*existingRule.Direction) { + if securityGroupRule.Direction != infrav1.VPCSecurityGroupRuleDirection(*existingRule.Direction) { continue } - if found, err := s.checkSecurityGroupRuleProtocolAll(securityGroupRulePrototype, remote, existingRule); err != nil { + if found, err := s.checkSecurityGroupRuleProtocolAll(ctx, securityGroupRulePrototype, remote, existingRule); err != nil { return fmt.Errorf("error failure checking security group rule protocol all: %w", err) } else if found { // If we found the matching IBM Cloud Security Group Rule for the defined SecurityGroupRule and Remote, we can stop checking IBM Cloud Security Group Rules for this remote and move onto the next remote. // The expectation is that only one IBM Cloud Security Group Rule will match, but if at least one matches the defined SecurityGroupRule, that is sufficient. 
- s.V(3).Info("security group rule all protocol match found") + log.V(3).Info("security group rule all protocol match found") remoteMatch = true break } - case infrav1beta2.VPCSecurityGroupRuleProtocolIcmpType: + case infrav1.VPCSecurityGroupRuleProtocolIcmpType: // If our Remote doesn't define ICMP Protocol, we don't need further checks, move on to next Rule - if securityGroupRulePrototype.Protocol != infrav1beta2.VPCSecurityGroupRuleProtocolIcmp { + if securityGroupRulePrototype.Protocol != infrav1.VPCSecurityGroupRuleProtocolIcmp { continue } existingRule := existingRuleIntf.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) // If the Remote doesn't have the same Direction as the Rule, no further checks are necessary - if securityGroupRule.Direction != infrav1beta2.VPCSecurityGroupRuleDirection(*existingRule.Direction) { + if securityGroupRule.Direction != infrav1.VPCSecurityGroupRuleDirection(*existingRule.Direction) { continue } - if found, err := s.checkSecurityGroupRuleProtocolIcmp(securityGroupRulePrototype, remote, existingRule); err != nil { + if found, err := s.checkSecurityGroupRuleProtocolIcmp(ctx, securityGroupRulePrototype, remote, existingRule); err != nil { return fmt.Errorf("error failure checking security group rule protocol icmp: %w", err) } else if found { // If we found the matching IBM Cloud Security Group Rule for the defined SecurityGroupRule and Remote, we can stop checking IBM Cloud Security Group Rules for this remote and move onto the next remote. 
- s.V(3).Info("security group rule icmp match found") + log.V(3).Info("security group rule icmp match found") remoteMatch = true break } - case infrav1beta2.VPCSecurityGroupRuleProtocolTcpudpType: + case infrav1.VPCSecurityGroupRuleProtocolTcpudpType: // If our Remote doesn't define TCP/UDP Protocol, we don't need further checks, move on to next Rule - if securityGroupRulePrototype.Protocol != infrav1beta2.VPCSecurityGroupRuleProtocolTCP && securityGroupRulePrototype.Protocol != infrav1beta2.VPCSecurityGroupRuleProtocolUDP { + if securityGroupRulePrototype.Protocol != infrav1.VPCSecurityGroupRuleProtocolTCP && securityGroupRulePrototype.Protocol != infrav1.VPCSecurityGroupRuleProtocolUDP { continue } existingRule := existingRuleIntf.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) // If the Remote doesn't have the same Direction as the Rule, no further checks are necessary - if securityGroupRule.Direction != infrav1beta2.VPCSecurityGroupRuleDirection(*existingRule.Direction) { + if securityGroupRule.Direction != infrav1.VPCSecurityGroupRuleDirection(*existingRule.Direction) { continue } - if found, err := s.checkSecurityGroupRuleProtocolTcpudp(securityGroupRulePrototype, remote, existingRule); err != nil { + if found, err := s.checkSecurityGroupRuleProtocolTcpudp(ctx, securityGroupRulePrototype, remote, existingRule); err != nil { return fmt.Errorf("error failure checking security group rule protocol tcp-udp: %w", err) } else if found { // If we found the matching IBM Cloud Security Group Rule for the defined SecurityGroupRule and Remote, we can stop checking IBM Cloud Security Group Rules for this remote and move onto the next remote. 
- s.V(3).Info("security group rule tcp/udp match found") + log.V(3).Info("security group rule tcp/udp match found") remoteMatch = true break } default: // This is an unexpected IBM Cloud Security Group Rule Prototype, log it and move on - s.V(3).Info("unexpected security group rule prototype", "securityGroupRulePrototype", reflect.TypeOf(existingRuleIntf).String()) + log.V(3).Info("unexpected security group rule prototype", "securityGroupRulePrototype", reflect.TypeOf(existingRuleIntf).String()) } } // If we did not find a matching SecurityGroupRule for this defined Remote, create one now. if !remoteMatch { - err := s.createSecurityGroupRule(securityGroupID, securityGroupRule, remote) + err := s.createSecurityGroupRule(ctx, securityGroupID, securityGroupRule, remote) if err != nil { return fmt.Errorf("error failure creating security group rule: %w", err) } @@ -1517,19 +1528,21 @@ func (s *VPCClusterScope) findOrCreateSecurityGroupRule(securityGroupID string, } // checkSecurityGroupRuleProtocolAll analyzes an IBM Cloud Security Group Rule designated for 'all' protocols, to verify if the supplied Rule and Remote match the attributes from the existing 'ProtocolAll' Rule. 
-func (s *VPCClusterScope) checkSecurityGroupRuleProtocolAll(_ infrav1beta2.VPCSecurityGroupRulePrototype, securityGroupRuleRemote infrav1beta2.VPCSecurityGroupRuleRemote, existingRule *vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) (bool, error) { - if exists, err := s.checkSecurityGroupRulePrototypeRemote(securityGroupRuleRemote, existingRule.Remote); err != nil { +func (s *VPCClusterScope) checkSecurityGroupRuleProtocolAll(ctx context.Context, _ infrav1.VPCSecurityGroupRulePrototype, securityGroupRuleRemote infrav1.VPCSecurityGroupRuleRemote, existingRule *vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) (bool, error) { + log := ctrl.LoggerFrom(ctx) + if exists, err := s.checkSecurityGroupRulePrototypeRemote(ctx, securityGroupRuleRemote, existingRule.Remote); err != nil { return false, fmt.Errorf("error failed checking security group rule all remote: %w", err) } else if exists { - s.V(3).Info("security group rule all protocols match") + log.V(3).Info("security group rule all protocols match") return true, nil } return false, nil } // checkSecurityGroupRuleProtocolIcmp analyzes an IBM Cloud Security Group Rule designated for 'icmp' protocol, to verify if the supplied Rule and Remote match the attributes from the existing 'ProtocolIcmp' Rule. 
-func (s *VPCClusterScope) checkSecurityGroupRuleProtocolIcmp(securityGroupRulePrototype infrav1beta2.VPCSecurityGroupRulePrototype, securityGroupRuleRemote infrav1beta2.VPCSecurityGroupRuleRemote, existingRule *vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) (bool, error) { - if exists, err := s.checkSecurityGroupRulePrototypeRemote(securityGroupRuleRemote, existingRule.Remote); err != nil { +func (s *VPCClusterScope) checkSecurityGroupRuleProtocolIcmp(ctx context.Context, securityGroupRulePrototype infrav1.VPCSecurityGroupRulePrototype, securityGroupRuleRemote infrav1.VPCSecurityGroupRuleRemote, existingRule *vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) (bool, error) { + log := ctrl.LoggerFrom(ctx) + if exists, err := s.checkSecurityGroupRulePrototypeRemote(ctx, securityGroupRuleRemote, existingRule.Remote); err != nil { return false, fmt.Errorf("error failed checking security group rule icmp remote: %w", err) } else if !exists { return false, nil @@ -1538,30 +1551,31 @@ func (s *VPCClusterScope) checkSecurityGroupRuleProtocolIcmp(securityGroupRulePr if securityGroupRulePrototype.ICMPCode != nil && securityGroupRulePrototype.ICMPType != nil { // If the existingRule Code and Type are both equal to the securityGroupRulePrototype's ICMPType and ICMPCode, the existingRule matches our definition for ICMP in securityGroupRulePrototype. 
if *securityGroupRulePrototype.ICMPCode == *existingRule.Code && *securityGroupRulePrototype.ICMPType == *existingRule.Type { - s.V(3).Info("security group rule icmp code and type match", "icmpCode", *existingRule.Code, "icmpType", *existingRule.Type) + log.V(3).Info("security group rule icmp code and type match", "icmpCode", *existingRule.Code, "icmpType", *existingRule.Type) return true, nil } } else if existingRule.Code == nil && existingRule.Type == nil { - s.V(3).Info("security group rule unset icmp matches") + log.V(3).Info("security group rule unset icmp matches") return true, nil } return false, nil } // checkSecurityGroupRuleProtocolTcpudp analyzes an IBM Cloud Security Group Rule designated for either 'tcp' or 'udp' protocols, to verify if the supplied Rule and Remote match the attributes from the existing 'ProtocolTcpudp' Rule. -func (s *VPCClusterScope) checkSecurityGroupRuleProtocolTcpudp(securityGroupRulePrototype infrav1beta2.VPCSecurityGroupRulePrototype, securityGroupRuleRemote infrav1beta2.VPCSecurityGroupRuleRemote, existingRule *vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) (bool, error) { +func (s *VPCClusterScope) checkSecurityGroupRuleProtocolTcpudp(ctx context.Context, securityGroupRulePrototype infrav1.VPCSecurityGroupRulePrototype, securityGroupRuleRemote infrav1.VPCSecurityGroupRuleRemote, existingRule *vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) (bool, error) { + log := ctrl.LoggerFrom(ctx) // Check the protocol next, either TCP or UDP, to verify it matches - if securityGroupRulePrototype.Protocol != infrav1beta2.VPCSecurityGroupRuleProtocol(*existingRule.Protocol) { + if securityGroupRulePrototype.Protocol != infrav1.VPCSecurityGroupRuleProtocol(*existingRule.Protocol) { return false, nil } - if exists, err := s.checkSecurityGroupRulePrototypeRemote(securityGroupRuleRemote, existingRule.Remote); err != nil { + if exists, err := s.checkSecurityGroupRulePrototypeRemote(ctx, securityGroupRuleRemote, 
existingRule.Remote); err != nil { return false, fmt.Errorf("error failed checking security group rule tcp-udp remote: %w", err) } else if exists { // If PortRange is set, verify whether the MinimumPort and MaximumPort match the existingRule's values, if they are set. if securityGroupRulePrototype.PortRange != nil { if existingRule.PortMin != nil && securityGroupRulePrototype.PortRange.MinimumPort == *existingRule.PortMin && existingRule.PortMax != nil && securityGroupRulePrototype.PortRange.MaximumPort == *existingRule.PortMax { - s.V(3).Info("security group rule port range matches", "ruleID", *existingRule.ID, "portMin", *existingRule.PortMin, "portMax", *existingRule.PortMax) + log.V(3).Info("security group rule port range matches", "ruleID", *existingRule.ID, "portMin", *existingRule.PortMin, "portMax", *existingRule.PortMax) return true, nil } } @@ -1569,10 +1583,11 @@ func (s *VPCClusterScope) checkSecurityGroupRuleProtocolTcpudp(securityGroupRule return false, nil } -func (s *VPCClusterScope) checkSecurityGroupRulePrototypeRemote(securityGroupRuleRemote infrav1beta2.VPCSecurityGroupRuleRemote, existingRemote vpcv1.SecurityGroupRuleRemoteIntf) (bool, error) { //nolint: gocyclo +func (s *VPCClusterScope) checkSecurityGroupRulePrototypeRemote(ctx context.Context, securityGroupRuleRemote infrav1.VPCSecurityGroupRuleRemote, existingRemote vpcv1.SecurityGroupRuleRemoteIntf) (bool, error) { //nolint: gocyclo + log := ctrl.LoggerFrom(ctx) // NOTE(cjschaef): We only currently monitor Remote, not Local, as we don't support defining Local in SecurityGroup/SecurityGroupRule. 
switch securityGroupRuleRemote.RemoteType { - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR: + case infrav1.VPCSecurityGroupRuleRemoteTypeCIDR: cidrRule := existingRemote.(*vpcv1.SecurityGroupRuleRemote) if cidrRule.CIDRBlock == nil { return false, nil @@ -1584,19 +1599,19 @@ func (s *VPCClusterScope) checkSecurityGroupRulePrototypeRemote(securityGroupRul return false, fmt.Errorf("error failed getting subnet by name for security group rule") } if *subnetDetails.Ipv4CIDRBlock == *cidrRule.CIDRBlock { - s.V(3).Info("security group rule remote cidr's match", "remoteCIDR", *cidrRule.CIDRBlock) + log.V(3).Info("security group rule remote cidr's match", "remoteCIDR", *cidrRule.CIDRBlock) return true, nil } - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress: + case infrav1.VPCSecurityGroupRuleRemoteTypeAddress: ipRule := existingRemote.(*vpcv1.SecurityGroupRuleRemote) if ipRule.Address == nil { return false, nil } if *securityGroupRuleRemote.Address == *ipRule.Address { - s.V(3).Info("security group rule remote addresses match", "remoteAddress", *ipRule.Address) + log.V(3).Info("security group rule remote addresses match", "remoteAddress", *ipRule.Address) return true, nil } - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG: + case infrav1.VPCSecurityGroupRuleRemoteTypeSG: sgRule := existingRemote.(*vpcv1.SecurityGroupRuleRemote) if sgRule.Name == nil { return false, nil @@ -1609,7 +1624,7 @@ func (s *VPCClusterScope) checkSecurityGroupRulePrototypeRemote(securityGroupRul // Option #1: If the SecurityGroupRuleRemoteSecurityGroupReference has a name assigned, we can shortcut and simply check that if sgRule.Name != nil && *sgRule.Name == *securityGroupRuleRemote.SecurityGroupName { - s.V(3).Info("security group rule remote security group name matches", "securityGroupRuleRemoteSecurityGroupName", *sgRule.Name) + log.V(3).Info("security group rule remote security group name matches", "securityGroupRuleRemoteSecurityGroupName", *sgRule.Name) return true, nil } 
// Try to get the Security Group Id for quick lookup (from Network Status) @@ -1618,7 +1633,7 @@ func (s *VPCClusterScope) checkSecurityGroupRulePrototypeRemote(securityGroupRul if securityGroupID := s.getSecurityGroupIDFromStatus(*securityGroupRuleRemote.SecurityGroupName); securityGroupID != nil { // Option #2: If the SecurityGroupRuleRemoteSecurityGroupReference has an ID assigned, we can shortcut and simply check that if sgRule.ID != nil && *securityGroupID == *sgRule.ID { - s.V(3).Info("security group rule remote security group id matches", "securityGroupRuleRemoteSecurityGroupID", *sgRule.ID) + log.V(3).Info("security group rule remote security group id matches", "securityGroupRuleRemoteSecurityGroupID", *sgRule.ID) return true, nil } securityGroupDetails, _, err = s.VPCClient.GetSecurityGroup(&vpcv1.GetSecurityGroupOptions{ @@ -1635,36 +1650,36 @@ func (s *VPCClusterScope) checkSecurityGroupRulePrototypeRemote(securityGroupRul // Option #3: We check the SecurityGroupRuleRemoteSecurityGroupReference's CRN, if the Name and ID were not available if *securityGroupDetails.CRN == *sgRule.CRN { - s.V(3).Info("security group rule remote security group crn matches", "securityGroupRuleRemoteSecurityGroupCRN", *securityGroupDetails.CRN) + log.V(3).Info("security group rule remote security group crn matches", "securityGroupRuleRemoteSecurityGroupCRN", *securityGroupDetails.CRN) return true, nil } - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny: + case infrav1.VPCSecurityGroupRuleRemoteTypeAny: ipRule := existingRemote.(*vpcv1.SecurityGroupRuleRemote) if ipRule.Address == nil { - s.V(3).Info("security group rule remote has no address, defaults to any remote") + log.V(3).Info("security group rule remote has no address, defaults to any remote") return true, nil } - if *ipRule.Address == infrav1beta2.CIDRBlockAny { - s.V(3).Info("security group rule remote address matches %s", infrav1beta2.CIDRBlockAny) + if *ipRule.Address == infrav1.CIDRBlockAny { + 
log.V(3).Info("security group rule remote address matches %s", infrav1.CIDRBlockAny) return true, nil } default: - s.V(3).Info("unknown security group rule remote") + log.V(3).Info("unknown security group rule remote") } return false, nil } // createSecurityGroupRuleAllRemotes will create one or more IBM Cloud Security Group Rules for a specific SecurityGroup, based on the provided SecurityGroupRule and Remotes defined in the SecurityGroupRule definition (one or more Remotes can be defined per SecurityGroupRule definition). -func (s *VPCClusterScope) createSecurityGroupRuleAllRemotes(securityGroupID string, securityGroupRule infrav1beta2.VPCSecurityGroupRule) error { - var remotes []infrav1beta2.VPCSecurityGroupRuleRemote +func (s *VPCClusterScope) createSecurityGroupRuleAllRemotes(ctx context.Context, securityGroupID string, securityGroupRule infrav1.VPCSecurityGroupRule) error { + var remotes []infrav1.VPCSecurityGroupRuleRemote switch securityGroupRule.Direction { - case infrav1beta2.VPCSecurityGroupRuleDirectionInbound: + case infrav1.VPCSecurityGroupRuleDirectionInbound: remotes = securityGroupRule.Source.Remotes - case infrav1beta2.VPCSecurityGroupRuleDirectionOutbound: + case infrav1.VPCSecurityGroupRuleDirectionOutbound: remotes = securityGroupRule.Destination.Remotes } for _, remote := range remotes { - err := s.createSecurityGroupRule(securityGroupID, securityGroupRule, remote) + err := s.createSecurityGroupRule(ctx, securityGroupID, securityGroupRule, remote) if err != nil { return fmt.Errorf("error failed creating security group rule: %w", err) } @@ -1674,13 +1689,14 @@ func (s *VPCClusterScope) createSecurityGroupRuleAllRemotes(securityGroupID stri } // createSecurityGroupRule will create a new IBM Cloud Security Group Rule for a specific Security Group, based on the provided SecurityGroupRule and Remote definitions. 
-func (s *VPCClusterScope) createSecurityGroupRule(securityGroupID string, securityGroupRule infrav1beta2.VPCSecurityGroupRule, remote infrav1beta2.VPCSecurityGroupRuleRemote) error { +func (s *VPCClusterScope) createSecurityGroupRule(ctx context.Context, securityGroupID string, securityGroupRule infrav1.VPCSecurityGroupRule, remote infrav1.VPCSecurityGroupRuleRemote) error { + log := ctrl.LoggerFrom(ctx) options := &vpcv1.CreateSecurityGroupRuleOptions{ SecurityGroupID: &securityGroupID, } // Setup variables to use for logging details on the resulting IBM Cloud Security Group Rule creation options - var securityGroupRulePrototype *infrav1beta2.VPCSecurityGroupRulePrototype - if securityGroupRule.Direction == infrav1beta2.VPCSecurityGroupRuleDirectionInbound { + var securityGroupRulePrototype *infrav1.VPCSecurityGroupRulePrototype + if securityGroupRule.Direction == infrav1.VPCSecurityGroupRuleDirectionInbound { securityGroupRulePrototype = securityGroupRule.Source } else { securityGroupRulePrototype = securityGroupRule.Destination @@ -1690,14 +1706,14 @@ func (s *VPCClusterScope) createSecurityGroupRule(securityGroupID string, securi return fmt.Errorf("error failed to create security group rule remote: %w", err) } switch securityGroupRulePrototype.Protocol { - case infrav1beta2.VPCSecurityGroupRuleProtocolAll: + case infrav1.VPCSecurityGroupRuleProtocolAll: prototype := &vpcv1.SecurityGroupRulePrototypeSecurityGroupRuleProtocolAll{ Direction: ptr.To(string(securityGroupRule.Direction)), Protocol: ptr.To(string(securityGroupRulePrototype.Protocol)), Remote: prototypeRemote, } options.SetSecurityGroupRulePrototype(prototype) - case infrav1beta2.VPCSecurityGroupRuleProtocolIcmp: + case infrav1.VPCSecurityGroupRuleProtocolIcmp: prototype := &vpcv1.SecurityGroupRulePrototypeSecurityGroupRuleProtocolIcmp{ Direction: ptr.To(string(securityGroupRule.Direction)), Protocol: ptr.To(string(securityGroupRulePrototype.Protocol)), @@ -1710,7 +1726,7 @@ func (s *VPCClusterScope) 
createSecurityGroupRule(securityGroupID string, securi } options.SetSecurityGroupRulePrototype(prototype) // TCP and UDP use the same Prototype, simply with different Protocols, which is agnostic in code - case infrav1beta2.VPCSecurityGroupRuleProtocolTCP, infrav1beta2.VPCSecurityGroupRuleProtocolUDP: + case infrav1.VPCSecurityGroupRuleProtocolTCP, infrav1.VPCSecurityGroupRuleProtocolUDP: prototype := &vpcv1.SecurityGroupRulePrototypeSecurityGroupRuleProtocolTcpudp{ Direction: ptr.To(string(securityGroupRule.Direction)), Protocol: ptr.To(string(securityGroupRulePrototype.Protocol)), @@ -1726,7 +1742,7 @@ func (s *VPCClusterScope) createSecurityGroupRule(securityGroupID string, securi return fmt.Errorf("error failed creating security group rule, unknown protocol") } - s.V(3).Info("Creating Security Group Rule for Security Group", "securityGroupID", securityGroupID, "direction", securityGroupRule.Direction, "protocol", securityGroupRulePrototype.Protocol, "prototypeRemote", prototypeRemote) + log.V(3).Info("Creating Security Group Rule for Security Group", "securityGroupID", securityGroupID, "direction", securityGroupRule.Direction, "protocol", securityGroupRulePrototype.Protocol, "prototypeRemote", prototypeRemote) securityGroupRuleIntfDetails, _, err := s.VPCClient.CreateSecurityGroupRule(options) if err != nil { return fmt.Errorf("error unexpected failure creating security group rule: %w", err) @@ -1737,27 +1753,27 @@ func (s *VPCClusterScope) createSecurityGroupRule(securityGroupID string, securi // Typecast the resulting SecurityGroupRuleIntf, to retrieve the ID for logging var ruleID *string switch reflect.TypeOf(securityGroupRuleIntfDetails).String() { - case infrav1beta2.VPCSecurityGroupRuleProtocolAllType: + case infrav1.VPCSecurityGroupRuleProtocolAllType: rule := securityGroupRuleIntfDetails.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolAll) ruleID = rule.ID - case infrav1beta2.VPCSecurityGroupRuleProtocolIcmpType: + case 
infrav1.VPCSecurityGroupRuleProtocolIcmpType: rule := securityGroupRuleIntfDetails.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolIcmp) ruleID = rule.ID - case infrav1beta2.VPCSecurityGroupRuleProtocolTcpudpType: + case infrav1.VPCSecurityGroupRuleProtocolTcpudpType: rule := securityGroupRuleIntfDetails.(*vpcv1.SecurityGroupRuleSecurityGroupRuleProtocolTcpudp) ruleID = rule.ID } - s.V(3).Info("Created Security Group Rule", "ruleID", ruleID) + log.V(3).Info("Created Security Group Rule", "ruleID", ruleID) return nil } // createSecurityGroupRuleRemote will create an IBM Cloud SecurityGroupRuleRemotePrototype, which defines the Remote details for an IBM Cloud Security Group Rule, provided by the SecurityGroupRuleRemote. Lookups of Security Group CRN's, by Name, or Subnet CIDRBlock's, by Name, allows the use of CAPI created resources to be defined in the SecurityGroupRuleRemote, when the CRN or CIDRBlock are unknown (runtime defined). -func (s *VPCClusterScope) createSecurityGroupRuleRemote(remote infrav1beta2.VPCSecurityGroupRuleRemote) (*vpcv1.SecurityGroupRuleRemotePrototype, error) { +func (s *VPCClusterScope) createSecurityGroupRuleRemote(remote infrav1.VPCSecurityGroupRuleRemote) (*vpcv1.SecurityGroupRuleRemotePrototype, error) { remotePrototype := &vpcv1.SecurityGroupRuleRemotePrototype{} switch remote.RemoteType { - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeAny: - remotePrototype.CIDRBlock = ptr.To(infrav1beta2.CIDRBlockAny) - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeCIDR: + case infrav1.VPCSecurityGroupRuleRemoteTypeAny: + remotePrototype.CIDRBlock = ptr.To(infrav1.CIDRBlockAny) + case infrav1.VPCSecurityGroupRuleRemoteTypeCIDR: // As we nned the Subnet CIDR block, we have to perform an IBM Cloud API call either way, so simply make the call using the item we know, the Name subnetDetails, err := s.VPCClient.GetVPCSubnetByName(*remote.CIDRSubnetName) if err != nil { @@ -1766,9 +1782,9 @@ func (s *VPCClusterScope) 
createSecurityGroupRuleRemote(remote infrav1beta2.VPCS return nil, fmt.Errorf("error failed lookup of subnet during security group rule remote creation") } remotePrototype.CIDRBlock = subnetDetails.Ipv4CIDRBlock - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeAddress: + case infrav1.VPCSecurityGroupRuleRemoteTypeAddress: remotePrototype.Address = remote.Address - case infrav1beta2.VPCSecurityGroupRuleRemoteTypeSG: + case infrav1.VPCSecurityGroupRuleRemoteTypeSG: // As we need the Security Group CRN, we have to perform an IBM Cloud API call either way, so simply make the call using the item we know, the Name securityGroupDetails, err := s.VPCClient.GetSecurityGroupByName(*remote.SecurityGroupName) if err != nil { @@ -1786,7 +1802,7 @@ func (s *VPCClusterScope) createSecurityGroupRuleRemote(remote infrav1beta2.VPCS } // ReconcileLoadBalancers reconciles Load Balancers. -func (s *VPCClusterScope) ReconcileLoadBalancers() (bool, error) { +func (s *VPCClusterScope) ReconcileLoadBalancers(ctx context.Context) (bool, error) { // TODO(cjschaef): Determine if we want to use default LB configuration or require at least one is defined in Cluster spec. // TODO(cjschaef): Remove in favor of webhook validation. Perhaps to limit the number of LB's to one public and one private maximum. if len(s.NetworkSpec().LoadBalancers) == 0 { @@ -1810,14 +1826,14 @@ func (s *VPCClusterScope) ReconcileLoadBalancers() (bool, error) { if lbStatus != nil { s.setLoadBalancerStatus(lbStatus) // If the Load Balancer status isn't ready, flag for requeue and continue to next Load Balancer. - if isReady := s.isLoadBalancerReady(lbStatus.State); !isReady { + if isReady := s.isLoadBalancerReady(ctx, lbStatus.State); !isReady { requeue = true } continue } // Otherwise, create the Load Balancer. 
- err = s.createLoadBalancer(loadBalancer) + err = s.createLoadBalancer(ctx, loadBalancer) if err != nil { return false, fmt.Errorf("error creating load balancer: %w", err) } @@ -1830,21 +1846,22 @@ func (s *VPCClusterScope) ReconcileLoadBalancers() (bool, error) { // isLoadBalancerReady checks the state of a Load Balancer. // If state is active, true is returned, in all other cases, it returns false. // NOTE(cjschaef): May wish to extend this function to check all Load Balancer details (pools, listeners, etc.) as part of a Load Balancer being ready. -func (s *VPCClusterScope) isLoadBalancerReady(status infrav1beta2.VPCLoadBalancerState) bool { +func (s *VPCClusterScope) isLoadBalancerReady(ctx context.Context, status infrav1.VPCLoadBalancerState) bool { + log := ctrl.LoggerFrom(ctx) switch status { - case infrav1beta2.VPCLoadBalancerStateActive: - s.V(5).Info("load balancer is in active state") + case infrav1.VPCLoadBalancerStateActive: + log.V(5).Info("load balancer is in active state") return true - case infrav1beta2.VPCLoadBalancerStateCreatePending: - s.V(5).Info("load balancer is in create pending state") + case infrav1.VPCLoadBalancerStateCreatePending: + log.V(5).Info("load balancer is in create pending state") default: - s.V(5).Info("load balancer is in unexpected state", "loadBalancerStatus", status) + log.V(5).Info("load balancer is in unexpected state", "loadBalancerStatus", status) } return false } // getLoadBalancer attempts to retrieve the Load Balancer, otherwise returns nil if it doesn't exist. 
-func (s *VPCClusterScope) getLoadBalancer(lb infrav1beta2.VPCLoadBalancerSpec) (*infrav1beta2.VPCLoadBalancerStatus, error) { +func (s *VPCClusterScope) getLoadBalancer(lb infrav1.VPCLoadBalancerSpec) (*infrav1.VPCLoadBalancerStatus, error) { var loadBalancer *vpcv1.LoadBalancer var err error if lb.ID != nil { @@ -1864,7 +1881,7 @@ func (s *VPCClusterScope) getLoadBalancer(lb infrav1beta2.VPCLoadBalancerSpec) ( if lb.Public != nil && !*lb.Public { lbSuffix = privateLBSuffix } - name = fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1beta2.ResourceTypeLoadBalancer), lbSuffix) + name = fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1.ResourceTypeLoadBalancer), lbSuffix) } loadBalancer, err = s.VPCClient.GetLoadBalancerByName(name) } @@ -1874,15 +1891,16 @@ func (s *VPCClusterScope) getLoadBalancer(lb infrav1beta2.VPCLoadBalancerSpec) ( if loadBalancer == nil { return nil, nil } - return &infrav1beta2.VPCLoadBalancerStatus{ + return &infrav1.VPCLoadBalancerStatus{ ID: loadBalancer.ID, - State: infrav1beta2.VPCLoadBalancerState(*loadBalancer.ProvisioningStatus), + State: infrav1.VPCLoadBalancerState(*loadBalancer.ProvisioningStatus), Hostname: loadBalancer.Hostname, }, nil } // createLoadBalancer creates a Load Balancer. 
-func (s *VPCClusterScope) createLoadBalancer(loadBalancer infrav1beta2.VPCLoadBalancerSpec) error { +func (s *VPCClusterScope) createLoadBalancer(ctx context.Context, loadBalancer infrav1.VPCLoadBalancerSpec) error { + log := ctrl.LoggerFrom(ctx) options := &vpcv1.CreateLoadBalancerOptions{} resourceGroupID, err := s.GetResourceGroupID() if err != nil { @@ -1909,7 +1927,7 @@ func (s *VPCClusterScope) createLoadBalancer(loadBalancer infrav1beta2.VPCLoadBa if !isPublic { lbSuffix = privateLBSuffix } - name = fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1beta2.ResourceTypeLoadBalancer), lbSuffix) + name = fmt.Sprintf("%s-%s", *s.GetServiceName(infrav1.ResourceTypeLoadBalancer), lbSuffix) } options.SetName(name) @@ -1926,7 +1944,7 @@ func (s *VPCClusterScope) createLoadBalancer(loadBalancer infrav1beta2.VPCLoadBa subnet := &vpcv1.SubnetIdentityByID{ ID: ptr.To(subnetID), } - s.V(3).Info("adding subnet to load balancer", "loadBalancerName", loadBalancer.Name, "subnetID", subnetID) + log.V(3).Info("adding subnet to load balancer", "loadBalancerName", loadBalancer.Name, "subnetID", subnetID) options.Subnets = append(options.Subnets, subnet) } @@ -1939,7 +1957,7 @@ func (s *VPCClusterScope) createLoadBalancer(loadBalancer infrav1beta2.VPCLoadBa sg := &vpcv1.SecurityGroupIdentityByID{ ID: ptr.To(securityGroupID), } - s.V(3).Info("adding security group to load balancer", "loadBalancerName", loadBalancer.Name, "securityGroupID", securityGroupID) + log.V(3).Info("adding security group to load balancer", "loadBalancerName", loadBalancer.Name, "securityGroupID", securityGroupID) options.SecurityGroups = append(options.SecurityGroups, sg) } @@ -1951,11 +1969,11 @@ func (s *VPCClusterScope) createLoadBalancer(loadBalancer infrav1beta2.VPCLoadBa for _, pool := range loadBalancer.BackendPools { backendPool := s.buildLoadBalancerBackendPool(pool) - s.V(3).Info("added pool to load balancer", "loadBalancerName", loadBalancer.Name, "backendPoolName", pool.Name) + log.V(3).Info("added 
pool to load balancer", "loadBalancerName", loadBalancer.Name, "backendPoolName", pool.Name) backendPools = append(backendPools, backendPool) } } else { - s.V(3).Info("using default backend pools for load balancer", "loadBalancerName", loadBalancer.Name) + log.V(3).Info("using default backend pools for load balancer", "loadBalancerName", loadBalancer.Name) backendPools = append(backendPools, s.getDefaultLoadBalancerBackendPools()...) } options.SetPools(backendPools) @@ -1968,28 +1986,28 @@ func (s *VPCClusterScope) createLoadBalancer(loadBalancer infrav1beta2.VPCLoadBa for _, additionalListener := range loadBalancer.AdditionalListeners { listener := s.buildLoadBalancerListener(additionalListener) - s.V(3).Info("addd listener to load balancer", "loadBalancerName", loadBalancer.Name, "listenerPort", listener.Port) + log.V(3).Info("addd listener to load balancer", "loadBalancerName", loadBalancer.Name, "listenerPort", listener.Port) listeners = append(listeners, listener) } } else { - s.V(3).Info("using default listeners for load balancer", "loadBalancerName", loadBalancer.Name) + log.V(3).Info("using default listeners for load balancer", "loadBalancerName", loadBalancer.Name) listeners = append(listeners, s.getDefaultLoadBalancerListeners(loadBalancer.BackendPools == nil)...) } options.SetListeners(listeners) // Create the load balancer. - s.V(5).Info("creating new load balancer", "loadBalancerOptions", options) + log.V(5).Info("creating new load balancer", "loadBalancerOptions", options) loadBalancerDetails, _, err := s.VPCClient.CreateLoadBalancer(options) if err != nil { return fmt.Errorf("error creating load balancer: %w", err) } // Initially populate the Load Balancer's status. 
- s.setLoadBalancerStatus(&infrav1beta2.VPCLoadBalancerStatus{ + s.setLoadBalancerStatus(&infrav1.VPCLoadBalancerStatus{ ID: loadBalancerDetails.ID, ControllerCreated: ptr.To(true), Hostname: loadBalancerDetails.Hostname, - State: infrav1beta2.VPCLoadBalancerState(*loadBalancerDetails.ProvisioningStatus), + State: infrav1.VPCLoadBalancerState(*loadBalancerDetails.ProvisioningStatus), }) // NOTE: This tagging is only attempted once. We may wish to refactor in case this single attempt fails. @@ -2001,7 +2019,7 @@ func (s *VPCClusterScope) createLoadBalancer(loadBalancer infrav1beta2.VPCLoadBa } // getLoadBalancerSubnetIDs builds the set of subnet ID's for a load balancer, or defaults to the Control Plane subnet ID's if no subnets were provided. This will attempt to transform subnet names into their respective ID's. -func (s *VPCClusterScope) getLoadBalancerSubnetIDs(loadBalancer infrav1beta2.VPCLoadBalancerSpec) ([]string, error) { +func (s *VPCClusterScope) getLoadBalancerSubnetIDs(loadBalancer infrav1.VPCLoadBalancerSpec) ([]string, error) { subnetIDs := make([]string, 0) // If Subnets were provided for the load balancer, find ID's, if necessary, and use them. // Otherwise, default to trying to use the Control Plane subnets. @@ -2045,7 +2063,7 @@ func (s *VPCClusterScope) getLoadBalancerSubnetIDs(loadBalancer infrav1beta2.VPC } // getLoadBalancerSecurityGroupIDs will collect the ID's of the desired Security Groups for a Load Balancer. -func (s *VPCClusterScope) getLoadBalancerSecurityGroupIDs(loadBalancer infrav1beta2.VPCLoadBalancerSpec) ([]string, error) { +func (s *VPCClusterScope) getLoadBalancerSecurityGroupIDs(loadBalancer infrav1.VPCLoadBalancerSpec) ([]string, error) { securityGroupIDs := make([]string, 0) // If SecurityGroups were provided for the load balancer, find ID's, if necessary, and use them. 
if loadBalancer.SecurityGroups != nil { @@ -2082,7 +2100,7 @@ func (s *VPCClusterScope) getLoadBalancerSecurityGroupIDs(loadBalancer infrav1be } // buildLoadBalancerBackendPool will build a Load Balancer Pool based on the provided spec. -func (s *VPCClusterScope) buildLoadBalancerBackendPool(pool infrav1beta2.VPCLoadBalancerBackendPoolSpec) vpcv1.LoadBalancerPoolPrototypeLoadBalancerContext { +func (s *VPCClusterScope) buildLoadBalancerBackendPool(pool infrav1.VPCLoadBalancerBackendPoolSpec) vpcv1.LoadBalancerPoolPrototypeLoadBalancerContext { monitor := &vpcv1.LoadBalancerPoolHealthMonitorPrototype{ Delay: ptr.To(pool.HealthMonitor.Delay), MaxRetries: ptr.To(pool.HealthMonitor.Retries), @@ -2113,17 +2131,17 @@ func (s *VPCClusterScope) getDefaultLoadBalancerBackendPools() []vpcv1.LoadBalan defaultPools := make([]vpcv1.LoadBalancerPoolPrototypeLoadBalancerContext, 0) // For now, only one default pool is expected. - defaultPool := infrav1beta2.VPCLoadBalancerBackendPoolSpec{ - Algorithm: infrav1beta2.VPCLoadBalancerBackendPoolAlgorithmRoundRobin, - HealthMonitor: infrav1beta2.VPCLoadBalancerHealthMonitorSpec{ + defaultPool := infrav1.VPCLoadBalancerBackendPoolSpec{ + Algorithm: infrav1.VPCLoadBalancerBackendPoolAlgorithmRoundRobin, + HealthMonitor: infrav1.VPCLoadBalancerHealthMonitorSpec{ Delay: 5, Retries: 2, Timeout: 2, - Type: infrav1beta2.VPCLoadBalancerBackendPoolHealthMonitorTypeTCP, + Type: infrav1.VPCLoadBalancerBackendPoolHealthMonitorTypeTCP, }, // Use default backend pool service name. 
- Name: s.GetServiceName(infrav1beta2.ResourceTypeLoadBalancerPool), - Protocol: infrav1beta2.VPCLoadBalancerBackendPoolProtocolTCP, + Name: s.GetServiceName(infrav1.ResourceTypeLoadBalancerPool), + Protocol: infrav1.VPCLoadBalancerBackendPoolProtocolTCP, } defaultPools = append(defaultPools, s.buildLoadBalancerBackendPool(defaultPool)) @@ -2131,11 +2149,11 @@ func (s *VPCClusterScope) getDefaultLoadBalancerBackendPools() []vpcv1.LoadBalan } // buildLoadBalancerListener will create a Load Balancer Listener based on the provided spec. -func (s *VPCClusterScope) buildLoadBalancerListener(additionalListener infrav1beta2.AdditionalListenerSpec) vpcv1.LoadBalancerListenerPrototypeLoadBalancerContext { +func (s *VPCClusterScope) buildLoadBalancerListener(additionalListener infrav1.AdditionalListenerSpec) vpcv1.LoadBalancerListenerPrototypeLoadBalancerContext { listener := vpcv1.LoadBalancerListenerPrototypeLoadBalancerContext{ Port: ptr.To(additionalListener.Port), // Default protocol to TCP. - Protocol: ptr.To(string(infrav1beta2.VPCLoadBalancerListenerProtocolTCP)), + Protocol: ptr.To(string(infrav1.VPCLoadBalancerListenerProtocolTCP)), } // Override protocol if it was defined. if additionalListener.Protocol != nil { @@ -2156,13 +2174,13 @@ func (s *VPCClusterScope) getDefaultLoadBalancerListeners(defaultBackendPool boo defaultListeners := make([]vpcv1.LoadBalancerListenerPrototypeLoadBalancerContext, 0) // For now only one default listener is expected. 
- defaultListener := infrav1beta2.AdditionalListenerSpec{ + defaultListener := infrav1.AdditionalListenerSpec{ Port: int64(s.GetAPIServerPort()), - Protocol: ptr.To(infrav1beta2.VPCLoadBalancerListenerProtocolTCP), + Protocol: ptr.To(infrav1.VPCLoadBalancerListenerProtocolTCP), } if defaultBackendPool { - defaultListener.DefaultPoolName = s.GetServiceName(infrav1beta2.ResourceTypeLoadBalancerPool) + defaultListener.DefaultPoolName = s.GetServiceName(infrav1.ResourceTypeLoadBalancerPool) } defaultListeners = append(defaultListeners, s.buildLoadBalancerListener(defaultListener)) diff --git a/cloudbuild.yaml b/cloudbuild.yaml index e5549d43a..bcbf0ef44 100644 --- a/cloudbuild.yaml +++ b/cloudbuild.yaml @@ -4,7 +4,7 @@ options: substitution_option: ALLOW_LOOSE machineType: 'E2_HIGHCPU_8' steps: - - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20250116-2a05ea7e3d' + - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20250513-9264efb079' entrypoint: bash env: - DOCKER_CLI_EXPERIMENTAL=enabled diff --git a/cmd/capibmadm/cliutils/utils.go b/cmd/capibmadm/cliutils/utils.go new file mode 100644 index 000000000..15e767390 --- /dev/null +++ b/cmd/capibmadm/cliutils/utils.go @@ -0,0 +1,55 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cliutils contains utility functions for cli. 
+package cliutils + +import ( + "context" + "fmt" + + "github.com/IBM/platform-services-go-sdk/resourcemanagerv2" + + "sigs.k8s.io/cluster-api-provider-ibmcloud/cmd/capibmadm/clients/platformservices" +) + +// GetResourceGroupID returns ID of given resource group name. +func GetResourceGroupID(ctx context.Context, resourceGroup string, accountID string) (string, error) { + rmv2, err := platformservices.NewResourceManagerV2Client() + + if err != nil { + return "", err + } + + if rmv2 == nil { + return "", fmt.Errorf("unable to get resource controller") + } + + rmv2ListResourceGroupOpt := resourcemanagerv2.ListResourceGroupsOptions{Name: &resourceGroup, AccountID: &accountID} + resourceGroupListResult, _, err := rmv2.ListResourceGroupsWithContext(ctx, &rmv2ListResourceGroupOpt) + if err != nil { + return "", err + } + + if resourceGroupListResult != nil && len(resourceGroupListResult.Resources) > 0 { + rg := resourceGroupListResult.Resources[0] + resourceGroupID := *rg.ID + return resourceGroupID, nil + } + + err = fmt.Errorf("could not retrieve resource group id for %s", resourceGroup) + return "", err +} diff --git a/cmd/capibmadm/cmd/powervs/image/import.go b/cmd/capibmadm/cmd/powervs/image/import.go index 5c73b23de..1926ddf37 100644 --- a/cmd/capibmadm/cmd/powervs/image/import.go +++ b/cmd/capibmadm/cmd/powervs/image/import.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-ibmcloud/cmd/capibmadm/clients/iam" "sigs.k8s.io/cluster-api-provider-ibmcloud/cmd/capibmadm/clients/powervs" "sigs.k8s.io/cluster-api-provider-ibmcloud/cmd/capibmadm/options" - "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/utils" + "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/accounts" ) type imageImportOptions struct { @@ -80,7 +80,7 @@ capibmadm powervs image import --service-instance-id -b --description func createPort(ctx context.Context, portCreateOption portCreateOptions) error { logger := log.Log logger.Info("Creating Port ", "Network 
ID/Name", portCreateOption.network, "IP Address", portCreateOption.ipAddress, "Description", portCreateOption.description, "service-instance-id", options.GlobalOptions.ServiceInstanceID, "zone", options.GlobalOptions.PowerVSZone) - accountID, err := pkgUtils.GetAccount(iam.GetIAMAuth()) + accountID, err := accounts.GetAccount(iam.GetIAMAuth()) if err != nil { return err } @@ -100,12 +100,12 @@ func createPort(ctx context.Context, portCreateOption portCreateOptions) error { } portInfo.Items = append(portInfo.Items, PSpec{ - Description: utils.DereferencePointer(port.Description).(string), + Description: pointer.Dereference(port.Description).(string), ExternalIP: port.ExternalIP, - IPAddress: utils.DereferencePointer(port.IPAddress).(string), - MacAddress: utils.DereferencePointer(port.MacAddress).(string), - PortID: utils.DereferencePointer(port.PortID).(string), - Status: utils.DereferencePointer(port.Status).(string), + IPAddress: pointer.Dereference(port.IPAddress).(string), + MacAddress: pointer.Dereference(port.MacAddress).(string), + PortID: pointer.Dereference(port.PortID).(string), + Status: pointer.Dereference(port.Status).(string), }) printerObj, err := printer.New(options.GlobalOptions.Output, os.Stdout) diff --git a/cmd/capibmadm/cmd/powervs/port/delete.go b/cmd/capibmadm/cmd/powervs/port/delete.go index 68a373c40..2415de739 100755 --- a/cmd/capibmadm/cmd/powervs/port/delete.go +++ b/cmd/capibmadm/cmd/powervs/port/delete.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/cluster-api-provider-ibmcloud/cmd/capibmadm/clients/iam" "sigs.k8s.io/cluster-api-provider-ibmcloud/cmd/capibmadm/clients/powervs" "sigs.k8s.io/cluster-api-provider-ibmcloud/cmd/capibmadm/options" - "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/utils" + "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/accounts" ) type portDeleteOptions struct { @@ -62,7 +62,7 @@ capibmadm powervs port delete --port-id --network 0 { - rg := resourceGroupListResult.Resources[0] - 
resourceGroupID := *rg.ID - return resourceGroupID, nil - } - - err = fmt.Errorf("could not retrieve resource group id for %s", resourceGroup) - return "", err -} - -// DereferencePointer dereferences pointer. -func DereferencePointer(value interface{}) interface{} { - switch v := value.(type) { - case *string: - if v != nil { - return *v - } - return "" - case *int, *int8, *int16, *int32, *int64: - i := value.(*int64) - if i != nil { - return *i - } - return 0 - case *strfmt.DateTime: - if v != nil { - return *v - } - return strfmt.DateTime{} - case *bool: - if v != nil { - return *v - } - return false - case *float32, *float64: - f := value.(*float64) - if f != nil { - return *f - } - return 0 - } - return nil -} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsclusters.yaml index daaa224b2..75bbc0601 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: ibmpowervsclusters.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -301,6 +301,54 @@ spec: - tcp - udp type: string + selector: + description: |- + The selector is used to find IBMPowerVSMachines with matching labels. + If the label matches, the machine is then added to the load balancer listener configuration. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic required: - port type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsclustertemplates.yaml index 79e17cf6f..3faafe1be 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsclustertemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: ibmpowervsclustertemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -336,6 +336,55 @@ spec: - tcp - udp type: string + selector: + description: |- + The selector is used to find IBMPowerVSMachines with matching labels. 
+ If the label matches, the machine is then added to the load balancer listener configuration. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic required: - port type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsimages.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsimages.yaml index 1e2841cd2..9184e157e 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsimages.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsimages.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: ibmpowervsimages.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -79,6 +79,7 @@ spec: description: Type of storage, storage pool with the most available space will be selected. enum: + - tier0 - tier1 - tier3 type: string @@ -253,6 +254,7 @@ spec: description: Type of storage, storage pool with the most available space will be selected. enum: + - tier0 - tier1 - tier3 type: string @@ -330,6 +332,74 @@ spec: ready: description: Ready is true when the provider resource is ready. type: boolean + v1beta2: + description: v1beta2 groups all the fields that will be added or modified + in IBMPowerVSCluster's status with the V1Beta2 version. + properties: + conditions: + description: conditions represents the observations of a DevCluster's + current state. + items: + description: Condition contains details for one aspect of the + current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. 
+ This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object type: object type: object served: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsmachines.yaml index 530672281..844ee8b47 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsmachines.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: ibmpowervsmachines.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsmachinetemplates.yaml index 3fb209320..fbdc36427 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmpowervsmachinetemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: ibmpowervsmachinetemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcclusters.yaml index a76af55c2..a177ba084 100644 --- 
a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: ibmvpcclusters.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -293,6 +293,54 @@ spec: - tcp - udp type: string + selector: + description: |- + The selector is used to find IBMPowerVSMachines with matching labels. + If the label matches, the machine is then added to the load balancer listener configuration. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic required: - port type: object @@ -566,6 +614,55 @@ spec: - tcp - udp type: string + selector: + description: |- + The selector is used to find IBMPowerVSMachines with matching labels. + If the label matches, the machine is then added to the load balancer listener configuration. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic required: - port type: object @@ -1401,6 +1498,74 @@ spec: zone: type: string type: object + v1beta2: + description: V1beta2 groups all the fields that will be added or modified + in IBMVPCCluster's status with the V1Beta2 version. 
+ properties: + conditions: + description: Conditions represents the observations of a IBMVPCCluster's + current state. + items: + description: Condition contains details for one aspect of the + current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object vpc: description: |- Important: Run "make" to regenerate code after modifying this file diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcclustertemplates.yaml index 2d08691fe..7aee807bb 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcclustertemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: ibmvpcclustertemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -136,6 +136,55 @@ spec: - tcp - udp type: string + selector: + description: |- + The selector is used to find IBMPowerVSMachines with matching labels. + If the label matches, the machine is then added to the load balancer listener configuration. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic required: - port type: object @@ -417,6 +466,55 @@ spec: - tcp - udp type: string + selector: + description: |- + The selector is used to find IBMPowerVSMachines with matching labels. + If the label matches, the machine is then added to the load balancer listener configuration. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic required: - port type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachines.yaml index 14375be72..8539b241d 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachines.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: ibmvpcmachines.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -626,6 +626,74 @@ spec: ready: description: Ready is true when the provider resource is ready. type: boolean + v1beta2: + description: V1beta2 groups all the fields that will be added or modified + in IBMVPCMachine's status with the V1Beta2 version. + properties: + conditions: + description: Conditions represents the observations of a IBMVPCMachine's + current state. + items: + description: Condition contains details for one aspect of the + current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object type: object type: object served: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachinetemplates.yaml index 0a9b9e393..2663fc4d5 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachinetemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: ibmvpcmachinetemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/controllers/ibmpowervscluster_controller.go b/controllers/ibmpowervscluster_controller.go index 51186912e..c61ac7ffd 100644 --- a/controllers/ibmpowervscluster_controller.go +++ b/controllers/ibmpowervscluster_controller.go @@ -39,16 +39,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck + v1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" //nolint:staticcheck + v1beta1patch 
"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/paused" "sigs.k8s.io/cluster-api/util/finalizers" - "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/paused" "sigs.k8s.io/cluster-api/util/predicates" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/endpoints" @@ -68,7 +69,7 @@ type IBMPowerVSClusterReconciler struct { } type powerVSCluster struct { - cluster *infrav1beta2.IBMPowerVSCluster + cluster *infrav1.IBMPowerVSCluster mu sync.Mutex } @@ -88,7 +89,7 @@ func (r *IBMPowerVSClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re defer log.Info("Finished reconciling IBMPowerVSCluster") // Fetch the IBMPowerVSCluster instance. - ibmPowerVSCluster := &infrav1beta2.IBMPowerVSCluster{} + ibmPowerVSCluster := &infrav1.IBMPowerVSCluster{} if err := r.Client.Get(ctx, req.NamespacedName, ibmPowerVSCluster); err != nil { if apierrors.IsNotFound(err) { log.Info("IBMPowerVSCluster not found") @@ -98,7 +99,7 @@ func (r *IBMPowerVSClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re } // Add finalizer first if not set to avoid the race condition between init and delete. 
- if finalizerAdded, err := finalizers.EnsureFinalizer(ctx, r.Client, ibmPowerVSCluster, infrav1beta2.IBMPowerVSClusterFinalizer); err != nil || finalizerAdded { + if finalizerAdded, err := finalizers.EnsureFinalizer(ctx, r.Client, ibmPowerVSCluster, infrav1.IBMPowerVSClusterFinalizer); err != nil || finalizerAdded { return ctrl.Result{}, err } @@ -133,7 +134,7 @@ func (r *IBMPowerVSClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re } // Initialize the patch helper - patchHelper, err := patch.NewHelper(ibmPowerVSCluster, r.Client) + patchHelper, err := v1beta1patch.NewHelper(ibmPowerVSCluster, r.Client) if err != nil { return ctrl.Result{}, err } @@ -200,6 +201,7 @@ func (r *IBMPowerVSClusterReconciler) reconcile(ctx context.Context, clusterScop var errList []error // receive return values from the channel and decide the requeue for val := range ch { + //nolint:staticcheck if val.Requeue { requeue = true } @@ -219,11 +221,11 @@ func (r *IBMPowerVSClusterReconciler) reconcile(ctx context.Context, clusterScop // reconcile Transit Gateway log.Info("Reconciling transit gateway") if requeue, err := clusterScope.ReconcileTransitGateway(ctx); err != nil { - conditions.MarkFalse(powerVSCluster.cluster, infrav1beta2.TransitGatewayReadyCondition, infrav1beta2.TransitGatewayReconciliationFailedReason, capiv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(powerVSCluster.cluster, infrav1.TransitGatewayReadyCondition, infrav1.TransitGatewayReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.TransitGatewayReadyV1Beta2Condition, + Type: infrav1.TransitGatewayReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.TransitGatewayNotReadyV1Beta2Reason, + Reason: infrav1.TransitGatewayNotReadyV1Beta2Reason, Message: err.Error(), }) return reconcile.Result{}, fmt.Errorf("failed to reconcile transit gateway: 
%w", err) @@ -231,40 +233,40 @@ func (r *IBMPowerVSClusterReconciler) reconcile(ctx context.Context, clusterScop log.Info("Creating a transit gateway is pending, requeuing") return reconcile.Result{RequeueAfter: 1 * time.Minute}, nil } - conditions.MarkTrue(powerVSCluster.cluster, infrav1beta2.TransitGatewayReadyCondition) + v1beta1conditions.MarkTrue(powerVSCluster.cluster, infrav1.TransitGatewayReadyCondition) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.TransitGatewayReadyV1Beta2Condition, + Type: infrav1.TransitGatewayReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: infrav1beta2.TransitGatewayReadyV1Beta2Reason, + Reason: infrav1.TransitGatewayReadyV1Beta2Reason, }) // reconcile COSInstance if clusterScope.IBMPowerVSCluster.Spec.Ignition != nil { log.Info("Reconciling COS service instance") if err := clusterScope.ReconcileCOSInstance(ctx); err != nil { - conditions.MarkFalse(powerVSCluster.cluster, infrav1beta2.COSInstanceReadyCondition, infrav1beta2.COSInstanceReconciliationFailedReason, capiv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(powerVSCluster.cluster, infrav1.COSInstanceReadyCondition, infrav1.COSInstanceReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.COSInstanceReadyV1Beta2Condition, + Type: infrav1.COSInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.COSInstanceNotReadyV1Beta2Reason, + Reason: infrav1.COSInstanceNotReadyV1Beta2Reason, Message: err.Error(), }) return reconcile.Result{}, fmt.Errorf("failed to reconcile COS instance: %w", err) } - conditions.MarkTrue(powerVSCluster.cluster, infrav1beta2.COSInstanceReadyCondition) + v1beta1conditions.MarkTrue(powerVSCluster.cluster, infrav1.COSInstanceReadyCondition) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: 
infrav1beta2.COSInstanceReadyV1Beta2Condition, + Type: infrav1.COSInstanceReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: infrav1beta2.COSInstanceReadyV1Beta2Reason, + Reason: infrav1.COSInstanceReadyV1Beta2Reason, }) } var networkReady, loadBalancerReady bool for _, cond := range clusterScope.IBMPowerVSCluster.Status.Conditions { - if cond.Type == infrav1beta2.NetworkReadyCondition && cond.Status == corev1.ConditionTrue { + if cond.Type == infrav1.NetworkReadyCondition && cond.Status == corev1.ConditionTrue { networkReady = true } - if cond.Type == infrav1beta2.LoadBalancerReadyCondition && cond.Status == corev1.ConditionTrue { + if cond.Type == infrav1.LoadBalancerReadyCondition && cond.Status == corev1.ConditionTrue { loadBalancerReady = true } } @@ -303,18 +305,18 @@ func (r *IBMPowerVSClusterReconciler) reconcilePowerVSResources(ctx context.Cont // reconcile PowerVS service instance log.Info("Reconciling PowerVS service instance") if requeue, err := clusterScope.ReconcilePowerVSServiceInstance(ctx); err != nil { - powerVSCluster.updateCondition(capiv1beta1.Condition{ + powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionFalse, - Type: infrav1beta2.ServiceInstanceReadyCondition, - Reason: infrav1beta2.ServiceInstanceReconciliationFailedReason, - Severity: capiv1beta1.ConditionSeverityError, + Type: infrav1.ServiceInstanceReadyCondition, + Reason: infrav1.ServiceInstanceReconciliationFailedReason, + Severity: clusterv1beta1.ConditionSeverityError, Message: err.Error(), }) //TODO: When we completely transition into v1beta2 api's update the conditions with lock v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.WorkspaceReadyV1Beta2Condition, + Type: infrav1.WorkspaceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.WorkspaceNotReadyV1Beta2Reason, + Reason: infrav1.WorkspaceNotReadyV1Beta2Reason, Message: err.Error(), }) ch <- 
reconcileResult{reconcile.Result{}, fmt.Errorf("failed to reconcile PowerVS service instance: %w", err)} @@ -324,14 +326,14 @@ func (r *IBMPowerVSClusterReconciler) reconcilePowerVSResources(ctx context.Cont ch <- reconcileResult{reconcile.Result{Requeue: true}, nil} return } - powerVSCluster.updateCondition(capiv1beta1.Condition{ + powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionTrue, - Type: infrav1beta2.ServiceInstanceReadyCondition, + Type: infrav1.ServiceInstanceReadyCondition, }) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.WorkspaceReadyV1Beta2Condition, + Type: infrav1.WorkspaceReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: infrav1beta2.WorkspaceReadyV1Beta2Reason, + Reason: infrav1.WorkspaceReadyV1Beta2Reason, }) clusterScope.IBMPowerVSClient.WithClients(powervs.ServiceOptions{CloudInstanceID: clusterScope.GetServiceInstanceID()}) @@ -339,30 +341,30 @@ func (r *IBMPowerVSClusterReconciler) reconcilePowerVSResources(ctx context.Cont // reconcile network log.Info("Reconciling network") if networkActive, err := clusterScope.ReconcileNetwork(ctx); err != nil { - powerVSCluster.updateCondition(capiv1beta1.Condition{ + powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionFalse, - Type: infrav1beta2.NetworkReadyCondition, - Reason: infrav1beta2.NetworkReconciliationFailedReason, - Severity: capiv1beta1.ConditionSeverityError, + Type: infrav1.NetworkReadyCondition, + Reason: infrav1.NetworkReconciliationFailedReason, + Severity: clusterv1beta1.ConditionSeverityError, Message: err.Error(), }) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.NetworkReadyV1Beta2Condition, + Type: infrav1.NetworkReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.NetworkNotReadyV1Beta2Reason, + Reason: infrav1.NetworkNotReadyV1Beta2Reason, Message: err.Error(), }) ch <- reconcileResult{reconcile.Result{}, 
fmt.Errorf("failed to reconcile network: %w", err)} return } else if networkActive { - powerVSCluster.updateCondition(capiv1beta1.Condition{ + powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionTrue, - Type: infrav1beta2.NetworkReadyCondition, + Type: infrav1.NetworkReadyCondition, }) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.NetworkReadyV1Beta2Condition, + Type: infrav1.NetworkReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: infrav1beta2.NetworkReadyV1Beta2Reason, + Reason: infrav1.NetworkReadyV1Beta2Reason, }) return } @@ -380,17 +382,17 @@ func (r *IBMPowerVSClusterReconciler) reconcileVPCResources(ctx context.Context, defer log.Info("Finished VPC reconciliation") if requeue, err := clusterScope.ReconcileVPC(ctx); err != nil { - powerVSCluster.updateCondition(capiv1beta1.Condition{ + powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionFalse, - Type: infrav1beta2.VPCReadyCondition, - Reason: infrav1beta2.VPCReconciliationFailedReason, - Severity: capiv1beta1.ConditionSeverityError, + Type: infrav1.VPCReadyCondition, + Reason: infrav1.VPCReconciliationFailedReason, + Severity: clusterv1beta1.ConditionSeverityError, Message: err.Error(), }) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.VPCReadyV1Beta2Condition, + Type: infrav1.VPCReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.VPCNotReadyV1Beta2Reason, + Reason: infrav1.VPCNotReadyV1Beta2Reason, Message: err.Error(), }) ch <- reconcileResult{reconcile.Result{}, fmt.Errorf("failed to reconcile VPC: %w", err)} @@ -400,30 +402,30 @@ func (r *IBMPowerVSClusterReconciler) reconcileVPCResources(ctx context.Context, ch <- reconcileResult{reconcile.Result{Requeue: true}, nil} return } - powerVSCluster.updateCondition(capiv1beta1.Condition{ + powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionTrue, - Type: 
infrav1beta2.VPCReadyCondition, + Type: infrav1.VPCReadyCondition, }) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.VPCReadyV1Beta2Condition, + Type: infrav1.VPCReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: infrav1beta2.VPCReadyV1Beta2Reason, + Reason: infrav1.VPCReadyV1Beta2Reason, }) // reconcile VPC Subnet log.Info("Reconciling VPC subnets") if requeue, err := clusterScope.ReconcileVPCSubnets(ctx); err != nil { - powerVSCluster.updateCondition(capiv1beta1.Condition{ + powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionFalse, - Type: infrav1beta2.VPCSubnetReadyCondition, - Reason: infrav1beta2.VPCSubnetReconciliationFailedReason, - Severity: capiv1beta1.ConditionSeverityError, + Type: infrav1.VPCSubnetReadyCondition, + Reason: infrav1.VPCSubnetReconciliationFailedReason, + Severity: clusterv1beta1.ConditionSeverityError, Message: err.Error(), }) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.VPCSubnetReadyV1Beta2Condition, + Type: infrav1.VPCSubnetReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.VPCSubnetNotReadyV1Beta2Reason, + Reason: infrav1.VPCSubnetNotReadyV1Beta2Reason, Message: err.Error(), }) ch <- reconcileResult{reconcile.Result{}, fmt.Errorf("failed to reconcile VPC subnets: %w", err)} @@ -433,72 +435,72 @@ func (r *IBMPowerVSClusterReconciler) reconcileVPCResources(ctx context.Context, ch <- reconcileResult{reconcile.Result{Requeue: true}, nil} return } - powerVSCluster.updateCondition(capiv1beta1.Condition{ + powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionTrue, - Type: infrav1beta2.VPCSubnetReadyCondition, + Type: infrav1.VPCSubnetReadyCondition, }) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.VPCSubnetReadyV1Beta2Condition, + Type: infrav1.VPCSubnetReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: 
infrav1beta2.VPCSubnetReadyV1Beta2Reason, + Reason: infrav1.VPCSubnetReadyV1Beta2Reason, }) // reconcile VPC security group log.Info("Reconciling VPC security group") if err := clusterScope.ReconcileVPCSecurityGroups(ctx); err != nil { - powerVSCluster.updateCondition(capiv1beta1.Condition{ + powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionFalse, - Type: infrav1beta2.VPCSecurityGroupReadyCondition, - Reason: infrav1beta2.VPCSecurityGroupReconciliationFailedReason, - Severity: capiv1beta1.ConditionSeverityError, + Type: infrav1.VPCSecurityGroupReadyCondition, + Reason: infrav1.VPCSecurityGroupReconciliationFailedReason, + Severity: clusterv1beta1.ConditionSeverityError, Message: err.Error(), }) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.VPCSecurityGroupReadyV1Beta2Condition, + Type: infrav1.VPCSecurityGroupReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.VPCSecurityGroupNotReadyV1Beta2Reason, + Reason: infrav1.VPCSecurityGroupNotReadyV1Beta2Reason, Message: err.Error(), }) ch <- reconcileResult{reconcile.Result{}, fmt.Errorf("failed to reconcile VPC security groups: %w", err)} return } - powerVSCluster.updateCondition(capiv1beta1.Condition{ + powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionTrue, - Type: infrav1beta2.VPCSecurityGroupReadyCondition, + Type: infrav1.VPCSecurityGroupReadyCondition, }) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.VPCSecurityGroupReadyV1Beta2Condition, + Type: infrav1.VPCSecurityGroupReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: infrav1beta2.VPCSecurityGroupReadyV1Beta2Reason, + Reason: infrav1.VPCSecurityGroupReadyV1Beta2Reason, }) // reconcile LoadBalancer log.Info("Reconciling VPC load balancers") if loadBalancerReady, err := clusterScope.ReconcileLoadBalancers(ctx); err != nil { - powerVSCluster.updateCondition(capiv1beta1.Condition{ + 
powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionFalse, - Type: infrav1beta2.LoadBalancerReadyCondition, - Reason: infrav1beta2.LoadBalancerReconciliationFailedReason, - Severity: capiv1beta1.ConditionSeverityError, + Type: infrav1.LoadBalancerReadyCondition, + Reason: infrav1.LoadBalancerReconciliationFailedReason, + Severity: clusterv1beta1.ConditionSeverityError, Message: err.Error(), }) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.VPCLoadBalancerReadyV1Beta2Condition, + Type: infrav1.VPCLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.VPCLoadBalancerNotReadyV1Beta2Reason, + Reason: infrav1.VPCLoadBalancerNotReadyV1Beta2Reason, Message: err.Error(), }) ch <- reconcileResult{reconcile.Result{}, fmt.Errorf("failed to reconcile VPC load balancers: %w", err)} return } else if loadBalancerReady { - powerVSCluster.updateCondition(capiv1beta1.Condition{ + powerVSCluster.updateCondition(clusterv1beta1.Condition{ Status: corev1.ConditionTrue, - Type: infrav1beta2.LoadBalancerReadyCondition, + Type: infrav1.LoadBalancerReadyCondition, }) v1beta2conditions.Set(powerVSCluster.cluster, metav1.Condition{ - Type: infrav1beta2.VPCLoadBalancerReadyV1Beta2Condition, + Type: infrav1.VPCLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: infrav1beta2.VPCLoadBalancerReadyV1Beta2Reason, + Reason: infrav1.VPCLoadBalancerReadyV1Beta2Reason, }) return } @@ -521,7 +523,7 @@ func (r *IBMPowerVSClusterReconciler) reconcileDelete(ctx context.Context, clust // check for annotation set for cluster resource and decide on proceeding with infra deletion. 
if !scope.CheckCreateInfraAnnotation(*clusterScope.IBMPowerVSCluster) { log.Info("IBMPowerVSCluster has no infra annotation, removing finalizer") - controllerutil.RemoveFinalizer(cluster, infrav1beta2.IBMPowerVSClusterFinalizer) + controllerutil.RemoveFinalizer(cluster, infrav1.IBMPowerVSClusterFinalizer) return ctrl.Result{}, nil } @@ -530,9 +532,9 @@ func (r *IBMPowerVSClusterReconciler) reconcileDelete(ctx context.Context, clust log.Info("Deleting transit gateway") v1beta2conditions.Set(clusterScope.IBMPowerVSCluster, metav1.Condition{ - Type: infrav1beta2.TransitGatewayReadyV1Beta2Condition, + Type: infrav1.TransitGatewayReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.TransitGatewayDeletingV1Beta2Reason, + Reason: infrav1.TransitGatewayDeletingV1Beta2Reason, }) if requeue, err := clusterScope.DeleteTransitGateway(ctx); err != nil { allErrs = append(allErrs, fmt.Errorf("failed to delete transit gateway: %w", err)) @@ -543,9 +545,9 @@ func (r *IBMPowerVSClusterReconciler) reconcileDelete(ctx context.Context, clust log.Info("Deleting VPC load balancer") v1beta2conditions.Set(clusterScope.IBMPowerVSCluster, metav1.Condition{ - Type: infrav1beta2.VPCLoadBalancerReadyV1Beta2Condition, + Type: infrav1.VPCLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.VPCLoadBalancerDeletingV1Beta2Reason, + Reason: infrav1.VPCLoadBalancerDeletingV1Beta2Reason, }) if requeue, err := clusterScope.DeleteLoadBalancer(ctx); err != nil { allErrs = append(allErrs, fmt.Errorf("failed to delete VPC load balancer: %w", err)) @@ -556,9 +558,9 @@ func (r *IBMPowerVSClusterReconciler) reconcileDelete(ctx context.Context, clust log.Info("Deleting VPC security group") v1beta2conditions.Set(clusterScope.IBMPowerVSCluster, metav1.Condition{ - Type: infrav1beta2.VPCSecurityGroupReadyV1Beta2Condition, + Type: infrav1.VPCSecurityGroupReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: 
infrav1beta2.VPCSecurityGroupDeletingV1Beta2Reason, + Reason: infrav1.VPCSecurityGroupDeletingV1Beta2Reason, }) if err := clusterScope.DeleteVPCSecurityGroups(ctx); err != nil { allErrs = append(allErrs, fmt.Errorf("failed to delete VPC security group: %w", err)) @@ -566,9 +568,9 @@ func (r *IBMPowerVSClusterReconciler) reconcileDelete(ctx context.Context, clust log.Info("Deleting VPC subnet") v1beta2conditions.Set(clusterScope.IBMPowerVSCluster, metav1.Condition{ - Type: infrav1beta2.VPCSubnetReadyV1Beta2Condition, + Type: infrav1.VPCSubnetReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.VPCSubnetDeletingV1Beta2Reason, + Reason: infrav1.VPCSubnetDeletingV1Beta2Reason, }) if requeue, err := clusterScope.DeleteVPCSubnet(ctx); err != nil { allErrs = append(allErrs, fmt.Errorf("failed to delete VPC subnet: %w", err)) @@ -579,9 +581,9 @@ func (r *IBMPowerVSClusterReconciler) reconcileDelete(ctx context.Context, clust log.Info("Deleting VPC") v1beta2conditions.Set(clusterScope.IBMPowerVSCluster, metav1.Condition{ - Type: infrav1beta2.VPCReadyV1Beta2Condition, + Type: infrav1.VPCReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.VPCDeletingV1Beta2Reason, + Reason: infrav1.VPCDeletingV1Beta2Reason, }) if requeue, err := clusterScope.DeleteVPC(ctx); err != nil { allErrs = append(allErrs, fmt.Errorf("failed to delete VPC: %w", err)) @@ -592,9 +594,9 @@ func (r *IBMPowerVSClusterReconciler) reconcileDelete(ctx context.Context, clust log.Info("Deleting DHCP server") v1beta2conditions.Set(clusterScope.IBMPowerVSCluster, metav1.Condition{ - Type: infrav1beta2.NetworkReadyV1Beta2Condition, + Type: infrav1.NetworkReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.NetworkDeletingV1Beta2Reason, + Reason: infrav1.NetworkDeletingV1Beta2Reason, }) if err := clusterScope.DeleteDHCPServer(ctx); err != nil { allErrs = append(allErrs, fmt.Errorf("failed to delete DHCP server: %w", err)) @@ -602,9 +604,9 @@ 
func (r *IBMPowerVSClusterReconciler) reconcileDelete(ctx context.Context, clust log.Info("Deleting PowerVS service instance") v1beta2conditions.Set(clusterScope.IBMPowerVSCluster, metav1.Condition{ - Type: infrav1beta2.WorkspaceReadyV1Beta2Condition, + Type: infrav1.WorkspaceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.WorkspaceDeletingV1Beta2Reason, + Reason: infrav1.WorkspaceDeletingV1Beta2Reason, }) if requeue, err := clusterScope.DeleteServiceInstance(ctx); err != nil { allErrs = append(allErrs, fmt.Errorf("failed to delete PowerVS service instance: %w", err)) @@ -615,9 +617,9 @@ func (r *IBMPowerVSClusterReconciler) reconcileDelete(ctx context.Context, clust if clusterScope.IBMPowerVSCluster.Spec.Ignition != nil { v1beta2conditions.Set(clusterScope.IBMPowerVSCluster, metav1.Condition{ - Type: infrav1beta2.COSInstanceReadyV1Beta2Condition, + Type: infrav1.COSInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.COSInstanceDeletingV1Beta2Reason, + Reason: infrav1.COSInstanceDeletingV1Beta2Reason, }) log.Info("Deleting COS service instance") if err := clusterScope.DeleteCOSInstance(ctx); err != nil { @@ -630,14 +632,14 @@ func (r *IBMPowerVSClusterReconciler) reconcileDelete(ctx context.Context, clust } log.Info("IBMPowerVSCluster deletion completed") - controllerutil.RemoveFinalizer(cluster, infrav1beta2.IBMPowerVSClusterFinalizer) + controllerutil.RemoveFinalizer(cluster, infrav1.IBMPowerVSClusterFinalizer) return ctrl.Result{}, nil } -func (update *powerVSCluster) updateCondition(condition capiv1beta1.Condition) { +func (update *powerVSCluster) updateCondition(condition clusterv1beta1.Condition) { update.mu.Lock() defer update.mu.Unlock() - conditions.Set(update.cluster, &condition) + v1beta1conditions.Set(update.cluster, &condition) } func (r *IBMPowerVSClusterReconciler) deleteIBMPowerVSImage(ctx context.Context, clusterScope *scope.PowerVSClusterScope) (ctrl.Result, error) { @@ -697,7 +699,7 @@ 
func (r *IBMPowerVSClusterReconciler) deleteIBMPowerVSImage(ctx context.Context, } type clusterDescendants struct { - ibmPowerVSImages infrav1beta2.IBMPowerVSImageList + ibmPowerVSImages infrav1.IBMPowerVSImageList } // length returns the number of descendants. @@ -719,12 +721,12 @@ func (c *clusterDescendants) descendantNames() string { } // listDescendants returns a list of all IBMPowerVSImages for the cluster. -func (r *IBMPowerVSClusterReconciler) listDescendants(ctx context.Context, cluster *infrav1beta2.IBMPowerVSCluster) (clusterDescendants, error) { +func (r *IBMPowerVSClusterReconciler) listDescendants(ctx context.Context, cluster *infrav1.IBMPowerVSCluster) (clusterDescendants, error) { var descendants clusterDescendants listOptions := []client.ListOption{ client.InNamespace(cluster.Namespace), - client.MatchingLabels(map[string]string{capiv1beta1.ClusterNameLabel: cluster.Name}), + client.MatchingLabels(map[string]string{clusterv1beta1.ClusterNameLabel: cluster.Name}), } if err := r.Client.List(ctx, &descendants.ibmPowerVSImages, listOptions...); err != nil { @@ -736,7 +738,7 @@ func (r *IBMPowerVSClusterReconciler) listDescendants(ctx context.Context, clust // filterOwnedDescendants returns an array of runtime.Objects containing only those descendants that have the cluster // as an owner reference. -func (c *clusterDescendants) filterOwnedDescendants(cluster *infrav1beta2.IBMPowerVSCluster) ([]client.Object, error) { +func (c *clusterDescendants) filterOwnedDescendants(cluster *infrav1.IBMPowerVSCluster) ([]client.Object, error) { var ownedDescendants []client.Object eachFunc := func(o runtime.Object) error { obj := o.(client.Object) @@ -766,7 +768,7 @@ func (c *clusterDescendants) filterOwnedDescendants(cluster *infrav1beta2.IBMPow } // patchIBMPowerVSCluster updates the IBMPowerVSCluster and its status on the API server. 
-func patchIBMPowerVSCluster(ctx context.Context, patchHelper *patch.Helper, ibmPowerVSCluster *infrav1beta2.IBMPowerVSCluster) error { +func patchIBMPowerVSCluster(ctx context.Context, patchHelper *v1beta1patch.Helper, ibmPowerVSCluster *infrav1.IBMPowerVSCluster) error { // we don't need to set any conditions for IBMPowerVSCluster without create infra annotation. if !scope.CheckCreateInfraAnnotation(*ibmPowerVSCluster) { if err := patchHelper.Patch(ctx, ibmPowerVSCluster); err != nil { @@ -775,46 +777,46 @@ func patchIBMPowerVSCluster(ctx context.Context, patchHelper *patch.Helper, ibmP return nil } - if err := v1beta2conditions.SetSummaryCondition(ibmPowerVSCluster, ibmPowerVSCluster, infrav1beta2.IBMPowerVSClusterReadyV1Beta2Condition, + if err := v1beta2conditions.SetSummaryCondition(ibmPowerVSCluster, ibmPowerVSCluster, infrav1.IBMPowerVSClusterReadyV1Beta2Condition, v1beta2conditions.ForConditionTypes{ - infrav1beta2.WorkspaceReadyV1Beta2Condition, - infrav1beta2.NetworkReadyV1Beta2Condition, - infrav1beta2.VPCReadyV1Beta2Condition, - infrav1beta2.VPCSubnetReadyV1Beta2Condition, - infrav1beta2.VPCSecurityGroupReadyV1Beta2Condition, - infrav1beta2.VPCLoadBalancerReadyV1Beta2Condition, - infrav1beta2.TransitGatewayReadyV1Beta2Condition, - infrav1beta2.COSInstanceReadyV1Beta2Condition, + infrav1.WorkspaceReadyV1Beta2Condition, + infrav1.NetworkReadyV1Beta2Condition, + infrav1.VPCReadyV1Beta2Condition, + infrav1.VPCSubnetReadyV1Beta2Condition, + infrav1.VPCSecurityGroupReadyV1Beta2Condition, + infrav1.VPCLoadBalancerReadyV1Beta2Condition, + infrav1.TransitGatewayReadyV1Beta2Condition, + infrav1.COSInstanceReadyV1Beta2Condition, }, v1beta2conditions.IgnoreTypesIfMissing{ - infrav1beta2.COSInstanceReadyV1Beta2Condition, + infrav1.COSInstanceReadyV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge. v1beta2conditions.CustomMergeStrategy{ MergeStrategy: v1beta2conditions.DefaultMergeStrategy( // Use custom reasons. 
v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( - infrav1beta2.IBMPowerVSClusterNotReadyV1Beta2Reason, - infrav1beta2.IBMPowerVSClusterReadyUnknownV1Beta2Reason, - infrav1beta2.IBMPowerVSClusterReadyV1Beta2Reason, + infrav1.IBMPowerVSClusterNotReadyV1Beta2Reason, + infrav1.IBMPowerVSClusterReadyUnknownV1Beta2Reason, + infrav1.IBMPowerVSClusterReadyV1Beta2Reason, )), ), }, ); err != nil { - return fmt.Errorf("failed to set %s condition: %w", infrav1beta2.IBMPowerVSClusterReadyV1Beta2Condition, err) + return fmt.Errorf("failed to set %s condition: %w", infrav1.IBMPowerVSClusterReadyV1Beta2Condition, err) } return patchHelper.Patch(ctx, ibmPowerVSCluster, - patch.WithOwnedV1Beta2Conditions{Conditions: []string{ - capiv1beta1.PausedV1Beta2Condition, - infrav1beta2.IBMPowerVSClusterReadyV1Beta2Condition, - infrav1beta2.WorkspaceReadyV1Beta2Condition, - infrav1beta2.NetworkReadyV1Beta2Condition, - infrav1beta2.VPCReadyV1Beta2Condition, - infrav1beta2.VPCSubnetReadyV1Beta2Condition, - infrav1beta2.VPCSecurityGroupReadyV1Beta2Condition, - infrav1beta2.TransitGatewayReadyV1Beta2Condition, - infrav1beta2.COSInstanceReadyV1Beta2Condition, + v1beta1patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + clusterv1beta1.PausedV1Beta2Condition, + infrav1.IBMPowerVSClusterReadyV1Beta2Condition, + infrav1.WorkspaceReadyV1Beta2Condition, + infrav1.NetworkReadyV1Beta2Condition, + infrav1.VPCReadyV1Beta2Condition, + infrav1.VPCSubnetReadyV1Beta2Condition, + infrav1.VPCSecurityGroupReadyV1Beta2Condition, + infrav1.TransitGatewayReadyV1Beta2Condition, + infrav1.COSInstanceReadyV1Beta2Condition, }}, ) } @@ -823,12 +825,12 @@ func patchIBMPowerVSCluster(ctx context.Context, patchHelper *patch.Helper, ibmP func (r *IBMPowerVSClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { predicateLog := ctrl.LoggerFrom(ctx).WithValues("controller", "ibmpowervscluster") err := ctrl.NewControllerManagedBy(mgr). 
- For(&infrav1beta2.IBMPowerVSCluster{}). + For(&infrav1.IBMPowerVSCluster{}). WithEventFilter(predicates.ResourceHasFilterLabel(r.Scheme, predicateLog, r.WatchFilterValue)). WithEventFilter(predicates.ResourceIsNotExternallyManaged(r.Scheme, predicateLog)). Watches( - &capiv1beta1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1beta2.GroupVersion.WithKind("IBMPowerVSCluster"), mgr.GetClient(), &infrav1beta2.IBMPowerVSCluster{})), + &clusterv1.Cluster{}, + handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("IBMPowerVSCluster"), mgr.GetClient(), &infrav1.IBMPowerVSCluster{})), builder.WithPredicates(predicates.All(r.Scheme, predicateLog, predicates.ResourceIsChanged(r.Scheme, predicateLog), predicates.ClusterPausedTransitions(r.Scheme, predicateLog), diff --git a/controllers/ibmpowervscluster_controller_test.go b/controllers/ibmpowervscluster_controller_test.go index 80a2f2b05..9fbb7abec 100644 --- a/controllers/ibmpowervscluster_controller_test.go +++ b/controllers/ibmpowervscluster_controller_test.go @@ -19,6 +19,7 @@ package controllers import ( "errors" "fmt" + "sync" "testing" "time" @@ -44,10 +45,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs" powervsmock "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs/mock" @@ -65,11 +67,11 @@ func TestIBMPowerVSClusterReconciler_Reconcile(t *testing.T) { ns, err := 
testEnv.CreateNamespace(ctx, fmt.Sprintf("namespace-%s", util.RandomString(5))) g.Expect(err).To(BeNil()) - powerVSCluster := &infrav1beta2.IBMPowerVSCluster{ + powerVSCluster := &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "powervs-test-", }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ServiceInstanceID: "foo"}, + Spec: infrav1.IBMPowerVSClusterSpec{ServiceInstanceID: "foo"}, } createCluster(g, powerVSCluster, ns.Name) @@ -100,18 +102,18 @@ func TestIBMPowerVSClusterReconciler_Reconcile(t *testing.T) { ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("namespace-%s", util.RandomString(5))) g.Expect(err).To(BeNil()) - powerVSCluster := &infrav1beta2.IBMPowerVSCluster{ + powerVSCluster := &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "powervs-test-", - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: "capi-test", UID: "1", }}}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ServiceInstanceID: "foo"}, + Spec: infrav1.IBMPowerVSClusterSpec{ServiceInstanceID: "foo"}, } createCluster(g, powerVSCluster, ns.Name) @@ -136,12 +138,12 @@ func TestIBMPowerVSClusterReconciler_Reconcile(t *testing.T) { ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("namespace-%s", util.RandomString(5))) g.Expect(err).To(BeNil()) - powerVSCluster := &infrav1beta2.IBMPowerVSCluster{ + powerVSCluster := &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "powervs-test-", - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ServiceInstanceID: "foo"}, + Spec: infrav1.IBMPowerVSClusterSpec{ServiceInstanceID: "foo"}, } createCluster(g, powerVSCluster, ns.Name) @@ -185,18 
+187,18 @@ func TestIBMPowerVSClusterReconciler_Reconcile(t *testing.T) { ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("namespace-%s", util.RandomString(5))) g.Expect(err).To(BeNil()) - powerVSCluster := &infrav1beta2.IBMPowerVSCluster{ + powerVSCluster := &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "powervs-test-", - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: "capi-test", UID: "1", }}}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ServiceInstanceID: "foo"}, + Spec: infrav1.IBMPowerVSClusterSpec{ServiceInstanceID: "foo"}, } createCluster(g, powerVSCluster, ns.Name) @@ -220,21 +222,21 @@ func TestIBMPowerVSClusterReconciler_Reconcile(t *testing.T) { ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("namespace-%s", util.RandomString(5))) g.Expect(err).To(BeNil()) - powerVSCluster := &infrav1beta2.IBMPowerVSCluster{ + powerVSCluster := &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "powervs-test-", - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: "capi-test", UID: "1", }}}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{Zone: ptr.To("zone")}, + Spec: infrav1.IBMPowerVSClusterSpec{Zone: ptr.To("zone")}, } - ownerCluster := &capiv1beta1.Cluster{ + ownerCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-test", Namespace: ns.Name, @@ -268,7 +270,7 @@ func TestIBMPowerVSClusterReconciler_Reconcile(t *testing.T) { }) g.Expect(err).To(BeNil()) - ibmPowerVSCluster := &infrav1beta2.IBMPowerVSCluster{} + ibmPowerVSCluster := 
&infrav1.IBMPowerVSCluster{} g.Eventually(func(gomega Gomega) { gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{ Name: powerVSCluster.GetName(), @@ -284,21 +286,21 @@ func TestIBMPowerVSClusterReconciler_Reconcile(t *testing.T) { ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("namespace-%s", util.RandomString(5))) g.Expect(err).To(BeNil()) - powerVSCluster := &infrav1beta2.IBMPowerVSCluster{ + powerVSCluster := &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "powervs-test-", - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: "capi-test", UID: "1", }}}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{Zone: ptr.To("zone")}, + Spec: infrav1.IBMPowerVSClusterSpec{Zone: ptr.To("zone")}, } - ownerCluster := &capiv1beta1.Cluster{ + ownerCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-test", Namespace: ns.Name, @@ -314,7 +316,7 @@ func TestIBMPowerVSClusterReconciler_Reconcile(t *testing.T) { defer cleanupCluster(g, powerVSCluster, ns) g.Expect(testEnv.Delete(ctx, powerVSCluster)).To(Succeed()) - ibmPowerVSCluster := &infrav1beta2.IBMPowerVSCluster{} + ibmPowerVSCluster := &infrav1.IBMPowerVSCluster{} g.Eventually(func() bool { err := testEnv.Client.Get(ctx, client.ObjectKey{ @@ -353,15 +355,15 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { clusterStatus bool expectedResult ctrl.Result expectedError error - conditions capiv1beta1.Conditions + conditions clusterv1beta1.Conditions }{ { name: "Should add finalizer and reconcile IBMPowerVSCluster", powervsClusterScope: func() *scope.PowerVSClusterScope { return &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: 
metav1.ObjectMeta{ - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, }, }, } @@ -372,9 +374,9 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { name: "Should reconcile IBMPowerVSCluster status as Ready", powervsClusterScope: func() *scope.PowerVSClusterScope { return &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, }, }, } @@ -385,12 +387,12 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { name: "When PowerVS zone does not support PER", powervsClusterScope: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, - Annotations: map[string]string{infrav1beta2.CreateInfrastructureAnnotation: "true"}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, + Annotations: map[string]string{infrav1.CreateInfrastructureAnnotation: "true"}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ Zone: ptr.To("dal10"), }, }, @@ -406,12 +408,12 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { name: "When resource group name is not set", powervsClusterScope: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, - Annotations: map[string]string{infrav1beta2.CreateInfrastructureAnnotation: "true"}, + Finalizers: 
[]string{infrav1.IBMPowerVSClusterFinalizer}, + Annotations: map[string]string{infrav1.CreateInfrastructureAnnotation: "true"}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ Zone: ptr.To("dal10"), }, }, @@ -427,19 +429,19 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { name: "When reconcile PowerVS resource returns requeue as true", powervsClusterScope: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, - Annotations: map[string]string{infrav1beta2.CreateInfrastructureAnnotation: "true"}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, + Annotations: map[string]string{infrav1.CreateInfrastructureAnnotation: "true"}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ Zone: ptr.To("dal10"), - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("rg-id"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, }, @@ -453,7 +455,7 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { mockResourceClient.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{ Name: ptr.To("serviceInstanceName"), ID: ptr.To("serviceInstanceID"), - State: ptr.To(string(infrav1beta2.ServiceInstanceStateProvisioning)), + State: ptr.To(string(infrav1.ServiceInstanceStateProvisioning)), }, nil, nil) clusterScope.ResourceClient = mockResourceClient @@ -469,22 +471,22 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { name: "When reconcile PowerVS and VPC resource 
returns requeue as true", powervsClusterScope: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, - Annotations: map[string]string{infrav1beta2.CreateInfrastructureAnnotation: "true"}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, + Annotations: map[string]string{infrav1.CreateInfrastructureAnnotation: "true"}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ Zone: ptr.To("dal10"), - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("rg-id"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -498,7 +500,7 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { mockResourceClient.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{ Name: ptr.To("serviceInstanceName"), ID: ptr.To("serviceInstanceID"), - State: ptr.To(string(infrav1beta2.ServiceInstanceStateProvisioning)), + State: ptr.To(string(infrav1.ServiceInstanceStateProvisioning)), }, nil, nil) clusterScope.ResourceClient = mockResourceClient @@ -514,19 +516,19 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { name: "When reconcile VPC and PowerVS resource returns error", powervsClusterScope: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: 
[]string{infrav1beta2.IBMPowerVSClusterFinalizer}, - Annotations: map[string]string{infrav1beta2.CreateInfrastructureAnnotation: "true"}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, + Annotations: map[string]string{infrav1.CreateInfrastructureAnnotation: "true"}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ Zone: ptr.To("dal10"), - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("rg-id"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, }, @@ -563,17 +565,17 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { mockResourceClient.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{ Name: ptr.To("serviceInstanceName"), ID: ptr.To("serviceInstanceID"), - State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive)), + State: ptr.To(string(infrav1.ServiceInstanceStateActive)), }, nil, nil) clusterScope.ResourceClient = mockResourceClient mockVPC := vpcmock.NewMockVpc(gomock.NewController(t)) - mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{Status: ptr.To(string(infrav1beta2.VPCLoadBalancerStateActive))}, nil, nil) + mockVPC.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{Status: ptr.To(string(infrav1.VPCLoadBalancerStateActive))}, nil, nil) mockVPC.EXPECT().GetSubnet(gomock.Any()).Return(&vpcv1.Subnet{Name: ptr.To("subnet1"), Status: ptr.To("active")}, nil, nil) mockVPC.EXPECT().GetLoadBalancer(gomock.Any()).Return(&vpcv1.LoadBalancer{ ID: ptr.To("lb-id"), Name: ptr.To("lb"), - ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateActive)), + ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateActive)), }, nil, nil) clusterScope.IBMVPCClient = mockVPC @@ -584,16 +586,16 @@ func 
TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { return clusterScope }, expectedError: errors.New("error getting transit gateway"), - conditions: capiv1beta1.Conditions{ + conditions: clusterv1beta1.Conditions{ getVPCLBReadyCondition(), getNetworkReadyCondition(), getServiceInstanceReadyCondition(), - capiv1beta1.Condition{ - Type: infrav1beta2.TransitGatewayReadyCondition, + clusterv1beta1.Condition{ + Type: infrav1.TransitGatewayReadyCondition, Status: "False", - Severity: capiv1beta1.ConditionSeverityError, + Severity: clusterv1beta1.ConditionSeverityError, LastTransitionTime: metav1.Time{}, - Reason: infrav1beta2.TransitGatewayReconciliationFailedReason, + Reason: infrav1.TransitGatewayReconciliationFailedReason, Message: "failed to get transit gateway: error getting transit gateway", }, getVPCReadyCondition(), @@ -614,7 +616,7 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { mockResourceClient.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{ Name: ptr.To("serviceInstanceName"), ID: ptr.To("serviceInstanceID"), - State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive)), + State: ptr.To(string(infrav1.ServiceInstanceStateActive)), }, nil, nil) clusterScope.ResourceClient = mockResourceClient @@ -624,7 +626,7 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { mockVPC.EXPECT().GetLoadBalancer(gomock.Any()).Return(&vpcv1.LoadBalancer{ ID: ptr.To("lb-id"), Name: ptr.To("lb"), - ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateActive)), + ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateActive)), }, nil, nil) clusterScope.IBMVPCClient = mockVPC @@ -632,7 +634,7 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { mockTransitGateway.EXPECT().GetTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStatePending)), + 
Status: ptr.To(string(infrav1.TransitGatewayStatePending)), }, nil, nil) clusterScope.TransitGatewayClient = mockTransitGateway @@ -644,7 +646,7 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { name: "When reconcile COS service instance returns error", powervsClusterScope: func() *scope.PowerVSClusterScope { powerVSCluster := getPowerVSClusterWithSpecAndStatus() - powerVSCluster.Spec.Ignition = &infrav1beta2.Ignition{Version: "3.4"} + powerVSCluster.Spec.Ignition = &infrav1.Ignition{Version: "3.4"} clusterScope := &scope.PowerVSClusterScope{ IBMPowerVSCluster: powerVSCluster, } @@ -658,13 +660,13 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { return clusterScope }, expectedError: errors.New("error getting instance by name"), - conditions: capiv1beta1.Conditions{ - capiv1beta1.Condition{ - Type: infrav1beta2.COSInstanceReadyCondition, + conditions: clusterv1beta1.Conditions{ + clusterv1beta1.Condition{ + Type: infrav1.COSInstanceReadyCondition, Status: "False", - Severity: capiv1beta1.ConditionSeverityError, + Severity: clusterv1beta1.ConditionSeverityError, LastTransitionTime: metav1.Time{}, - Reason: infrav1beta2.COSInstanceReconciliationFailedReason, + Reason: infrav1.COSInstanceReconciliationFailedReason, Message: "failed to check if COS instance in cloud: failed to get COS service instance: error getting instance by name", }, getVPCLBReadyCondition(), @@ -735,7 +737,7 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { name: "When reconcile is successful", powervsClusterScope: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - Cluster: &capiv1beta1.Cluster{}, + Cluster: &clusterv1.Cluster{}, IBMPowerVSCluster: getPowerVSClusterWithSpecAndStatus(), } clusterScope.IBMPowerVSClient = getMockPowerVS(t) @@ -746,7 +748,7 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { mockVPC.EXPECT().GetLoadBalancer(gomock.Any()).Return(&vpcv1.LoadBalancer{ ID: ptr.To("lb-id"), Name: 
ptr.To("lb"), - ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateActive)), + ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateActive)), Hostname: ptr.To("hostname"), }, nil, nil) clusterScope.IBMVPCClient = mockVPC @@ -775,7 +777,7 @@ func TestIBMPowerVSClusterReconciler_reconcile(t *testing.T) { } g.Expect(res).To(Equal(tc.expectedResult)) g.Expect(powerVSClusterScope.IBMPowerVSCluster.Status.Ready).To(Equal(tc.clusterStatus)) - g.Expect(powerVSClusterScope.IBMPowerVSCluster.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSClusterFinalizer)) + g.Expect(powerVSClusterScope.IBMPowerVSCluster.Finalizers).To(ContainElement(infrav1.IBMPowerVSClusterFinalizer)) if len(tc.conditions) > 1 { ignoreLastTransitionTime := cmp.Transformer("", func(metav1.Time) metav1.Time { return metav1.Time{} @@ -801,7 +803,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { } powervsClusterScope = func() *scope.PowerVSClusterScope { return &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IBMPowerVSCluster", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", @@ -811,15 +813,15 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { Annotations: map[string]string{"powervs.cluster.x-k8s.io/create-infra": "true"}, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: "capi-test", UID: "1", }}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{}, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Spec: infrav1.IBMPowerVSClusterSpec{}, + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, }, @@ -832,7 +834,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("Should reconcile 
successfully if no descendants are found", func(t *testing.T) { g := NewWithT(t) clusterScope = &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IBMPowerVSCluster", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", @@ -840,7 +842,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "capi-powervs-cluster", }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ ServiceInstanceID: "service-instance-1", }, }, @@ -854,7 +856,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("Should reconcile with requeue by deleting the cluster descendants", func(t *testing.T) { g := NewWithT(t) clusterScope = &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IBMPowerVSCluster", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", @@ -862,46 +864,46 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "capi-powervs-cluster", }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ ServiceInstanceID: "service-instance-1", }, }, Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects().Build(), } - powervsImage1 := &infrav1beta2.IBMPowerVSImage{ + powervsImage1 := &infrav1.IBMPowerVSImage{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-image", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: infrav1beta2.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), Kind: "IBMPowerVSCluster", Name: "capi-powervs-cluster", UID: "1", }, }, - Labels: map[string]string{capiv1beta1.ClusterNameLabel: "capi-powervs-cluster"}, + Labels: map[string]string{clusterv1.ClusterNameLabel: "capi-powervs-cluster"}, }, - Spec: infrav1beta2.IBMPowerVSImageSpec{ + Spec: 
infrav1.IBMPowerVSImageSpec{ ClusterName: "capi-powervs-cluster", Object: ptr.To("capi-image.ova.gz"), Region: ptr.To("us-south"), Bucket: ptr.To("capi-bucket"), }, } - powervsImage2 := &infrav1beta2.IBMPowerVSImage{ + powervsImage2 := &infrav1.IBMPowerVSImage{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-image2", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: infrav1beta2.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), Kind: "IBMPowerVSCluster", Name: "capi-powervs-cluster", UID: "1", }, }, - Labels: map[string]string{capiv1beta1.ClusterNameLabel: "capi-powervs-cluster"}, + Labels: map[string]string{clusterv1.ClusterNameLabel: "capi-powervs-cluster"}, }, - Spec: infrav1beta2.IBMPowerVSImageSpec{ + Spec: infrav1.IBMPowerVSImageSpec{ ClusterName: "capi-powervs-cluster", Object: ptr.To("capi-image2.ova.gz"), Region: ptr.To("us-south"), @@ -925,14 +927,14 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete TransitGateway returns error", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.TransitGateway = &infrav1beta2.TransitGatewayStatus{ + clusterScope.IBMPowerVSCluster.Status.TransitGateway = &infrav1.TransitGatewayStatus{ ID: ptr.To("transitgatewayID"), ControllerCreated: ptr.To(true), - PowerVSConnection: &infrav1beta2.ResourceReference{ + PowerVSConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), ID: ptr.To("connectionID"), }, - VPCConnection: &infrav1beta2.ResourceReference{ + VPCConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), ID: ptr.To("connectionID"), }, @@ -940,7 +942,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { tgw := &tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable))} + Status: ptr.To(string(infrav1.TransitGatewayStateAvailable))} mockPowerVS = 
powervsmock.NewMockPowerVS(gomock.NewController(t)) mockPowerVS.EXPECT().WithClients(gomock.Any()) clusterScope.IBMPowerVSClient = mockPowerVS @@ -961,14 +963,14 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete TransitGateway returns requeue as true", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.TransitGateway = &infrav1beta2.TransitGatewayStatus{ + clusterScope.IBMPowerVSCluster.Status.TransitGateway = &infrav1.TransitGatewayStatus{ ID: ptr.To("transitgatewayID"), ControllerCreated: ptr.To(true), - PowerVSConnection: &infrav1beta2.ResourceReference{ + PowerVSConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), ID: ptr.To("connectionID"), }, - VPCConnection: &infrav1beta2.ResourceReference{ + VPCConnection: &infrav1.ResourceReference{ ControllerCreated: ptr.To(true), ID: ptr.To("connectionID"), }, @@ -976,7 +978,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { tgw := &tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStateDeletePending))} + Status: ptr.To(string(infrav1.TransitGatewayStateDeletePending))} mockPowerVS = powervsmock.NewMockPowerVS(gomock.NewController(t)) mockPowerVS.EXPECT().WithClients(gomock.Any()) clusterScope.IBMPowerVSClient = mockPowerVS @@ -995,7 +997,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete LoadBalancer returns error", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.LoadBalancers = map[string]infrav1beta2.VPCLoadBalancerStatus{ + clusterScope.IBMPowerVSCluster.Status.LoadBalancers = map[string]infrav1.VPCLoadBalancerStatus{ "lb": { ID: ptr.To("lb-id"), ControllerCreated: ptr.To(true), @@ -1012,7 +1014,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { 
mockVpc.EXPECT().GetLoadBalancer(gomock.Any()).Return(&vpcv1.LoadBalancer{ ID: ptr.To("lb-id"), Name: ptr.To("lb"), - ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateActive)), + ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateActive)), }, nil, nil) mockVpc.EXPECT().DeleteLoadBalancer(gomock.Any()).Return(&core.DetailedResponse{}, errors.New("failed to delete load balancer")) clusterScope.IBMVPCClient = mockVpc @@ -1024,7 +1026,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete LoadBalancer returns requeue as true", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.LoadBalancers = map[string]infrav1beta2.VPCLoadBalancerStatus{ + clusterScope.IBMPowerVSCluster.Status.LoadBalancers = map[string]infrav1.VPCLoadBalancerStatus{ "lb": { ID: ptr.To("lb-id"), ControllerCreated: ptr.To(true), @@ -1041,7 +1043,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { mockVpc.EXPECT().GetLoadBalancer(gomock.Any()).Return(&vpcv1.LoadBalancer{ ID: ptr.To("lb-id"), Name: ptr.To("lb"), - ProvisioningStatus: ptr.To(string(infrav1beta2.VPCLoadBalancerStateDeletePending)), + ProvisioningStatus: ptr.To(string(infrav1.VPCLoadBalancerStateDeletePending)), }, nil, nil) clusterScope.IBMVPCClient = mockVpc result, err := reconciler.reconcileDelete(ctx, clusterScope) @@ -1052,7 +1054,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete VPC security group returns error", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPCSecurityGroups = map[string]infrav1beta2.VPCSecurityGroupStatus{ + clusterScope.IBMPowerVSCluster.Status.VPCSecurityGroups = map[string]infrav1.VPCSecurityGroupStatus{ "sc": { ID: ptr.To("sc-id"), ControllerCreated: ptr.To(true), @@ -1080,7 +1082,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete VPC subnet 
returns error", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPCSubnet = map[string]infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status.VPCSubnet = map[string]infrav1.ResourceReference{ "subent1": { ID: ptr.To("subent1"), ControllerCreated: ptr.To(true), @@ -1105,7 +1107,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete VPC subnet returns requeue as true", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPCSubnet = map[string]infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status.VPCSubnet = map[string]infrav1.ResourceReference{ "subent1": { ID: ptr.To("subent1"), ControllerCreated: ptr.To(true), @@ -1118,7 +1120,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { clusterScope.ResourceClient = mockResourceClient clusterScope.TransitGatewayClient = mockTransitGateway mockVpc = vpcmock.NewMockVpc(gomock.NewController(t)) - mockVpc.EXPECT().GetSubnet(gomock.Any()).Return(&vpcv1.Subnet{Name: ptr.To("subnet1"), Status: ptr.To(string(infrav1beta2.VPCSubnetStateDeleting))}, nil, nil) + mockVpc.EXPECT().GetSubnet(gomock.Any()).Return(&vpcv1.Subnet{Name: ptr.To("subnet1"), Status: ptr.To(string(infrav1.VPCSubnetStateDeleting))}, nil, nil) clusterScope.IBMVPCClient = mockVpc result, err := reconciler.reconcileDelete(ctx, clusterScope) g.Expect(err).To(BeNil()) @@ -1128,7 +1130,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete VPC returns error", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPC = &infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status.VPC = &infrav1.ResourceReference{ ID: ptr.To("vpcid"), ControllerCreated: ptr.To(true), } @@ -1151,7 +1153,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete 
VPC returns requeue as true", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.VPC = &infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status.VPC = &infrav1.ResourceReference{ ID: ptr.To("vpcid"), ControllerCreated: ptr.To(true), } @@ -1162,7 +1164,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { clusterScope.ResourceClient = mockResourceClient clusterScope.TransitGatewayClient = mockTransitGateway mockVpc = vpcmock.NewMockVpc(gomock.NewController(t)) - mockVpc.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{ID: ptr.To("vpcid"), Status: ptr.To(string(infrav1beta2.VPCStateDeleting))}, nil, nil) + mockVpc.EXPECT().GetVPC(gomock.Any()).Return(&vpcv1.VPC{ID: ptr.To("vpcid"), Status: ptr.To(string(infrav1.VPCStateDeleting))}, nil, nil) clusterScope.IBMVPCClient = mockVpc result, err := reconciler.reconcileDelete(ctx, clusterScope) g.Expect(err).To(BeNil()) @@ -1172,12 +1174,12 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete DHCP returns error", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status = infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status = infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), ControllerCreated: ptr.To(false), }, - DHCPServer: &infrav1beta2.ResourceReference{ + DHCPServer: &infrav1.ResourceReference{ ID: ptr.To("DHCPServerID"), ControllerCreated: ptr.To(true), }, @@ -1186,7 +1188,7 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { mockPowerVS.EXPECT().WithClients(gomock.Any()) mockPowerVS.EXPECT().GetDHCPServer(gomock.Any()).Return(&models.DHCPServerDetail{ ID: ptr.To("dhcpID"), - Status: ptr.To(string(infrav1beta2.DHCPServerStateActive)), + Status: ptr.To(string(infrav1.DHCPServerStateActive)), }, nil) 
mockPowerVS.EXPECT().DeleteDHCPServer(gomock.Any()).Return(errors.New("failed to delete DHCP server")) clusterScope.IBMPowerVSClient = mockPowerVS @@ -1203,8 +1205,8 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete ServiceInstance returns error", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status = infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status = infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), ControllerCreated: ptr.To(true), }, @@ -1233,8 +1235,8 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete ServiceInstance returns requeue as true", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status = infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status = infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), ControllerCreated: ptr.To(true), }, @@ -1262,13 +1264,13 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When delete COSInstance returns error", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status.COSInstance = &infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status.COSInstance = &infrav1.ResourceReference{ ID: ptr.To("CosInstanceID"), ControllerCreated: ptr.To(true), } - clusterScope.IBMPowerVSCluster.Spec = infrav1beta2.IBMPowerVSClusterSpec{ + clusterScope.IBMPowerVSCluster.Spec = infrav1.IBMPowerVSClusterSpec{ ServiceInstanceID: "service-instance-1", - Ignition: &infrav1beta2.Ignition{Version: "3.4"}, + Ignition: &infrav1.Ignition{Version: "3.4"}, } mockPowerVS = 
powervsmock.NewMockPowerVS(gomock.NewController(t)) mockPowerVS.EXPECT().WithClients(gomock.Any()) @@ -1293,14 +1295,14 @@ func TestIBMPowerVSClusterReconciler_delete(t *testing.T) { t.Run("When reconcile delete is successful", func(t *testing.T) { g := NewWithT(t) clusterScope = powervsClusterScope() - clusterScope.IBMPowerVSCluster.Status = infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + clusterScope.IBMPowerVSCluster.Status = infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, } - clusterScope.IBMPowerVSCluster.Spec = infrav1beta2.IBMPowerVSClusterSpec{ + clusterScope.IBMPowerVSCluster.Spec = infrav1.IBMPowerVSClusterSpec{ ServiceInstanceID: "service-instance-1", - Ignition: &infrav1beta2.Ignition{Version: "3.4"}, + Ignition: &infrav1.Ignition{Version: "3.4"}, } mockPowerVS = powervsmock.NewMockPowerVS(gomock.NewController(t)) mockPowerVS.EXPECT().WithClients(gomock.Any()) @@ -1322,13 +1324,13 @@ func TestReconcileVPCResources(t *testing.T) { name string powerVSClusterScopeFunc func() *scope.PowerVSClusterScope reconcileResult reconcileResult - conditions capiv1beta1.Conditions + conditions clusterv1beta1.Conditions }{ { name: "when ReconcileVPC returns error", powerVSClusterScopeFunc: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, } mockVPC := vpcmock.NewMockVpc(gomock.NewController(t)) mockVPC.EXPECT().GetVPCByName(gomock.Any()).Return(nil, errors.New("vpc not found")) @@ -1338,13 +1340,13 @@ func TestReconcileVPCResources(t *testing.T) { reconcileResult: reconcileResult{ error: errors.New("vpc not found"), }, - conditions: capiv1beta1.Conditions{ - capiv1beta1.Condition{ - Type: infrav1beta2.VPCReadyCondition, + conditions: clusterv1beta1.Conditions{ + clusterv1beta1.Condition{ + Type: infrav1.VPCReadyCondition, 
Status: "False", - Severity: capiv1beta1.ConditionSeverityError, + Severity: clusterv1beta1.ConditionSeverityError, LastTransitionTime: metav1.Time{}, - Reason: infrav1beta2.VPCReconciliationFailedReason, + Reason: infrav1.VPCReconciliationFailedReason, Message: "failed to check if VPC exists: failed to get VPC: error fetching VPC details with name: vpc not found", }, }, @@ -1353,9 +1355,9 @@ func TestReconcileVPCResources(t *testing.T) { name: "when ReconcileVPC returns requeue as true", powerVSClusterScopeFunc: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -1376,14 +1378,14 @@ func TestReconcileVPCResources(t *testing.T) { name: "when Reconciling VPC subnets returns error", powerVSClusterScopeFunc: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{ Region: ptr.To("us-south"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -1399,14 +1401,14 @@ func TestReconcileVPCResources(t *testing.T) { error: errors.New("vpc subnet not found"), }, - conditions: capiv1beta1.Conditions{ + conditions: clusterv1beta1.Conditions{ getVPCReadyCondition(), - capiv1beta1.Condition{ - Type: infrav1beta2.VPCSubnetReadyCondition, + clusterv1beta1.Condition{ + Type: infrav1.VPCSubnetReadyCondition, Status: "False", - 
Severity: capiv1beta1.ConditionSeverityError, + Severity: clusterv1beta1.ConditionSeverityError, LastTransitionTime: metav1.Time{}, - Reason: infrav1beta2.VPCSubnetReconciliationFailedReason, + Reason: infrav1.VPCSubnetReconciliationFailedReason, Message: "error checking VPC subnet with name: vpc subnet not found", }, }, @@ -1415,17 +1417,17 @@ func TestReconcileVPCResources(t *testing.T) { name: "when Reconciling VPC subnets returns requeue as true", powerVSClusterScopeFunc: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("rg-id"), }, - VPC: &infrav1beta2.VPCResourceReference{ + VPC: &infrav1.VPCResourceReference{ Region: ptr.To("us-south"), }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -1444,7 +1446,7 @@ func TestReconcileVPCResources(t *testing.T) { Requeue: true, }, }, - conditions: capiv1beta1.Conditions{ + conditions: clusterv1beta1.Conditions{ getVPCReadyCondition(), }, }, @@ -1452,24 +1454,24 @@ func TestReconcileVPCResources(t *testing.T) { name: "when Reconciling VPC security group returns error", powerVSClusterScopeFunc: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{ Region: ptr.To("us-south"), }, - VPCSubnets: []infrav1beta2.Subnet{ + VPCSubnets: []infrav1.Subnet{ { 
ID: ptr.To("subnet-id"), }, }, - VPCSecurityGroups: []infrav1beta2.VPCSecurityGroup{ + VPCSecurityGroups: []infrav1.VPCSecurityGroup{ { Name: ptr.To("security-group"), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -1486,14 +1488,14 @@ func TestReconcileVPCResources(t *testing.T) { error: errors.New("failed to validate existing security group: vpc security group not found"), }, - conditions: capiv1beta1.Conditions{ + conditions: clusterv1beta1.Conditions{ getVPCReadyCondition(), - capiv1beta1.Condition{ - Type: infrav1beta2.VPCSecurityGroupReadyCondition, + clusterv1beta1.Condition{ + Type: infrav1.VPCSecurityGroupReadyCondition, Status: "False", - Severity: capiv1beta1.ConditionSeverityError, + Severity: clusterv1beta1.ConditionSeverityError, LastTransitionTime: metav1.Time{}, - Reason: infrav1beta2.VPCSecurityGroupReconciliationFailedReason, + Reason: infrav1.VPCSecurityGroupReconciliationFailedReason, Message: "failed to validate existing security group: vpc security group not found", }, getVPCSubnetReadyCondition(), @@ -1503,24 +1505,24 @@ func TestReconcileVPCResources(t *testing.T) { name: "when Reconciling LoadBalancer returns error", powerVSClusterScopeFunc: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{ Region: ptr.To("us-south"), }, - VPCSubnets: []infrav1beta2.Subnet{ + VPCSubnets: []infrav1.Subnet{ { ID: ptr.To("subnet-id"), }, }, - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To("lb-id"), }, }, }, - Status: 
infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -1537,13 +1539,13 @@ func TestReconcileVPCResources(t *testing.T) { error: errors.New("load balancer not found"), }, - conditions: capiv1beta1.Conditions{ - capiv1beta1.Condition{ - Type: infrav1beta2.LoadBalancerReadyCondition, + conditions: clusterv1beta1.Conditions{ + clusterv1beta1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, Status: "False", - Severity: capiv1beta1.ConditionSeverityError, + Severity: clusterv1beta1.ConditionSeverityError, LastTransitionTime: metav1.Time{}, - Reason: infrav1beta2.LoadBalancerReconciliationFailedReason, + Reason: infrav1.LoadBalancerReconciliationFailedReason, Message: "failed to fetch load balancer details: load balancer not found", }, getVPCReadyCondition(), @@ -1555,24 +1557,24 @@ func TestReconcileVPCResources(t *testing.T) { name: "when Reconciling LoadBalancer returns with loadbalancer status as ready", powerVSClusterScopeFunc: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - VPC: &infrav1beta2.VPCResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + VPC: &infrav1.VPCResourceReference{ Region: ptr.To("us-south"), }, - VPCSubnets: []infrav1beta2.Subnet{ + VPCSubnets: []infrav1.Subnet{ { ID: ptr.To("subnet-id"), }, }, - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To("lb-id"), }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - VPC: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, }, @@ -1589,7 +1591,7 @@ func TestReconcileVPCResources(t *testing.T) { clusterScope.IBMVPCClient = mockVPC return 
clusterScope }, - conditions: capiv1beta1.Conditions{ + conditions: clusterv1beta1.Conditions{ getVPCLBReadyCondition(), getVPCReadyCondition(), getVPCSGReadyCondition(), @@ -1633,15 +1635,15 @@ func TestReconcilePowerVSResources(t *testing.T) { name string powerVSClusterScopeFunc func() *scope.PowerVSClusterScope reconcileResult reconcileResult - conditions capiv1beta1.Conditions + conditions clusterv1beta1.Conditions }{ { name: "When Reconciling PowerVS service instance returns error", powerVSClusterScopeFunc: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, }, @@ -1656,13 +1658,13 @@ func TestReconcilePowerVSResources(t *testing.T) { error: errors.New("error getting resource instance"), }, - conditions: capiv1beta1.Conditions{ - capiv1beta1.Condition{ - Type: infrav1beta2.ServiceInstanceReadyCondition, + conditions: clusterv1beta1.Conditions{ + clusterv1beta1.Condition{ + Type: infrav1.ServiceInstanceReadyCondition, Status: "False", - Severity: capiv1beta1.ConditionSeverityError, + Severity: clusterv1beta1.ConditionSeverityError, LastTransitionTime: metav1.Time{}, - Reason: infrav1beta2.ServiceInstanceReconciliationFailedReason, + Reason: infrav1.ServiceInstanceReconciliationFailedReason, Message: "failed to fetch service instance details: error getting resource instance", }, }, @@ -1671,16 +1673,16 @@ func TestReconcilePowerVSResources(t *testing.T) { name: "When Reconciling PowerVS service instance returns requeue as true", powerVSClusterScopeFunc: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Status: 
infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, }, }, } mockResourceController := resourceclientmock.NewMockResourceController(gomock.NewController(t)) - mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{State: ptr.To(string(infrav1beta2.ServiceInstanceStateProvisioning)), Name: ptr.To("serviceInstanceName")}, nil, nil) + mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{State: ptr.To(string(infrav1.ServiceInstanceStateProvisioning)), Name: ptr.To("serviceInstanceName")}, nil, nil) clusterScope.ResourceClient = mockResourceController return clusterScope }, @@ -1694,13 +1696,13 @@ func TestReconcilePowerVSResources(t *testing.T) { name: "When Reconciling network returns error", powerVSClusterScopeFunc: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ ServiceInstanceID: "serviceInstanceID", }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - Network: &infrav1beta2.ResourceReference{ID: ptr.To("NetworkID")}, - ServiceInstance: &infrav1beta2.ResourceReference{ID: ptr.To("serviceInstanceID")}, + Status: infrav1.IBMPowerVSClusterStatus{ + Network: &infrav1.ResourceReference{ID: ptr.To("NetworkID")}, + ServiceInstance: &infrav1.ResourceReference{ID: ptr.To("serviceInstanceID")}, }, }, } @@ -1708,7 +1710,7 @@ func TestReconcilePowerVSResources(t *testing.T) { mockPowerVS.EXPECT().GetNetworkByID(gomock.Any()).Return(nil, errors.New("error getting network")) mockPowerVS.EXPECT().WithClients(gomock.Any()) mockResourceController 
:= resourceclientmock.NewMockResourceController(gomock.NewController(t)) - mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive)), Name: ptr.To("serviceInstanceName")}, nil, nil) + mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{State: ptr.To(string(infrav1.ServiceInstanceStateActive)), Name: ptr.To("serviceInstanceName")}, nil, nil) clusterScope.ResourceClient = mockResourceController clusterScope.IBMPowerVSClient = mockPowerVS return clusterScope @@ -1716,13 +1718,13 @@ func TestReconcilePowerVSResources(t *testing.T) { reconcileResult: reconcileResult{ error: errors.New("error getting network"), }, - conditions: capiv1beta1.Conditions{ - capiv1beta1.Condition{ - Type: infrav1beta2.NetworkReadyCondition, + conditions: clusterv1beta1.Conditions{ + clusterv1beta1.Condition{ + Type: infrav1.NetworkReadyCondition, Status: "False", - Severity: capiv1beta1.ConditionSeverityError, + Severity: clusterv1beta1.ConditionSeverityError, LastTransitionTime: metav1.Time{}, - Reason: infrav1beta2.NetworkReconciliationFailedReason, + Reason: infrav1.NetworkReconciliationFailedReason, Message: "failed to fetch network by ID: error getting network", }, getServiceInstanceReadyCondition(), @@ -1732,13 +1734,13 @@ func TestReconcilePowerVSResources(t *testing.T) { name: "When reconcile network returns with DHCP server in active state", powerVSClusterScopeFunc: func() *scope.PowerVSClusterScope { clusterScope := &scope.PowerVSClusterScope{ - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ ServiceInstanceID: "serviceInstanceID", }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - Network: &infrav1beta2.ResourceReference{ID: ptr.To("netID")}, - ServiceInstance: 
&infrav1beta2.ResourceReference{ID: ptr.To("serviceInstanceID")}, + Status: infrav1.IBMPowerVSClusterStatus{ + Network: &infrav1.ResourceReference{ID: ptr.To("netID")}, + ServiceInstance: &infrav1.ResourceReference{ID: ptr.To("serviceInstanceID")}, }, }, } @@ -1746,12 +1748,12 @@ func TestReconcilePowerVSResources(t *testing.T) { mockPowerVS.EXPECT().GetNetworkByID(gomock.Any()).Return(&models.Network{NetworkID: ptr.To("netID")}, nil) mockPowerVS.EXPECT().WithClients(gomock.Any()) mockResourceController := resourceclientmock.NewMockResourceController(gomock.NewController(t)) - mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{State: ptr.To(string(infrav1beta2.ServiceInstanceStateActive)), Name: ptr.To("serviceInstanceName")}, nil, nil) + mockResourceController.EXPECT().GetResourceInstance(gomock.Any()).Return(&resourcecontrollerv2.ResourceInstance{State: ptr.To(string(infrav1.ServiceInstanceStateActive)), Name: ptr.To("serviceInstanceName")}, nil, nil) clusterScope.ResourceClient = mockResourceController clusterScope.IBMPowerVSClient = mockPowerVS return clusterScope }, - conditions: capiv1beta1.Conditions{ + conditions: clusterv1beta1.Conditions{ getNetworkReadyCondition(), getServiceInstanceReadyCondition(), }, @@ -1788,78 +1790,78 @@ func TestReconcilePowerVSResources(t *testing.T) { } } -func getVPCReadyCondition() capiv1beta1.Condition { - return capiv1beta1.Condition{ - Type: infrav1beta2.VPCReadyCondition, +func getVPCReadyCondition() clusterv1beta1.Condition { + return clusterv1beta1.Condition{ + Type: infrav1.VPCReadyCondition, Status: "True", } } -func getVPCSubnetReadyCondition() capiv1beta1.Condition { - return capiv1beta1.Condition{ - Type: infrav1beta2.VPCSubnetReadyCondition, +func getVPCSubnetReadyCondition() clusterv1beta1.Condition { + return clusterv1beta1.Condition{ + Type: infrav1.VPCSubnetReadyCondition, Status: "True", } } -func getVPCSGReadyCondition() capiv1beta1.Condition { - 
return capiv1beta1.Condition{ - Type: infrav1beta2.VPCSecurityGroupReadyCondition, +func getVPCSGReadyCondition() clusterv1beta1.Condition { + return clusterv1beta1.Condition{ + Type: infrav1.VPCSecurityGroupReadyCondition, Status: "True", } } -func getVPCLBReadyCondition() capiv1beta1.Condition { - return capiv1beta1.Condition{ - Type: infrav1beta2.LoadBalancerReadyCondition, +func getVPCLBReadyCondition() clusterv1beta1.Condition { + return clusterv1beta1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, Status: "True", } } -func getTGReadyCondition() capiv1beta1.Condition { - return capiv1beta1.Condition{ - Type: infrav1beta2.TransitGatewayReadyCondition, +func getTGReadyCondition() clusterv1beta1.Condition { + return clusterv1beta1.Condition{ + Type: infrav1.TransitGatewayReadyCondition, Status: "True", } } -func getPowerVSClusterWithSpecAndStatus() *infrav1beta2.IBMPowerVSCluster { - return &infrav1beta2.IBMPowerVSCluster{ +func getPowerVSClusterWithSpecAndStatus() *infrav1.IBMPowerVSCluster { + return &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{infrav1beta2.IBMPowerVSClusterFinalizer}, - Annotations: map[string]string{infrav1beta2.CreateInfrastructureAnnotation: "true"}, + Finalizers: []string{infrav1.IBMPowerVSClusterFinalizer}, + Annotations: map[string]string{infrav1.CreateInfrastructureAnnotation: "true"}, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ Zone: ptr.To("dal10"), - ResourceGroup: &infrav1beta2.IBMPowerVSResourceReference{ + ResourceGroup: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("rg-id"), }, - VPC: &infrav1beta2.VPCResourceReference{ + VPC: &infrav1.VPCResourceReference{ Region: ptr.To("us-south"), }, - VPCSubnets: []infrav1beta2.Subnet{ + VPCSubnets: []infrav1.Subnet{ { ID: ptr.To("subnet-id"), }, }, - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { ID: ptr.To("lb-id"), Public: ptr.To(true), }, }, }, - 
Status: infrav1beta2.IBMPowerVSClusterStatus{ - ServiceInstance: &infrav1beta2.ResourceReference{ + Status: infrav1.IBMPowerVSClusterStatus{ + ServiceInstance: &infrav1.ResourceReference{ ID: ptr.To("serviceInstanceID"), }, - Network: &infrav1beta2.ResourceReference{ + Network: &infrav1.ResourceReference{ ID: ptr.To("NetworkID"), }, - VPC: &infrav1beta2.ResourceReference{ + VPC: &infrav1.ResourceReference{ ID: ptr.To("vpcID"), }, - TransitGateway: &infrav1beta2.TransitGatewayStatus{ + TransitGateway: &infrav1.TransitGatewayStatus{ ID: ptr.To("transitGatewayID"), }, }, @@ -1911,7 +1913,7 @@ func getMockTransitGateway(t *testing.T) *tgmock.MockTransitGateway { mockTransitGateway.EXPECT().GetTransitGateway(gomock.Any()).Return(&tgapiv1.TransitGateway{ Name: ptr.To("transitGateway"), ID: ptr.To("transitGatewayID"), - Status: ptr.To(string(infrav1beta2.TransitGatewayStateAvailable)), + Status: ptr.To(string(infrav1.TransitGatewayStateAvailable)), }, nil, nil) mockTransitGateway.EXPECT().ListTransitGatewayConnections(gomock.Any()).Return(&tgapiv1.TransitGatewayConnectionCollection{ Connections: []tgapiv1.TransitGatewayConnectionCust{ @@ -1919,25 +1921,25 @@ func getMockTransitGateway(t *testing.T) *tgmock.MockTransitGateway { Name: ptr.To("vpc_connection"), NetworkID: ptr.To("vpc_crn"), NetworkType: ptr.To("vpc"), - Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached)), + Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached)), }, { Name: ptr.To("powervs_connection"), NetworkID: ptr.To("powervs_crn"), NetworkType: ptr.To("power_virtual_server"), - Status: ptr.To(string(infrav1beta2.TransitGatewayConnectionStateAttached)), + Status: ptr.To(string(infrav1.TransitGatewayConnectionStateAttached)), }, }, }, nil, nil) return mockTransitGateway } -func createCluster(g *WithT, powervsCluster *infrav1beta2.IBMPowerVSCluster, namespace string) { +func createCluster(g *WithT, powervsCluster *infrav1.IBMPowerVSCluster, namespace string) { if 
powervsCluster != nil { powervsCluster.Namespace = namespace g.Expect(testEnv.Create(ctx, powervsCluster)).To(Succeed()) g.Eventually(func() bool { - cluster := &infrav1beta2.IBMPowerVSCluster{} + cluster := &infrav1.IBMPowerVSCluster{} key := client.ObjectKey{ Name: powervsCluster.Name, Namespace: namespace, @@ -1948,7 +1950,7 @@ func createCluster(g *WithT, powervsCluster *infrav1beta2.IBMPowerVSCluster, nam } } -func cleanupCluster(g *WithT, powervsCluster *infrav1beta2.IBMPowerVSCluster, namespace *corev1.Namespace) { +func cleanupCluster(g *WithT, powervsCluster *infrav1.IBMPowerVSCluster, namespace *corev1.Namespace) { if powervsCluster != nil { func(do ...client.Object) { g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) @@ -1956,15 +1958,15 @@ func cleanupCluster(g *WithT, powervsCluster *infrav1beta2.IBMPowerVSCluster, na } } -func getServiceInstanceReadyCondition() capiv1beta1.Condition { - return capiv1beta1.Condition{ - Type: infrav1beta2.ServiceInstanceReadyCondition, +func getServiceInstanceReadyCondition() clusterv1beta1.Condition { + return clusterv1beta1.Condition{ + Type: infrav1.ServiceInstanceReadyCondition, Status: "True", } } -func getNetworkReadyCondition() capiv1beta1.Condition { - return capiv1beta1.Condition{ - Type: infrav1beta2.NetworkReadyCondition, +func getNetworkReadyCondition() clusterv1beta1.Condition { + return clusterv1beta1.Condition{ + Type: infrav1.NetworkReadyCondition, Status: "True", } } diff --git a/controllers/ibmpowervsimage_controller.go b/controllers/ibmpowervsimage_controller.go index 2ad94ac6f..cdfcb1cb4 100644 --- a/controllers/ibmpowervsimage_controller.go +++ b/controllers/ibmpowervsimage_controller.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "errors" "fmt" "time" @@ -26,6 +27,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" 
"k8s.io/klog/v2" @@ -33,11 +35,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" clusterv1util "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck + v1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" //nolint:staticcheck + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck + "sigs.k8s.io/cluster-api/util/finalizers" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/endpoints" ) @@ -57,73 +63,91 @@ type IBMPowerVSImageReconciler struct { func (r *IBMPowerVSImageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { log := ctrl.LoggerFrom(ctx) - ibmImage := &infrav1beta2.IBMPowerVSImage{} - err := r.Get(ctx, req.NamespacedName, ibmImage) + log.Info("Reconciling IBMPowerVSImage") + defer log.Info("Finished reconciling IBMPowerVSImage") + + // Fetch the IBMPowerVSImage. + ibmPowerVSImage := &infrav1.IBMPowerVSImage{} + err := r.Client.Get(ctx, req.NamespacedName, ibmPowerVSImage) if err != nil { if apierrors.IsNotFound(err) { + log.Info("IBMPowerVSImage not found") return ctrl.Result{}, nil } + return ctrl.Result{}, fmt.Errorf("failed to get IBMPowerVSImage: %w", err) + } + + // Add finalizer first if not set to avoid the race condition between init and delete. 
+ if finalizerAdded, err := finalizers.EnsureFinalizer(ctx, r.Client, ibmPowerVSImage, infrav1.IBMPowerVSImageFinalizer); err != nil || finalizerAdded { return ctrl.Result{}, err } - var cluster *infrav1beta2.IBMPowerVSCluster + var cluster *infrav1.IBMPowerVSCluster scopeParams := scope.PowerVSImageScopeParams{ Client: r.Client, - Logger: log, - IBMPowerVSImage: ibmImage, + IBMPowerVSImage: ibmPowerVSImage, ServiceEndpoint: r.ServiceEndpoint, } // Externally managed clusters might not be available during image deletion. Get the cluster only when image is still not deleted. - if ibmImage.DeletionTimestamp.IsZero() { - cluster, err = scope.GetClusterByName(ctx, r.Client, ibmImage.Namespace, ibmImage.Spec.ClusterName) + if ibmPowerVSImage.DeletionTimestamp.IsZero() { + cluster, err = scope.GetClusterByName(ctx, r.Client, ibmPowerVSImage.Namespace, ibmPowerVSImage.Spec.ClusterName) if err != nil { return ctrl.Result{}, err } scopeParams.Zone = cluster.Spec.Zone } - // Create the scope - imageScope, err := scope.NewPowerVSImageScope(scopeParams) + // Initialize the patch helper + patchHelper, err := v1beta1patch.NewHelper(ibmPowerVSImage, r.Client) if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to create scope: %w", err) + return ctrl.Result{}, fmt.Errorf("failed to init patch helper: %w", err) } - // Always close the scope when exiting this function so we can persist any IBMPowerVSImage changes. + // Always attempt to Patch the IBMPowerVSImage object and status after each reconciliation. 
defer func() { - if imageScope != nil { - if err := imageScope.Close(); err != nil && reterr == nil { - reterr = err - } + if err := patchIBMPowerVSImage(ctx, patchHelper, ibmPowerVSImage); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() + // Create the scope + imageScope, err := scope.NewPowerVSImageScope(ctx, scopeParams) + if err != nil { + if errors.Is(err, scope.ErrServiceInsanceNotInActiveState) { + v1beta2conditions.Set(imageScope.IBMPowerVSImage, metav1.Condition{ + Type: infrav1.WorkspaceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.WorkspaceNotReadyV1Beta2Reason, + }) + } + return ctrl.Result{}, fmt.Errorf("failed to create scope: %w", err) + } + // Handle deleted clusters. - if !ibmImage.DeletionTimestamp.IsZero() { - return r.reconcileDelete(imageScope) + if !ibmPowerVSImage.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, imageScope) } - return r.reconcile(cluster, imageScope) + return r.reconcile(ctx, cluster, imageScope) } -func (r *IBMPowerVSImageReconciler) reconcile(cluster *infrav1beta2.IBMPowerVSCluster, imageScope *scope.PowerVSImageScope) (ctrl.Result, error) { - if controllerutil.AddFinalizer(imageScope.IBMPowerVSImage, infrav1beta2.IBMPowerVSImageFinalizer) { - return ctrl.Result{}, nil - } +func (r *IBMPowerVSImageReconciler) reconcile(ctx context.Context, cluster *infrav1.IBMPowerVSCluster, imageScope *scope.PowerVSImageScope) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) // Create new labels section for IBMPowerVSImage metadata if nil. 
if imageScope.IBMPowerVSImage.Labels == nil { imageScope.IBMPowerVSImage.Labels = make(map[string]string) } - if _, ok := imageScope.IBMPowerVSImage.Labels[capiv1beta1.ClusterNameLabel]; !ok { - imageScope.IBMPowerVSImage.Labels[capiv1beta1.ClusterNameLabel] = imageScope.IBMPowerVSImage.Spec.ClusterName + if _, ok := imageScope.IBMPowerVSImage.Labels[clusterv1.ClusterNameLabel]; !ok { + imageScope.IBMPowerVSImage.Labels[clusterv1.ClusterNameLabel] = imageScope.IBMPowerVSImage.Spec.ClusterName } if r.shouldAdopt(*imageScope.IBMPowerVSImage) { - imageScope.Info("Image Controller has not yet set OwnerRef") + log.Info("Image Controller has not yet set OwnerRef") imageScope.IBMPowerVSImage.OwnerReferences = clusterv1util.EnsureOwnerRef(imageScope.IBMPowerVSImage.OwnerReferences, metav1.OwnerReference{ - APIVersion: infrav1beta2.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), Kind: "IBMPowerVSCluster", Name: cluster.Name, UID: cluster.UID, @@ -131,128 +155,243 @@ func (r *IBMPowerVSImageReconciler) reconcile(cluster *infrav1beta2.IBMPowerVSCl return ctrl.Result{}, nil } + v1beta2conditions.Set(imageScope.IBMPowerVSImage, metav1.Condition{ + Type: infrav1.WorkspaceReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.WorkspaceReadyV1Beta2Reason, + }) + if jobID := imageScope.GetJobID(); jobID != "" { job, err := imageScope.IBMPowerVSClient.GetJob(jobID) if err != nil { - imageScope.Info("Unable to get job details") + log.Info("Unable to get job details", "jobID", jobID) return ctrl.Result{RequeueAfter: 2 * time.Minute}, err } - switch *job.Status.State { - case "completed": - conditions.MarkTrue(imageScope.IBMPowerVSImage, infrav1beta2.ImageImportedCondition) - case "failed": + + imageScope.SetImageState(*job.Status.State) + switch imageScope.GetImageState() { + case infrav1.PowerVSImageStateCompleted: + v1beta1conditions.MarkTrue(imageScope.IBMPowerVSImage, infrav1.ImageImportedCondition) + 
v1beta2conditions.Set(imageScope.IBMPowerVSImage, metav1.Condition{ + Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.IBMPowerVSImageReadyV1Beta2Reason, + }) + case infrav1.PowerVSImageStateFailed: imageScope.SetNotReady() - imageScope.SetImageState(string(infrav1beta2.PowerVSImageStateFailed)) - conditions.MarkFalse(imageScope.IBMPowerVSImage, infrav1beta2.ImageImportedCondition, infrav1beta2.ImageImportFailedReason, capiv1beta1.ConditionSeverityError, "%s", job.Status.Message) + imageScope.SetImageState(string(infrav1.PowerVSImageStateFailed)) + v1beta1conditions.MarkFalse(imageScope.IBMPowerVSImage, infrav1.ImageImportedCondition, infrav1.ImageImportFailedReason, clusterv1beta1.ConditionSeverityError, "%s", job.Status.Message) + v1beta2conditions.Set(imageScope.IBMPowerVSImage, metav1.Condition{ + Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.ImageImportFailedReason, + }) return ctrl.Result{RequeueAfter: 2 * time.Minute}, fmt.Errorf("failed to import image, message: %s", job.Status.Message) - case "queued": + case infrav1.PowerVSImageStateQueued: imageScope.SetNotReady() - imageScope.SetImageState(string(infrav1beta2.PowerVSImageStateQue)) - conditions.MarkFalse(imageScope.IBMPowerVSImage, infrav1beta2.ImageImportedCondition, string(infrav1beta2.PowerVSImageStateQue), capiv1beta1.ConditionSeverityInfo, "%s", job.Status.Message) + imageScope.SetImageState(string(infrav1.PowerVSImageStateQueued)) + v1beta1conditions.MarkFalse(imageScope.IBMPowerVSImage, infrav1.ImageImportedCondition, string(infrav1.PowerVSImageStateQueued), clusterv1beta1.ConditionSeverityInfo, "%s", job.Status.Message) + v1beta2conditions.Set(imageScope.IBMPowerVSImage, metav1.Condition{ + Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.ImageQueuedReason, + }) return ctrl.Result{RequeueAfter: 2 * time.Minute}, nil default: 
imageScope.SetNotReady() - imageScope.SetImageState(string(infrav1beta2.PowerVSImageStateImporting)) - conditions.MarkFalse(imageScope.IBMPowerVSImage, infrav1beta2.ImageImportedCondition, *job.Status.State, capiv1beta1.ConditionSeverityInfo, "%s", job.Status.Message) + imageScope.SetImageState(string(infrav1.PowerVSImageStateImporting)) + v1beta1conditions.MarkFalse(imageScope.IBMPowerVSImage, infrav1.ImageImportedCondition, *job.Status.State, clusterv1beta1.ConditionSeverityInfo, "%s", job.Status.Message) + v1beta2conditions.Set(imageScope.IBMPowerVSImage, metav1.Condition{ + Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.ImageNotReadyReason, + }) return ctrl.Result{RequeueAfter: 2 * time.Minute}, nil } } - img, jobRef, err := r.getOrCreate(imageScope) + img, jobRef, err := r.getOrCreate(ctx, imageScope) if err != nil { - imageScope.Error(err, "Unable to import image") + log.Error(err, "Unable to import image") return ctrl.Result{}, fmt.Errorf("failed to reconcile Image for IBMPowerVSImage %s/%s: %w", imageScope.IBMPowerVSImage.Namespace, imageScope.IBMPowerVSImage.Name, err) } if jobRef != nil { imageScope.SetJobID(*jobRef.ID) } - return reconcileImage(img, imageScope) + return reconcileImage(ctx, img, imageScope) } -func reconcileImage(img *models.ImageReference, imageScope *scope.PowerVSImageScope) (_ ctrl.Result, reterr error) { +func reconcileImage(ctx context.Context, img *models.ImageReference, imageScope *scope.PowerVSImageScope) (_ ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) if img != nil { image, err := imageScope.IBMPowerVSClient.GetImage(*img.ImageID) if err != nil { - imageScope.Info("Unable to get image details") + log.Info("Unable to get image details", "imageID", *img.ImageID) return ctrl.Result{}, err } imageScope.SetImageID(image.ImageID) - imageScope.Info("ImageID", imageScope.GetImageID()) + log.Info("ImageID", imageScope.GetImageID()) imageScope.SetImageState(image.State) 
- imageScope.Info("ImageState", image.State) + log.Info("ImageState", image.State) switch imageScope.GetImageState() { - case infrav1beta2.PowerVSImageStateQue: - imageScope.Info("Image is in queued state") + case infrav1.PowerVSImageStateQueued: + log.Info("Image is in queued state") imageScope.SetNotReady() - conditions.MarkFalse(imageScope.IBMPowerVSImage, infrav1beta2.ImageReadyCondition, infrav1beta2.ImageNotReadyReason, capiv1beta1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(imageScope.IBMPowerVSImage, infrav1.ImageReadyCondition, infrav1.ImageNotReadyReason, clusterv1beta1.ConditionSeverityWarning, "") + v1beta2conditions.Set(imageScope.IBMPowerVSImage, metav1.Condition{ + Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMPowerVSImageNotReadyV1Beta2Reason, + }) return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil - case infrav1beta2.PowerVSImageStateACTIVE: - imageScope.Info("Image is in active state") + case infrav1.PowerVSImageStateACTIVE: + log.Info("Image is in active state") imageScope.SetReady() - conditions.MarkTrue(imageScope.IBMPowerVSImage, infrav1beta2.ImageReadyCondition) + v1beta1conditions.MarkTrue(imageScope.IBMPowerVSImage, infrav1.ImageReadyCondition) + v1beta2conditions.Set(imageScope.IBMPowerVSImage, metav1.Condition{ + Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.IBMPowerVSImageReadyV1Beta2Reason, + }) + default: imageScope.SetNotReady() - imageScope.Info("PowerVS image state is undefined", "state", image.State, "image-id", imageScope.GetImageID()) - conditions.MarkUnknown(imageScope.IBMPowerVSImage, infrav1beta2.ImageReadyCondition, "", "") + log.Info("PowerVS image state is undefined", "state", image.State, "image-id", imageScope.GetImageID()) + v1beta1conditions.MarkUnknown(imageScope.IBMPowerVSImage, infrav1.ImageReadyCondition, "", "") + v1beta2conditions.Set(imageScope.IBMPowerVSImage, metav1.Condition{ 
+ Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMPowerVSImageReadyUnknownV1Beta2Reason, + }) } } // Requeue after 1 minute if image is not ready to update status of the image properly. if !imageScope.IsReady() { - imageScope.Info("Image is not yet ready") + log.Info("Image is not yet ready, requeue", "state", imageScope.GetImageState()) return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil } return ctrl.Result{}, nil } -func (r *IBMPowerVSImageReconciler) reconcileDelete(scope *scope.PowerVSImageScope) (_ ctrl.Result, reterr error) { - scope.Info("Handling deleted IBMPowerVSImage") +func (r *IBMPowerVSImageReconciler) reconcileDelete(ctx context.Context, scope *scope.PowerVSImageScope) (_ ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + log.Info("Handling deleted IBMPowerVSImage") + + v1beta1conditions.MarkFalse(scope.IBMPowerVSImage, infrav1.ImageReadyCondition, clusterv1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta2conditions.Set(scope.IBMPowerVSImage, metav1.Condition{ + Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMPowerVSImageDeletingV1Beta2Reason, + }) defer func() { if reterr == nil { - // Image is deleted so remove the finalizer. - controllerutil.RemoveFinalizer(scope.IBMPowerVSImage, infrav1beta2.IBMPowerVSImageFinalizer) + // IBMPowerVSImage is deleted so remove the finalizer. 
+ controllerutil.RemoveFinalizer(scope.IBMPowerVSImage, infrav1.IBMPowerVSImageFinalizer) } }() if scope.GetImageID() == "" { - scope.Info("ImageID is not yet set, hence not invoking the PowerVS API to delete the image") + log.Info("IBMPowerVSImage ImageID is not yet set, hence not invoking the PowerVS API to delete the image") if scope.GetJobID() == "" { - scope.Info("JobID is not yet set, hence not invoking the PowerVS API to delete the image import job") + log.Info("JobID is not yet set, hence not invoking the PowerVS API to delete the image import job") return ctrl.Result{}, nil } if err := scope.DeleteImportJob(); err != nil { - scope.Error(err, "Error deleting IBMPowerVSImage Import Job") + log.Error(err, "Error deleting IBMPowerVSImage Import Job") return ctrl.Result{}, fmt.Errorf("error deleting IBMPowerVSImage Import Job: %w", err) } return ctrl.Result{}, nil } - if scope.IBMPowerVSImage.Spec.DeletePolicy != string(infrav1beta2.DeletePolicyRetain) { + if scope.IBMPowerVSImage.Spec.DeletePolicy != string(infrav1.DeletePolicyRetain) { if err := scope.DeleteImage(); err != nil { - scope.Error(err, "Error deleting IBMPowerVSImage") + v1beta1conditions.MarkFalse(scope.IBMPowerVSImage, infrav1.ImageReadyCondition, clusterv1beta1.DeletionFailedReason, clusterv1beta1.ConditionSeverityWarning, "") + v1beta2conditions.Set(scope.IBMPowerVSImage, metav1.Condition{ + Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMPowerVSMachineInstanceDeletingV1Beta2Reason, + Message: fmt.Sprintf("failed to delete IBMPowerVSImage: %v", err), + }) return ctrl.Result{}, fmt.Errorf("error deleting IBMPowerVSImage %v: %w", klog.KObj(scope.IBMPowerVSImage), err) } } return ctrl.Result{}, nil } -func (r *IBMPowerVSImageReconciler) getOrCreate(scope *scope.PowerVSImageScope) (*models.ImageReference, *models.JobReference, error) { - image, job, err := scope.CreateImageCOSBucket() +func (r *IBMPowerVSImageReconciler) getOrCreate(ctx 
context.Context, scope *scope.PowerVSImageScope) (*models.ImageReference, *models.JobReference, error) { + image, job, err := scope.CreateImageCOSBucket(ctx) return image, job, err } -func (r *IBMPowerVSImageReconciler) shouldAdopt(i infrav1beta2.IBMPowerVSImage) bool { - return !clusterv1util.HasOwner(i.OwnerReferences, infrav1beta2.GroupVersion.String(), []string{"IBMPowerVSCluster"}) +func (r *IBMPowerVSImageReconciler) shouldAdopt(i infrav1.IBMPowerVSImage) bool { + return !clusterv1util.HasOwner(i.OwnerReferences, infrav1.GroupVersion.String(), []string{"IBMPowerVSCluster"}) } // SetupWithManager sets up the controller with the Manager. func (r *IBMPowerVSImageReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&infrav1beta2.IBMPowerVSImage{}). + For(&infrav1.IBMPowerVSImage{}). Complete(r) } + +func patchIBMPowerVSImage(ctx context.Context, patchHelper *v1beta1patch.Helper, ibmPowerVSImage *infrav1.IBMPowerVSImage) error { + // Before computing ready condition, make sure that ImageReady is always set. + // NOTE: This is required because v1beta2 conditions comply to guideline requiring conditions to be set at the + // first reconcile. + if c := v1beta2conditions.Get(ibmPowerVSImage, infrav1.IBMPowerVSImageReadyV1Beta2Condition); c == nil { + if ibmPowerVSImage.Status.Ready { + v1beta2conditions.Set(ibmPowerVSImage, metav1.Condition{ + Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.IBMPowerVSImageReadyV1Beta2Reason, + }) + } else { + v1beta2conditions.Set(ibmPowerVSImage, metav1.Condition{ + Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMPowerVSImageNotReadyV1Beta2Reason, + }) + } + } + + // always update the readyCondition. 
+ v1beta1conditions.SetSummary(ibmPowerVSImage, + v1beta1conditions.WithConditions( + infrav1.ImageReadyCondition, + ), + ) + + if err := v1beta2conditions.SetSummaryCondition(ibmPowerVSImage, ibmPowerVSImage, infrav1.IBMPowerVSImageReadyCondition, + v1beta2conditions.ForConditionTypes{ + infrav1.IBMPowerVSImageReadyV1Beta2Condition, + infrav1.WorkspaceReadyV1Beta2Condition, + }, + // Using a custom merge strategy to override reasons applied during merge. + v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + // Use custom reasons. + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + infrav1.IBMPowerVSImageNotReadyV1Beta2Reason, + infrav1.IBMPowerVSImageReadyUnknownV1Beta2Reason, + infrav1.IBMPowerVSImageReadyV1Beta2Reason, + )), + ), + }, + ); err != nil { + return fmt.Errorf("failed to set %s condition: %w", infrav1.IBMPowerVSImageReadyCondition, err) + } + + // Patch the IBMPowerVSImage resource. + return patchHelper.Patch(ctx, ibmPowerVSImage, v1beta1patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + infrav1.IBMPowerVSImageReadyCondition, + infrav1.IBMPowerVSImageReadyV1Beta2Condition, + clusterv1beta1.PausedV1Beta2Condition, + infrav1.WorkspaceReadyV1Beta2Condition, + }}) +} diff --git a/controllers/ibmpowervsimage_controller_test.go b/controllers/ibmpowervsimage_controller_test.go index b4d936f9b..5efbc8770 100644 --- a/controllers/ibmpowervsimage_controller_test.go +++ b/controllers/ibmpowervsimage_controller_test.go @@ -28,16 +28,18 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2" "k8s.io/utils/ptr" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - infrav1beta2 
"sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck + "sigs.k8s.io/cluster-api/util" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck + v1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" //nolint:staticcheck + + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs/mock" @@ -47,8 +49,8 @@ import ( func TestIBMPowerVSImageReconciler_Reconcile(t *testing.T) { testCases := []struct { name string - powervsCluster *infrav1beta2.IBMPowerVSCluster - powervsImage *infrav1beta2.IBMPowerVSImage + powervsCluster *infrav1.IBMPowerVSCluster + powervsImage *infrav1.IBMPowerVSImage expectError bool }{ { @@ -57,11 +59,12 @@ func TestIBMPowerVSImageReconciler_Reconcile(t *testing.T) { }, { name: "Should not Reconcile if failed to find IBMPowerVSCluster", - powervsImage: &infrav1beta2.IBMPowerVSImage{ + powervsImage: &infrav1.IBMPowerVSImage{ ObjectMeta: metav1.ObjectMeta{ - Name: "capi-image", + Name: "capi-image", + Finalizers: []string{infrav1.IBMPowerVSImageFinalizer}, }, - Spec: infrav1beta2.IBMPowerVSImageSpec{ + Spec: infrav1.IBMPowerVSImageSpec{ ClusterName: "capi-powervs-cluster", Object: ptr.To("capi-image.ova.gz"), Region: ptr.To("us-south"), @@ -87,12 +90,12 @@ func TestIBMPowerVSImageReconciler_Reconcile(t *testing.T) { if tc.powervsImage != nil { g.Eventually(func() bool { - machine := &infrav1beta2.IBMPowerVSImage{} + image := &infrav1.IBMPowerVSImage{} key := client.ObjectKey{ Name: tc.powervsImage.Name, Namespace: ns.Name, } - err = testEnv.Get(ctx, key, machine) + err = testEnv.Get(ctx, key, image) return err == nil }, 10*time.Second).Should(Equal(true)) @@ -146,17 +149,16 @@ func TestIBMPowerVSImageReconciler_reconcile(t *testing.T) { g := NewWithT(t) setup(t) 
t.Cleanup(teardown) - powervsCluster := &infrav1beta2.IBMPowerVSCluster{ + powervsCluster := &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-powervs-cluster"}, } imageScope := &scope.PowerVSImageScope{ - Logger: klog.Background(), - IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{ + IBMPowerVSImage: &infrav1.IBMPowerVSImage{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-image", }, - Spec: infrav1beta2.IBMPowerVSImageSpec{ + Spec: infrav1.IBMPowerVSImageSpec{ ClusterName: "capi-powervs-cluster", Object: ptr.To("capi-image.ova.gz"), Region: ptr.To("us-south"), @@ -164,7 +166,7 @@ func TestIBMPowerVSImageReconciler_reconcile(t *testing.T) { }, }, } - _, err := reconciler.reconcile(powervsCluster, imageScope) + _, err := reconciler.reconcile(ctx, powervsCluster, imageScope) g.Expect(err).To(BeNil()) }) t.Run("Reconciling an image import job", func(t *testing.T) { @@ -172,29 +174,29 @@ func TestIBMPowerVSImageReconciler_reconcile(t *testing.T) { setup(t) t.Cleanup(teardown) const jobID = "job-1" - powervsCluster := &infrav1beta2.IBMPowerVSCluster{ + powervsCluster := &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-powervs-cluster", UID: "1", }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ ServiceInstanceID: "service-instance-1", }, } - powervsImage := &infrav1beta2.IBMPowerVSImage{ + powervsImage := &infrav1.IBMPowerVSImage{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-image", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: infrav1beta2.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), Kind: "IBMPowerVSCluster", Name: "capi-powervs-cluster", UID: "1", }, }, - Finalizers: []string{infrav1beta2.IBMPowerVSImageFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSImageFinalizer}, }, - Spec: infrav1beta2.IBMPowerVSImageSpec{ + Spec: infrav1.IBMPowerVSImageSpec{ ClusterName: "capi-powervs-cluster", Object: ptr.To("capi-image.ova.gz"), Region: ptr.To("us-south"), @@ -204,7 
+206,6 @@ func TestIBMPowerVSImageReconciler_reconcile(t *testing.T) { mockclient := fake.NewClientBuilder().WithObjects([]client.Object{powervsCluster, powervsImage}...).Build() imageScope := &scope.PowerVSImageScope{ - Logger: klog.Background(), Client: mockclient, IBMPowerVSImage: powervsImage, IBMPowerVSClient: mockpowervs, @@ -213,10 +214,10 @@ func TestIBMPowerVSImageReconciler_reconcile(t *testing.T) { imageScope.IBMPowerVSImage.Status.JobID = jobID t.Run("When failed to get the import job using jobID", func(_ *testing.T) { mockpowervs.EXPECT().GetJob(gomock.AssignableToTypeOf(jobID)).Return(nil, errors.New("Error finding the job")) - result, err := reconciler.reconcile(powervsCluster, imageScope) + result, err := reconciler.reconcile(ctx, powervsCluster, imageScope) g.Expect(err).To(Not(BeNil())) g.Expect(result.RequeueAfter).To(Not(BeZero())) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer)) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1.IBMPowerVSImageFinalizer)) }) job := &models.Job{ ID: ptr.To(jobID), @@ -226,34 +227,34 @@ func TestIBMPowerVSImageReconciler_reconcile(t *testing.T) { } t.Run("When import job status is queued", func(_ *testing.T) { mockpowervs.EXPECT().GetJob(gomock.AssignableToTypeOf(jobID)).Return(job, nil) - result, err := reconciler.reconcile(powervsCluster, imageScope) + result, err := reconciler.reconcile(ctx, powervsCluster, imageScope) g.Expect(err).To(BeNil()) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer)) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1.IBMPowerVSImageFinalizer)) g.Expect(imageScope.IBMPowerVSImage.Status.Ready).To(Equal(false)) - g.Expect(imageScope.IBMPowerVSImage.Status.ImageState).To(BeEquivalentTo(infrav1beta2.PowerVSImageStateQue)) - expectConditionsImage(g, imageScope.IBMPowerVSImage, 
[]conditionAssertion{{infrav1beta2.ImageImportedCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityInfo, string(infrav1beta2.PowerVSImageStateQue)}}) + g.Expect(imageScope.IBMPowerVSImage.Status.ImageState).To(BeEquivalentTo(infrav1.PowerVSImageStateQueued)) + expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{infrav1.ImageImportedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, string(infrav1.PowerVSImageStateQueued)}}) g.Expect(result.RequeueAfter).To(Not(BeZero())) }) t.Run("When importing image is still in progress", func(_ *testing.T) { job.Status.State = ptr.To("") mockpowervs.EXPECT().GetJob(gomock.AssignableToTypeOf("job-1")).Return(job, nil) - result, err := reconciler.reconcile(powervsCluster, imageScope) + result, err := reconciler.reconcile(ctx, powervsCluster, imageScope) g.Expect(err).To(BeNil()) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer)) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1.IBMPowerVSImageFinalizer)) g.Expect(imageScope.IBMPowerVSImage.Status.Ready).To(Equal(false)) - g.Expect(imageScope.IBMPowerVSImage.Status.ImageState).To(BeEquivalentTo(infrav1beta2.PowerVSImageStateImporting)) - expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{infrav1beta2.ImageImportedCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityInfo, *job.Status.State}}) + g.Expect(imageScope.IBMPowerVSImage.Status.ImageState).To(BeEquivalentTo(infrav1.PowerVSImageStateImporting)) + expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{infrav1.ImageImportedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, *job.Status.State}}) g.Expect(result.RequeueAfter).To(Not(BeZero())) }) t.Run("When import job status is failed", func(_ *testing.T) { job.Status.State = ptr.To("failed") mockpowervs.EXPECT().GetJob(gomock.AssignableToTypeOf("job-1")).Return(job, 
nil) - result, err := reconciler.reconcile(powervsCluster, imageScope) + result, err := reconciler.reconcile(ctx, powervsCluster, imageScope) g.Expect(err).To(Not(BeNil())) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer)) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1.IBMPowerVSImageFinalizer)) g.Expect(imageScope.IBMPowerVSImage.Status.Ready).To(Equal(false)) - g.Expect(imageScope.IBMPowerVSImage.Status.ImageState).To(BeEquivalentTo(infrav1beta2.PowerVSImageStateFailed)) - expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{infrav1beta2.ImageImportedCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityError, infrav1beta2.ImageImportFailedReason}}) + g.Expect(imageScope.IBMPowerVSImage.Status.ImageState).To(BeEquivalentTo(infrav1.PowerVSImageStateFailed)) + expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{infrav1.ImageImportedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.ImageImportFailedReason}}) g.Expect(result.RequeueAfter).To(Not(BeZero())) }) job.Status.State = ptr.To("completed") @@ -269,11 +270,11 @@ func TestIBMPowerVSImageReconciler_reconcile(t *testing.T) { mockpowervs.EXPECT().GetJob(gomock.AssignableToTypeOf("job-1")).Return(job, nil) mockpowervs.EXPECT().GetAllImage().Return(images, nil) mockpowervs.EXPECT().GetImage(gomock.AssignableToTypeOf("capi-image-id")).Return(nil, errors.New("Failed to the image details")) - result, err := reconciler.reconcile(powervsCluster, imageScope) + result, err := reconciler.reconcile(ctx, powervsCluster, imageScope) g.Expect(err).To(Not(BeNil())) g.Expect(result.RequeueAfter).To(BeZero()) - expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{conditionType: infrav1beta2.ImageImportedCondition, status: corev1.ConditionTrue}}) - 
g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer)) + expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{conditionType: infrav1.ImageImportedCondition, status: corev1.ConditionTrue}}) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1.IBMPowerVSImageFinalizer)) }) image := &models.Image{ Name: ptr.To("capi-image"), @@ -284,11 +285,11 @@ func TestIBMPowerVSImageReconciler_reconcile(t *testing.T) { mockpowervs.EXPECT().GetJob(gomock.AssignableToTypeOf("job-1")).Return(job, nil) mockpowervs.EXPECT().GetAllImage().Return(images, nil) mockpowervs.EXPECT().GetImage(gomock.AssignableToTypeOf("capi-image-id")).Return(image, nil) - result, err := reconciler.reconcile(powervsCluster, imageScope) + result, err := reconciler.reconcile(ctx, powervsCluster, imageScope) g.Expect(err).To(BeNil()) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer)) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1.IBMPowerVSImageFinalizer)) g.Expect(imageScope.IBMPowerVSImage.Status.Ready).To(Equal(false)) - expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{infrav1beta2.ImageReadyCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityWarning, infrav1beta2.ImageNotReadyReason}}) + expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{infrav1.ImageReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.ImageNotReadyReason}}) g.Expect(result.RequeueAfter).To(Not(BeZero())) }) t.Run("When import job status is completed and image state is undefined", func(_ *testing.T) { @@ -296,10 +297,10 @@ func TestIBMPowerVSImageReconciler_reconcile(t *testing.T) { mockpowervs.EXPECT().GetJob(gomock.AssignableToTypeOf("job-1")).Return(job, nil) mockpowervs.EXPECT().GetAllImage().Return(images, nil) 
mockpowervs.EXPECT().GetImage(gomock.AssignableToTypeOf("capi-image-id")).Return(image, nil) - result, err := reconciler.reconcile(powervsCluster, imageScope) + result, err := reconciler.reconcile(ctx, powervsCluster, imageScope) g.Expect(err).To(BeNil()) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer)) - expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{infrav1beta2.ImageReadyCondition, corev1.ConditionUnknown, "", ""}}) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1.IBMPowerVSImageFinalizer)) + expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{infrav1.ImageReadyCondition, corev1.ConditionUnknown, "", ""}}) g.Expect(imageScope.IBMPowerVSImage.Status.Ready).To(Equal(false)) g.Expect(result.RequeueAfter).To(Not(BeZero())) }) @@ -308,10 +309,10 @@ func TestIBMPowerVSImageReconciler_reconcile(t *testing.T) { mockpowervs.EXPECT().GetJob(gomock.AssignableToTypeOf("job-1")).Return(job, nil) mockpowervs.EXPECT().GetAllImage().Return(images, nil) mockpowervs.EXPECT().GetImage(gomock.AssignableToTypeOf("capi-image-id")).Return(image, nil) - result, err := reconciler.reconcile(powervsCluster, imageScope) + result, err := reconciler.reconcile(ctx, powervsCluster, imageScope) g.Expect(err).To(BeNil()) - expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{conditionType: infrav1beta2.ImageReadyCondition, status: corev1.ConditionTrue}}) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer)) + expectConditionsImage(g, imageScope.IBMPowerVSImage, []conditionAssertion{{conditionType: infrav1.ImageReadyCondition, status: corev1.ConditionTrue}}) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1.IBMPowerVSImageFinalizer)) g.Expect(imageScope.IBMPowerVSImage.Status.Ready).To(Equal(true)) g.Expect(result.RequeueAfter).To(BeZero()) }) @@ -337,8 +338,7 
@@ func TestIBMPowerVSImageReconciler_delete(t *testing.T) { Recorder: recorder, } imageScope = &scope.PowerVSImageScope{ - Logger: klog.Background(), - IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{}, + IBMPowerVSImage: &infrav1.IBMPowerVSImage{}, IBMPowerVSClient: mockpowervs, } } @@ -351,62 +351,62 @@ func TestIBMPowerVSImageReconciler_delete(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - imageScope.IBMPowerVSImage.Finalizers = []string{infrav1beta2.IBMPowerVSImageFinalizer} - _, err := reconciler.reconcileDelete(imageScope) + imageScope.IBMPowerVSImage.Finalizers = []string{infrav1.IBMPowerVSImageFinalizer} + _, err := reconciler.reconcileDelete(ctx, imageScope) g.Expect(err).To(BeNil()) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(Not(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer))) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(Not(ContainElement(infrav1.IBMPowerVSImageFinalizer))) }) t.Run("Should fail to delete the import image job", func(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) imageScope.IBMPowerVSImage.Status.JobID = "job-1" - imageScope.IBMPowerVSImage.Finalizers = []string{infrav1beta2.IBMPowerVSImageFinalizer} + imageScope.IBMPowerVSImage.Finalizers = []string{infrav1.IBMPowerVSImageFinalizer} mockpowervs.EXPECT().DeleteJob(gomock.AssignableToTypeOf("job-1")).Return(errors.New("Failed to deleted the import job")) - _, err := reconciler.reconcileDelete(imageScope) + _, err := reconciler.reconcileDelete(ctx, imageScope) g.Expect(err).To(Not(BeNil())) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer)) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1.IBMPowerVSImageFinalizer)) }) t.Run("Should delete the import image job", func(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) imageScope.IBMPowerVSImage.Status.JobID = "job-1" - imageScope.IBMPowerVSImage.Finalizers = 
[]string{infrav1beta2.IBMPowerVSImageFinalizer} + imageScope.IBMPowerVSImage.Finalizers = []string{infrav1.IBMPowerVSImageFinalizer} mockpowervs.EXPECT().DeleteJob(gomock.AssignableToTypeOf("job-1")).Return(nil) - _, err := reconciler.reconcileDelete(imageScope) + _, err := reconciler.reconcileDelete(ctx, imageScope) g.Expect(err).To(BeNil()) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(Not(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer))) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(Not(ContainElement(infrav1.IBMPowerVSImageFinalizer))) }) t.Run("Should fail to delete the image using ID when delete policy is not to retain it", func(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) imageScope.IBMPowerVSImage.Status.ImageID = "capi-image-id" - imageScope.IBMPowerVSImage.Finalizers = []string{infrav1beta2.IBMPowerVSImageFinalizer} + imageScope.IBMPowerVSImage.Finalizers = []string{infrav1.IBMPowerVSImageFinalizer} mockpowervs.EXPECT().DeleteImage(gomock.AssignableToTypeOf("capi-image-id")).Return(errors.New("Failed to delete the image")) - _, err := reconciler.reconcileDelete(imageScope) + _, err := reconciler.reconcileDelete(ctx, imageScope) g.Expect(err).To(Not(BeNil())) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer)) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(ContainElement(infrav1.IBMPowerVSImageFinalizer)) }) t.Run("Should not delete the image using ID when delete policy is to retain it", func(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) imageScope.IBMPowerVSImage.Status.ImageID = "capi-image-id" - imageScope.IBMPowerVSImage.Finalizers = []string{infrav1beta2.IBMPowerVSImageFinalizer} + imageScope.IBMPowerVSImage.Finalizers = []string{infrav1.IBMPowerVSImageFinalizer} imageScope.IBMPowerVSImage.Spec.DeletePolicy = "retain" - _, err := reconciler.reconcileDelete(imageScope) + _, err := reconciler.reconcileDelete(ctx, imageScope) 
g.Expect(err).To(BeNil()) - g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(Not(ContainElement(infrav1beta2.IBMPowerVSImageFinalizer))) + g.Expect(imageScope.IBMPowerVSImage.Finalizers).To(Not(ContainElement(infrav1.IBMPowerVSImageFinalizer))) }) }) } -func expectConditionsImage(g *WithT, m *infrav1beta2.IBMPowerVSImage, expected []conditionAssertion) { +func expectConditionsImage(g *WithT, m *infrav1.IBMPowerVSImage, expected []conditionAssertion) { g.Expect(len(m.Status.Conditions)).To(BeNumerically(">=", len(expected))) for _, c := range expected { - actual := conditions.Get(m, c.conditionType) + actual := v1beta1conditions.Get(m, c.conditionType) g.Expect(actual).To(Not(BeNil())) g.Expect(actual.Type).To(Equal(c.conditionType)) g.Expect(actual.Status).To(Equal(c.status)) @@ -414,3 +414,94 @@ func expectConditionsImage(g *WithT, m *infrav1beta2.IBMPowerVSImage, expected [ g.Expect(actual.Reason).To(Equal(c.reason)) } } + +func TestIBMPowerVSImageReconciler_Reconcile_Conditions(t *testing.T) { + testCases := []struct { + name string + powervsCluster *infrav1.IBMPowerVSCluster + powervsImage *infrav1.IBMPowerVSImage + expectError bool + }{ + + { + name: "Conditions should be set after first reconcile", + powervsCluster: &infrav1.IBMPowerVSCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "capi-powervs-cluster"}, + }, + powervsImage: &infrav1.IBMPowerVSImage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "capi-image", + Finalizers: []string{infrav1.IBMPowerVSImageFinalizer}, + }, + Spec: infrav1.IBMPowerVSImageSpec{ + ClusterName: "capi-powervs-cluster", + Object: ptr.To("capi-image.ova.gz"), + Region: ptr.To("us-south"), + Bucket: ptr.To("capi-bucket"), + }, + }, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + reconciler := &IBMPowerVSImageReconciler{ + Client: testEnv.Client, + } + + ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("namespace-%s", util.RandomString(5))) + 
g.Expect(err).To(BeNil()) + + createObject(g, tc.powervsImage, ns.Name) + createCluster(g, tc.powervsCluster, ns.Name) + defer cleanupObject(g, tc.powervsImage) + defer cleanupCluster(g, tc.powervsCluster, ns) + + if tc.powervsImage != nil { + g.Eventually(func() bool { + image := &infrav1.IBMPowerVSImage{} + key := client.ObjectKey{ + Name: tc.powervsImage.Name, + Namespace: ns.Name, + } + err = testEnv.Get(ctx, key, image) + return err == nil + }, 10*time.Second).Should(Equal(true)) + + _, err := reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: client.ObjectKey{ + Namespace: tc.powervsImage.Namespace, + Name: tc.powervsImage.Name, + }, + }) + if tc.expectError { + g.Expect(err).ToNot(BeNil()) + image := &infrav1.IBMPowerVSImage{} + key := client.ObjectKey{ + Namespace: tc.powervsImage.Namespace, + Name: tc.powervsImage.Name, + } + err = testEnv.Get(ctx, key, image) + g.Expect(err).To(BeNil()) + expectConditionsImagev1beta2(g, image, []metav1.Condition{{Type: infrav1.IBMPowerVSImageReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.IBMPowerVSImageNotReadyV1Beta2Reason}}) + } else { + g.Expect(err).To(BeNil()) + } + } + }) + } +} + +func expectConditionsImagev1beta2(g *WithT, m *infrav1.IBMPowerVSImage, expected []metav1.Condition) { + g.Expect(len(m.Status.V1Beta2.Conditions)).To(BeNumerically(">=", len(expected))) + for _, c := range expected { + actual := v1beta2conditions.Get(m, infrav1.IBMPowerVSImageReadyV1Beta2Condition) + g.Expect(actual).To(Not(BeNil())) + g.Expect(actual.Type).To(Equal(c.Type)) + g.Expect(actual.Status).To(Equal(c.Status)) + g.Expect(actual.Reason).To(Equal(c.Reason)) + } +} diff --git a/controllers/ibmpowervsmachine_controller.go b/controllers/ibmpowervsmachine_controller.go index 1f38dea18..859de0a8e 100644 --- a/controllers/ibmpowervsmachine_controller.go +++ b/controllers/ibmpowervsmachine_controller.go @@ -36,17 +36,19 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" 
"sigs.k8s.io/controller-runtime/pkg/reconcile" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck + v1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" //nolint:staticcheck + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck //nolint:staticcheck + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/paused" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" - "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/paused" "sigs.k8s.io/cluster-api/util/predicates" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/endpoints" @@ -82,7 +84,7 @@ func (r *IBMPowerVSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Re defer log.Info("Finished reconciling IBMPowerVSMachine") // Fetch the IBMPowerVSMachine instance. - ibmPowerVSMachine := &infrav1beta2.IBMPowerVSMachine{} + ibmPowerVSMachine := &infrav1.IBMPowerVSMachine{} err := r.Client.Get(ctx, req.NamespacedName, ibmPowerVSMachine) if err != nil { if apierrors.IsNotFound(err) { @@ -93,7 +95,7 @@ func (r *IBMPowerVSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Re } // Add finalizer first if not set to avoid the race condition between init and delete. 
- if finalizerAdded, err := finalizers.EnsureFinalizer(ctx, r.Client, ibmPowerVSMachine, infrav1beta2.IBMPowerVSMachineFinalizer); err != nil || finalizerAdded { + if finalizerAdded, err := finalizers.EnsureFinalizer(ctx, r.Client, ibmPowerVSMachine, infrav1.IBMPowerVSMachineFinalizer); err != nil || finalizerAdded { return ctrl.Result{}, err } @@ -122,7 +124,7 @@ func (r *IBMPowerVSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil } if cluster == nil { - log.Info(fmt.Sprintf("Please associate this machine with a cluster using the label %s: ", capiv1beta1.ClusterNameLabel)) + log.Info(fmt.Sprintf("Please associate this machine with a cluster using the label %s: ", clusterv1.ClusterNameLabel)) return ctrl.Result{}, nil } @@ -130,7 +132,7 @@ func (r *IBMPowerVSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Re ctx = ctrl.LoggerInto(ctx, log) // Fetch the IBMPowerVSCluster. - ibmPowerVSCluster := &infrav1beta2.IBMPowerVSCluster{} + ibmPowerVSCluster := &infrav1.IBMPowerVSCluster{} ibmPowerVSClusterName := client.ObjectKey{ Namespace: ibmPowerVSMachine.Namespace, Name: cluster.Spec.InfrastructureRef.Name, @@ -144,9 +146,9 @@ func (r *IBMPowerVSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Re ctx = ctrl.LoggerInto(ctx, log) // Fetch the IBMPowerVSImage. 
- var ibmPowerVSImage *infrav1beta2.IBMPowerVSImage + var ibmPowerVSImage *infrav1.IBMPowerVSImage if ibmPowerVSMachine.Spec.ImageRef != nil { - ibmPowerVSImage = &infrav1beta2.IBMPowerVSImage{} + ibmPowerVSImage = &infrav1.IBMPowerVSImage{} ibmPowerVSImageName := client.ObjectKey{ Namespace: ibmPowerVSMachine.Namespace, Name: ibmPowerVSMachine.Spec.ImageRef.Name, @@ -161,7 +163,7 @@ func (r *IBMPowerVSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, err } - if cluster.Spec.InfrastructureRef == nil { + if !cluster.Spec.InfrastructureRef.IsDefined() { log.Info("Cluster infrastructureRef is not available yet") return ctrl.Result{}, nil } @@ -183,7 +185,7 @@ func (r *IBMPowerVSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Re } // Initialize the patch helper - patchHelper, err := patch.NewHelper(ibmPowerVSMachine, r.Client) + patchHelper, err := v1beta1patch.NewHelper(ibmPowerVSMachine, r.Client) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to init patch helper: %w", err) } @@ -207,17 +209,17 @@ func (r *IBMPowerVSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Re func (r *IBMPowerVSMachineReconciler) reconcileDelete(ctx context.Context, scope *scope.PowerVSMachineScope) (_ ctrl.Result, reterr error) { log := ctrl.LoggerFrom(ctx) - conditions.MarkFalse(scope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, capiv1beta1.DeletingReason, capiv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(scope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, clusterv1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") v1beta2conditions.Set(scope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.IBMPowerVSMachineInstanceDeletingV1Beta2Reason, + Reason: infrav1.IBMPowerVSMachineInstanceDeletingV1Beta2Reason, }) defer 
func() { if reterr == nil { // PowerVS machine is deleted so remove the finalizer. - controllerutil.RemoveFinalizer(scope.IBMPowerVSMachine, infrav1beta2.IBMPowerVSMachineFinalizer) + controllerutil.RemoveFinalizer(scope.IBMPowerVSMachine, infrav1.IBMPowerVSMachineFinalizer) } }() @@ -227,11 +229,11 @@ func (r *IBMPowerVSMachineReconciler) reconcileDelete(ctx context.Context, scope } if err := scope.DeleteMachine(); err != nil { log.Error(err, "error deleting IBMPowerVSMachine") - conditions.MarkFalse(scope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, capiv1beta1.DeletionFailedReason, capiv1beta1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(scope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, clusterv1beta1.DeletionFailedReason, clusterv1beta1.ConditionSeverityWarning, "") v1beta2conditions.Set(scope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.IBMPowerVSMachineInstanceDeletingV1Beta2Reason, + Reason: infrav1.IBMPowerVSMachineInstanceDeletingV1Beta2Reason, Message: fmt.Sprintf("failed to delete IBMPowerVSMachine: %v", err), }) return ctrl.Result{}, fmt.Errorf("error deleting IBMPowerVSMachine %v: %w", klog.KObj(scope.IBMPowerVSMachine), err) @@ -254,7 +256,7 @@ func (r *IBMPowerVSMachineReconciler) handleLoadBalancerPoolMemberConfiguration( if err != nil { return ctrl.Result{}, fmt.Errorf("failed to create VPC load balancer pool member: %w", err) } - if poolMember != nil && *poolMember.ProvisioningStatus != string(infrav1beta2.VPCLoadBalancerStateActive) { + if poolMember != nil && *poolMember.ProvisioningStatus != string(infrav1.VPCLoadBalancerStateActive) { return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil } return ctrl.Result{}, nil @@ -263,13 +265,13 @@ func (r *IBMPowerVSMachineReconciler) handleLoadBalancerPoolMemberConfiguration( func (r 
*IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.PowerVSMachineScope) (ctrl.Result, error) { //nolint:gocyclo log := ctrl.LoggerFrom(ctx) - if !machineScope.Cluster.Status.InfrastructureReady { + if machineScope.Cluster.Status.Initialization.InfrastructureProvisioned == nil || !*machineScope.Cluster.Status.Initialization.InfrastructureProvisioned { log.Info("Cluster infrastructure is not ready yet, skipping reconciliation") - conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.WaitingForClusterInfrastructureReason, capiv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "") v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.IBMPowerVSMachineInstanceWaitingForClusterInfrastructureReadyV1Beta2Reason, + Reason: infrav1.IBMPowerVSMachineInstanceWaitingForClusterInfrastructureReadyV1Beta2Reason, }) return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil } @@ -277,11 +279,11 @@ func (r *IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machi if machineScope.IBMPowerVSImage != nil { if !machineScope.IBMPowerVSImage.Status.Ready { log.Info("IBMPowerVSImage is not ready yet, skipping reconciliation") - conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.WaitingForIBMPowerVSImageReason, capiv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForIBMPowerVSImageReason, clusterv1beta1.ConditionSeverityInfo, "") v1beta2conditions.Set(machineScope.IBMPowerVSMachine, 
metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.WaitingForIBMPowerVSImageReason, + Reason: infrav1.WaitingForIBMPowerVSImageReason, }) return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil } @@ -289,23 +291,23 @@ func (r *IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machi // Make sure bootstrap data is available and populated. if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { - if !util.IsControlPlaneMachine(machineScope.Machine) && !conditions.IsTrue(machineScope.Cluster, capiv1beta1.ControlPlaneInitializedCondition) { + if !util.IsControlPlaneMachine(machineScope.Machine) && !conditions.IsTrue(machineScope.Cluster, clusterv1.ClusterControlPlaneInitializedCondition) { log.Info("Waiting for the control plane to be initialized, skipping reconciliation") - conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, capiv1beta1.WaitingForControlPlaneAvailableReason, capiv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, clusterv1beta1.WaitingForControlPlaneAvailableReason, clusterv1beta1.ConditionSeverityInfo, "") v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.IBMPowerVSMachineInstanceWaitingForControlPlaneInitializedV1Beta2Reason, + Reason: infrav1.IBMPowerVSMachineInstanceWaitingForControlPlaneInitializedV1Beta2Reason, }) return ctrl.Result{}, nil } log.Info("Waiting for bootstrap data to be ready, skipping reconciliation") - conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.WaitingForBootstrapDataReason, 
capiv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "") v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.IBMPowerVSMachineInstanceWaitingForBootstrapDataV1Beta2Reason, + Reason: infrav1.IBMPowerVSMachineInstanceWaitingForBootstrapDataV1Beta2Reason, }) return reconcile.Result{}, nil } @@ -313,11 +315,11 @@ func (r *IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machi machine, err := machineScope.CreateMachine(ctx) if err != nil { log.Error(err, "Unable to create PowerVS machine") - conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.InstanceProvisionFailedReason, capiv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.InstanceProvisionFailedReason, + Reason: infrav1.InstanceProvisionFailedReason, Message: err.Error(), }) return ctrl.Result{}, fmt.Errorf("failed to create IBMPowerVSMachine: %w", err) @@ -325,11 +327,11 @@ func (r *IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machi if machine == nil { machineScope.SetNotReady() - conditions.MarkUnknown(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.InstanceStateUnknownReason, "") + 
v1beta1conditions.MarkUnknown(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStateUnknownReason, "") v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: infrav1beta2.InstanceStateUnknownReason, + Reason: infrav1.InstanceStateUnknownReason, }) return ctrl.Result{}, nil } @@ -347,38 +349,38 @@ func (r *IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machi machineScope.SetInstanceState(instance.Status) switch machineScope.GetInstanceState() { - case infrav1beta2.PowerVSInstanceStateBUILD: + case infrav1.PowerVSInstanceStateBUILD: machineScope.SetNotReady() - conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.InstanceNotReadyReason, capiv1beta1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, clusterv1beta1.ConditionSeverityWarning, "") v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.InstanceNotReadyReason, + Reason: infrav1.InstanceNotReadyReason, }) - case infrav1beta2.PowerVSInstanceStateSHUTOFF: + case infrav1.PowerVSInstanceStateSHUTOFF: machineScope.SetNotReady() - conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.InstanceStoppedReason, capiv1beta1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStoppedReason, clusterv1beta1.ConditionSeverityError, "") v1beta2conditions.Set(machineScope.IBMPowerVSMachine, 
metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.InstanceStoppedReason, + Reason: infrav1.InstanceStoppedReason, }) return ctrl.Result{}, nil - case infrav1beta2.PowerVSInstanceStateACTIVE: + case infrav1.PowerVSInstanceStateACTIVE: machineScope.SetReady() - case infrav1beta2.PowerVSInstanceStateERROR: + case infrav1.PowerVSInstanceStateERROR: msg := "" if instance.Fault != nil { msg = instance.Fault.Details } machineScope.SetNotReady() - machineScope.SetFailureReason(infrav1beta2.UpdateMachineError) + machineScope.SetFailureReason(infrav1.UpdateMachineError) machineScope.SetFailureMessage(msg) - conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.InstanceErroredReason, capiv1beta1.ConditionSeverityError, "%s", msg) + v1beta1conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceErroredReason, clusterv1beta1.ConditionSeverityError, "%s", msg) v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.InstanceErroredReason, + Reason: infrav1.InstanceErroredReason, Message: msg, }) capibmrecord.Warnf(machineScope.IBMPowerVSMachine, "FailedBuildInstance", "Failed to build the instance %s", msg) @@ -386,11 +388,11 @@ func (r *IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machi default: machineScope.SetNotReady() log.Info("PowerVS instance state is undefined", "state", *instance.Status, "instance-id", machineScope.GetInstanceID()) - conditions.MarkUnknown(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, "", "") + v1beta1conditions.MarkUnknown(machineScope.IBMPowerVSMachine, 
infrav1.InstanceReadyCondition, "", "") v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: infrav1beta2.InstanceStateUnknownReason, + Reason: infrav1.InstanceStateUnknownReason, }) } @@ -400,25 +402,13 @@ func (r *IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machi return ctrl.Result{RequeueAfter: 2 * time.Minute}, nil } - // We configure load balancer for only control-plane machines - if !util.IsControlPlaneMachine(machineScope.Machine) { - log.Info("Skipping load balancer configuration for worker machine") - conditions.MarkTrue(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition) - v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, - Status: metav1.ConditionTrue, - Reason: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Reason, - }) - return ctrl.Result{}, nil - } - if machineScope.IBMPowerVSCluster.Spec.VPC == nil || machineScope.IBMPowerVSCluster.Spec.VPC.Region == nil { log.Info("Skipping configuring machine to load balancer as VPC is not set") - conditions.MarkTrue(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition) + v1beta1conditions.MarkTrue(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition) v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Reason, + Reason: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Reason, }) return ctrl.Result{}, nil } @@ -428,11 +418,11 @@ func (r *IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machi internalIP := 
machineScope.GetMachineInternalIP() if internalIP == "" { log.Info("Unable to update the load balancer, Machine internal IP not yet set") - conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.IBMPowerVSMachineInstanceWaitingForNetworkAddressV1Beta2Reason, capiv1beta1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, infrav1.IBMPowerVSMachineInstanceWaitingForNetworkAddressV1Beta2Reason, clusterv1beta1.ConditionSeverityWarning, "") v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.IBMPowerVSMachineInstanceWaitingForNetworkAddressV1Beta2Reason, + Reason: infrav1.IBMPowerVSMachineInstanceWaitingForNetworkAddressV1Beta2Reason, Message: "Internal IP not yet set", }) return ctrl.Result{}, nil @@ -440,20 +430,20 @@ func (r *IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machi log.Info("Configuring load balancer for machine", "IP", internalIP) result, err := r.handleLoadBalancerPoolMemberConfiguration(ctx, machineScope) if err != nil { - conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.IBMPowerVSMachineInstanceLoadBalancerConfigurationFailedV1Beta2Reason, capiv1beta1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition, infrav1.IBMPowerVSMachineInstanceLoadBalancerConfigurationFailedV1Beta2Reason, clusterv1beta1.ConditionSeverityWarning, "") v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: 
infrav1beta2.IBMPowerVSMachineInstanceLoadBalancerConfigurationFailedV1Beta2Reason, + Reason: infrav1.IBMPowerVSMachineInstanceLoadBalancerConfigurationFailedV1Beta2Reason, Message: fmt.Sprintf("Failed to configure load balancer: %v", err), }) return result, fmt.Errorf("failed to configure load balancer: %w", err) } - conditions.MarkTrue(machineScope.IBMPowerVSMachine, infrav1beta2.InstanceReadyCondition) + v1beta1conditions.MarkTrue(machineScope.IBMPowerVSMachine, infrav1.InstanceReadyCondition) v1beta2conditions.Set(machineScope.IBMPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Reason, + Reason: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Reason, }) return result, nil } @@ -463,7 +453,7 @@ func (r *IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machi func (r *IBMPowerVSMachineReconciler) ibmPowerVSClusterToIBMPowerVSMachines(ctx context.Context, o client.Object) []ctrl.Request { log := ctrl.LoggerFrom(ctx) result := []ctrl.Request{} - c, ok := o.(*infrav1beta2.IBMPowerVSCluster) + c, ok := o.(*infrav1.IBMPowerVSCluster) if !ok { log.Error(fmt.Errorf("expected a IBMPowerVSCluster but got a %T", o), "failed to get IBMPowerVSMachines for IBMPowerVSCluster") return nil @@ -478,8 +468,8 @@ func (r *IBMPowerVSMachineReconciler) ibmPowerVSClusterToIBMPowerVSMachines(ctx return result } - labels := map[string]string{capiv1beta1.ClusterNameLabel: cluster.Name} - machineList := &capiv1beta1.MachineList{} + labels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name} + machineList := &clusterv1.MachineList{} if err := r.List(ctx, machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil { log.Error(err, "failed to list Machines") return nil @@ -498,30 +488,30 @@ func (r *IBMPowerVSMachineReconciler) 
ibmPowerVSClusterToIBMPowerVSMachines(ctx // SetupWithManager creates a new IBMVPCMachine controller for a manager. func (r *IBMPowerVSMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { predicateLog := ctrl.LoggerFrom(ctx).WithValues("controller", "ibmpowervsmachine") - clusterToIBMPowerVSMachines, err := util.ClusterToTypedObjectsMapper(mgr.GetClient(), &infrav1beta2.IBMPowerVSMachineList{}, mgr.GetScheme()) + clusterToIBMPowerVSMachines, err := util.ClusterToTypedObjectsMapper(mgr.GetClient(), &infrav1.IBMPowerVSMachineList{}, mgr.GetScheme()) if err != nil { return err } err = ctrl.NewControllerManagedBy(mgr). - For(&infrav1beta2.IBMPowerVSMachine{}). + For(&infrav1.IBMPowerVSMachine{}). WithEventFilter(predicates.ResourceHasFilterLabel(r.Scheme, predicateLog, r.WatchFilterValue)). Watches( - &capiv1beta1.Machine{}, - handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1beta2.GroupVersion.WithKind("IBMPowerVSMachine"))), + &clusterv1.Machine{}, + handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("IBMPowerVSMachine"))), builder.WithPredicates(predicates.ResourceIsChanged(r.Scheme, predicateLog)), ). Watches( - &infrav1beta2.IBMPowerVSCluster{}, + &infrav1.IBMPowerVSCluster{}, handler.EnqueueRequestsFromMapFunc(r.ibmPowerVSClusterToIBMPowerVSMachines), builder.WithPredicates(predicates.ResourceIsChanged(r.Scheme, predicateLog)), ). Watches( - &capiv1beta1.Cluster{}, + &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(clusterToIBMPowerVSMachines), builder.WithPredicates(predicates.All(r.Scheme, predicateLog, predicates.ResourceIsChanged(r.Scheme, predicateLog), - predicates.ClusterPausedTransitionsOrInfrastructureReady(r.Scheme, predicateLog), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(r.Scheme, predicateLog), )), ). 
Complete(r) @@ -532,56 +522,56 @@ func (r *IBMPowerVSMachineReconciler) SetupWithManager(ctx context.Context, mgr return nil } -func patchIBMPowerVSMachine(ctx context.Context, patchHelper *patch.Helper, ibmPowerVSMachine *infrav1beta2.IBMPowerVSMachine) error { +func patchIBMPowerVSMachine(ctx context.Context, patchHelper *v1beta1patch.Helper, ibmPowerVSMachine *infrav1.IBMPowerVSMachine) error { // Before computing ready condition, make sure that InstanceReady is always set. // NOTE: This is required because v1beta2 conditions comply to guideline requiring conditions to be set at the // first reconcile. - if c := v1beta2conditions.Get(ibmPowerVSMachine, infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition); c == nil { + if c := v1beta2conditions.Get(ibmPowerVSMachine, infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition); c == nil { if ibmPowerVSMachine.Status.Ready { v1beta2conditions.Set(ibmPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Reason, + Reason: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Reason, }) } else { v1beta2conditions.Set(ibmPowerVSMachine, metav1.Condition{ - Type: infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + Type: infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: infrav1beta2.IBMPowerVSMachineInstanceNotReadyV1Beta2Reason, + Reason: infrav1.IBMPowerVSMachineInstanceNotReadyV1Beta2Reason, }) } } // always update the readyCondition. 
- conditions.SetSummary(ibmPowerVSMachine, - conditions.WithConditions( - infrav1beta2.InstanceReadyCondition, + v1beta1conditions.SetSummary(ibmPowerVSMachine, + v1beta1conditions.WithConditions( + infrav1.InstanceReadyCondition, ), ) - if err := v1beta2conditions.SetSummaryCondition(ibmPowerVSMachine, ibmPowerVSMachine, infrav1beta2.IBMPowerVSMachineReadyV1Beta2Condition, + if err := v1beta2conditions.SetSummaryCondition(ibmPowerVSMachine, ibmPowerVSMachine, infrav1.IBMPowerVSMachineReadyV1Beta2Condition, v1beta2conditions.ForConditionTypes{ - infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge. v1beta2conditions.CustomMergeStrategy{ MergeStrategy: v1beta2conditions.DefaultMergeStrategy( // Use custom reasons. v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( - infrav1beta2.IBMPowerVSMachineNotReadyV1Beta2Reason, - infrav1beta2.IBMPowerVSMachineReadyUnknownV1Beta2Reason, - infrav1beta2.IBMPowerVSMachineReadyV1Beta2Reason, + infrav1.IBMPowerVSMachineNotReadyV1Beta2Reason, + infrav1.IBMPowerVSMachineReadyUnknownV1Beta2Reason, + infrav1.IBMPowerVSMachineReadyV1Beta2Reason, )), ), }, ); err != nil { - return fmt.Errorf("failed to set %s condition: %w", infrav1beta2.IBMPowerVSMachineReadyV1Beta2Condition, err) + return fmt.Errorf("failed to set %s condition: %w", infrav1.IBMPowerVSMachineReadyV1Beta2Condition, err) } // Patch the IBMPowerVSMachine resource. 
- return patchHelper.Patch(ctx, ibmPowerVSMachine, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ - infrav1beta2.IBMPowerVSMachineReadyV1Beta2Condition, - infrav1beta2.IBMPowerVSMachineInstanceReadyV1Beta2Condition, - capiv1beta1.PausedV1Beta2Condition, + return patchHelper.Patch(ctx, ibmPowerVSMachine, v1beta1patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + infrav1.IBMPowerVSMachineReadyV1Beta2Condition, + infrav1.IBMPowerVSMachineInstanceReadyV1Beta2Condition, + clusterv1beta1.PausedV1Beta2Condition, }}) } diff --git a/controllers/ibmpowervsmachine_controller_test.go b/controllers/ibmpowervsmachine_controller_test.go index d13709f4d..047dba229 100644 --- a/controllers/ibmpowervsmachine_controller_test.go +++ b/controllers/ibmpowervsmachine_controller_test.go @@ -24,6 +24,10 @@ import ( "go.uber.org/mock/gomock" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/vpc-go-sdk/vpcv1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -32,34 +36,32 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck + + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs/mock" mockVPC 
"sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc/mock" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/options" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck //nolint:staticcheck - "github.com/IBM-Cloud/power-go-client/power/models" - "github.com/IBM/go-sdk-core/v5/core" - "github.com/IBM/vpc-go-sdk/vpcv1" . "github.com/onsi/gomega" ) func TestIBMPowerVSMachineReconciler_Reconcile(t *testing.T) { testCases := []struct { name string - powervsMachine *infrav1beta2.IBMPowerVSMachine - ownerMachine *capiv1beta1.Machine - powervsCluster *infrav1beta2.IBMPowerVSCluster - ownerCluster *capiv1beta1.Cluster + powervsMachine *infrav1.IBMPowerVSMachine + ownerMachine *clusterv1.Machine + powervsCluster *infrav1.IBMPowerVSCluster + ownerCluster *clusterv1.Cluster expectError bool }{ { @@ -68,129 +70,129 @@ func TestIBMPowerVSMachineReconciler_Reconcile(t *testing.T) { }, { name: "Should Reconcile if Owner Reference is not set", - powervsMachine: &infrav1beta2.IBMPowerVSMachine{ + powervsMachine: &infrav1.IBMPowerVSMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "powervs-test-1"}, - Spec: infrav1beta2.IBMPowerVSMachineSpec{ + Spec: infrav1.IBMPowerVSMachineSpec{ ServiceInstanceID: "service-instance-1", - Image: &infrav1beta2.IBMPowerVSResourceReference{}}}, + Image: &infrav1.IBMPowerVSResourceReference{}}}, expectError: false, }, { name: "Should fail Reconcile if no OwnerMachine found", - powervsMachine: &infrav1beta2.IBMPowerVSMachine{ + powervsMachine: &infrav1.IBMPowerVSMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "powervs-test-2", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Machine", Name: "capi-test-machine", UID: "1", }, }, - Finalizers: []string{infrav1beta2.IBMPowerVSMachineFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSMachineFinalizer}, }, - Spec: infrav1beta2.IBMPowerVSMachineSpec{ + Spec: 
infrav1.IBMPowerVSMachineSpec{ ServiceInstanceID: "service-instance-1", - Image: &infrav1beta2.IBMPowerVSResourceReference{}}, + Image: &infrav1.IBMPowerVSResourceReference{}}, }, expectError: true, }, { name: "Should not Reconcile if machine does not contain cluster label", - powervsMachine: &infrav1beta2.IBMPowerVSMachine{ + powervsMachine: &infrav1.IBMPowerVSMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "powervs-test-3", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Machine", Name: "capi-test-machine", UID: "1", }, }, - }, Spec: infrav1beta2.IBMPowerVSMachineSpec{ + }, Spec: infrav1.IBMPowerVSMachineSpec{ ServiceInstanceID: "service-instance-1", - Image: &infrav1beta2.IBMPowerVSResourceReference{}}, + Image: &infrav1.IBMPowerVSResourceReference{}}, }, - ownerMachine: &capiv1beta1.Machine{ + ownerMachine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-machine"}}, - ownerCluster: &capiv1beta1.Cluster{ + ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}}, expectError: false, }, { name: "Should not Reconcile if IBMPowerVSCluster is not found", - powervsMachine: &infrav1beta2.IBMPowerVSMachine{ + powervsMachine: &infrav1.IBMPowerVSMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "powervs-test-4", - Labels: map[string]string{capiv1beta1.ClusterNameAnnotation: "capi-test-2"}, + Labels: map[string]string{clusterv1.ClusterNameAnnotation: "capi-test-2"}, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Machine", Name: "capi-test-machine", UID: "1", }, { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: "capi-test-2", UID: "1", }, }, - }, Spec: infrav1beta2.IBMPowerVSMachineSpec{ + }, Spec: infrav1.IBMPowerVSMachineSpec{ ServiceInstanceID: 
"service-instance-1", - Image: &infrav1beta2.IBMPowerVSResourceReference{}}, + Image: &infrav1.IBMPowerVSResourceReference{}}, }, - ownerMachine: &capiv1beta1.Machine{ + ownerMachine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-machine"}}, - ownerCluster: &capiv1beta1.Cluster{ + ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-test-2"}, - Spec: capiv1beta1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ Name: "powervs-cluster"}}}, expectError: false, }, { name: "Should not Reconcile if IBMPowerVSImage is not found", - powervsMachine: &infrav1beta2.IBMPowerVSMachine{ + powervsMachine: &infrav1.IBMPowerVSMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "powervs-test-5", - Labels: map[string]string{capiv1beta1.ClusterNameAnnotation: "capi-test-3"}, + Labels: map[string]string{clusterv1.ClusterNameAnnotation: "capi-test-3"}, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Machine", Name: "capi-test-machine", UID: "1", }, { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: "capi-test-3", UID: "1", }, }, - Finalizers: []string{infrav1beta2.IBMPowerVSMachineFinalizer}, - }, Spec: infrav1beta2.IBMPowerVSMachineSpec{ + Finalizers: []string{infrav1.IBMPowerVSMachineFinalizer}, + }, Spec: infrav1.IBMPowerVSMachineSpec{ ServiceInstanceID: "service-instance-1", ImageRef: &corev1.LocalObjectReference{ Name: "capi-image", }}, }, - ownerMachine: &capiv1beta1.Machine{ + ownerMachine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-machine"}}, - ownerCluster: &capiv1beta1.Cluster{ + ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-test-3"}, - Spec: capiv1beta1.ClusterSpec{ - InfrastructureRef: 
&corev1.ObjectReference{Name: "powervs-cluster"}}}, - powervsCluster: &infrav1beta2.IBMPowerVSCluster{ + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{Name: "powervs-cluster"}}}, + powervsCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "powervs-cluster"}, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ + Spec: infrav1.IBMPowerVSClusterSpec{ ServiceInstanceID: "service-instance-1"}}, expectError: false, }, @@ -221,7 +223,7 @@ func TestIBMPowerVSMachineReconciler_Reconcile(t *testing.T) { if tc.powervsMachine != nil { g.Eventually(func() bool { - machine := &infrav1beta2.IBMPowerVSMachine{} + machine := &infrav1.IBMPowerVSMachine{} key := client.ObjectKey{ Name: tc.powervsMachine.Name, Namespace: ns.Name, @@ -287,7 +289,7 @@ func TestIBMPowerVSMachineReconciler_Delete(t *testing.T) { machineScope = &scope.PowerVSMachineScope{ IBMPowerVSClient: mockpowervs, IBMPowerVSMachine: pvsmachine, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, } _, err := reconciler.reconcileDelete(ctx, machineScope) g.Expect(err).To(BeNil()) @@ -299,21 +301,21 @@ func TestIBMPowerVSMachineReconciler_Delete(t *testing.T) { t.Cleanup(teardown) machineScope = &scope.PowerVSMachineScope{ IBMPowerVSClient: mockpowervs, - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{infrav1beta2.IBMPowerVSMachineFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSMachineFinalizer}, }, - Spec: infrav1beta2.IBMPowerVSMachineSpec{}, - Status: infrav1beta2.IBMPowerVSMachineStatus{ + Spec: infrav1.IBMPowerVSMachineSpec{}, + Status: infrav1.IBMPowerVSMachineStatus{ InstanceID: "powervs-instance-id", }, }, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, } 
mockpowervs.EXPECT().DeleteInstance(machineScope.IBMPowerVSMachine.Status.InstanceID).Return(errors.New("could not delete PowerVS instance")) _, err := reconciler.reconcileDelete(ctx, machineScope) g.Expect(err).To(Not(BeNil())) - g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer)) + g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1.IBMPowerVSMachineFinalizer)) }) t.Run("Should successfully delete the PowerVS machine", func(t *testing.T) { g := NewWithT(t) @@ -326,16 +328,16 @@ func TestIBMPowerVSMachineReconciler_Delete(t *testing.T) { machineScope = &scope.PowerVSMachineScope{ Client: mockClient, IBMPowerVSClient: mockpowervs, - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{infrav1beta2.IBMPowerVSMachineFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSMachineFinalizer}, }, - Spec: infrav1beta2.IBMPowerVSMachineSpec{}, - Status: infrav1beta2.IBMPowerVSMachineStatus{ + Spec: infrav1.IBMPowerVSMachineSpec{}, + Status: infrav1.IBMPowerVSMachineStatus{ InstanceID: "powervs-instance-id", }, }, - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{}, + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{}, DHCPIPCacheStore: cache.NewTTLStore(powervs.CacheKeyFunc, powervs.CacheTTL), Machine: machine, } @@ -377,17 +379,17 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { setup(t) t.Cleanup(teardown) machineScope = &scope.PowerVSMachineScope{ - Cluster: &capiv1beta1.Cluster{ - Status: capiv1beta1.ClusterStatus{ - InfrastructureReady: false, + Cluster: &clusterv1.Cluster{ + Status: clusterv1.ClusterStatus{ + Initialization: clusterv1.ClusterInitializationStatus{}, }, }, - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{}, } result, err := reconciler.reconcileNormal(ctx, machineScope) 
g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(Not(BeZero())) - expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityInfo, infrav1beta2.WaitingForClusterInfrastructureReason}}) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}}) }) t.Run("Should requeue if IBMPowerVSImage status is not ready", func(t *testing.T) { @@ -395,14 +397,16 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { setup(t) t.Cleanup(teardown) machineScope = &scope.PowerVSMachineScope{ - Cluster: &capiv1beta1.Cluster{ - Status: capiv1beta1.ClusterStatus{ - InfrastructureReady: true, + Cluster: &clusterv1.Cluster{ + Status: clusterv1.ClusterStatus{ + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{}, - IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{ - Status: infrav1beta2.IBMPowerVSImageStatus{ + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{}, + IBMPowerVSImage: &infrav1.IBMPowerVSImage{ + Status: infrav1.IBMPowerVSImageStatus{ Ready: false, }, }, @@ -410,7 +414,7 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { result, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(Not(BeZero())) - expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityInfo, infrav1beta2.WaitingForIBMPowerVSImageReason}}) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, 
infrav1.WaitingForIBMPowerVSImageReason}}) }) t.Run("Should requeue if boostrap data secret reference is not found", func(t *testing.T) { @@ -418,15 +422,17 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { setup(t) t.Cleanup(teardown) machineScope = &scope.PowerVSMachineScope{ - Cluster: &capiv1beta1.Cluster{ - Status: capiv1beta1.ClusterStatus{ - InfrastructureReady: true, + Cluster: &clusterv1.Cluster{ + Status: clusterv1.ClusterStatus{ + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, - Machine: &capiv1beta1.Machine{}, - IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{}, - IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{ - Status: infrav1beta2.IBMPowerVSImageStatus{ + Machine: &clusterv1.Machine{}, + IBMPowerVSMachine: &infrav1.IBMPowerVSMachine{}, + IBMPowerVSImage: &infrav1.IBMPowerVSImage{ + Status: infrav1.IBMPowerVSImageStatus{ Ready: true, }, }, @@ -434,7 +440,7 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { result, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(BeZero()) - expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityInfo, capiv1beta1.WaitingForControlPlaneAvailableReason}}) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.WaitingForControlPlaneAvailableReason}}) }) t.Run("Should fail reconcile with create instance failure due to error in retrieving bootstrap data secret", func(t *testing.T) { @@ -447,15 +453,17 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { mockClient := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects().Build() machineScope = &scope.PowerVSMachineScope{ Client: 
mockClient, - Cluster: &capiv1beta1.Cluster{ - Status: capiv1beta1.ClusterStatus{ - InfrastructureReady: true, + Cluster: &clusterv1.Cluster{ + Status: clusterv1.ClusterStatus{ + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, Machine: machine, IBMPowerVSMachine: pvsMachine, - IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{ - Status: infrav1beta2.IBMPowerVSImageStatus{ + IBMPowerVSImage: &infrav1.IBMPowerVSImage{ + Status: infrav1.IBMPowerVSImageStatus{ Ready: true, }, }, @@ -466,8 +474,8 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { result, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(HaveOccurred()) g.Expect(result.RequeueAfter).To(BeZero()) - g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer)) - expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityError, infrav1beta2.InstanceProvisionFailedReason}}) + g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1.IBMPowerVSMachineFinalizer)) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.InstanceProvisionFailedReason}}) }) t.Run("Should fail reconcile if creation of the load balancer pool member is unsuccessful", func(t *testing.T) { @@ -486,42 +494,44 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { machineScope = &scope.PowerVSMachineScope{ Client: mockclient, - Cluster: &capiv1beta1.Cluster{ - Status: capiv1beta1.ClusterStatus{ - InfrastructureReady: true, + Cluster: &clusterv1.Cluster{ + Status: clusterv1.ClusterStatus{ + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, Machine: machine, 
IBMPowerVSMachine: pvsmachine, - IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{ - Status: infrav1beta2.IBMPowerVSImageStatus{ + IBMPowerVSImage: &infrav1.IBMPowerVSImage{ + Status: infrav1.IBMPowerVSImageStatus{ Ready: true, }, }, IBMVPCClient: mockvpc, IBMPowerVSClient: mockpowervs, DHCPIPCacheStore: cache.NewTTLStore(powervs.CacheKeyFunc, powervs.CacheTTL), - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ "powervs.cluster.x-k8s.io/create-infra": "true", }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.VPCResourceReference{ + VPC: &infrav1.VPCResourceReference{ Region: ptr.To("us-south"), }, - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "capi-test-lb", }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "capi-test-lb": { ID: ptr.To("capi-test-lb-id"), }, @@ -560,10 +570,11 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) result, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).ToNot(BeNil()) + //nolint:staticcheck g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(BeZero()) - g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer)) - expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, 
corev1.ConditionFalse, capiv1beta1.ConditionSeverityWarning, infrav1beta2.IBMPowerVSMachineInstanceLoadBalancerConfigurationFailedV1Beta2Reason}}) + g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1.IBMPowerVSMachineFinalizer)) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.IBMPowerVSMachineInstanceLoadBalancerConfigurationFailedV1Beta2Reason}}) }) t.Run("Should requeue if the load balancer pool member is created successfully, but its provisioning status is not active", func(t *testing.T) { @@ -581,42 +592,44 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { mockclient := fake.NewClientBuilder().WithObjects([]client.Object{secret, pvsmachine, machine}...).Build() machineScope = &scope.PowerVSMachineScope{ Client: mockclient, - Cluster: &capiv1beta1.Cluster{ - Status: capiv1beta1.ClusterStatus{ - InfrastructureReady: true, + Cluster: &clusterv1.Cluster{ + Status: clusterv1.ClusterStatus{ + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, Machine: machine, IBMPowerVSMachine: pvsmachine, - IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{ - Status: infrav1beta2.IBMPowerVSImageStatus{ + IBMPowerVSImage: &infrav1.IBMPowerVSImage{ + Status: infrav1.IBMPowerVSImageStatus{ Ready: true, }, }, IBMVPCClient: mockvpc, IBMPowerVSClient: mockpowervs, DHCPIPCacheStore: cache.NewTTLStore(powervs.CacheKeyFunc, powervs.CacheTTL), - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ "powervs.cluster.x-k8s.io/create-infra": "true", }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: 
&infrav1.IBMPowerVSResourceReference{ ID: ptr.To("serviceInstanceID"), }, - VPC: &infrav1beta2.VPCResourceReference{ + VPC: &infrav1.VPCResourceReference{ Region: ptr.To("us-south"), }, - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "capi-test-lb", }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "capi-test-lb": { ID: ptr.To("capi-test-lb-id"), }, @@ -679,8 +692,8 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(Not(BeZero())) g.Expect(machineScope.IBMPowerVSMachine.Status.Ready).To(Equal(true)) - g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer)) - expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionTrue, "", ""}}) + g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1.IBMPowerVSMachineFinalizer)) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionTrue, "", ""}}) }) t.Run("Should reconcile IBMPowerVSMachine instance creation in different states", func(t *testing.T) { @@ -695,23 +708,25 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { mockclient := fake.NewClientBuilder().WithObjects([]client.Object{secret, pvsmachine, machine}...).Build() machineScope = &scope.PowerVSMachineScope{ Client: mockclient, - Cluster: &capiv1beta1.Cluster{ - Status: capiv1beta1.ClusterStatus{ - InfrastructureReady: true, + Cluster: &clusterv1.Cluster{ + Status: clusterv1.ClusterStatus{ + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, Machine: machine, 
IBMPowerVSMachine: pvsmachine, - IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{ - Status: infrav1beta2.IBMPowerVSImageStatus{ + IBMPowerVSImage: &infrav1.IBMPowerVSImage{ + Status: infrav1.IBMPowerVSImageStatus{ Ready: true, }, }, IBMPowerVSClient: mockpowervs, DHCPIPCacheStore: cache.NewTTLStore(powervs.CacheKeyFunc, powervs.CacheTTL), - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("serviceInstanceID"), }, }, @@ -738,8 +753,8 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(Not(BeZero())) g.Expect(machineScope.IBMPowerVSMachine.Status.Ready).To(Equal(false)) - g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer)) - expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityWarning, infrav1beta2.InstanceNotReadyReason}}) + g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1.IBMPowerVSMachineFinalizer)) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) t.Run("When PVM instance is in SHUTOFF state", func(_ *testing.T) { instance.Status = ptr.To("SHUTOFF") @@ -749,8 +764,8 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(BeZero()) g.Expect(machineScope.IBMPowerVSMachine.Status.Ready).To(Equal(false)) - 
g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer)) - expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityError, infrav1beta2.InstanceStoppedReason}}) + g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1.IBMPowerVSMachineFinalizer)) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.InstanceStoppedReason}}) }) t.Run("When PVM instance is in ACTIVE state", func(_ *testing.T) { instance.Status = ptr.To("ACTIVE") @@ -760,8 +775,8 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(BeZero()) g.Expect(machineScope.IBMPowerVSMachine.Status.Ready).To(Equal(true)) - g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer)) - expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{conditionType: infrav1beta2.InstanceReadyCondition, status: corev1.ConditionTrue}}) + g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1.IBMPowerVSMachineFinalizer)) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionTrue}}) }) t.Run("When PVM instance is in ERROR state", func(_ *testing.T) { instance.Status = ptr.To("ERROR") @@ -772,8 +787,8 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(BeZero()) g.Expect(machineScope.IBMPowerVSMachine.Status.Ready).To(Equal(false)) - g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer)) - expectConditions(g, 
machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityError, infrav1beta2.InstanceErroredReason}}) + g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1.IBMPowerVSMachineFinalizer)) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.InstanceErroredReason}}) }) t.Run("When PVM instance is in unknown state", func(_ *testing.T) { instance.Status = ptr.To("UNKNOWN") @@ -783,8 +798,8 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(Not(BeZero())) g.Expect(machineScope.IBMPowerVSMachine.Status.Ready).To(Equal(false)) - g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer)) - expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{conditionType: infrav1beta2.InstanceReadyCondition, status: corev1.ConditionUnknown}}) + g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1.IBMPowerVSMachineFinalizer)) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionUnknown}}) }) }) }) @@ -805,38 +820,40 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { machineScope = &scope.PowerVSMachineScope{ Client: mockclient, - Cluster: &capiv1beta1.Cluster{ - Status: capiv1beta1.ClusterStatus{ - InfrastructureReady: true, + Cluster: &clusterv1.Cluster{ + Status: clusterv1.ClusterStatus{ + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, Machine: machine, IBMPowerVSMachine: pvsmachine, - IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{ - Status: infrav1beta2.IBMPowerVSImageStatus{ + IBMPowerVSImage: 
&infrav1.IBMPowerVSImage{ + Status: infrav1.IBMPowerVSImageStatus{ Ready: true, }, }, IBMPowerVSClient: mockpowervs, DHCPIPCacheStore: cache.NewTTLStore(powervs.CacheKeyFunc, powervs.CacheTTL), - IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{ + IBMPowerVSCluster: &infrav1.IBMPowerVSCluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ "powervs.cluster.x-k8s.io/create-infra": "true", }, }, - Spec: infrav1beta2.IBMPowerVSClusterSpec{ - ServiceInstance: &infrav1beta2.IBMPowerVSResourceReference{ + Spec: infrav1.IBMPowerVSClusterSpec{ + ServiceInstance: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("serviceInstanceID"), }, - LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{ + LoadBalancers: []infrav1.VPCLoadBalancerSpec{ { Name: "capi-test-lb", }, }, }, - Status: infrav1beta2.IBMPowerVSClusterStatus{ - LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{ + Status: infrav1.IBMPowerVSClusterStatus{ + LoadBalancers: map[string]infrav1.VPCLoadBalancerStatus{ "capi-test-lb": { ID: ptr.To("capi-test-lb-id"), }, @@ -868,25 +885,26 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) { mockpowervs.EXPECT().GetInstance(gomock.AssignableToTypeOf("capi-test-machine-id")).Return(instance, nil) result, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) + //nolint:staticcheck g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(BeZero()) g.Expect(machineScope.IBMPowerVSMachine.Status.Ready).To(Equal(true)) - g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer)) - expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionTrue, "", ""}}) + g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1.IBMPowerVSMachineFinalizer)) + expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, 
corev1.ConditionTrue, "", ""}}) }) } type conditionAssertion struct { - conditionType capiv1beta1.ConditionType + conditionType clusterv1beta1.ConditionType status corev1.ConditionStatus - severity capiv1beta1.ConditionSeverity + severity clusterv1beta1.ConditionSeverity reason string } -func expectConditions(g *WithT, m *infrav1beta2.IBMPowerVSMachine, expected []conditionAssertion) { +func expectConditions(g *WithT, m *infrav1.IBMPowerVSMachine, expected []conditionAssertion) { g.Expect(len(m.Status.Conditions)).To(BeNumerically(">=", len(expected))) for _, c := range expected { - actual := conditions.Get(m, c.conditionType) + actual := v1beta1conditions.Get(m, c.conditionType) g.Expect(actual).To(Not(BeNil())) g.Expect(actual.Type).To(Equal(c.conditionType)) g.Expect(actual.Status).To(Equal(c.status)) @@ -912,7 +930,7 @@ func newSecret() *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - capiv1beta1.ClusterNameLabel: "powervs-cluster", + clusterv1.ClusterNameLabel: "powervs-cluster", }, Name: "bootsecret", Namespace: "default", @@ -923,19 +941,19 @@ func newSecret() *corev1.Secret { } } -func newIBMPowerVSMachine() *infrav1beta2.IBMPowerVSMachine { - return &infrav1beta2.IBMPowerVSMachine{ +func newIBMPowerVSMachine() *infrav1.IBMPowerVSMachine { + return &infrav1.IBMPowerVSMachine{ ObjectMeta: metav1.ObjectMeta{ Name: *ptr.To("capi-test-machine"), - Finalizers: []string{infrav1beta2.IBMPowerVSMachineFinalizer}, + Finalizers: []string{infrav1.IBMPowerVSMachineFinalizer}, }, - Spec: infrav1beta2.IBMPowerVSMachineSpec{ + Spec: infrav1.IBMPowerVSMachineSpec{ MemoryGiB: 8, Processors: intstr.FromString("0.5"), - Image: &infrav1beta2.IBMPowerVSResourceReference{ + Image: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("capi-image-id"), }, - Network: infrav1beta2.IBMPowerVSResourceReference{ + Network: infrav1.IBMPowerVSResourceReference{ ID: ptr.To("capi-net-id"), }, ServiceInstanceID: *ptr.To("service-instance-1"), 
@@ -943,14 +961,14 @@ func newIBMPowerVSMachine() *infrav1beta2.IBMPowerVSMachine { } } -func newMachine() *capiv1beta1.Machine { - return &capiv1beta1.Machine{ +func newMachine() *clusterv1.Machine { + return &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "owner-machine", Namespace: "default", }, - Spec: capiv1beta1.MachineSpec{ - Bootstrap: capiv1beta1.Bootstrap{ + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To("bootsecret"), }, }, diff --git a/controllers/ibmpowervsmachinetemplate_controller.go b/controllers/ibmpowervsmachinetemplate_controller.go index f1d95838a..1df2ef7a4 100644 --- a/controllers/ibmpowervsmachinetemplate_controller.go +++ b/controllers/ibmpowervsmachinetemplate_controller.go @@ -32,9 +32,9 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" ) // defaultSMT is the default value of simultaneous multithreading. @@ -48,7 +48,7 @@ type IBMPowerVSMachineTemplateReconciler struct { func (r *IBMPowerVSMachineTemplateReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&infrav1beta2.IBMPowerVSMachineTemplate{}). + For(&infrav1.IBMPowerVSMachineTemplate{}). 
Complete(r) } @@ -59,13 +59,13 @@ func (r *IBMPowerVSMachineTemplateReconciler) Reconcile(ctx context.Context, req log := ctrl.LoggerFrom(ctx) log.Info("Reconciling IBMPowerVSMachineTemplate") - var machineTemplate infrav1beta2.IBMPowerVSMachineTemplate + var machineTemplate infrav1.IBMPowerVSMachineTemplate if err := r.Get(ctx, req.NamespacedName, &machineTemplate); err != nil { log.Error(err, "Unable to fetch ibmpowervsmachinetemplate") return ctrl.Result{}, client.IgnoreNotFound(err) } - helper, err := patch.NewHelper(&machineTemplate, r.Client) + helper, err := v1beta1patch.NewHelper(&machineTemplate, r.Client) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to init patch helper: %w", err) } @@ -89,7 +89,7 @@ func (r *IBMPowerVSMachineTemplateReconciler) Reconcile(ctx context.Context, req return ctrl.Result{}, nil } -func getIBMPowerVSMachineCapacity(machineTemplate infrav1beta2.IBMPowerVSMachineTemplate) (corev1.ResourceList, error) { +func getIBMPowerVSMachineCapacity(machineTemplate infrav1.IBMPowerVSMachineTemplate) (corev1.ResourceList, error) { capacity := make(corev1.ResourceList) memory := strconv.FormatInt(int64(machineTemplate.Spec.Template.Spec.MemoryGiB), 10) capacity[corev1.ResourceMemory] = resource.MustParse(fmt.Sprintf("%sG", memory)) diff --git a/controllers/ibmpowervsmachinetemplate_controller_test.go b/controllers/ibmpowervsmachinetemplate_controller_test.go index e52cbf4ff..0648b8434 100644 --- a/controllers/ibmpowervsmachinetemplate_controller_test.go +++ b/controllers/ibmpowervsmachinetemplate_controller_test.go @@ -31,7 +31,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" . 
"github.com/onsi/gomega" ) @@ -40,7 +40,7 @@ func TestIBMPowerVSMachineTemplateReconciler_Reconcile(t *testing.T) { testCases := []struct { name string expectError bool - powerVSMachineTemplate *infrav1beta2.IBMPowerVSMachineTemplate + powerVSMachineTemplate *infrav1.IBMPowerVSMachineTemplate expectedCapacity corev1.ResourceList }{ { @@ -83,7 +83,7 @@ func TestIBMPowerVSMachineTemplateReconciler_Reconcile(t *testing.T) { if tc.powerVSMachineTemplate != nil { g.Eventually(func() bool { - machineTemplate := &infrav1beta2.IBMPowerVSMachineTemplate{} + machineTemplate := &infrav1.IBMPowerVSMachineTemplate{} key := client.ObjectKey{ Name: tc.powerVSMachineTemplate.Name, Namespace: ns.Name, @@ -103,7 +103,7 @@ func TestIBMPowerVSMachineTemplateReconciler_Reconcile(t *testing.T) { } else { g.Expect(err).To(BeNil()) g.Eventually(func() bool { - machineTemplate := &infrav1beta2.IBMPowerVSMachineTemplate{} + machineTemplate := &infrav1.IBMPowerVSMachineTemplate{} key := client.ObjectKey{ Name: tc.powerVSMachineTemplate.Name, Namespace: ns.Name, @@ -129,7 +129,7 @@ func TestIBMPowerVSMachineTemplateReconciler_Reconcile(t *testing.T) { func TestGetIBMPowerVSMachineCapacity(t *testing.T) { testCases := []struct { name string - powerVSMachineTemplate infrav1beta2.IBMPowerVSMachineTemplate + powerVSMachineTemplate infrav1.IBMPowerVSMachineTemplate expectedCapacity corev1.ResourceList expectErr bool }{ @@ -182,16 +182,16 @@ func TestGetIBMPowerVSMachineCapacity(t *testing.T) { } } -func stubPowerVSMachineTemplate(processor intstr.IntOrString, memory int32) *infrav1beta2.IBMPowerVSMachineTemplate { - return &infrav1beta2.IBMPowerVSMachineTemplate{ +func stubPowerVSMachineTemplate(processor intstr.IntOrString, memory int32) *infrav1.IBMPowerVSMachineTemplate { + return &infrav1.IBMPowerVSMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: "powervs-test-1", }, - Spec: infrav1beta2.IBMPowerVSMachineTemplateSpec{ - Template: infrav1beta2.IBMPowerVSMachineTemplateResource{ - Spec: 
infrav1beta2.IBMPowerVSMachineSpec{ + Spec: infrav1.IBMPowerVSMachineTemplateSpec{ + Template: infrav1.IBMPowerVSMachineTemplateResource{ + Spec: infrav1.IBMPowerVSMachineSpec{ ServiceInstanceID: "test_service_instance_id_27", - Image: &infrav1beta2.IBMPowerVSResourceReference{ + Image: &infrav1.IBMPowerVSResourceReference{ ID: ptr.To("capi-image"), }, Processors: processor, diff --git a/controllers/ibmvpccluster_controller.go b/controllers/ibmvpccluster_controller.go index 3ab9476a1..472b38f56 100644 --- a/controllers/ibmvpccluster_controller.go +++ b/controllers/ibmvpccluster_controller.go @@ -26,20 +26,27 @@ import ( "github.com/IBM/vpc-go-sdk/vpcv1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck + v1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" //nolint:staticcheck + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/paused" + "sigs.k8s.io/cluster-api/util/finalizers" "sigs.k8s.io/cluster-api/util/predicates" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/endpoints" ) @@ 
-59,25 +66,29 @@ type IBMVPCClusterReconciler struct { // Reconcile implements controller runtime Reconciler interface and handles reconcileation logic for IBMVPCCluster. func (r *IBMVPCClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { - log := r.Log.WithValues("ibmvpccluster", req.NamespacedName) + log := ctrl.LoggerFrom(ctx) + + log.Info("Reconciling IBMVPCCluster") + defer log.Info("Finished reconciling IBMVPCCluster") // Fetch the IBMVPCCluster instance. - ibmCluster := &infrav1beta2.IBMVPCCluster{} - err := r.Get(ctx, req.NamespacedName, ibmCluster) + ibmVPCCluster := &infrav1.IBMVPCCluster{} + err := r.Get(ctx, req.NamespacedName, ibmVPCCluster) if err != nil { if apierrors.IsNotFound(err) { + log.Info("IBMVPCCluster not found") return ctrl.Result{}, nil } return ctrl.Result{}, err } // Determine whether the Cluster is designed for extended Infrastructure support, implemented in a separate path. - if ibmCluster.Spec.Network != nil { + if ibmVPCCluster.Spec.Network != nil { return r.reconcileV2(ctx, req) } // Fetch the Cluster. - cluster, err := util.GetOwnerCluster(ctx, r.Client, ibmCluster.ObjectMeta) + cluster, err := util.GetOwnerCluster(ctx, r.Client, ibmVPCCluster.ObjectMeta) if err != nil { return ctrl.Result{}, err } @@ -86,40 +97,55 @@ func (r *IBMVPCClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } + // Add finalizer first if not set to avoid the race condition between init and delete. 
+ if finalizerAdded, err := finalizers.EnsureFinalizer(ctx, r.Client, ibmVPCCluster, infrav1.ClusterFinalizer); err != nil || finalizerAdded { + return ctrl.Result{}, err + } + + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) + + if isPaused, requeue, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, ibmVPCCluster); err != nil || isPaused || requeue { + return ctrl.Result{}, err + } + clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ Client: r.Client, - Logger: log, Cluster: cluster, - IBMVPCCluster: ibmCluster, + IBMVPCCluster: ibmVPCCluster, ServiceEndpoint: r.ServiceEndpoint, }) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create scope: %w", err) + } - // Always close the scope when exiting this function so we can persist any IBMVPCCluster changes. + // Initialize the patch helper. + patchHelper, err := v1beta1patch.NewHelper(ibmVPCCluster, r.Client) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to initialize patch helper: %w", err) + } + + // Always attempt to Patch the IBMVPCCluster object and status after each reconciliation. defer func() { - if clusterScope != nil { - if err := clusterScope.Close(); err != nil && reterr == nil { - reterr = err - } + if err := patchIBMVPCCluster(ctx, patchHelper, ibmVPCCluster); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() // Handle deleted clusters. 
- if !ibmCluster.DeletionTimestamp.IsZero() { - return r.reconcileDelete(clusterScope) + if !ibmVPCCluster.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, clusterScope) } - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to create scope: %w", err) - } - return r.reconcile(clusterScope) + return r.reconcile(ctx, clusterScope) } func (r *IBMVPCClusterReconciler) reconcileV2(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { - log := r.Log.WithValues("ibmvpccluster", req.NamespacedName) + log := ctrl.LoggerFrom(ctx).WithValues("controller", "IBMVPCCluster") // Fetch the IBMVPCCluster instance. - ibmCluster := &infrav1beta2.IBMVPCCluster{} - err := r.Get(ctx, req.NamespacedName, ibmCluster) + ibmVPCCluster := &infrav1.IBMVPCCluster{} + err := r.Get(ctx, req.NamespacedName, ibmVPCCluster) if err != nil { if apierrors.IsNotFound(err) { return ctrl.Result{}, nil @@ -127,8 +153,13 @@ func (r *IBMVPCClusterReconciler) reconcileV2(ctx context.Context, req ctrl.Requ return ctrl.Result{}, err } + // Add finalizer first if not set to avoid the race condition between init and delete. + if finalizerAdded, err := finalizers.EnsureFinalizer(ctx, r.Client, ibmVPCCluster, infrav1.ClusterFinalizer); err != nil || finalizerAdded { + return ctrl.Result{}, err + } + // Fetch the Cluster. 
- cluster, err := util.GetOwnerCluster(ctx, r.Client, ibmCluster.ObjectMeta) + cluster, err := util.GetOwnerCluster(ctx, r.Client, ibmVPCCluster.ObjectMeta) if err != nil { return ctrl.Result{}, err } @@ -137,37 +168,46 @@ func (r *IBMVPCClusterReconciler) reconcileV2(ctx context.Context, req ctrl.Requ return ctrl.Result{}, nil } + if isPaused, requeue, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, ibmVPCCluster); err != nil || isPaused || requeue { + return ctrl.Result{}, err + } + clusterScope, err := scope.NewVPCClusterScope(scope.VPCClusterScopeParams{ Client: r.Client, Logger: log, Cluster: cluster, - IBMVPCCluster: ibmCluster, + IBMVPCCluster: ibmVPCCluster, ServiceEndpoint: r.ServiceEndpoint, }) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create scope: %w", err) + } - // Always close the scope when exiting this function so we can persist any IBMVPCCluster changes. + // Initialize the patch helper. + patchHelper, err := v1beta1patch.NewHelper(ibmVPCCluster, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + // Always attempt to Patch the IBMVPCCluster object and status after each reconciliation. defer func() { - if clusterScope != nil { - if err := clusterScope.Close(); err != nil && reterr == nil { - reterr = err - } + if err := patchIBMVPCCluster(ctx, patchHelper, ibmVPCCluster); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() // Handle deleted clusters. 
- if !ibmCluster.DeletionTimestamp.IsZero() { + if !ibmVPCCluster.DeletionTimestamp.IsZero() { return r.reconcileDeleteV2(clusterScope) } - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to create scope: %w", err) - } - return r.reconcileCluster(clusterScope) + return r.reconcileCluster(ctx, clusterScope) } -func (r *IBMVPCClusterReconciler) reconcile(clusterScope *scope.ClusterScope) (ctrl.Result, error) { +func (r *IBMVPCClusterReconciler) reconcile(ctx context.Context, clusterScope *scope.ClusterScope) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx).WithValues("controller", "IBMVPCCluster") // If the IBMVPCCluster doesn't have our finalizer, add it. - if controllerutil.AddFinalizer(clusterScope.IBMVPCCluster, infrav1beta2.ClusterFinalizer) { + if controllerutil.AddFinalizer(clusterScope.IBMVPCCluster, infrav1.ClusterFinalizer) { return ctrl.Result{}, nil } @@ -180,36 +220,67 @@ func (r *IBMVPCClusterReconciler) reconcile(clusterScope *scope.ClusterScope) (c if loadBalancerEndpoint == nil { return ctrl.Result{}, fmt.Errorf("no loadBalancer found with hostname - %s", clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Host) } - r.reconcileLBState(clusterScope, loadBalancerEndpoint) + r.reconcileLBState(ctx, clusterScope, loadBalancerEndpoint) } + log.Info("Reconciling VPC") vpc, err := clusterScope.CreateVPC() if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to reconcile VPC for IBMVPCCluster %s/%s: %w", clusterScope.IBMVPCCluster.Namespace, clusterScope.IBMVPCCluster.Name, err) + v1beta1conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1.VPCReadyCondition, infrav1.VPCReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.VPCNotReadyV1Beta2Reason, + Message: err.Error(), + }) + return ctrl.Result{}, fmt.Errorf("failed to reconcile VPC 
for IBMVPCCluster %s: %w", klog.KObj(clusterScope.IBMVPCCluster), err) } if vpc != nil { - clusterScope.IBMVPCCluster.Status.VPC = infrav1beta2.VPC{ + clusterScope.IBMVPCCluster.Status.VPC = infrav1.VPC{ ID: *vpc.ID, Name: *vpc.Name, } + v1beta1conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1.VPCReadyCondition) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.VPCReadyV1Beta2Reason, + }) + log.Info("Reconciliation of VPC complete") } if clusterScope.IBMVPCCluster.Status.Subnet.ID == nil { + log.Info("Reconciling VPC Subnets") subnet, err := clusterScope.CreateSubnet() if err != nil { + v1beta1conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1.VPCSubnetReadyCondition, infrav1.VPCSubnetReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCSubnetReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.VPCSubnetNotReadyV1Beta2Reason, + Message: err.Error(), + }) return ctrl.Result{}, fmt.Errorf("failed to reconcile Subnet for IBMVPCCluster %s/%s: %w", clusterScope.IBMVPCCluster.Namespace, clusterScope.IBMVPCCluster.Name, err) } if subnet != nil { - clusterScope.IBMVPCCluster.Status.Subnet = infrav1beta2.Subnet{ + clusterScope.IBMVPCCluster.Status.Subnet = infrav1.Subnet{ Ipv4CidrBlock: subnet.Ipv4CIDRBlock, Name: subnet.Name, ID: subnet.ID, Zone: subnet.Zone.Name, } + v1beta1conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1.VPCSubnetReadyCondition) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCSubnetReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.VPCSubnetReadyV1Beta2Reason, + }) + log.Info("Reconciliation of VPC Subnets complete") } } if clusterScope.IBMVPCCluster.Spec.ControlPlaneLoadBalancer != nil && 
clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Host == "" { + log.Info("Reconciling Load Balancers") loadBalancer, err := r.getOrCreate(clusterScope) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile Control Plane LoadBalancer for IBMVPCCluster %s/%s: %w", clusterScope.IBMVPCCluster.Namespace, clusterScope.IBMVPCCluster.Name, err) @@ -217,95 +288,152 @@ func (r *IBMVPCClusterReconciler) reconcile(clusterScope *scope.ClusterScope) (c if loadBalancer != nil { clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Host = *loadBalancer.Hostname - r.reconcileLBState(clusterScope, loadBalancer) + r.reconcileLBState(ctx, clusterScope, loadBalancer) + log.Info("Reconciliation of Load Balancers complete") } } // Requeue after 1 minute if cluster is not ready to update status of the cluster properly. if !clusterScope.IsReady() { - clusterScope.Info("Cluster is not yet ready") + log.Info("Cluster is not yet ready") return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil } return ctrl.Result{}, nil } -func (r *IBMVPCClusterReconciler) reconcileCluster(clusterScope *scope.VPCClusterScope) (ctrl.Result, error) { +func (r *IBMVPCClusterReconciler) reconcileCluster(ctx context.Context, clusterScope *scope.VPCClusterScope) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) // If the IBMVPCCluster doesn't have our finalizer, add it. - if controllerutil.AddFinalizer(clusterScope.IBMVPCCluster, infrav1beta2.ClusterFinalizer) { + if controllerutil.AddFinalizer(clusterScope.IBMVPCCluster, infrav1.ClusterFinalizer) { return ctrl.Result{}, nil } // Reconcile the cluster's VPC. 
- clusterScope.Info("Reconciling VPC") - if requeue, err := clusterScope.ReconcileVPC(); err != nil { - clusterScope.Error(err, "failed to reconcile VPC") - conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1beta2.VPCReadyCondition, infrav1beta2.VPCReconciliationFailedReason, capiv1beta1.ConditionSeverityError, "%s", err.Error()) + log.Info("Reconciling VPC") + if requeue, err := clusterScope.ReconcileVPC(ctx); err != nil { + log.Error(err, "failed to reconcile VPC") + v1beta1conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1.VPCReadyCondition, infrav1.VPCReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.VPCNotReadyV1Beta2Reason, + Message: err.Error(), + }) return reconcile.Result{}, err } else if requeue { - clusterScope.Info("VPC creation is pending, requeuing") + log.Info("VPC creation is pending, requeuing") return reconcile.Result{RequeueAfter: 15 * time.Second}, nil } - clusterScope.Info("Reconciliation of VPC complete") - conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1beta2.VPCReadyCondition) + log.Info("Reconciliation of VPC complete") + v1beta1conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1.VPCReadyCondition) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.VPCReadyV1Beta2Reason, + }) // Reconcile the cluster's VPC Custom Image. 
- clusterScope.Info("Reconciling VPC Custom Image") - if requeue, err := clusterScope.ReconcileVPCCustomImage(); err != nil { - clusterScope.Error(err, "failed to reconcile VPC Custom Image") - conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1beta2.ImageReadyCondition, infrav1beta2.ImageReconciliationFailedReason, capiv1beta1.ConditionSeverityError, "%s", err.Error()) + log.Info("Reconciling VPC Custom Image") + if requeue, err := clusterScope.ReconcileVPCCustomImage(ctx); err != nil { + log.Error(err, "failed to reconcile VPC Custom Image") + v1beta1conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1.ImageReadyCondition, infrav1.ImageReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCImageReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.VPCImageNotReadyV1Beta2Reason, + Message: err.Error(), + }) return reconcile.Result{}, err } else if requeue { - clusterScope.Info("VPC Custom Image creation is pending, requeueing") + log.Info("VPC Custom Image creation is pending, requeueing") return reconcile.Result{RequeueAfter: 15 * time.Second}, nil } - clusterScope.Info("Reconciliation of VPC Custom Image complete") - conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1beta2.ImageReadyCondition) + log.Info("Reconciliation of VPC Custom Image complete") + v1beta1conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1.ImageReadyCondition) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCImageReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.VPCImageReadyV1Beta2Reason, + }) // Reconcile the cluster's VPC Subnets. 
- clusterScope.Info("Reconciling VPC Subnets") - if requeue, err := clusterScope.ReconcileSubnets(); err != nil { - clusterScope.Error(err, "failed to reconcile VPC Subnets") - conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1beta2.VPCSubnetReadyCondition, infrav1beta2.VPCSubnetReconciliationFailedReason, capiv1beta1.ConditionSeverityError, "%s", err.Error()) + log.Info("Reconciling VPC Subnets") + if requeue, err := clusterScope.ReconcileSubnets(ctx); err != nil { + log.Error(err, "failed to reconcile VPC Subnets") + v1beta1conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1.VPCSubnetReadyCondition, infrav1.VPCSubnetReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCSubnetReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.VPCSubnetNotReadyV1Beta2Reason, + Message: err.Error(), + }) return reconcile.Result{}, err } else if requeue { - clusterScope.Info("VPC Subnets creation is pending, requeueing") + log.Info("VPC Subnets creation is pending, requeueing") return reconcile.Result{RequeueAfter: 15 * time.Second}, nil } - clusterScope.Info("Reconciliation of VPC Subnets complete") - conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1beta2.VPCSubnetReadyCondition) + log.Info("Reconciliation of VPC Subnets complete") + v1beta1conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1.VPCSubnetReadyCondition) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCSubnetReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.VPCSubnetReadyV1Beta2Reason, + }) // Reconcile the cluster's Security Groups (and Security Group Rules) - clusterScope.Info("Reconciling Security Groups") - if requeue, err := clusterScope.ReconcileSecurityGroups(); err != nil { - clusterScope.Error(err, "failed to reconcile Security Groups") - conditions.MarkFalse(clusterScope.IBMVPCCluster, 
infrav1beta2.VPCSecurityGroupReadyCondition, infrav1beta2.VPCSecurityGroupReconciliationFailedReason, capiv1beta1.ConditionSeverityError, "%s", err.Error()) + log.Info("Reconciling Security Groups") + if requeue, err := clusterScope.ReconcileSecurityGroups(ctx); err != nil { + log.Error(err, "failed to reconcile Security Groups") + v1beta1conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1.VPCSecurityGroupReadyCondition, infrav1.VPCSecurityGroupReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCSecurityGroupReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.VPCSecurityGroupNotReadyV1Beta2Reason, + Message: err.Error(), + }) return reconcile.Result{}, err } else if requeue { - clusterScope.Info("Security Groups creation is pending, requeueing") + log.Info("Security Groups creation is pending, requeueing") return reconcile.Result{RequeueAfter: 15 * time.Second}, nil } - clusterScope.Info("Reconciliation of Security Groups complete") - conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1beta2.VPCSecurityGroupReadyCondition) + log.Info("Reconciliation of Security Groups complete") + v1beta1conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1.VPCSecurityGroupReadyCondition) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCSecurityGroupReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.VPCSecurityGroupReadyV1Beta2Reason, + }) // Reconcile the cluster's Load Balancers - clusterScope.Info("Reconciling Load Balancers") - if requeue, err := clusterScope.ReconcileLoadBalancers(); err != nil { - clusterScope.Error(err, "failed to reconcile Load Balancers") - conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1beta2.LoadBalancerReadyCondition, infrav1beta2.LoadBalancerReconciliationFailedReason, capiv1beta1.ConditionSeverityError, "%s", err.Error()) + 
log.Info("Reconciling Load Balancers") + if requeue, err := clusterScope.ReconcileLoadBalancers(ctx); err != nil { + log.Error(err, "failed to reconcile Load Balancers") + v1beta1conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCLoadBalancerReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.VPCLoadBalancerNotReadyV1Beta2Reason, + Message: err.Error(), + }) return reconcile.Result{}, err } else if requeue { - clusterScope.Info("Load Balancers creation is pending, requeueing") + log.Info("Load Balancers creation is pending, requeueing") return reconcile.Result{RequeueAfter: 15 * time.Second}, nil } - clusterScope.Info("Reconciliation of Load Balancers complete") - conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1beta2.LoadBalancerReadyCondition) + log.Info("Reconciliation of Load Balancers complete") + v1beta1conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1.LoadBalancerReadyCondition) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCLoadBalancerReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.VPCLoadBalancerReadyV1Beta2Reason, + }) // Collect cluster's Load Balancer hostname for spec. 
hostName, err := clusterScope.GetLoadBalancerHostName() if err != nil { return reconcile.Result{}, fmt.Errorf("error retrieving load balancer hostname: %w", err) } else if hostName == nil || *hostName == "" { - clusterScope.Info("No Load Balancer hostname found, requeueing") + log.Info("No Load Balancer hostname found, requeueing") return reconcile.Result{RequeueAfter: 15 * time.Second}, nil } @@ -313,11 +441,12 @@ func (r *IBMVPCClusterReconciler) reconcileCluster(clusterScope *scope.VPCCluste clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Host = *hostName clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Port = clusterScope.GetAPIServerPort() clusterScope.IBMVPCCluster.Status.Ready = true - clusterScope.Info("cluster infrastructure is now ready for cluster", "clusterName", clusterScope.IBMVPCCluster.Name) + log.Info("cluster infrastructure is now ready for cluster", "clusterName", clusterScope.IBMVPCCluster.Name) return ctrl.Result{}, nil } -func (r *IBMVPCClusterReconciler) reconcileDelete(clusterScope *scope.ClusterScope) (ctrl.Result, error) { +func (r *IBMVPCClusterReconciler) reconcileDelete(ctx context.Context, clusterScope *scope.ClusterScope) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) // check if still have existing VSIs. 
listVSIOpts := &vpcv1.ListInstancesOptions{ VPCID: &clusterScope.IBMVPCCluster.Status.VPC.ID, @@ -342,7 +471,7 @@ func (r *IBMVPCClusterReconciler) reconcileDelete(clusterScope *scope.ClusterSco return ctrl.Result{}, fmt.Errorf("error when retrieving load balancer with specified hostname: %w", err) } - if loadBalancer == nil && (string(clusterScope.GetLoadBalancerState()) != string(infrav1beta2.VPCLoadBalancerStateDeletePending)) { + if loadBalancer == nil && (string(clusterScope.GetLoadBalancerState()) != string(infrav1.VPCLoadBalancerStateDeletePending)) { return handleFinalizerRemoval(clusterScope) } if loadBalancer != nil { @@ -350,6 +479,13 @@ func (r *IBMVPCClusterReconciler) reconcileDelete(clusterScope *scope.ClusterSco if *loadBalancer.Name != clusterScope.IBMVPCCluster.Spec.ControlPlaneLoadBalancer.Name { return handleFinalizerRemoval(clusterScope) } + + log.Info("Deleting VPC load balancer") + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCLoadBalancerReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.VPCLoadBalancerDeletingV1Beta2Reason, + }) deleted, err := clusterScope.DeleteLoadBalancer() if err != nil { return ctrl.Result{}, fmt.Errorf("failed to delete loadBalancer: %w", err) @@ -361,18 +497,34 @@ func (r *IBMVPCClusterReconciler) reconcileDelete(clusterScope *scope.ClusterSco } } - if err := clusterScope.DeleteSubnet(); err != nil { + log.Info("Deleting VPC subnet") + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCSubnetReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.VPCSubnetDeletingV1Beta2Reason, + }) + if err := clusterScope.DeleteSubnet(ctx); err != nil { return ctrl.Result{}, fmt.Errorf("failed to delete subnet: %w", err) } + log.Info("Deleting VPC") + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: 
infrav1.VPCDeletingV1Beta2Reason, + }) if err := clusterScope.DeleteVPC(); err != nil { return ctrl.Result{}, fmt.Errorf("failed to delete VPC: %w", err) } + + log.Info("IBMVPCCluster deletion completed") return handleFinalizerRemoval(clusterScope) } -func (r *IBMVPCClusterReconciler) reconcileDeleteV2(_ *scope.VPCClusterScope) (ctrl.Result, error) { - return ctrl.Result{}, fmt.Errorf("not implemented") +func (r *IBMVPCClusterReconciler) reconcileDeleteV2(clusterScope *scope.VPCClusterScope) (ctrl.Result, error) { //nolint:unparam + clusterScope.Info("Delete cluster is not implemented for reconcile v2") + controllerutil.RemoveFinalizer(clusterScope.IBMVPCCluster, infrav1.ClusterFinalizer) + return ctrl.Result{}, nil } func (r *IBMVPCClusterReconciler) getOrCreate(clusterScope *scope.ClusterScope) (*vpcv1.LoadBalancer, error) { @@ -381,41 +533,99 @@ func (r *IBMVPCClusterReconciler) getOrCreate(clusterScope *scope.ClusterScope) } func handleFinalizerRemoval(clusterScope *scope.ClusterScope) (ctrl.Result, error) { - controllerutil.RemoveFinalizer(clusterScope.IBMVPCCluster, infrav1beta2.ClusterFinalizer) + controllerutil.RemoveFinalizer(clusterScope.IBMVPCCluster, infrav1.ClusterFinalizer) return ctrl.Result{}, nil } -func (r *IBMVPCClusterReconciler) reconcileLBState(clusterScope *scope.ClusterScope, loadBalancer *vpcv1.LoadBalancer) { +func (r *IBMVPCClusterReconciler) reconcileLBState(ctx context.Context, clusterScope *scope.ClusterScope, loadBalancer *vpcv1.LoadBalancer) { + log := ctrl.LoggerFrom(ctx) if clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Port == 0 { clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Port = clusterScope.APIServerPort() } clusterScope.SetLoadBalancerID(loadBalancer.ID) - clusterScope.Logger.V(3).Info("LoadBalancerID - " + clusterScope.GetLoadBalancerID()) + log.V(3).Info("LoadBalancerID - " + clusterScope.GetLoadBalancerID()) clusterScope.SetLoadBalancerAddress(loadBalancer.Hostname) 
clusterScope.SetLoadBalancerState(*loadBalancer.ProvisioningStatus) - clusterScope.Logger.V(3).Info("LoadBalancerState - " + string(clusterScope.GetLoadBalancerState())) + log.V(3).Info("LoadBalancerState - " + string(clusterScope.GetLoadBalancerState())) switch clusterScope.GetLoadBalancerState() { - case infrav1beta2.VPCLoadBalancerStateCreatePending: - clusterScope.Logger.V(3).Info("LoadBalancer is in create state") + case infrav1.VPCLoadBalancerStateCreatePending: + log.V(3).Info("LoadBalancer is in create state") clusterScope.SetNotReady() - conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1beta2.LoadBalancerReadyCondition, string(infrav1beta2.VPCLoadBalancerStateCreatePending), capiv1beta1.ConditionSeverityInfo, "%s", *loadBalancer.OperatingStatus) - case infrav1beta2.VPCLoadBalancerStateActive: - clusterScope.Logger.V(3).Info("LoadBalancer is in active state") + v1beta1conditions.MarkFalse(clusterScope.IBMVPCCluster, infrav1.LoadBalancerReadyCondition, string(infrav1.VPCLoadBalancerStateCreatePending), clusterv1beta1.ConditionSeverityInfo, "%s", *loadBalancer.OperatingStatus) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCLoadBalancerReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.VPCLoadBalancerNotReadyV1Beta2Reason, + Message: "VPC load balancer is in creating state", + }) + case infrav1.VPCLoadBalancerStateActive: + log.V(3).Info("LoadBalancer is in active state") clusterScope.SetReady() - conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1beta2.LoadBalancerReadyCondition) + v1beta1conditions.MarkTrue(clusterScope.IBMVPCCluster, infrav1.LoadBalancerReadyCondition) + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCLoadBalancerReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.VPCLoadBalancerReadyV1Beta2Reason, + Message: "VPC load balancer is in active state", + }) default: - clusterScope.Logger.V(3).Info("LoadBalancer 
state is undefined", "state", clusterScope.GetLoadBalancerState(), "loadbalancer-id", clusterScope.GetLoadBalancerID()) + log.V(3).Info("LoadBalancer state is undefined", "state", clusterScope.GetLoadBalancerState(), "loadbalancerID", clusterScope.GetLoadBalancerID()) clusterScope.SetNotReady() - conditions.MarkUnknown(clusterScope.IBMVPCCluster, infrav1beta2.LoadBalancerReadyCondition, *loadBalancer.ProvisioningStatus, "") + v1beta1conditions.MarkUnknown(clusterScope.IBMVPCCluster, infrav1.LoadBalancerReadyCondition, *loadBalancer.ProvisioningStatus, "") + v1beta2conditions.Set(clusterScope.IBMVPCCluster, metav1.Condition{ + Type: infrav1.VPCLoadBalancerReadyV1Beta2Condition, + Status: metav1.ConditionUnknown, + Reason: infrav1.VPCLoadBalancerNotReadyV1Beta2Reason, + Message: fmt.Sprintf("VPC load balancer is in an unknown state: %s", *loadBalancer.ProvisioningStatus), + }) } } // SetupWithManager creates a new IBMVPCCluster controller for a manager. func (r *IBMVPCClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&infrav1beta2.IBMVPCCluster{}). + For(&infrav1.IBMVPCCluster{}). WithEventFilter(predicates.ResourceIsNotExternallyManaged(r.Scheme, ctrl.LoggerFrom(ctx))). Complete(r) } + +// patchIBMVPCCluster updates the IBMVPCCluster and its status on the API server. 
+func patchIBMVPCCluster(ctx context.Context, patchHelper *v1beta1patch.Helper, ibmVPCCluster *infrav1.IBMVPCCluster) error { + if err := v1beta2conditions.SetSummaryCondition(ibmVPCCluster, ibmVPCCluster, infrav1.IBMVPCClusterReadyV1Beta2Condition, + v1beta2conditions.ForConditionTypes{ + infrav1.VPCReadyV1Beta2Condition, + infrav1.VPCSubnetReadyV1Beta2Condition, + infrav1.VPCLoadBalancerReadyV1Beta2Condition, + }, + v1beta2conditions.IgnoreTypesIfMissing{ + infrav1.VPCSecurityGroupReadyV1Beta2Condition, + infrav1.VPCImageReadyV1Beta2Condition, + }, + // Using a custom merge strategy to override reasons applied during merge. + v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + // Use custom reasons. + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + infrav1.IBMVPCClusterNotReadyV1Beta2Reason, + infrav1.IBMVPCClusterReadyUnknownV1Beta2Reason, + infrav1.IBMVPCClusterReadyV1Beta2Reason, + )), + ), + }, + ); err != nil { + return fmt.Errorf("failed to set %s condition: %w", infrav1.IBMVPCClusterReadyV1Beta2Condition, err) + } + + // Patch the IBMVPCCluster resource. 
+ return patchHelper.Patch(ctx, ibmVPCCluster, v1beta1patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + infrav1.IBMVPCClusterReadyV1Beta2Condition, + clusterv1beta1.PausedV1Beta2Condition, + infrav1.VPCReadyV1Beta2Condition, + infrav1.VPCSubnetReadyV1Beta2Condition, + infrav1.VPCSecurityGroupReadyV1Beta2Condition, + infrav1.VPCLoadBalancerReadyV1Beta2Condition, + infrav1.VPCImageReadyV1Beta2Condition, + }}) +} diff --git a/controllers/ibmvpccluster_controller_test.go b/controllers/ibmvpccluster_controller_test.go index 2bf95d79a..43c22c37b 100644 --- a/controllers/ibmvpccluster_controller_test.go +++ b/controllers/ibmvpccluster_controller_test.go @@ -19,6 +19,7 @@ package controllers import ( "errors" "fmt" + "testing" "time" @@ -30,12 +31,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" "k8s.io/utils/ptr" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc/mock" @@ -45,24 +47,24 @@ import ( func TestIBMVPCClusterReconciler_Reconcile(t *testing.T) { testCases := []struct { name string - vpcCluster *infrav1beta2.IBMVPCCluster - ownerCluster *capiv1beta1.Cluster + vpcCluster *infrav1.IBMVPCCluster + ownerCluster *clusterv1beta1.Cluster expectError bool }{ { name: "Should fail Reconcile if owner cluster not found", - vpcCluster: &infrav1beta2.IBMVPCCluster{ + vpcCluster: &infrav1.IBMVPCCluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "vpc-test-", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: 
capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: "capi-test", UID: "1", }}}, - Spec: infrav1beta2.IBMVPCClusterSpec{ - ControlPlaneLoadBalancer: &infrav1beta2.VPCLoadBalancerSpec{ + Spec: infrav1.IBMVPCClusterSpec{ + ControlPlaneLoadBalancer: &infrav1.VPCLoadBalancerSpec{ Name: *core.StringPtr("vpc-load-balancer"), }, }}, @@ -70,11 +72,11 @@ func TestIBMVPCClusterReconciler_Reconcile(t *testing.T) { }, { name: "Should not reconcile if owner reference is not set", - vpcCluster: &infrav1beta2.IBMVPCCluster{ + vpcCluster: &infrav1.IBMVPCCluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "vpc-test-"}, - Spec: infrav1beta2.IBMVPCClusterSpec{ - ControlPlaneLoadBalancer: &infrav1beta2.VPCLoadBalancerSpec{ + Spec: infrav1.IBMVPCClusterSpec{ + ControlPlaneLoadBalancer: &infrav1.VPCLoadBalancerSpec{ Name: *core.StringPtr("vpc-load-balancer"), }, }}, @@ -105,7 +107,7 @@ func TestIBMVPCClusterReconciler_Reconcile(t *testing.T) { }(tc.ownerCluster) tc.vpcCluster.OwnerReferences = []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: tc.ownerCluster.Name, UID: "1", @@ -158,15 +160,15 @@ func TestIBMVPCClusterReconciler_reconcile(t *testing.T) { } clusterScope = &scope.ClusterScope{ IBMVPCClient: mockvpc, - Cluster: &capiv1beta1.Cluster{}, + Cluster: &clusterv1.Cluster{}, Logger: klog.Background(), - IBMVPCCluster: &infrav1beta2.IBMVPCCluster{ + IBMVPCCluster: &infrav1.IBMVPCCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "vpc-cluster", }, - Spec: infrav1beta2.IBMVPCClusterSpec{ + Spec: infrav1.IBMVPCClusterSpec{ VPC: "capi-vpc", - ControlPlaneLoadBalancer: &infrav1beta2.VPCLoadBalancerSpec{ + ControlPlaneLoadBalancer: &infrav1.VPCLoadBalancerSpec{ Name: *core.StringPtr("vpc-load-balancer"), }, }, @@ -182,11 +184,11 @@ func TestIBMVPCClusterReconciler_reconcile(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - _, err := 
reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) g.Expect(err).To(BeNil()) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) }) - clusterScope.IBMVPCCluster.Finalizers = []string{infrav1beta2.ClusterFinalizer} + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} listVpcsOptions := &vpcv1.ListVpcsOptions{} response := &core.DetailedResponse{} vpclist := &vpcv1.VPCCollection{} @@ -194,11 +196,11 @@ func TestIBMVPCClusterReconciler_reconcile(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - clusterScope.IBMVPCCluster.Finalizers = []string{infrav1beta2.ClusterFinalizer} + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} mockvpc.EXPECT().ListVpcs(listVpcsOptions).Return(vpclist, response, errors.New("failed to list VPCs")) - _, err := reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) g.Expect(err).To(Not(BeNil())) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) }) vpclist.Vpcs = []vpcv1.VPC{ { @@ -223,12 +225,12 @@ func TestIBMVPCClusterReconciler_reconcile(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - clusterScope.IBMVPCCluster.Finalizers = []string{infrav1beta2.ClusterFinalizer} + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} mockvpc.EXPECT().ListVpcs(listVpcsOptions).Return(vpclist, response, nil) mockvpc.EXPECT().ListSubnets(subnetOptions).Return(subnets, response, errors.New("Failed to list the subnets")) - _, err := reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) g.Expect(err).To(Not(BeNil())) - 
g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) }) subnets.Subnets = []vpcv1.Subnet{ { @@ -243,28 +245,28 @@ func TestIBMVPCClusterReconciler_reconcile(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - clusterScope.IBMVPCCluster.Finalizers = []string{infrav1beta2.ClusterFinalizer} + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} mockvpc.EXPECT().ListVpcs(listVpcsOptions).Return(vpclist, response, nil) mockvpc.EXPECT().ListSubnets(subnetOptions).Return(subnets, response, nil) mockvpc.EXPECT().ListLoadBalancers(loadBalancerOptions).Return(loadBalancers, response, nil) - _, err := reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) g.Expect(err).To(BeNil()) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) g.Expect(clusterScope.IBMVPCCluster.Status.Ready).To(Equal(true)) }) t.Run("Should use the user supplied port for the apiserver", func(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - clusterScope.IBMVPCCluster.Finalizers = []string{infrav1beta2.ClusterFinalizer} + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} port := int32(412) - clusterScope.Cluster.Spec.ClusterNetwork = &capiv1beta1.ClusterNetwork{APIServerPort: &port} + clusterScope.Cluster.Spec.ClusterNetwork = clusterv1.ClusterNetwork{APIServerPort: port} mockvpc.EXPECT().ListVpcs(listVpcsOptions).Return(vpclist, response, nil) mockvpc.EXPECT().ListSubnets(subnetOptions).Return(subnets, response, nil) mockvpc.EXPECT().ListLoadBalancers(loadBalancerOptions).Return(loadBalancers, response, nil) - _, err := reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) 
g.Expect(err).To(BeNil()) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) g.Expect(clusterScope.IBMVPCCluster.Status.Ready).To(Equal(true)) g.Expect(clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Port).To(Equal(port)) }) @@ -272,15 +274,15 @@ func TestIBMVPCClusterReconciler_reconcile(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - clusterScope.IBMVPCCluster.Finalizers = []string{infrav1beta2.ClusterFinalizer} + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} mockvpc.EXPECT().ListVpcs(listVpcsOptions).Return(vpclist, response, nil) mockvpc.EXPECT().ListSubnets(subnetOptions).Return(subnets, response, nil) mockvpc.EXPECT().ListLoadBalancers(loadBalancerOptions).Return(loadBalancers, response, nil) - _, err := reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) g.Expect(err).To(BeNil()) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) g.Expect(clusterScope.IBMVPCCluster.Status.Ready).To(Equal(true)) - g.Expect(clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Port).To(Equal(infrav1beta2.DefaultAPIServerPort)) + g.Expect(clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Port).To(Equal(infrav1.DefaultAPIServerPort)) }) }) } @@ -295,15 +297,15 @@ func TestIBMVPCClusterLBReconciler_reconcile(t *testing.T) { } clusterScope := &scope.ClusterScope{ IBMVPCClient: mockvpc, - Cluster: &capiv1beta1.Cluster{}, + Cluster: &clusterv1.Cluster{}, Logger: klog.Background(), - IBMVPCCluster: &infrav1beta2.IBMVPCCluster{ + IBMVPCCluster: &infrav1.IBMVPCCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "vpc-cluster", }, - Spec: infrav1beta2.IBMVPCClusterSpec{ + Spec: infrav1.IBMVPCClusterSpec{ VPC: "capi-vpc", - 
ControlPlaneLoadBalancer: &infrav1beta2.VPCLoadBalancerSpec{ + ControlPlaneLoadBalancer: &infrav1.VPCLoadBalancerSpec{ Name: "vpc-load-balancer", }, }, @@ -348,41 +350,41 @@ func TestIBMVPCClusterLBReconciler_reconcile(t *testing.T) { g := NewWithT(t) mockController, mockvpc, clusterScope, reconciler := setup(t) t.Cleanup(mockController.Finish) - clusterScope.IBMVPCCluster.Finalizers = []string{infrav1beta2.ClusterFinalizer} + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} mockvpc.EXPECT().ListVpcs(&vpcv1.ListVpcsOptions{}).Return(vpclist, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListSubnets(&vpcv1.ListSubnetsOptions{}).Return(subnets, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancers(&vpcv1.ListLoadBalancersOptions{}).Return(&vpcv1.LoadBalancerCollection{}, &core.DetailedResponse{}, errors.New("Failed to list the LoadBalancers")) - _, err := reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) g.Expect(err).To(Not(BeNil())) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) }) t.Run("Should successfully reconcile IBMVPCCluster with default port for the apiserver and set cluster status as Ready when LoadBalancer is in active state", func(t *testing.T) { g := NewWithT(t) mockController, mockvpc, clusterScope, reconciler := setup(t) t.Cleanup(mockController.Finish) - clusterScope.IBMVPCCluster.Finalizers = []string{infrav1beta2.ClusterFinalizer} + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} mockvpc.EXPECT().ListVpcs(&vpcv1.ListVpcsOptions{}).Return(vpclist, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListSubnets(&vpcv1.ListSubnetsOptions{}).Return(subnets, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancers(&vpcv1.ListLoadBalancersOptions{}).Return(loadBalancerCollection, 
&core.DetailedResponse{}, nil) - _, err := reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) g.Expect(err).To(BeNil()) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) g.Expect(clusterScope.IBMVPCCluster.Status.Ready).To(Equal(true)) - g.Expect(clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Port).To(Equal(infrav1beta2.DefaultAPIServerPort)) + g.Expect(clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Port).To(Equal(infrav1.DefaultAPIServerPort)) }) t.Run("Should successfully reconcile IBMVPCCluster with user supplied port for the apiserver and set cluster status as Ready when LoadBalancer is in active state", func(t *testing.T) { g := NewWithT(t) mockController, mockvpc, clusterScope, reconciler := setup(t) t.Cleanup(mockController.Finish) - clusterScope.IBMVPCCluster.Finalizers = []string{infrav1beta2.ClusterFinalizer} + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} port := int32(412) - clusterScope.Cluster.Spec.ClusterNetwork = &capiv1beta1.ClusterNetwork{APIServerPort: &port} + clusterScope.Cluster.Spec.ClusterNetwork = clusterv1.ClusterNetwork{APIServerPort: port} mockvpc.EXPECT().ListVpcs(&vpcv1.ListVpcsOptions{}).Return(vpclist, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListSubnets(&vpcv1.ListSubnetsOptions{}).Return(subnets, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancers(&vpcv1.ListLoadBalancersOptions{}).Return(loadBalancerCollection, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) g.Expect(err).To(BeNil()) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) 
g.Expect(clusterScope.IBMVPCCluster.Status.Ready).To(Equal(true)) g.Expect(clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Port).To(Equal(port)) }) @@ -390,45 +392,45 @@ func TestIBMVPCClusterLBReconciler_reconcile(t *testing.T) { g := NewWithT(t) mockController, mockvpc, clusterScope, reconciler := setup(t) t.Cleanup(mockController.Finish) - clusterScope.IBMVPCCluster.Finalizers = []string{infrav1beta2.ClusterFinalizer} - clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint = capiv1beta1.APIEndpoint{ + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} + clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint = clusterv1beta1.APIEndpoint{ Host: *core.StringPtr("vpc-load-balancer-hostname"), } mockvpc.EXPECT().ListVpcs(&vpcv1.ListVpcsOptions{}).Return(vpclist, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListSubnets(&vpcv1.ListSubnetsOptions{}).Return(subnets, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancers(&vpcv1.ListLoadBalancersOptions{}).Return(loadBalancerCollection, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) g.Expect(err).To(BeNil()) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) g.Expect(clusterScope.IBMVPCCluster.Status.Ready).To(Equal(true)) - g.Expect(clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Port).To(Equal(infrav1beta2.DefaultAPIServerPort)) + g.Expect(clusterScope.IBMVPCCluster.Spec.ControlPlaneEndpoint.Port).To(Equal(infrav1.DefaultAPIServerPort)) }) t.Run("Should successfully reconcile IBMVPCCluster and set cluster status as NotReady when LoadBalancer is create state", func(t *testing.T) { g := NewWithT(t) mockController, mockvpc, clusterScope, reconciler := setup(t) t.Cleanup(mockController.Finish) - clusterScope.IBMVPCCluster.Finalizers = 
[]string{infrav1beta2.ClusterFinalizer} + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} loadBalancerCollection.LoadBalancers[0].ProvisioningStatus = core.StringPtr("create_pending") mockvpc.EXPECT().ListVpcs(&vpcv1.ListVpcsOptions{}).Return(vpclist, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListSubnets(&vpcv1.ListSubnetsOptions{}).Return(subnets, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancers(&vpcv1.ListLoadBalancersOptions{}).Return(loadBalancerCollection, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) g.Expect(err).To(BeNil()) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) g.Expect(clusterScope.IBMVPCCluster.Status.Ready).To(Equal(false)) }) t.Run("Should successfully reconcile IBMVPCCluster and set cluster status as NotReady when LoadBalancer is in undefined state", func(t *testing.T) { g := NewWithT(t) mockController, mockvpc, clusterScope, reconciler := setup(t) t.Cleanup(mockController.Finish) - clusterScope.IBMVPCCluster.Finalizers = []string{infrav1beta2.ClusterFinalizer} + clusterScope.IBMVPCCluster.Finalizers = []string{infrav1.ClusterFinalizer} loadBalancerCollection.LoadBalancers[0].ProvisioningStatus = core.StringPtr("update_pending") mockvpc.EXPECT().ListVpcs(&vpcv1.ListVpcsOptions{}).Return(vpclist, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListSubnets(&vpcv1.ListSubnetsOptions{}).Return(subnets, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancers(&vpcv1.ListLoadBalancersOptions{}).Return(loadBalancerCollection, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcile(clusterScope) + _, err := reconciler.reconcile(ctx, clusterScope) g.Expect(err).To(BeNil()) - 
g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) g.Expect(clusterScope.IBMVPCCluster.Status.Ready).To(Equal(false)) }) }) @@ -453,15 +455,15 @@ func TestIBMVPCClusterReconciler_delete(t *testing.T) { clusterScope = &scope.ClusterScope{ IBMVPCClient: mockvpc, Logger: klog.Background(), - IBMVPCCluster: &infrav1beta2.IBMVPCCluster{ + IBMVPCCluster: &infrav1.IBMVPCCluster{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{infrav1beta2.ClusterFinalizer}, + Finalizers: []string{infrav1.ClusterFinalizer}, }, - Status: infrav1beta2.IBMVPCClusterStatus{ - Subnet: infrav1beta2.Subnet{ + Status: infrav1.IBMVPCClusterStatus{ + Subnet: infrav1.Subnet{ ID: ptr.To("capi-subnet-id"), }, - VPC: infrav1beta2.VPC{ + VPC: infrav1.VPC{ ID: "capi-vpc-id", }, }, @@ -483,9 +485,9 @@ func TestIBMVPCClusterReconciler_delete(t *testing.T) { setup(t) t.Cleanup(teardown) mockvpc.EXPECT().ListInstances(listVSIOpts).Return(instancelist, response, errors.New("Failed to list the VSIs")) - _, err := reconciler.reconcileDelete(clusterScope) + _, err := reconciler.reconcileDelete(ctx, clusterScope) g.Expect(err).To(Not(BeNil())) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) }) t.Run("Should skip deleting other resources if instances are still running", func(t *testing.T) { g := NewWithT(t) @@ -493,9 +495,9 @@ func TestIBMVPCClusterReconciler_delete(t *testing.T) { t.Cleanup(teardown) instancelist.TotalCount = ptr.To(int64(2)) mockvpc.EXPECT().ListInstances(listVSIOpts).Return(instancelist, response, nil) - _, err := reconciler.reconcileDelete(clusterScope) + _, err := reconciler.reconcileDelete(ctx, clusterScope) g.Expect(err).To(BeNil()) - 
g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) }) getPGWOptions := &vpcv1.GetSubnetPublicGatewayOptions{ID: ptr.To("capi-subnet-id")} subnet := &vpcv1.SubnetCollection{Subnets: []vpcv1.Subnet{{ID: core.StringPtr("capi-subnet-id")}}} @@ -514,9 +516,9 @@ func TestIBMVPCClusterReconciler_delete(t *testing.T) { mockvpc.EXPECT().UnsetSubnetPublicGateway(unsetPGWOptions).Return(response, nil) mockvpc.EXPECT().DeletePublicGateway(deletePGWOptions).Return(response, nil) mockvpc.EXPECT().DeleteSubnet(deleteSubnetOptions).Return(response, errors.New("failed to delete subnet")) - _, err := reconciler.reconcileDelete(clusterScope) + _, err := reconciler.reconcileDelete(ctx, clusterScope) g.Expect(err).To(Not(BeNil())) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) }) deleteVpcOptions := &vpcv1.DeleteVPCOptions{ID: ptr.To("capi-vpc-id")} t.Run("Should fail deleting the VPC", func(t *testing.T) { @@ -530,9 +532,9 @@ func TestIBMVPCClusterReconciler_delete(t *testing.T) { mockvpc.EXPECT().DeletePublicGateway(deletePGWOptions).Return(response, nil) mockvpc.EXPECT().DeleteSubnet(deleteSubnetOptions).Return(response, nil) mockvpc.EXPECT().DeleteVPC(deleteVpcOptions).Return(response, errors.New("failed to delete VPC")) - _, err := reconciler.reconcileDelete(clusterScope) + _, err := reconciler.reconcileDelete(ctx, clusterScope) g.Expect(err).To(Not(BeNil())) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) }) t.Run("Should successfully delete IBMVPCCluster and remove the finalizer", func(t *testing.T) { g := NewWithT(t) @@ -545,9 +547,9 @@ func 
TestIBMVPCClusterReconciler_delete(t *testing.T) { mockvpc.EXPECT().DeletePublicGateway(deletePGWOptions).Return(response, nil) mockvpc.EXPECT().DeleteSubnet(deleteSubnetOptions).Return(response, nil) mockvpc.EXPECT().DeleteVPC(deleteVpcOptions).Return(response, nil) - _, err := reconciler.reconcileDelete(clusterScope) + _, err := reconciler.reconcileDelete(ctx, clusterScope) g.Expect(err).To(BeNil()) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(Not(ContainElement(infrav1beta2.ClusterFinalizer))) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(Not(ContainElement(infrav1.ClusterFinalizer))) }) }) } @@ -563,26 +565,26 @@ func TestIBMVPCClusterLBReconciler_delete(t *testing.T) { clusterScope := &scope.ClusterScope{ IBMVPCClient: mockvpc, Logger: klog.Background(), - IBMVPCCluster: &infrav1beta2.IBMVPCCluster{ + IBMVPCCluster: &infrav1.IBMVPCCluster{ ObjectMeta: metav1.ObjectMeta{ - Finalizers: []string{infrav1beta2.ClusterFinalizer}, + Finalizers: []string{infrav1.ClusterFinalizer}, }, - Spec: infrav1beta2.IBMVPCClusterSpec{ - ControlPlaneLoadBalancer: &infrav1beta2.VPCLoadBalancerSpec{ + Spec: infrav1.IBMVPCClusterSpec{ + ControlPlaneLoadBalancer: &infrav1.VPCLoadBalancerSpec{ Name: "vpc-load-balancer", }, - ControlPlaneEndpoint: capiv1beta1.APIEndpoint{ + ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{ Host: "vpc-load-balancer-hostname", }, }, - Status: infrav1beta2.IBMVPCClusterStatus{ - VPCEndpoint: infrav1beta2.VPCEndpoint{ + Status: infrav1.IBMVPCClusterStatus{ + VPCEndpoint: infrav1.VPCEndpoint{ LBID: ptr.To("vpc-load-balancer-id"), }, - Subnet: infrav1beta2.Subnet{ + Subnet: infrav1.Subnet{ ID: ptr.To("capi-subnet-id"), }, - VPC: infrav1beta2.VPC{ + VPC: infrav1.VPC{ ID: "capi-vpc-id", }, }, @@ -602,7 +604,7 @@ func TestIBMVPCClusterLBReconciler_delete(t *testing.T) { t.Cleanup(mockController.Finish) mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(instancelist, &core.DetailedResponse{}, nil) 
mockvpc.EXPECT().ListLoadBalancers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancersOptions{})).Return(&vpcv1.LoadBalancerCollection{}, &core.DetailedResponse{}, errors.New("failed to list LoadBalancers")) - _, err := reconciler.reconcileDelete(clusterScope) + _, err := reconciler.reconcileDelete(ctx, clusterScope) g.Expect(err).To(Not(BeNil())) }) t.Run("Should skip deleting other resources if LoadBalancer is still present", func(t *testing.T) { @@ -622,9 +624,9 @@ func TestIBMVPCClusterLBReconciler_delete(t *testing.T) { mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(instancelist, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancersOptions{})).Return(customloadBalancerCollection, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancersOptions{})).Return(customloadBalancerCollection, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcileDelete(clusterScope) + _, err := reconciler.reconcileDelete(ctx, clusterScope) g.Expect(err).To(BeNil()) - g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1beta2.ClusterFinalizer)) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(ContainElement(infrav1.ClusterFinalizer)) }) t.Run("Should successfully delete IBMVPCCluster and remove the finalizer when ControlPlaneEndpoint Host is set", func(t *testing.T) { g := NewWithT(t) @@ -632,19 +634,19 @@ func TestIBMVPCClusterLBReconciler_delete(t *testing.T) { t.Cleanup(mockController.Finish) clusterScope.IBMVPCCluster.Spec.ControlPlaneLoadBalancer = nil mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(instancelist, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcileDelete(clusterScope) + _, err := reconciler.reconcileDelete(ctx, clusterScope) g.Expect(err).To(BeNil()) - 
g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(Not(ContainElement(infrav1beta2.ClusterFinalizer))) + g.Expect(clusterScope.IBMVPCCluster.Finalizers).To(Not(ContainElement(infrav1.ClusterFinalizer))) }) }) } -func createVPCCluster(g *WithT, vpcCluster *infrav1beta2.IBMVPCCluster, namespace string) { +func createVPCCluster(g *WithT, vpcCluster *infrav1.IBMVPCCluster, namespace string) { if vpcCluster != nil { vpcCluster.Namespace = namespace g.Expect(testEnv.Create(ctx, vpcCluster)).To(Succeed()) g.Eventually(func() bool { - cluster := &infrav1beta2.IBMVPCCluster{} + cluster := &infrav1.IBMVPCCluster{} key := client.ObjectKey{ Name: vpcCluster.Name, Namespace: namespace, @@ -655,7 +657,7 @@ func createVPCCluster(g *WithT, vpcCluster *infrav1beta2.IBMVPCCluster, namespac } } -func cleanupVPCCluster(g *WithT, vpcCluster *infrav1beta2.IBMVPCCluster, namespace *corev1.Namespace) { +func cleanupVPCCluster(g *WithT, vpcCluster *infrav1.IBMVPCCluster, namespace *corev1.Namespace) { if vpcCluster != nil { func(do ...client.Object) { g.Expect(testEnv.Cleanup(ctx, do...)).To(Succeed()) diff --git a/controllers/ibmvpcmachine_controller.go b/controllers/ibmvpcmachine_controller.go index 08e4383cf..11efdcef6 100644 --- a/controllers/ibmvpcmachine_controller.go +++ b/controllers/ibmvpcmachine_controller.go @@ -26,18 +26,26 @@ import ( "github.com/IBM/vpc-go-sdk/vpcv1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - 
"sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck + v1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" //nolint:staticcheck + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/paused" + "sigs.k8s.io/cluster-api/util/finalizers" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/endpoints" capibmrecord "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/record" @@ -60,12 +68,14 @@ type IBMVPCMachineReconciler struct { // Reconcile implements controller runtime Reconciler interface and handles reconcileation logic for IBMVPCMachine. func (r *IBMVPCMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { - log := r.Log.WithValues("ibmvpcmachine", req.NamespacedName) + log := ctrl.LoggerFrom(ctx) - // Fetch the IBMVPCMachine instance. + log.Info("Reconciling IBMVPCMachine") + defer log.Info("Finished reconciling IBMVPCMachine") - ibmVpcMachine := &infrav1beta2.IBMVPCMachine{} - err := r.Get(ctx, req.NamespacedName, ibmVpcMachine) + // Fetch the IBMVPCMachine instance. + ibmVPCMachine := &infrav1.IBMVPCMachine{} + err := r.Get(ctx, req.NamespacedName, ibmVPCMachine) if err != nil { if apierrors.IsNotFound(err) { return ctrl.Result{}, nil @@ -73,7 +83,7 @@ func (r *IBMVPCMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } // Fetch the Machine. 
- machine, err := util.GetOwnerMachine(ctx, r.Client, ibmVpcMachine.ObjectMeta) + machine, err := util.GetOwnerMachine(ctx, r.Client, ibmVPCMachine.ObjectMeta) if err != nil { return ctrl.Result{}, err } @@ -83,81 +93,98 @@ func (r *IBMVPCMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques } // Fetch the Cluster. - cluster, err := util.GetClusterFromMetadata(ctx, r.Client, ibmVpcMachine.ObjectMeta) + cluster, err := util.GetClusterFromMetadata(ctx, r.Client, ibmVPCMachine.ObjectMeta) if err != nil { log.Info("Machine is missing cluster label or cluster does not exist") return ctrl.Result{}, nil } - log = log.WithValues("cluster", cluster.Name) - - ibmCluster := &infrav1beta2.IBMVPCCluster{} - ibmVpcClusterName := client.ObjectKey{ - Namespace: ibmVpcMachine.Namespace, + ibmVPCCluster := &infrav1.IBMVPCCluster{} + ibmVPCClusterName := client.ObjectKey{ + Namespace: ibmVPCMachine.Namespace, Name: cluster.Spec.InfrastructureRef.Name, } - if err := r.Client.Get(ctx, ibmVpcClusterName, ibmCluster); err != nil { + if err := r.Client.Get(ctx, ibmVPCClusterName, ibmVPCCluster); err != nil { log.Info("IBMVPCCluster is not available yet") return ctrl.Result{}, nil } + // Add finalizer first if not set to avoid the race condition between init and delete. + if finalizerAdded, err := finalizers.EnsureFinalizer(ctx, r.Client, ibmVPCMachine, infrav1.MachineFinalizer); err != nil || finalizerAdded { + return ctrl.Result{}, err + } + + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) + + // Initialize the patch helper. + patchHelper, err := v1beta1patch.NewHelper(ibmVPCMachine, r.Client) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to initialize patch helper: %w", err) + } + + // Always attempt to Patch the IBMVPCMachine object and status after each reconciliation. 
+ defer func() { + if err := patchIBMVPCMachine(ctx, patchHelper, ibmVPCMachine); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + + if isPaused, requeue, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, ibmVPCMachine); err != nil || isPaused || requeue { + return ctrl.Result{}, err + } + // Create the machine scope. machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ Client: r.Client, - Logger: log, Cluster: cluster, - IBMVPCCluster: ibmCluster, + IBMVPCCluster: ibmVPCCluster, Machine: machine, - IBMVPCMachine: ibmVpcMachine, + IBMVPCMachine: ibmVPCMachine, ServiceEndpoint: r.ServiceEndpoint, }) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to create scope: %w", err) } - // Always close the scope when exiting this function, so we can persist any IBMVPCMachine changes. - defer func() { - if machineScope != nil { - if err := machineScope.Close(); err != nil && reterr == nil { - reterr = err - } - } - }() + log = log.WithValues("IBMVPCMachine", klog.KObj(ibmVPCMachine)) + ctx = ctrl.LoggerInto(ctx, log) // Handle deleted machines. - if !ibmVpcMachine.DeletionTimestamp.IsZero() { - return r.reconcileDelete(machineScope) + if !ibmVPCMachine.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, machineScope) } // Handle non-deleted machines. - return r.reconcileNormal(machineScope) + return r.reconcileNormal(ctx, machineScope) } // SetupWithManager creates a new IBMVPCMachine controller for a manager. func (r *IBMVPCMachineReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&infrav1beta2.IBMVPCMachine{}). + For(&infrav1.IBMVPCMachine{}). 
Complete(r) } -func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineScope) (ctrl.Result, error) { //nolint:gocyclo - if controllerutil.AddFinalizer(machineScope.IBMVPCMachine, infrav1beta2.MachineFinalizer) { +func (r *IBMVPCMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.MachineScope) (ctrl.Result, error) { //nolint:gocyclo + log := ctrl.LoggerFrom(ctx) + if controllerutil.AddFinalizer(machineScope.IBMVPCMachine, infrav1.MachineFinalizer) { return ctrl.Result{}, nil } // Make sure bootstrap data is available and populated. if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { - machineScope.Info("Bootstrap data secret reference is not yet available") + log.Info("Bootstrap data secret reference is not yet available") return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil } if machineScope.IBMVPCCluster.Status.Subnet.ID != nil { - machineScope.IBMVPCMachine.Spec.PrimaryNetworkInterface = infrav1beta2.NetworkInterface{ + machineScope.IBMVPCMachine.Spec.PrimaryNetworkInterface = infrav1.NetworkInterface{ Subnet: *machineScope.IBMVPCCluster.Status.Subnet.ID, } } - instance, err := r.getOrCreate(machineScope) + instance, err := r.getOrCreate(ctx, machineScope) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile VSI for IBMVPCMachine %s/%s: %w", machineScope.IBMVPCMachine.Namespace, machineScope.IBMVPCMachine.Name, err) } @@ -181,10 +208,35 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco switch machineScope.GetInstanceStatus() { case vpcv1.InstanceStatusPendingConst: machineScope.SetNotReady() - conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.InstanceNotReadyReason, capiv1beta1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, clusterv1beta1.ConditionSeverityWarning, "") + 
v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMVPCMachineInstanceNotReadyV1Beta2Reason, + }) + case vpcv1.InstanceStatusStartingConst: + machineScope.SetNotReady() + v1beta1conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, clusterv1beta1.ConditionSeverityWarning, "") + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMVPCMachineInstanceNotReadyV1Beta2Reason, + }) case vpcv1.InstanceStatusStoppedConst: machineScope.SetNotReady() - conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.InstanceStoppedReason, capiv1beta1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStoppedReason, clusterv1beta1.ConditionSeverityError, "") + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.InstanceStoppedReason, + }) + case vpcv1.InstanceStatusDeletingConst: + v1beta1conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, infrav1.InstanceDeletingReason, clusterv1beta1.ConditionSeverityError, "") + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.InstanceDeletingReason, + }) case vpcv1.InstanceStatusFailedConst: msg := "" healthReasonsLen := len(instance.HealthReasons) @@ -194,21 +246,36 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco msg = fmt.Sprintf("%s: %s", *instance.HealthReasons[healthReasonsLen-1].Code, 
*instance.HealthReasons[healthReasonsLen-1].Message) } machineScope.SetNotReady() - machineScope.SetFailureReason(infrav1beta2.UpdateMachineError) + machineScope.SetFailureReason(infrav1.UpdateMachineError) machineScope.SetFailureMessage(msg) - conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.InstanceErroredReason, capiv1beta1.ConditionSeverityError, "%s", msg) + v1beta1conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, infrav1.InstanceErroredReason, clusterv1beta1.ConditionSeverityError, "%s", msg) + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.InstanceErroredReason, + }) capibmrecord.Warnf(machineScope.IBMVPCMachine, "FailedBuildInstance", "Failed to build the instance - %s", msg) return ctrl.Result{}, nil case vpcv1.InstanceStatusRunningConst: machineRunning = true default: machineScope.SetNotReady() - machineScope.V(3).Info("unexpected vpc instance status", "instanceStatus", *instance.Status, "instanceID", machineScope.GetInstanceID()) - conditions.MarkUnknown(machineScope.IBMVPCMachine, infrav1beta2.InstanceReadyCondition, "", "") + log.V(3).Info("unexpected vpc instance status", "instanceStatus", *instance.Status, "instanceID", machineScope.GetInstanceID()) + v1beta1conditions.MarkUnknown(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, "", "") + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.InstanceStateUnknownReason, + }) } } else { machineScope.SetNotReady() - conditions.MarkUnknown(machineScope.IBMVPCMachine, infrav1beta2.InstanceReadyCondition, infrav1beta2.InstanceStateUnknownReason, "") + v1beta1conditions.MarkUnknown(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, 
infrav1.InstanceStateUnknownReason, "") + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.InstanceStateUnknownReason, + }) } // Check if the Machine is running. @@ -221,7 +288,7 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco if len(machineScope.IBMVPCMachine.Spec.LoadBalancerPoolMembers) > 0 { needsRequeue := false for _, poolMember := range machineScope.IBMVPCMachine.Spec.LoadBalancerPoolMembers { - requeue, err := machineScope.ReconcileVPCLoadBalancerPoolMember(poolMember) + requeue, err := machineScope.ReconcileVPCLoadBalancerPoolMember(ctx, poolMember) if err != nil { return ctrl.Result{}, fmt.Errorf("error failed to reconcile machine's pool member: %w", err) } else if requeue { @@ -235,7 +302,7 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco } } else { // Otherwise, default to previous Load Balancer Pool Member configuration. 
- _, ok := machineScope.IBMVPCMachine.Labels[capiv1beta1.MachineControlPlaneNameLabel] + _, ok := machineScope.IBMVPCMachine.Labels[clusterv1.MachineControlPlaneNameLabel] if err = machineScope.SetProviderID(instance.ID); err != nil { return ctrl.Result{}, fmt.Errorf("failed to set provider id IBMVPCMachine %s/%s: %w", machineScope.IBMVPCMachine.Namespace, machineScope.IBMVPCMachine.Name, err) } @@ -245,11 +312,11 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco } internalIP := instance.PrimaryNetworkInterface.PrimaryIP.Address port := int64(machineScope.APIServerPort()) - poolMember, err := machineScope.CreateVPCLoadBalancerPoolMember(internalIP, port) + poolMember, err := machineScope.CreateVPCLoadBalancerPoolMember(ctx, internalIP, port) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to bind port %d to control plane %s/%s: %w", port, machineScope.IBMVPCMachine.Namespace, machineScope.IBMVPCMachine.Name, err) } - if poolMember != nil && *poolMember.ProvisioningStatus != string(infrav1beta2.VPCLoadBalancerStateActive) { + if poolMember != nil && *poolMember.ProvisioningStatus != string(infrav1.VPCLoadBalancerStateActive) { return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil } } @@ -257,35 +324,94 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco // With a running machine and all Load Balancer Pool Members reconciled, mark machine as ready. 
machineScope.SetReady() - conditions.MarkTrue(machineScope.IBMVPCMachine, infrav1beta2.InstanceReadyCondition) + v1beta1conditions.MarkTrue(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition) + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.IBMVPCMachineInstanceReadyV1Beta2Reason, + }) return ctrl.Result{}, nil } -func (r *IBMVPCMachineReconciler) getOrCreate(scope *scope.MachineScope) (*vpcv1.Instance, error) { - instance, err := scope.CreateMachine() +func (r *IBMVPCMachineReconciler) getOrCreate(ctx context.Context, scope *scope.MachineScope) (*vpcv1.Instance, error) { + instance, err := scope.CreateMachine(ctx) return instance, err } -func (r *IBMVPCMachineReconciler) reconcileDelete(scope *scope.MachineScope) (_ ctrl.Result, reterr error) { - scope.Info("Handling deleted IBMVPCMachine") +func (r *IBMVPCMachineReconciler) reconcileDelete(ctx context.Context, scope *scope.MachineScope) (_ ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + log.Info("Handling deleted IBMVPCMachine") - if _, ok := scope.IBMVPCMachine.Labels[capiv1beta1.MachineControlPlaneNameLabel]; ok { - if err := scope.DeleteVPCLoadBalancerPoolMember(); err != nil { + if _, ok := scope.IBMVPCMachine.Labels[clusterv1.MachineControlPlaneNameLabel]; ok { + if err := scope.DeleteVPCLoadBalancerPoolMember(ctx); err != nil { return ctrl.Result{}, fmt.Errorf("failed to delete loadBalancer pool member: %w", err) } } if err := scope.DeleteMachine(); err != nil { - scope.Info("error deleting IBMVPCMachine") + log.Info("Error deleting IBMVPCMachine") return ctrl.Result{}, fmt.Errorf("error deleting IBMVPCMachine %s/%s: %w", scope.IBMVPCMachine.Namespace, scope.IBMVPCMachine.Spec.Name, err) } defer func() { if reterr == nil { // VSI is deleted so remove the finalizer. 
- controllerutil.RemoveFinalizer(scope.IBMVPCMachine, infrav1beta2.MachineFinalizer) + controllerutil.RemoveFinalizer(scope.IBMVPCMachine, infrav1.MachineFinalizer) } }() return ctrl.Result{}, nil } + +func patchIBMVPCMachine(ctx context.Context, patchHelper *v1beta1patch.Helper, ibmVPCMachine *infrav1.IBMVPCMachine) error { + // Before computing ready condition, make sure that InstanceReady is always set. + // NOTE: This is required because v1beta2 conditions comply to guideline requiring conditions to be set at the + // first reconcile. + if c := v1beta2conditions.Get(ibmVPCMachine, infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition); c == nil { + if ibmVPCMachine.Status.Ready { + v1beta2conditions.Set(ibmVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.IBMVPCMachineInstanceReadyV1Beta2Reason, + }) + } else { + v1beta2conditions.Set(ibmVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMVPCMachineInstanceNotReadyV1Beta2Reason, + }) + } + } + + v1beta1conditions.SetSummary(ibmVPCMachine, + v1beta1conditions.WithConditions( + infrav1.InstanceReadyCondition, + ), + ) + + if err := v1beta2conditions.SetSummaryCondition(ibmVPCMachine, ibmVPCMachine, infrav1.IBMVPCMachineReadyV1Beta2Condition, + v1beta2conditions.ForConditionTypes{ + infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + }, + // Using a custom merge strategy to override reasons applied during merge. + v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + // Use custom reasons. 
+ v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + infrav1.IBMVPCMachineNotReadyV1Beta2Reason, + infrav1.IBMVPCMachineReadyUnknownV1Beta2Reason, + infrav1.IBMVPCMachineReadyV1Beta2Reason, + )), + ), + }, + ); err != nil { + return fmt.Errorf("failed to set %s condition: %w", infrav1.IBMVPCMachineReadyV1Beta2Condition, err) + } + + // Patch the IBMVPCMachine resource. + return patchHelper.Patch(ctx, ibmVPCMachine, v1beta1patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + infrav1.IBMVPCMachineReadyV1Beta2Condition, + infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + clusterv1beta1.PausedV1Beta2Condition, + }}) +} diff --git a/controllers/ibmvpcmachine_controller_test.go b/controllers/ibmvpcmachine_controller_test.go index 48b91c8a9..26189dc79 100644 --- a/controllers/ibmvpcmachine_controller_test.go +++ b/controllers/ibmvpcmachine_controller_test.go @@ -27,19 +27,20 @@ import ( "github.com/IBM/vpc-go-sdk/vpcv1" "go.uber.org/mock/gomock" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" "k8s.io/utils/ptr" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" + v1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" //nolint:staticcheck ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope" + "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/accounts" gtmock "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/globaltagging/mock" - "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/utils" vpcmock 
"sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc/mock" . "github.com/onsi/gomega" @@ -48,10 +49,10 @@ import ( func TestIBMVPCMachineReconciler_Reconcile(t *testing.T) { testCases := []struct { name string - vpcMachine *infrav1beta2.IBMVPCMachine - ownerMachine *capiv1beta1.Machine - vpcCluster *infrav1beta2.IBMVPCCluster - ownerCluster *capiv1beta1.Cluster + vpcMachine *infrav1.IBMVPCMachine + ownerMachine *clusterv1.Machine + vpcCluster *infrav1.IBMVPCCluster + ownerCluster *clusterv1.Cluster expectError bool }{ { @@ -60,94 +61,94 @@ func TestIBMVPCMachineReconciler_Reconcile(t *testing.T) { }, { name: "Should Reconcile if Owner Reference is not set", - vpcMachine: &infrav1beta2.IBMVPCMachine{ + vpcMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "vpc-test-1", }, - Spec: infrav1beta2.IBMVPCMachineSpec{ - Image: &infrav1beta2.IBMVPCResourceReference{}, + Spec: infrav1.IBMVPCMachineSpec{ + Image: &infrav1.IBMVPCResourceReference{}, }, }, expectError: false, }, { name: "Should fail Reconcile if no OwnerMachine found", - vpcMachine: &infrav1beta2.IBMVPCMachine{ + vpcMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "vpc-test-2", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Machine", Name: "capi-test-machine", UID: "1", }, }, }, - Spec: infrav1beta2.IBMVPCMachineSpec{ - Image: &infrav1beta2.IBMVPCResourceReference{}, + Spec: infrav1.IBMVPCMachineSpec{ + Image: &infrav1.IBMVPCResourceReference{}, }, }, expectError: true, }, { name: "Should not Reconcile if machine does not contain cluster label", - vpcMachine: &infrav1beta2.IBMVPCMachine{ + vpcMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "vpc-test-3", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Machine", Name: "capi-test-machine", UID: 
"1", }, }, }, - Spec: infrav1beta2.IBMVPCMachineSpec{ - Image: &infrav1beta2.IBMVPCResourceReference{}, + Spec: infrav1.IBMVPCMachineSpec{ + Image: &infrav1.IBMVPCResourceReference{}, }, }, - ownerMachine: &capiv1beta1.Machine{ + ownerMachine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-test-machine"}}, - ownerCluster: &capiv1beta1.Cluster{ + ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-test-1"}}, expectError: false, }, { name: "Should not Reconcile if IBMVPCCluster is not found", - vpcMachine: &infrav1beta2.IBMVPCMachine{ + vpcMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "vpc-test-4", Labels: map[string]string{ - capiv1beta1.ClusterNameAnnotation: "capi-test-2"}, + clusterv1.ClusterNameAnnotation: "capi-test-2"}, OwnerReferences: []metav1.OwnerReference{ { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Machine", Name: "capi-test-machine", UID: "1", }, { - APIVersion: capiv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "Cluster", Name: "capi-test-2", UID: "1", }, }, }, - Spec: infrav1beta2.IBMVPCMachineSpec{ - Image: &infrav1beta2.IBMVPCResourceReference{}, + Spec: infrav1.IBMVPCMachineSpec{ + Image: &infrav1.IBMVPCResourceReference{}, }, }, - ownerMachine: &capiv1beta1.Machine{ + ownerMachine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-test-machine"}}, - ownerCluster: &capiv1beta1.Cluster{ + ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-test-2"}, - Spec: capiv1beta1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ Name: "vpc-cluster"}}}, expectError: false, }, @@ -179,7 +180,7 @@ func TestIBMVPCMachineReconciler_Reconcile(t *testing.T) { if tc.vpcMachine != nil { g.Eventually(func() bool { - machine := &infrav1beta2.IBMVPCMachine{} + machine := 
&infrav1.IBMVPCMachine{} key := client.ObjectKey{ Name: tc.vpcMachine.Name, Namespace: ns.Name, @@ -229,22 +230,21 @@ func TestIBMVPCMachineReconciler_reconcile(t *testing.T) { Log: klog.Background(), } machineScope = &scope.MachineScope{ - Logger: klog.Background(), - IBMVPCMachine: &infrav1beta2.IBMVPCMachine{ + IBMVPCMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-machine", Labels: map[string]string{ - capiv1beta1.MachineControlPlaneNameLabel: "capi-control-plane-machine", + clusterv1.MachineControlPlaneNameLabel: "capi-control-plane-machine", }, - Finalizers: []string{infrav1beta2.MachineFinalizer}, + Finalizers: []string{infrav1.MachineFinalizer}, }, }, - Machine: &capiv1beta1.Machine{ - Spec: capiv1beta1.MachineSpec{ + Machine: &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ ClusterName: "vpc-cluster", }, }, - IBMVPCCluster: &infrav1beta2.IBMVPCCluster{}, + IBMVPCCluster: &infrav1.IBMVPCCluster{}, IBMVPCClient: mockvpc, } } @@ -257,9 +257,9 @@ func TestIBMVPCMachineReconciler_reconcile(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) - g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1beta2.MachineFinalizer)) + g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) options := &vpcv1.ListInstancesOptions{} response := &core.DetailedResponse{} @@ -271,9 +271,9 @@ func TestIBMVPCMachineReconciler_reconcile(t *testing.T) { machineScope.Machine.Spec.Bootstrap.DataSecretName = ptr.To("capi-machine") machineScope.IBMVPCCluster.Status.Subnet.ID = ptr.To("capi-subnet-id") mockvpc.EXPECT().ListInstances(options).Return(instancelist, response, errors.New("Failed to create or fetch instance")) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) 
g.Expect(err).To(Not(BeNil())) - g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1beta2.MachineFinalizer)) + g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) }) } @@ -288,47 +288,46 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { Log: klog.Background(), } machineScope := &scope.MachineScope{ - Logger: klog.Background(), - IBMVPCMachine: &infrav1beta2.IBMVPCMachine{ + IBMVPCMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-machine", Labels: map[string]string{ - capiv1beta1.MachineControlPlaneNameLabel: "capi-control-plane-machine", + clusterv1.MachineControlPlaneNameLabel: "capi-control-plane-machine", }, - Finalizers: []string{infrav1beta2.MachineFinalizer}, + Finalizers: []string{infrav1.MachineFinalizer}, }, }, - Machine: &capiv1beta1.Machine{ - Spec: capiv1beta1.MachineSpec{ + Machine: &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ ClusterName: "vpc-cluster", - Bootstrap: capiv1beta1.Bootstrap{ + Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To("capi-machine"), }, }, }, - IBMVPCCluster: &infrav1beta2.IBMVPCCluster{ - Spec: infrav1beta2.IBMVPCClusterSpec{ - ControlPlaneLoadBalancer: &infrav1beta2.VPCLoadBalancerSpec{ + IBMVPCCluster: &infrav1.IBMVPCCluster{ + Spec: infrav1.IBMVPCClusterSpec{ + ControlPlaneLoadBalancer: &infrav1.VPCLoadBalancerSpec{ Name: "vpc-load-balancer", }, }, - Status: infrav1beta2.IBMVPCClusterStatus{ - Subnet: infrav1beta2.Subnet{ + Status: infrav1.IBMVPCClusterStatus{ + Subnet: infrav1.Subnet{ ID: ptr.To("capi-subnet-id"), }, - VPCEndpoint: infrav1beta2.VPCEndpoint{ + VPCEndpoint: infrav1.VPCEndpoint{ LBID: core.StringPtr("vpc-load-balancer-id"), }, }, }, - Cluster: &capiv1beta1.Cluster{}, + Cluster: &clusterv1.Cluster{}, IBMVPCClient: mockvpc, GlobalTaggingClient: mockgt, } return gomock.NewController(t), mockvpc, mockgt, machineScope, reconciler } - utils.GetAccountIDFunc = func() (string, error) { + 
accounts.GetAccountIDFunc = func() (string, error) { return "dummy-account-id", nil // Return dummy value } @@ -387,9 +386,9 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { mockgt.EXPECT().GetTagByName(gomock.AssignableToTypeOf("capi-cluster")).Return(existingTag, nil) mockgt.EXPECT().AttachTag(gomock.AssignableToTypeOf(&globaltaggingv1.AttachTagOptions{})).Return(nil, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To((Not(BeNil()))) - g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1beta2.MachineFinalizer)) + g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) t.Run("Should fail to bind loadBalancer IP to control plane", func(t *testing.T) { g := NewWithT(t) @@ -402,9 +401,9 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, errors.New("failed to list loadBalancerPoolMembers")) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(Not(BeNil())) - g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1beta2.MachineFinalizer)) + g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) t.Run("Should successfully reconcile IBMVPCMachine but its status should be set to Not Ready when the PoolMember is not yet in the active state requiring a requeue", func(t *testing.T) { g := NewWithT(t) @@ -422,11 +421,11 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { 
mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(customloadBalancerPoolMember, &core.DetailedResponse{}, nil) - result, err := reconciler.reconcileNormal(machineScope) + result, err := reconciler.reconcileNormal(ctx, machineScope) // Requeue should be set when the Pool Member is found, but not yet ready (active). g.Expect(result.RequeueAfter).To(Not(BeZero())) g.Expect(err).To(BeNil()) - g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1beta2.MachineFinalizer)) + g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) // Machine Status should not be ready (running but LB Member Pools not active). g.Expect(machineScope.IBMVPCMachine.Status.Ready).To(Equal(false)) }) @@ -446,9 +445,9 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(loadBalancerPoolMember, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) - g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1beta2.MachineFinalizer)) + g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) g.Expect(machineScope.IBMVPCMachine.Status.Ready).To(Equal(true)) }) @@ -488,7 +487,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { } 
mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(customInstancelist, &core.DetailedResponse{}, nil) - result, err := reconciler.reconcileNormal(machineScope) + result, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(Not(BeZero())) g.Expect(machineScope.IBMVPCMachine.Status.Ready).To(Equal(false)) @@ -513,7 +512,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { } mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(customInstancelist, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(machineScope.IBMVPCMachine.Status.Ready).To(Equal(true)) }) @@ -537,7 +536,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { } mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(customInstancelist, &core.DetailedResponse{}, nil) - result, err := reconciler.reconcileNormal(machineScope) + result, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(Not(BeZero())) g.Expect(machineScope.IBMVPCMachine.Status.Ready).To(Equal(false)) @@ -562,7 +561,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { } mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(customInstancelist, &core.DetailedResponse{}, nil) - result, err := reconciler.reconcileNormal(machineScope) + result, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(BeZero()) g.Expect(machineScope.IBMVPCMachine.Status.Ready).To(Equal(false)) @@ -588,13 +587,12 @@ func TestIBMVPCMachineReconciler_Delete(t *testing.T) { Log: klog.Background(), } machineScope = &scope.MachineScope{ - Logger: klog.Background(), - 
IBMVPCMachine: &infrav1beta2.IBMVPCMachine{ + IBMVPCMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-machine", - Finalizers: []string{infrav1beta2.MachineFinalizer}, + Finalizers: []string{infrav1.MachineFinalizer}, }, - Status: infrav1beta2.IBMVPCMachineStatus{ + Status: infrav1.IBMVPCMachineStatus{ InstanceID: "capi-machine-id", }, }, @@ -612,9 +610,9 @@ func TestIBMVPCMachineReconciler_Delete(t *testing.T) { setup(t) t.Cleanup(teardown) mockvpc.EXPECT().DeleteInstance(gomock.AssignableToTypeOf(options)).Return(nil, errors.New("Failed to delete the VPC instance")) - _, err := reconciler.reconcileDelete(machineScope) + _, err := reconciler.reconcileDelete(ctx, machineScope) g.Expect(err).To(Not(BeNil())) - g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1beta2.MachineFinalizer)) + g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) t.Run("Should successfully delete VPC machine and remove the finalizer", func(t *testing.T) { g := NewWithT(t) @@ -622,9 +620,9 @@ func TestIBMVPCMachineReconciler_Delete(t *testing.T) { t.Cleanup(teardown) response := &core.DetailedResponse{} mockvpc.EXPECT().DeleteInstance(gomock.AssignableToTypeOf(options)).Return(response, nil) - _, err := reconciler.reconcileDelete(machineScope) + _, err := reconciler.reconcileDelete(ctx, machineScope) g.Expect(err).To(BeNil()) - g.Expect(machineScope.IBMVPCMachine.Finalizers).To(Not(ContainElement(infrav1beta2.MachineFinalizer))) + g.Expect(machineScope.IBMVPCMachine.Finalizers).To(Not(ContainElement(infrav1.MachineFinalizer))) }) }) } @@ -638,28 +636,27 @@ func TestIBMVPCMachineLBReconciler_Delete(t *testing.T) { Log: klog.Background(), } machineScope := &scope.MachineScope{ - Logger: klog.Background(), - IBMVPCMachine: &infrav1beta2.IBMVPCMachine{ + IBMVPCMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-machine", - Finalizers: []string{infrav1beta2.MachineFinalizer}, + 
Finalizers: []string{infrav1.MachineFinalizer}, Labels: map[string]string{ - capiv1beta1.MachineControlPlaneNameLabel: "capi-control-plane-machine", + clusterv1.MachineControlPlaneNameLabel: "capi-control-plane-machine", }, }, - Status: infrav1beta2.IBMVPCMachineStatus{ + Status: infrav1.IBMVPCMachineStatus{ InstanceID: "capi-machine-id", }, }, IBMVPCClient: mockvpc, - IBMVPCCluster: &infrav1beta2.IBMVPCCluster{ - Spec: infrav1beta2.IBMVPCClusterSpec{ - ControlPlaneLoadBalancer: &infrav1beta2.VPCLoadBalancerSpec{ + IBMVPCCluster: &infrav1.IBMVPCCluster{ + Spec: infrav1.IBMVPCClusterSpec{ + ControlPlaneLoadBalancer: &infrav1.VPCLoadBalancerSpec{ Name: "vpc-load-balancer", }, }, - Status: infrav1beta2.IBMVPCClusterStatus{ - VPCEndpoint: infrav1beta2.VPCEndpoint{ + Status: infrav1.IBMVPCClusterStatus{ + VPCEndpoint: infrav1.VPCEndpoint{ LBID: core.StringPtr("vpc-load-balancer-id"), }, }, @@ -686,9 +683,9 @@ func TestIBMVPCMachineLBReconciler_Delete(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(&vpcv1.Instance{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, errors.New("failed to list LoadBalancerPoolMembers")) - _, err := reconciler.reconcileDelete(machineScope) + _, err := reconciler.reconcileDelete(ctx, machineScope) g.Expect(err).To((Not(BeNil()))) - g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1beta2.MachineFinalizer)) + g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) t.Run("Should successfully delete VPC machine and remove the finalizer", func(t *testing.T) { g := NewWithT(t) @@ -698,9 +695,139 @@ func 
TestIBMVPCMachineLBReconciler_Delete(t *testing.T) { mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(&vpcv1.Instance{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().DeleteInstance(gomock.AssignableToTypeOf(&vpcv1.DeleteInstanceOptions{})).Return(&core.DetailedResponse{}, nil) - _, err := reconciler.reconcileDelete(machineScope) + _, err := reconciler.reconcileDelete(ctx, machineScope) g.Expect(err).To(BeNil()) - g.Expect(machineScope.IBMVPCMachine.Finalizers).To(Not(ContainElement(infrav1beta2.MachineFinalizer))) + g.Expect(machineScope.IBMVPCMachine.Finalizers).To(Not(ContainElement(infrav1.MachineFinalizer))) }) }) } + +func TestIBMVPCMachine_Reconcile_Conditions(t *testing.T) { + testCases := []struct { + name string + vpcMachine *infrav1.IBMVPCMachine + ownerMachine *clusterv1.Machine + vpcCluster *infrav1.IBMVPCCluster + ownerCluster *clusterv1.Cluster + expectedCondition metav1.Condition + expectError bool + }{ + { + name: "Should set conditions on first reconcile", + vpcMachine: &infrav1.IBMVPCMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vpc-machine", Labels: map[string]string{ + clusterv1.ClusterNameAnnotation: "capi-cluster"}, + Finalizers: []string{infrav1.MachineFinalizer}, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + Name: "capi-test-machine", + UID: "1", + }, + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: "capi-cluster", + UID: "1", + }, + }, + }, + Spec: infrav1.IBMVPCMachineSpec{ + Image: &infrav1.IBMVPCResourceReference{}, + }, + }, + vpcCluster: &infrav1.IBMVPCCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "capi-cluster"}, + Spec: infrav1.IBMVPCClusterSpec{ + ControlPlaneEndpoint: 
v1beta1.APIEndpoint{ + Host: "cluster-host", + }, + }, + }, + ownerMachine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "capi-test-machine"}}, + ownerCluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "capi-cluster"}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-cluster"}}}, + expectedCondition: metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMVPCMachineInstanceNotReadyV1Beta2Reason, + }, + expectError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + reconciler := &IBMVPCMachineReconciler{ + Client: testEnv.Client, + Log: klog.Background(), + } + ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("namespace-%s", util.RandomString(5))) + g.Expect(err).To(BeNil()) + defer func() { + g.Expect(testEnv.Cleanup(ctx, ns)).To(Succeed()) + }() + + createObject(g, tc.ownerCluster, ns.Name) + defer cleanupObject(g, tc.ownerCluster) + + createObject(g, tc.vpcCluster, ns.Name) + defer cleanupObject(g, tc.vpcCluster) + + createObject(g, tc.ownerMachine, ns.Name) + defer cleanupObject(g, tc.ownerMachine) + + createObject(g, tc.vpcMachine, ns.Name) + defer cleanupObject(g, tc.vpcMachine) + + g.Eventually(func() bool { + machine := &infrav1.IBMVPCMachine{} + key := client.ObjectKey{ + Name: tc.vpcMachine.Name, + Namespace: ns.Name, + } + err = testEnv.Get(ctx, key, machine) + return err == nil + }, 10*time.Second).Should(Equal(true)) + + _, err = reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: client.ObjectKey{ + Namespace: tc.vpcMachine.Namespace, + Name: tc.vpcMachine.Name, + }, + }) + + if tc.expectError { + g.Expect(err).ToNot(BeNil()) + } else { + g.Expect(err).To(BeNil()) + } + + machine := &infrav1.IBMVPCMachine{} + key := client.ObjectKey{ + Name: tc.vpcMachine.Name, + Namespace: ns.Name, + } + + err = testEnv.Get(ctx, 
key, machine) + g.Expect(err).To(BeNil()) + g.Expect(len(machine.Status.V1Beta2.Conditions)).To(BeNumerically(">", 0)) + + instanceReadyCondition := v1beta2conditions.Get(machine, infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition) + g.Expect(instanceReadyCondition).To(Not(BeNil())) + g.Expect(tc.expectedCondition.Type).To(Equal(instanceReadyCondition.Type)) + g.Expect(tc.expectedCondition.Status).To(Equal(instanceReadyCondition.Status)) + g.Expect(tc.expectedCondition.Reason).To(Equal(instanceReadyCondition.Reason)) + }) + } +} diff --git a/controllers/ibmvpcmachinetemplate_controller.go b/controllers/ibmvpcmachinetemplate_controller.go index a2d1c5530..f34e12572 100644 --- a/controllers/ibmvpcmachinetemplate_controller.go +++ b/controllers/ibmvpcmachinetemplate_controller.go @@ -31,9 +31,9 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/endpoints" ) @@ -47,7 +47,7 @@ type IBMVPCMachineTemplateReconciler struct { func (r *IBMVPCMachineTemplateReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&infrav1beta2.IBMVPCMachineTemplate{}). + For(&infrav1.IBMVPCMachineTemplate{}). 
Complete(r) } @@ -58,7 +58,7 @@ func (r *IBMVPCMachineTemplateReconciler) Reconcile(ctx context.Context, req ctr log := ctrl.LoggerFrom(ctx) log.Info("Reconciling IBMVPCMachineTemplate") - var machineTemplate infrav1beta2.IBMVPCMachineTemplate + var machineTemplate infrav1.IBMVPCMachineTemplate if err := r.Get(ctx, req.NamespacedName, &machineTemplate); err != nil { log.Error(err, "Unable to fetch ibmvpcmachinetemplate") return ctrl.Result{}, client.IgnoreNotFound(err) @@ -77,9 +77,9 @@ func (r *IBMVPCMachineTemplateReconciler) Reconcile(ctx context.Context, req ctr return r.reconcileNormal(ctx, vpcClient, machineTemplate) } -func (r *IBMVPCMachineTemplateReconciler) reconcileNormal(ctx context.Context, vpcClient vpc.Vpc, machineTemplate infrav1beta2.IBMVPCMachineTemplate) (ctrl.Result, error) { +func (r *IBMVPCMachineTemplateReconciler) reconcileNormal(ctx context.Context, vpcClient vpc.Vpc, machineTemplate infrav1.IBMVPCMachineTemplate) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) - helper, err := patch.NewHelper(&machineTemplate, r.Client) + helper, err := v1beta1patch.NewHelper(&machineTemplate, r.Client) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to init patch helper: %w", err) } diff --git a/controllers/ibmvpcmachinetemplate_controller_test.go b/controllers/ibmvpcmachinetemplate_controller_test.go index b8cf445b8..3bc4bce06 100644 --- a/controllers/ibmvpcmachinetemplate_controller_test.go +++ b/controllers/ibmvpcmachinetemplate_controller_test.go @@ -31,7 +31,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc/mock" "github.com/IBM/go-sdk-core/v5/core" @@ -43,7 +43,7 @@ func TestIBMVPCMachineTemplateReconciler_Reconcile(t *testing.T) { testCases := []struct { name string expectError bool - 
VPCMachineTemplate *infrav1beta2.IBMVPCMachineTemplate + VPCMachineTemplate *infrav1.IBMVPCMachineTemplate }{ { name: "Should Reconcile successfully if no IBMVPCMachineTemplate found", @@ -74,7 +74,7 @@ func TestIBMVPCMachineTemplateReconciler_Reconcile(t *testing.T) { if tc.VPCMachineTemplate != nil { g.Eventually(func() bool { - machineTemplate := &infrav1beta2.IBMVPCMachineTemplate{} + machineTemplate := &infrav1.IBMVPCMachineTemplate{} key := client.ObjectKey{ Name: tc.VPCMachineTemplate.Name, Namespace: ns.Name, @@ -153,7 +153,7 @@ func TestIBMVPCMachineTemplateReconciler_reconcileNormal(t *testing.T) { } g.Expect(err).To(BeNil()) g.Eventually(func() bool { - machineTemplate := &infrav1beta2.IBMVPCMachineTemplate{} + machineTemplate := &infrav1.IBMVPCMachineTemplate{} key := client.ObjectKey{ Name: vPCMachineTemplate.Name, Namespace: ns.Name, @@ -214,15 +214,15 @@ func TestIBMVPCMachineTemplateReconciler_reconcileNormal(t *testing.T) { ) } -func stubVPCMachineTemplate(profile string) infrav1beta2.IBMVPCMachineTemplate { - return infrav1beta2.IBMVPCMachineTemplate{ +func stubVPCMachineTemplate(profile string) infrav1.IBMVPCMachineTemplate { + return infrav1.IBMVPCMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: "vpc-test-1", }, - Spec: infrav1beta2.IBMVPCMachineTemplateSpec{ - Template: infrav1beta2.IBMVPCMachineTemplateResource{ - Spec: infrav1beta2.IBMVPCMachineSpec{ - Image: &infrav1beta2.IBMVPCResourceReference{ + Spec: infrav1.IBMVPCMachineTemplateSpec{ + Template: infrav1.IBMVPCMachineTemplateResource{ + Spec: infrav1.IBMVPCMachineSpec{ + Image: &infrav1.IBMVPCResourceReference{ ID: ptr.To("capi-image"), }, Profile: profile, diff --git a/controllers/suite_test.go b/controllers/suite_test.go index e17195184..dfb802523 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -27,10 +27,10 @@ import ( ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports - infrav1beta2 
"sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" + infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/internal/webhooks" "sigs.k8s.io/cluster-api-provider-ibmcloud/test/helpers" - capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( @@ -47,8 +47,8 @@ func TestMain(m *testing.M) { // Setting up the test environment. func setup() { - utilruntime.Must(infrav1beta2.AddToScheme(scheme.Scheme)) - utilruntime.Must(capiv1beta1.AddToScheme(scheme.Scheme)) + utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) + utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), }, diff --git a/docs/book/src/developer/conversion.md b/docs/book/src/developer/conversion.md index 6b86fb405..c66769299 100644 --- a/docs/book/src/developer/conversion.md +++ b/docs/book/src/developer/conversion.md @@ -14,12 +14,12 @@ kubebuilder create api --group --version --kind **_NOTE:_** [Refer for more detailed information about prerequisites.](https://kubebuilder.io/multiversion-tutorial/api-changes.html#changing-things-up) ## Conversion flow -1. In each “spoke” version package, add marker `+k8s:conversion-gen` directive pointing to the “hub” version package. It must be in `doc.go`. [Refer](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/api/v1alpha4/doc.go) +1. In each “spoke” version package, add marker `+k8s:conversion-gen` directive pointing to the “hub” version package. It must be in `doc.go`. [Refer](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/api/v1beta1/doc.go) 2. In “hub” version package, create `doc.go` file without any marker. [Refer](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/api/v1beta1/doc.go) -3. 
In “spoke” version package, add a var `localSchemeBuilder = &SchemeBuilder.SchemeBuilder` in `groupversion_info.go` so the auto-generated code would compile. [Refer]( https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/e55357a520089abc6ac2a9ca91a5b9cf0606bbbc/api/v1alpha4/groupversion_info.go#L37) -4. In “hub” version package, create a `conversion.go` to implement the “hub” methods. [Refer](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/api/v1beta1/conversion.go) +3. In “spoke” version package, add a var `localSchemeBuilder = &SchemeBuilder.SchemeBuilder` in `groupversion_info.go` so the auto-generated code would compile. [Refer](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/api/v1beta1/groupversion_info.go) +4. In “hub” version package, create a `conversion.go` to implement the “hub” methods. [Refer](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/api/v1beta2/conversion.go) 5. Run target `make generate-go-conversions-core`, this will generate `zz_generated.conversion.go` in the spoke version package. -6. In "spoke" version package, update `{kind}_conversion.go` to implement Convertible for each type. When `conversion-gen` stops generating methods because of incompatibilities or we need to override the behavior, we stick them in this source file. Our “spoke” versions need to implement the Convertible interface. Namely, they’ll need ConvertTo and ConvertFrom methods to convert to/from the hub version. [Refer](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/api/v1alpha4/ibmpowervs_conversion.go) +6. In "spoke" version package, update `{kind}_conversion.go` to implement Convertible for each type. When `conversion-gen` stops generating methods because of incompatibilities or we need to override the behavior, we stick them in this source file. Our “spoke” versions need to implement the Convertible interface. 
Namely, they’ll need ConvertTo and ConvertFrom methods to convert to/from the hub version. [Refer](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/api/v1beta1/ibmpowervs_conversion.go) ## References - [What are hubs and spokes?](https://book.kubebuilder.io/multiversion-tutorial/conversion-concepts.html) diff --git a/docs/book/src/developer/dependencies.md b/docs/book/src/developer/dependencies.md index 4557274c2..6d9d5b480 100644 --- a/docs/book/src/developer/dependencies.md +++ b/docs/book/src/developer/dependencies.md @@ -10,12 +10,12 @@ - ##### K8s Packages | Package | Module name | Used by | | ------- | ----------- | ------- | -| api | [k8s.io/api](https://k8s.io/api) | [go.mod][go.mod1] | -| apiextensions-apiserver | [k8s.io/apiextensions-apiserver](https://k8s.io/apiextensions-apiserver) | [go.mod][go.mod1] | -| apimachinery | [k8s.io/apimachinery](https://k8s.io/apimachinery) | [go.mod][go.mod1] | -| cli-runtime | [k8s.io/cli-runtime](https://k8s.io/cli-runtime) | [go.mod][go.mod1] | -| client-go | [k8s.io/client-go](https://k8s.io/client-go) | [go.mod][go.mod1] | -| utils | [k8s.io/utils](https://k8s.io/utils) | [go.mod][go.mod1] | +| api | [api](https://github.com/kubernetes/api) | [go.mod][go.mod1] | +| apiextensions-apiserver | [apiextensions-apiserver](https://github.com/kubernetes/apiextensions-apiserver) | [go.mod][go.mod1] | +| apimachinery | [apimachinery](https://github.com/kubernetes/apimachinery) | [go.mod][go.mod1] | +| cli-runtime | [cli-runtime](https://github.com/kubernetes/cli-runtime) | [go.mod][go.mod1] | +| client-go | [client-go](https://github.com/kubernetes/client-go) | [go.mod][go.mod1] | +| utils | [utils](https://github.com/kubernetes/utils) | [go.mod][go.mod1] | | controller-runtime | [sigs.k8s.io/controller-runtime](https://sigs.k8s.io/controller-runtime) | [go.mod][go.mod1] | | controller-runtime/tools/setup-envtest | 
[sigs.k8s.io/controller-runtime/tools/setup-envtest](https://sigs.k8s.io/controller-runtime/tools/setup-envtest) | [hack/tools/go.mod][go.mod2] | | controller-tools | [sigs.k8s.io/controller-tools](https://sigs.k8s.io/controller-tools) | [hack/tools/go.mod][go.mod2] | @@ -49,8 +49,8 @@ #### Other Tools | Package | Used by | Source | | --- | ----------- | ------ | -| kind | [ensure-kind.sh](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/hack/ensure-kind.sh#L24) | [kind](https://github.com/kubernetes-sigs/kind) | -| kubebuilder-tools | [fetch_ext_bins.sh](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/scripts/fetch_ext_bins.sh#L29)
[Makefile](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/Makefile#L72) | [kubebuilder-tools](https://storage.googleapis.com/kubebuilder-tools) | +| kind | [ensure-kind.sh](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/hack/ensure-kind.sh) | [kind](https://github.com/kubernetes-sigs/kind) | +| kubebuilder | [Makefile](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/Makefile) | [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder) | [go.mod1]: https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/go.mod [go.mod2]: https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/hack/tools/go.mod \ No newline at end of file diff --git a/docs/book/src/developer/index.md b/docs/book/src/developer/index.md index 788b4eb7f..8bd5bca54 100644 --- a/docs/book/src/developer/index.md +++ b/docs/book/src/developer/index.md @@ -1,9 +1,9 @@ # Developer Guide -- [Rapid iterative development with Tilt](/developer/tilt.html) -- [Guide for API conversions](/developer/conversion.html) -- [Release Process](/developer/release.html) -- [Release Support Guidelines](/developer/release-support-guidelines.md) -- [How to build the machine boot images](/developer/build-images.html) -- [Modules and tools dependencies](/developer/dependencies.html) -- [E2E testing](/developer/e2e.html) \ No newline at end of file +- [Rapid iterative development with Tilt](./tilt.md) +- [Guide for API conversions](./conversion.md) +- [Release Process](./release.md) +- [Release Support Guidelines](./release-support-guidelines.md) +- [How to build the machine boot images](./build-images.md) +- [Modules and tools dependencies](./dependencies.md) +- [E2E testing](./e2e.md) \ No newline at end of file diff --git a/docs/book/src/developer/release-support-guidelines.md b/docs/book/src/developer/release-support-guidelines.md index ee5ace29a..b9ee72244 100644 --- 
a/docs/book/src/developer/release-support-guidelines.md +++ b/docs/book/src/developer/release-support-guidelines.md @@ -34,8 +34,9 @@ standard [backport policy](https://github.com/kubernetes-sigs/cluster-api/blob/m | Minor Release | API Version | Supported Until | |---------------|-------------|----------------------------------------------------| +| v0.11.x | **v1beta2** | when v0.13.0 will be released | | v0.10.x | **v1beta2** | when v0.12.0 will be released | -| v0.9.x | **v1beta2** | when v0.11.0 will be released | +| v0.9.x | **v1beta2** | EOL since 2025-05-13 - v0.11.0 release date | | v0.8.x | **v1beta2** | EOL since 2025-02-12 - v0.10.0 release date | | v0.7.x | **v1beta2** | EOL since 2024-11-22 - v0.9.0 release date | | v0.6.x | **v1beta2** | EOL since 2024-05-23 - v0.8.0 release date | diff --git a/docs/book/src/developer/release.md b/docs/book/src/developer/release.md index e3603b2e8..bdd7d4977 100644 --- a/docs/book/src/developer/release.md +++ b/docs/book/src/developer/release.md @@ -25,30 +25,35 @@ > Note: In the above instructions, `v0.2.0-alpha.3` is the version/tag is being released ## GA Releases -- Create a tag and push +- Review if all issues linked to the release version are either completed or moved to the "Next" release. +- Create a release branch from main. +- Clone the repository and create a tag (release tag) and push to origin. Ensure that the GPG keys are set. ```shell git clone git@github.com:kubernetes-sigs/cluster-api-provider-ibmcloud.git git tag -s -m "v0.1.0" v0.1.0 git push origin v0.1.0 ``` -- Wait for the google cloud build to be finished -- [Prepare release notes](#prepare-release-notes) -- Create a draft release with release notes for the tag -- Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter): +- Wait for the Google Cloudbuild to finish, which is triggered once the tag is created. 
+ - The status of the build jobs can be tracked from: [https://prow.k8s.io/?job=post-cluster-api-provider-ibmcloud-push-images](https://prow.k8s.io/?job=post-cluster-api-provider-ibmcloud-push-images) + - The built images are available here: [https://console.cloud.google.com/gcr/images/k8s-staging-capi-ibmcloud](https://console.cloud.google.com/gcr/images/k8s-staging-capi-ibmcloud) +- Create a draft release with release notes for the created tag. + - Use the `make release-notes` target to generate release notes. (Refer topic - [Prepare release notes](https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/blob/main/docs/book/src/developer/release.md#prepare-release-notes)) + - Update the controller image version towards the bottom of the release document. +- Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/registry.k8s.io#image-promoter): - Clone and pull down the latest from [kubernetes/k8s.io](https://github.com/kubernetes/k8s.io) - Create a new branch in your fork of `kubernetes/k8s.io`. - The staging repository is [here](https://console.cloud.google.com/gcr/images/k8s-staging-capi-ibmcloud/GLOBAL). - Once image is present in the above staging repository, find the sha256 tag for the image by following instructions ```shell - $ manifest-tool inspect --raw gcr.io/k8s-staging-capi-ibmcloud/cluster-api-ibmcloud-controller:v0.1.0 | jq '.[0].Digest' + $ manifest-tool inspect --raw gcr.io/k8s-staging-capi-ibmcloud/cluster-api-ibmcloud-controller:v0.1.0 | jq '.digest' "sha256:6c92a6a337ca5152eda855ac27c9e4ca1f30bba0aa4de5c3a0b937270ead4363" ``` - In your `kubernetes/k8s.io` branch edit `k8s.gcr.io/images/k8s-staging-capi-ibmcloud/images.yaml` and add an entry for the version using the sha256 value got from the above command. 
For example: `"sha256:6c92a6a337ca5152eda855ac27c9e4ca1f30bba0aa4de5c3a0b937270ead4363": ["v0.1.0"]` - - You can use [this PR](https://github.com/kubernetes/k8s.io/pull/3185) as example - - Wait for the PR to be approved and merged - - Run `make release` command - - Copy the content from `out` directory to release asset - - Publish the drafted release + - You can use [this PR](https://github.com/kubernetes/k8s.io/pull/7780) as example. - - Wait for the PR to be approved and merged. - - This should trigger a build job to build artifacts through cloud-build / run `make release` on the release branch. - - Upload the binaries/files that are uploaded to Google Cloud Storage / built locally and publish the drafted release. - - Create an alpha tag for the `release-version+1` for allowing subsequent commits. > Note: In the above instructions, `v0.1.0` is the version/tag is being released diff --git a/docs/book/src/developer/tilt.md b/docs/book/src/developer/tilt.md index 37e63a560..1908e1f69 100644 --- a/docs/book/src/developer/tilt.md +++ b/docs/book/src/developer/tilt.md @@ -97,11 +97,11 @@ extra_args: --- ## Different flavors of deploying workload clusters using CAPIBM. -> **Note:** Currently, both [ClusterClass](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/index.html) and [ClusterResourceset](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set.html) are experimental features. By default, the workload cluster is deployed using the external Cloud Controller Manager (CCM). +> **Note:** Currently, [ClusterClass](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/index.html) is an experimental feature. By default, the workload cluster is deployed using the external Cloud Controller Manager (CCM). ### 1. 
Configuration to deploy workload cluster from ClusterClass template -To deploy workload cluster with [clusterclass-template](/topics/powervs/clusterclass-cluster.html), enable the feature gates `EXP_CLUSTER_RESOURCE_SET` and `CLUSTER_TOPOLOGY` to `true` under kustomize_substitutions. +To deploy workload cluster with [clusterclass-template](../topics/powervs/creating-a-cluster.md#deploy-a-powervs-cluster-with-cluster-class), enable the feature gates `EXP_CLUSTER_RESOURCE_SET` and `CLUSTER_TOPOLOGY` to `true` under kustomize_substitutions. ```yaml default_registry: "localhost:5001" @@ -183,7 +183,7 @@ Kind cluster becomes a management cluster after this point, check the pods runni ## Create workload clusters -To provision your workload cluster, check the `Creating a cluster` section for [VPC](/topics/vpc/creating-a-cluster.html) and [PowerVS](/topics/powervs/creating-a-cluster.html). +To provision your workload cluster, check the `Creating a cluster` section for [VPC](../topics/vpc/creating-a-cluster.md) and [PowerVS](../topics/powervs/creating-a-cluster.md). After deploying it, check the tilt logs and wait for the clusters to be created. diff --git a/docs/book/src/getting-started.md b/docs/book/src/getting-started.md index 708eb9f6f..65f8857c5 100644 --- a/docs/book/src/getting-started.md +++ b/docs/book/src/getting-started.md @@ -51,7 +51,7 @@ it into a management cluster using `clusterctl`. ```console export SERVICE_ENDPOINT=us-south:vpc=https://us-south-stage01.iaasdev.cloud.ibm.com,powervs=https://dal.power-iaas.test.cloud.ibm.com,rc=https://resource-controller.test.cloud.ibm.com ``` - > Note: Refer [Regions-Zones Mapping](/reference/regions-zones-mapping.html) for more information. + > Note: Refer [Regions-Zones Mapping](./reference/regions-zones-mapping.md) for more information. 4. For enabling debug level logs for the controller, set the `LOGLEVEL` environment variable(defaults to 0). 
```console @@ -86,4 +86,4 @@ it into a management cluster using `clusterctl`. clusterctl generate cluster [name] --kubernetes-version [version] | kubectl apply -f - ``` -6. Once the management cluster is ready with the required providers up and running, proceed to provisioning the workload cluster. Check the respective sections for [VPC](/topics/vpc/creating-a-cluster.html) and [PowerVS](/topics/powervs/creating-a-cluster.html) to deploy the cluster. +6. Once the management cluster is ready with the required providers up and running, proceed to provisioning the workload cluster. Check the respective sections for [VPC](./topics/vpc/creating-a-cluster.md) and [PowerVS](./topics/powervs/creating-a-cluster.md) to deploy the cluster. diff --git a/docs/book/src/introduction.md b/docs/book/src/introduction.md index 488ff9c52..dfabce8a7 100644 --- a/docs/book/src/introduction.md +++ b/docs/book/src/introduction.md @@ -1,10 +1,5 @@ # Kubernetes Cluster API Provider IBM Cloud -

-Kubernetes Cluster API Provider IBM Cloud -

- -------

Kubernetes-native declarative infrastructure for IBM Cloud.

## What is the Cluster API Provider IBM Cloud @@ -14,14 +9,15 @@ The [Cluster API](https://github.com/kubernetes-sigs/cluster-api) brings declara The API itself is shared across multiple cloud providers allowing for true IBM Cloud hybrid deployments of Kubernetes. It is built atop the lessons learned from previous cluster managers such as [kops](https://github.com/kubernetes/kops) and -[kubicorn](http://kubicorn.io/). +[kubicorn](https://github.com/kubicorn/kubicorn).