
Commit 1f2a46c

Add e2e spec to upgrade/reinstall Helm chart
1 parent 8cebe5f commit 1f2a46c

5 files changed: +327 additions, -87 deletions

test/e2e/common.go

Lines changed: 71 additions & 0 deletions

@@ -30,8 +30,11 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	apitypes "k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/utils/ptr"
+	addonsv1alpha1 "sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	kubeadmv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
 	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
 	"sigs.k8s.io/cluster-api/test/framework"

@@ -115,6 +118,74 @@ func EnsureCalicoIsReady(ctx context.Context, input clusterctl.ApplyCustomCluste
 	}
 }
 
+// EnsureHelmReleaseInstallOrUpgrade ensures that a Helm install or upgrade is successful. Only one of installInput or upgradeInput should be provided
+// depending on the Helm operation.
+func EnsureHelmReleaseInstallOrUpgrade(ctx context.Context, specName string, bootstrapClusterProxy framework.ClusterProxy, installInput *HelmInstallInput, upgradeInput *HelmUpgradeInput) {
+	var (
+		clusterName      string
+		clusterNamespace string
+		helmChartProxy   *addonsv1alpha1.HelmChartProxy
+		expectedRevision int
+	)
+
+	Expect(installInput != nil || upgradeInput != nil).To(BeTrue(), "either installInput or upgradeInput should be provided")
+	if installInput != nil {
+		Expect(upgradeInput).To(BeNil(), "only one of installInput or upgradeInput should be provided")
+		clusterName = installInput.ClusterName
+		clusterNamespace = installInput.Namespace.Name
+		helmChartProxy = installInput.HelmChartProxy
+		expectedRevision = 1
+	} else if upgradeInput != nil {
+		Expect(installInput).To(BeNil(), "only one of installInput or upgradeInput should be provided")
+		clusterName = upgradeInput.ClusterName
+		clusterNamespace = upgradeInput.Namespace.Name
+		helmChartProxy = upgradeInput.HelmChartProxy
+		expectedRevision = upgradeInput.ExpectedRevision
+	}
+
+	mgmtClient := bootstrapClusterProxy.GetClient()
+	Expect(mgmtClient).NotTo(BeNil())
+
+	// Get Cluster from management Cluster
+	workloadCluster := &clusterv1.Cluster{}
+	key := apitypes.NamespacedName{
+		Namespace: clusterNamespace,
+		Name:      clusterName,
+	}
+	err := mgmtClient.Get(ctx, key, workloadCluster)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Patch cluster labels, ignore match expressions for now
+	selector := helmChartProxy.Spec.ClusterSelector
+	labels := workloadCluster.Labels
+	if labels == nil {
+		labels = make(map[string]string)
+	}
+
+	for k, v := range selector.MatchLabels {
+		labels[k] = v
+	}
+
+	err = mgmtClient.Update(ctx, workloadCluster)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Wait for HelmReleaseProxy to be ready
+	hrpWaitInput := GetWaitForHelmReleaseProxyReadyInput(ctx, bootstrapClusterProxy, clusterName, *helmChartProxy, expectedRevision, specName)
+	WaitForHelmReleaseProxyReady(ctx, hrpWaitInput, e2eConfig.GetIntervals(specName, "wait-helmreleaseproxy-ready")...)
+
+	// Get workload Cluster proxy
+	By("creating a clusterctl proxy to the workload cluster")
+	workloadClusterProxy := bootstrapClusterProxy.GetWorkloadCluster(ctx, clusterNamespace, clusterName)
+	Expect(workloadClusterProxy).NotTo(BeNil())
+
+	// Wait for Helm release on workload cluster to have status = deployed
+	releaseWaitInput := GetWaitForHelmReleaseDeployedInput(ctx, workloadClusterProxy, hrpWaitInput.HelmReleaseProxy.Spec.ReleaseName, hrpWaitInput.HelmReleaseProxy.Spec.ReleaseNamespace, specName)
+	release := WaitForHelmReleaseDeployed(ctx, releaseWaitInput, e2eConfig.GetIntervals(specName, "wait-helm-release-deployed")...)
+
+	// Verify Helm release values and revision.
+	ValidateHelmRelease(ctx, hrpWaitInput.HelmReleaseProxy, release, expectedRevision)
+}
+
 // CheckTestBeforeCleanup checks to see if the current running Ginkgo test failed, and prints
 // a status message regarding cleanup.
 func CheckTestBeforeCleanup() {
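
The new helper is shared by the install and upgrade specs changed below. As a rough sketch (assuming the package's existing HelmInstallInput and HelmUpgradeInput types and the suite-level ctx and proxies), the two intended call patterns are:

// Illustrative only (not part of this commit): a hypothetical wrapper showing how the helper is
// meant to be driven from the install and upgrade specs further down in this diff.
func ensureAddon(ctx context.Context, install *HelmInstallInput, upgrade *HelmUpgradeInput) {
	if install != nil {
		// Fresh install: the helper pins the expected Helm revision to 1.
		EnsureHelmReleaseInstallOrUpgrade(ctx, "helm-install", install.BootstrapClusterProxy, install, nil)
		return
	}
	// Upgrade or forced reinstall: the caller supplies the expected revision via HelmUpgradeInput.
	EnsureHelmReleaseInstallOrUpgrade(ctx, "helm-upgrade", upgrade.BootstrapClusterProxy, nil, upgrade)
}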

test/e2e/helm_install.go

Lines changed: 7 additions & 45 deletions

@@ -25,9 +25,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/types"
 	addonsv1alpha1 "sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
 
 	"sigs.k8s.io/cluster-api/test/framework"

@@ -42,15 +40,13 @@ type HelmInstallInput struct {
 }
 
 // HelmInstallSpec implements a test that verifies a Helm chart can be installed on a workload cluster. It creates a HelmChartProxy
-// resource and patches the Cluster labels such that they match the HelmChartProxy's clusterSelector. It then waits for the Helm
-// release to be deployed on the workload cluster.
+// resource and patches the Cluster labels such that they match the HelmChartProxy's clusterSelector. It then waits for the Helm release
+// to be deployed on the workload cluster and validates the release and the HelmReleaseProxy status fields.
 func HelmInstallSpec(ctx context.Context, inputGetter func() HelmInstallInput) {
 	var (
-		specName             = "helm-install"
-		input                HelmInstallInput
-		workloadClusterProxy framework.ClusterProxy
-		mgmtClient           ctrlclient.Client
-		err                  error
+		specName   = "helm-install"
+		input      HelmInstallInput
+		mgmtClient ctrlclient.Client
 	)
 
 	input = inputGetter()

@@ -62,42 +58,8 @@ func HelmInstallSpec(ctx context.Context, inputGetter func() HelmInstallInput) {
 	Expect(mgmtClient).NotTo(BeNil())
 
 	// Create HCP on management Cluster
-	Byf("Creating HelmChartProxy %s/%s", input.HelmChartProxy.Namespace, input.HelmChartProxy.Name)
+	Byf("Creating HelmChartProxy %s/%s", input.Namespace, input.HelmChartProxy.Name)
 	Expect(mgmtClient.Create(ctx, input.HelmChartProxy)).To(Succeed())
 
-	// Get Cluster from management Cluster
-	workloadCluster := &clusterv1.Cluster{}
-	key := types.NamespacedName{
-		Namespace: input.Namespace.Name,
-		Name:      input.ClusterName,
-	}
-	err = mgmtClient.Get(ctx, key, workloadCluster)
-	Expect(err).NotTo(HaveOccurred())
-
-	// Patch cluster labels, ignore match expressions for now
-	selector := input.HelmChartProxy.Spec.ClusterSelector
-	labels := workloadCluster.Labels
-	if labels == nil {
-		labels = make(map[string]string)
-	}
-
-	for k, v := range selector.MatchLabels {
-		labels[k] = v
-	}
-
-	err = mgmtClient.Update(ctx, workloadCluster)
-	Expect(err).NotTo(HaveOccurred())
-
-	// Wait for HelmReleaseProxy to be ready
-	hrpWaitInput := GetWaitForHelmReleaseProxyReadyInput(ctx, bootstrapClusterProxy, input.ClusterName, *input.HelmChartProxy, specName)
-	WaitForHelmReleaseProxyReady(ctx, hrpWaitInput, e2eConfig.GetIntervals(specName, "wait-helmreleaseproxy-ready")...)
-
-	// Get workload Cluster proxy
-	By("creating a clusterctl proxy to the workload cluster")
-	workloadClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, input.Namespace.Name, input.ClusterName)
-	Expect(workloadClusterProxy).NotTo(BeNil())
-
-	// Wait for Helm release on workload cluster to have stauts = deployed
-	releaseWaitInput := GetWaitForHelmReleaseDeployedInput(ctx, workloadClusterProxy, input.HelmChartProxy.Spec.ReleaseName, input.Namespace.Name, specName)
-	WaitForHelmReleaseDeployed(ctx, releaseWaitInput, e2eConfig.GetIntervals(specName, "wait-helm-release-deployed")...)
+	EnsureHelmReleaseInstallOrUpgrade(ctx, specName, input.BootstrapClusterProxy, &input, nil)
 }
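
Callers build the input the same way the updated helm_test.go does. A minimal sketch of driving the slimmed-down spec; the variable names (bootstrapClusterProxy, namespace, clusterName, hcp) mirror helm_test.go and are assumptions here:

// Sketch of invoking the spec; variable names mirror helm_test.go and are assumptions here.
HelmInstallSpec(ctx, func() HelmInstallInput {
	return HelmInstallInput{
		BootstrapClusterProxy: bootstrapClusterProxy,
		Namespace:             namespace,   // *corev1.Namespace the workload cluster lives in
		ClusterName:           clusterName, // cluster to label so it matches the clusterSelector
		HelmChartProxy:        hcp,         // HelmChartProxy created for the nginx-ingress chart
	}
})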

test/e2e/helm_test.go

Lines changed: 67 additions & 23 deletions

@@ -41,6 +41,11 @@ import (
 
 var nginxValues = `controller:
   name: "{{ .ControlPlane.metadata.name }}-nginx"
+  nginxStatus:
+    allowCidrs: {{ index .Cluster.spec.clusterNetwork.pods.cidrBlocks 0 }}`
+
+var newNginxValues = `controller:
+  name: "{{ .Cluster.metadata.name }}-nginx"
   nginxStatus:
     allowCidrs: 127.0.0.1,::1,{{ index .Cluster.spec.clusterNetwork.pods.cidrBlocks 0 }}`
 

@@ -113,7 +118,7 @@ var _ = Describe("Workload cluster creation", func() {
 	})
 
 	Context("Creating workload cluster [REQUIRED]", func() {
-		It("With default template and calico Helm chart", func() {
+		It("With default template to install and upgrade nginx Helm chart", func() {
 			clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
 			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
 				specName,

@@ -126,30 +131,27 @@ var _ = Describe("Workload cluster creation", func() {
 				}),
 			), result)
 
-			// Create new Helm chart
-			By("Creating new HelmChartProxy to install nginx", func() {
-				hcp := &addonsv1alpha1.HelmChartProxy{
-					TypeMeta: metav1.TypeMeta{
-						APIVersion: addonsv1alpha1.GroupVersion.String(),
-						Kind:       "HelmChartProxy",
-					},
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "nginx-ingress",
-						Namespace: namespace.Name,
-					},
-					Spec: addonsv1alpha1.HelmChartProxySpec{
-						ClusterSelector: metav1.LabelSelector{
-							MatchLabels: map[string]string{
-								"nginxIngress": "enabled",
-							},
+			hcp := &addonsv1alpha1.HelmChartProxy{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "nginx-ingress",
+					Namespace: namespace.Name,
+				},
+				Spec: addonsv1alpha1.HelmChartProxySpec{
+					ClusterSelector: metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							"nginxIngress": "enabled",
 						},
-						ReleaseName:    "nginx-ingress",
-						ChartName:      "nginx-ingress",
-						RepoURL:        "https://helm.nginx.com/stable",
-						ValuesTemplate: nginxValues,
-						Options:        addonsv1alpha1.HelmOptions{},
 					},
-				}
+					ReleaseName:      "nginx-ingress",
+					ReleaseNamespace: "nginx-namespace",
+					ChartName:        "nginx-ingress",
+					RepoURL:          "https://helm.nginx.com/stable",
+					ValuesTemplate:   nginxValues,
+				},
+			}
+
+			// Create new Helm chart
+			By("Creating new HelmChartProxy to install nginx", func() {
 				HelmInstallSpec(ctx, func() HelmInstallInput {
 					return HelmInstallInput{
 						BootstrapClusterProxy: bootstrapClusterProxy,

@@ -159,6 +161,48 @@ var _ = Describe("Workload cluster creation", func() {
 					}
 				})
 			})
+
+			// Update existing Helm chart
+			By("Updating nginx HelmChartProxy valuesTemplate", func() {
+				hcp.Spec.ValuesTemplate = newNginxValues
+				HelmUpgradeSpec(ctx, func() HelmUpgradeInput {
+					return HelmUpgradeInput{
+						BootstrapClusterProxy: bootstrapClusterProxy,
+						Namespace:             namespace,
+						ClusterName:           clusterName,
+						HelmChartProxy:        hcp,
+						ExpectedRevision:      2,
+					}
+				})
+			})
+
+			// Force reinstall of existing Helm chart by changing the release namespace
+			By("Updating HelmChartProxy release namespace", func() {
+				hcp.Spec.ReleaseNamespace = "new-nginx-namespace"
+				HelmUpgradeSpec(ctx, func() HelmUpgradeInput {
+					return HelmUpgradeInput{
+						BootstrapClusterProxy: bootstrapClusterProxy,
+						Namespace:             namespace,
+						ClusterName:           clusterName,
+						HelmChartProxy:        hcp,
+						ExpectedRevision:      1,
+					}
+				})
+			})
+
+			// Force reinstall of existing Helm chart by changing the release name
+			By("Updating HelmChartProxy release name", func() {
+				hcp.Spec.ReleaseName = "new-nginx-name"
+				HelmUpgradeSpec(ctx, func() HelmUpgradeInput {
+					return HelmUpgradeInput{
+						BootstrapClusterProxy: bootstrapClusterProxy,
+						Namespace:             namespace,
+						ClusterName:           clusterName,
+						HelmChartProxy:        hcp,
+						ExpectedRevision:      1,
+					}
+				})
+			})
 		})
 	})
 })
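
The ValuesTemplate strings above are Go templates rendered against cluster data before the chart is installed. A standalone, hypothetical rendering with text/template and map-based stand-ins for the real Cluster and ControlPlane objects (the provider's actual template data and functions may differ) gives an idea of the values the first release would carry:

package main

import (
	"os"
	"text/template"
)

// Hypothetical rendering of the test's first values template; a plain map stands in for the data
// the controller would normally build from the Cluster and control plane objects.
func main() {
	const nginxValues = `controller:
  name: "{{ .ControlPlane.metadata.name }}-nginx"
  nginxStatus:
    allowCidrs: {{ index .Cluster.spec.clusterNetwork.pods.cidrBlocks 0 }}`

	data := map[string]any{
		"ControlPlane": map[string]any{"metadata": map[string]any{"name": "test-abc123-control-plane"}},
		"Cluster": map[string]any{"spec": map[string]any{"clusterNetwork": map[string]any{
			"pods": map[string]any{"cidrBlocks": []string{"192.168.0.0/16"}},
		}}},
	}

	t := template.Must(template.New("values").Parse(nginxValues))
	// Prints the rendered YAML, e.g. name: "test-abc123-control-plane-nginx" and the pod CIDR.
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}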

test/e2e/helm_upgrade.go

Lines changed: 88 additions & 0 deletions

@@ -0,0 +1,88 @@
+//go:build e2e
+// +build e2e
+
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+	addonsv1alpha1 "sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1"
+	"sigs.k8s.io/cluster-api/util/patch"
+	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	"sigs.k8s.io/cluster-api/test/framework"
+)
+
+// HelmUpgradeInput specifies the input for updating or reinstalling a Helm chart on a workload cluster and verifying that it was successful.
+type HelmUpgradeInput struct {
+	BootstrapClusterProxy framework.ClusterProxy
+	Namespace             *corev1.Namespace
+	ClusterName           string
+	HelmChartProxy        *addonsv1alpha1.HelmChartProxy // Note: Only the Spec field is used.
+	ExpectedRevision      int
+}
+
+// HelmUpgradeSpec implements a test that verifies a Helm chart can be either updated or reinstalled on a workload cluster, depending on
+// whether an immutable field has changed. It takes a HelmChartProxy resource and updates ONLY the spec field and patches the Cluster labels
+// such that they match the HelmChartProxy's clusterSelector. It then waits for the Helm release to be deployed on the workload cluster
+// and validates the release and the HelmReleaseProxy status fields.
+func HelmUpgradeSpec(ctx context.Context, inputGetter func() HelmUpgradeInput) {
+	var (
+		specName   = "helm-upgrade"
+		input      HelmUpgradeInput
+		mgmtClient ctrlclient.Client
+		err        error
+	)
+
+	input = inputGetter()
+	Expect(input.BootstrapClusterProxy).NotTo(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
+	Expect(input.Namespace).NotTo(BeNil(), "Invalid argument. input.Namespace can't be nil when calling %s spec", specName)
+
+	By("creating a Kubernetes client to the management cluster")
+	mgmtClient = input.BootstrapClusterProxy.GetClient()
+	Expect(mgmtClient).NotTo(BeNil())
+
+	// Get existing HCP from management Cluster
+	existing := &addonsv1alpha1.HelmChartProxy{}
+	key := types.NamespacedName{
+		Namespace: input.HelmChartProxy.Namespace,
+		Name:      input.HelmChartProxy.Name,
+	}
+	err = mgmtClient.Get(ctx, key, existing)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Patch HCP on management Cluster
+	Byf("Patching HelmChartProxy %s/%s", existing.Namespace, existing.Name)
+	patchHelper, err := patch.NewHelper(existing, mgmtClient)
+	Expect(err).ToNot(HaveOccurred())
+
+	existing.Spec = input.HelmChartProxy.Spec
+	input.HelmChartProxy = existing
+
+	Eventually(func() error {
+		return patchHelper.Patch(ctx, existing)
+	}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch HelmChartProxy %s", klog.KObj(existing))
+
+	EnsureHelmReleaseInstallOrUpgrade(ctx, specName, input.BootstrapClusterProxy, nil, &input)
+}
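
The ExpectedRevision values used in helm_test.go encode the rule this spec exercises: changing only mutable spec fields rolls out as an in-place Helm upgrade, while changing an immutable field such as releaseName or releaseNamespace forces a reinstall whose release history starts over. A tiny illustrative helper (not part of the commit) states that rule:

// expectedHelmRevision is illustrative only: it captures why helm_test.go expects revision 2 after
// the valuesTemplate update but revision 1 again after changing the release namespace or name.
func expectedHelmRevision(previousRevision int, immutableFieldChanged bool) int {
	if immutableFieldChanged {
		return 1 // the old release is replaced by a fresh install, so Helm history restarts
	}
	return previousRevision + 1 // an in-place upgrade bumps the existing release's revision
}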
