
Commit 8cebe5f

Merge pull request #171 from Jont828/helm-install-spec

✨ Add e2e test to install and validate a Helm chart on a workload cluster

2 parents: ca44da5 + d00fcd9

File tree: 7 files changed, +678 −2 lines

Makefile

Lines changed: 2 additions & 2 deletions

@@ -631,8 +631,8 @@ clean: ## Remove generated binaries, GitBook files, Helm charts, and Tilt build
 	$(MAKE) clean-bin
 
 .PHONY: clean-kind
-clean-kind: ## Cleans up the kind cluster with the name $CAPI_KIND_CLUSTER_NAME
-	kind delete cluster --name="$(CAPI_KIND_CLUSTER_NAME)" || true
+clean-kind: ## Cleans up the kind cluster from e2e tests
+	kind delete cluster --name=caaph-e2e || true
 
 .PHONY: clean-bin
 clean-bin: ## Remove all generated binaries

test/e2e/common.go

Lines changed: 169 additions & 0 deletions

@@ -21,12 +21,19 @@ package e2e
 
 import (
 	"context"
+	"log"
+	"path/filepath"
 
 	. "github.com/onsi/ginkgo/v2"
 	"github.com/onsi/ginkgo/v2/types"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/utils/ptr"
 	kubeadmv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
+	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -116,3 +123,165 @@ func CheckTestBeforeCleanup() {
 	}
 	Logf("Cleaning up after \"%s\" spec", CurrentSpecReport().FullText())
 }
+
+// setupSpecNamespace returns the namespace with the given name, creating it if needed,
+// and starts an event watcher for it that logs to the artifact folder.
+func setupSpecNamespace(ctx context.Context, namespaceName string, clusterProxy framework.ClusterProxy, artifactFolder string) (*corev1.Namespace, context.CancelFunc, error) {
+	Byf("Creating namespace %q for hosting the cluster", namespaceName)
+	Logf("starting to create namespace for hosting the %q test spec", namespaceName)
+	logPath := filepath.Join(artifactFolder, "clusters", clusterProxy.GetName())
+	namespace, err := GetNamespace(ctx, clusterProxy.GetClientSet(), namespaceName)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return nil, nil, err
+	}
+
+	// The namespace already exists; wire up an event watcher for it.
+	if err == nil {
+		Byf("Creating event watcher for existing namespace %q", namespace.Name)
+		watchesCtx, cancelWatches := context.WithCancel(ctx)
+		go func() {
+			defer GinkgoRecover()
+			framework.WatchNamespaceEvents(watchesCtx, framework.WatchNamespaceEventsInput{
+				ClientSet: clusterProxy.GetClientSet(),
+				Name:      namespace.Name,
+				LogFolder: logPath,
+			})
+		}()
+
+		return namespace, cancelWatches, nil
+	}
+
+	// Create the namespace and wire up an event watcher for it.
+	namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
+		Creator:   clusterProxy.GetClient(),
+		ClientSet: clusterProxy.GetClientSet(),
+		Name:      namespaceName,
+		LogFolder: logPath,
+	})
+
+	return namespace, cancelWatches, nil
+}
+
+// GetNamespace returns the namespace with the given name.
+func GetNamespace(ctx context.Context, clientset *kubernetes.Clientset, name string) (*corev1.Namespace, error) {
+	opts := metav1.GetOptions{}
+	namespace, err := clientset.CoreV1().Namespaces().Get(ctx, name, opts)
+	if err != nil {
+		log.Printf("failed trying to get namespace (%s): %s\n", name, err.Error())
+		return nil, err
+	}
+
+	return namespace, nil
+}
+
+// createApplyClusterTemplateInput builds a default ApplyClusterTemplateAndWaitInput for the
+// given spec and applies any functional options on top of it.
+func createApplyClusterTemplateInput(specName string, changes ...func(*clusterctl.ApplyClusterTemplateAndWaitInput)) clusterctl.ApplyClusterTemplateAndWaitInput {
+	input := clusterctl.ApplyClusterTemplateAndWaitInput{
+		ClusterProxy: bootstrapClusterProxy,
+		ConfigCluster: clusterctl.ConfigClusterInput{
+			LogFolder:                filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
+			ClusterctlConfigPath:     clusterctlConfigPath,
+			KubeconfigPath:           bootstrapClusterProxy.GetKubeconfigPath(),
+			InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
+			Flavor:                   clusterctl.DefaultFlavor,
+			Namespace:                "default",
+			ClusterName:              "cluster",
+			KubernetesVersion:        e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
+			ControlPlaneMachineCount: ptr.To[int64](1),
+			WorkerMachineCount:       ptr.To[int64](1),
+		},
+		WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
+		WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
+		WaitForMachineDeployments:    e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
+		WaitForMachinePools:          e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
+		CNIManifestPath:              "",
+	}
+	for _, change := range changes {
+		change(&input)
+	}
+
+	return input
+}
+
+func withClusterProxy(proxy framework.ClusterProxy) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		input.ClusterProxy = proxy
+	}
+}
+
+func withFlavor(flavor string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		input.ConfigCluster.Flavor = flavor
+	}
+}
+
+func withNamespace(namespace string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		input.ConfigCluster.Namespace = namespace
+	}
+}
+
+func withClusterName(clusterName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		input.ConfigCluster.ClusterName = clusterName
+	}
+}
+
+func withKubernetesVersion(version string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		input.ConfigCluster.KubernetesVersion = version
+	}
+}
+
+func withControlPlaneMachineCount(count int64) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		input.ConfigCluster.ControlPlaneMachineCount = ptr.To[int64](count)
+	}
+}
+
+func withWorkerMachineCount(count int64) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		input.ConfigCluster.WorkerMachineCount = ptr.To[int64](count)
+	}
+}
+
+func withClusterInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		if intervalName != "" {
+			input.WaitForClusterIntervals = e2eConfig.GetIntervals(specName, intervalName)
+		}
+	}
+}
+
+func withControlPlaneInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		if intervalName != "" {
+			input.WaitForControlPlaneIntervals = e2eConfig.GetIntervals(specName, intervalName)
+		}
+	}
+}
+
+func withMachineDeploymentInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		if intervalName != "" {
+			input.WaitForMachineDeployments = e2eConfig.GetIntervals(specName, intervalName)
+		}
+	}
+}
+
+func withMachinePoolInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		if intervalName != "" {
+			input.WaitForMachinePools = e2eConfig.GetIntervals(specName, intervalName)
+		}
+	}
+}
+
+func withControlPlaneWaiters(waiters clusterctl.ControlPlaneWaiters) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		input.ControlPlaneWaiters = waiters
+	}
+}
+
+func withPostMachinesProvisioned(postMachinesProvisioned func()) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
+	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
+		input.PostMachinesProvisioned = postMachinesProvisioned
+	}
+}
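
The helpers above compose via functional options. A minimal sketch of how a spec might combine them — the spec name, namespace, and cluster name are illustrative, and it assumes the suite globals (ctx, bootstrapClusterProxy, artifactFolder, e2eConfig) are already initialized:

	namespace, cancelWatches, err := setupSpecNamespace(ctx, "helm-install-e2e", bootstrapClusterProxy, artifactFolder)
	Expect(err).NotTo(HaveOccurred())
	defer cancelWatches()

	// Apply the default cluster template, overriding only what this spec needs.
	result := &clusterctl.ApplyClusterTemplateAndWaitResult{}
	clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
		"helm-install",
		withNamespace(namespace.Name),
		withClusterName("helm-test-cluster"),
		withWorkerMachineCount(2),
	), result)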

test/e2e/config/helm.yaml

Lines changed: 4 additions & 0 deletions

@@ -187,6 +187,10 @@ intervals:
   default/wait-job: [ "5m", "10s" ]
   default/wait-service: [ "15m", "10s" ]
   default/wait-private-cluster: ["30m", "10s"]
+  default/wait-helmreleaseproxy: ["10m", "10s"]
+  default/wait-helmreleaseproxy-ready: ["10m", "10s"]
+  default/wait-helm-release: ["10m", "10s"]
+  default/wait-helm-release-deployed: ["10m", "10s"]
   node-drain/wait-deployment-available: ["3m", "10s"]
   node-drain/wait-control-plane: ["15m", "10s"]
   node-drain/wait-machine-deleted: ["2m", "10s"]
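
Each interval is a [timeout, polling frequency] pair that e2eConfig.GetIntervals returns for the framework to pass to Gomega's Eventually. A hedged sketch of how the new entries are consumed (mirroring the wait calls in helm_install.go below; the condition body is illustrative):

	// GetIntervals returns e.g. []interface{}{"10m", "10s"} for the named interval,
	// which Eventually interprets as the timeout and the polling interval.
	intervals := e2eConfig.GetIntervals("helm-install", "wait-helm-release-deployed")
	Eventually(func() bool {
		// ... check that the Helm release on the workload cluster reports status "deployed" ...
		return true
	}, intervals...).Should(BeTrue())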

test/e2e/e2e_suite_test.go

Lines changed: 2 additions & 0 deletions

@@ -35,6 +35,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/klog/v2"
 	addonsv1alpha1 "sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/bootstrap"
@@ -158,6 +159,7 @@ func initScheme() *runtime.Scheme {
 	scheme := runtime.NewScheme()
 	framework.TryAddDefaultSchemes(scheme)
 	Expect(addonsv1alpha1.AddToScheme(scheme)).To(Succeed())
+	Expect(clusterv1.AddToScheme(scheme)).To(Succeed())
 	return scheme
 }

test/e2e/helm_install.go

Lines changed: 103 additions & 0 deletions

@@ -0,0 +1,103 @@
+//go:build e2e
+// +build e2e
+
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	addonsv1alpha1 "sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	"sigs.k8s.io/cluster-api/test/framework"
+)
+
+// HelmInstallInput specifies the input for installing a Helm chart on a workload cluster and verifying that it was successful.
+type HelmInstallInput struct {
+	BootstrapClusterProxy framework.ClusterProxy
+	Namespace             *corev1.Namespace
+	ClusterName           string
+	HelmChartProxy        *addonsv1alpha1.HelmChartProxy
+}
+
+// HelmInstallSpec implements a test that verifies a Helm chart can be installed on a workload cluster. It creates a HelmChartProxy
+// resource and patches the Cluster labels so that they match the HelmChartProxy's clusterSelector. It then waits for the Helm
+// release to be deployed on the workload cluster.
+func HelmInstallSpec(ctx context.Context, inputGetter func() HelmInstallInput) {
+	var (
+		specName             = "helm-install"
+		input                HelmInstallInput
+		workloadClusterProxy framework.ClusterProxy
+		mgmtClient           ctrlclient.Client
+		err                  error
+	)
+
+	input = inputGetter()
+	Expect(input.BootstrapClusterProxy).NotTo(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
+	Expect(input.Namespace).NotTo(BeNil(), "Invalid argument. input.Namespace can't be nil when calling %s spec", specName)
+
+	By("creating a Kubernetes client to the management cluster")
+	mgmtClient = input.BootstrapClusterProxy.GetClient()
+	Expect(mgmtClient).NotTo(BeNil())
+
+	// Create the HelmChartProxy on the management cluster.
+	Byf("Creating HelmChartProxy %s/%s", input.HelmChartProxy.Namespace, input.HelmChartProxy.Name)
+	Expect(mgmtClient.Create(ctx, input.HelmChartProxy)).To(Succeed())
+
+	// Get the Cluster from the management cluster.
+	workloadCluster := &clusterv1.Cluster{}
+	key := types.NamespacedName{
+		Namespace: input.Namespace.Name,
+		Name:      input.ClusterName,
+	}
+	err = mgmtClient.Get(ctx, key, workloadCluster)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Patch the Cluster labels to match the clusterSelector; match expressions are ignored for now.
+	selector := input.HelmChartProxy.Spec.ClusterSelector
+	labels := workloadCluster.Labels
+	if labels == nil {
+		labels = make(map[string]string)
+	}
+
+	for k, v := range selector.MatchLabels {
+		labels[k] = v
+	}
+	// Set the (possibly newly created) label map back on the Cluster before updating it.
+	workloadCluster.Labels = labels
+
+	err = mgmtClient.Update(ctx, workloadCluster)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Wait for the HelmReleaseProxy to be ready.
+	hrpWaitInput := GetWaitForHelmReleaseProxyReadyInput(ctx, bootstrapClusterProxy, input.ClusterName, *input.HelmChartProxy, specName)
+	WaitForHelmReleaseProxyReady(ctx, hrpWaitInput, e2eConfig.GetIntervals(specName, "wait-helmreleaseproxy-ready")...)
+
+	// Get a proxy to the workload cluster.
+	By("creating a clusterctl proxy to the workload cluster")
+	workloadClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, input.Namespace.Name, input.ClusterName)
+	Expect(workloadClusterProxy).NotTo(BeNil())
+
+	// Wait for the Helm release on the workload cluster to have status = deployed.
+	releaseWaitInput := GetWaitForHelmReleaseDeployedInput(ctx, workloadClusterProxy, input.HelmChartProxy.Spec.ReleaseName, input.Namespace.Name, specName)
+	WaitForHelmReleaseDeployed(ctx, releaseWaitInput, e2eConfig.GetIntervals(specName, "wait-helm-release-deployed")...)
+}
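
A hedged sketch of how a suite could wire this spec into a Ginkgo test. The chart coordinates (Bitnami nginx) and the helm_test label are illustrative assumptions, not part of this commit; it assumes namespace and clusterName were set up earlier in the suite (e.g. via setupSpecNamespace and the cluster template helpers):

	var _ = Describe("Workload cluster", func() {
		It("installs a Helm chart on the workload cluster", func() {
			HelmInstallSpec(ctx, func() HelmInstallInput {
				return HelmInstallInput{
					BootstrapClusterProxy: bootstrapClusterProxy,
					Namespace:             namespace,   // created via setupSpecNamespace
					ClusterName:           clusterName, // cluster applied earlier in the suite
					HelmChartProxy: &addonsv1alpha1.HelmChartProxy{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "nginx-proxy",
							Namespace: namespace.Name,
						},
						Spec: addonsv1alpha1.HelmChartProxySpec{
							ClusterSelector: metav1.LabelSelector{
								MatchLabels: map[string]string{"helm_test": "enabled"},
							},
							RepoURL:     "https://charts.bitnami.com/bitnami",
							ChartName:   "nginx",
							ReleaseName: "nginx-release",
						},
					},
				}
			})
		})
	})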
