Skip to content

test(e2e): Consistent core/bootstrap/control-plane provider versions #639

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
May 9, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 21 additions & 8 deletions test/e2e/e2e_suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,12 +22,13 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
capie2e "sigs.k8s.io/cluster-api/test/e2e"
"sigs.k8s.io/cluster-api/test/framework"
capie2eframework "sigs.k8s.io/cluster-api/test/framework"
capibootstrap "sigs.k8s.io/cluster-api/test/framework/bootstrap"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
ctrl "sigs.k8s.io/controller-runtime"

helmaddonsv1 "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/external/sigs.k8s.io/cluster-api-addon-provider-helm/api/v1alpha1"
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/test/e2e/framework"
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/test/framework/bootstrap"
clusterctltemp "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/test/framework/clusterctl"
)
Expand Down Expand Up @@ -147,11 +148,11 @@ var _ = SynchronizedBeforeSuite(func() []byte {
os.Unsetenv(capie2e.KubernetesVersionUpgradeTo)

kubeconfigPath := parts[3]
bootstrapClusterProxy = framework.NewClusterProxy(
bootstrapClusterProxy = capie2eframework.NewClusterProxy(
"bootstrap",
kubeconfigPath,
initScheme(),
framework.WithMachineLogCollector(framework.DockerLogCollector{}),
capie2eframework.WithMachineLogCollector(capie2eframework.DockerLogCollector{}),
)
})

Expand Down Expand Up @@ -204,7 +205,7 @@ func createClusterctlLocalRepository(config *clusterctl.E2EConfig, repositoryFol

func initScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
framework.TryAddDefaultSchemes(scheme)
capie2eframework.TryAddDefaultSchemes(scheme)
Expect(helmaddonsv1.AddToScheme(scheme)).To(Succeed())
return scheme
}
Expand All @@ -213,7 +214,7 @@ func setupBootstrapCluster(
config *clusterctl.E2EConfig,
scheme *runtime.Scheme,
useExistingCluster bool,
) (capibootstrap.ClusterProvider, framework.ClusterProxy) {
) (capibootstrap.ClusterProvider, capie2eframework.ClusterProxy) {
var clusterProvider capibootstrap.ClusterProvider
kubeconfigPath := ""
if !useExistingCluster {
Expand Down Expand Up @@ -245,13 +246,13 @@ func setupBootstrapCluster(
Expect(err).To(BeNil(), "Failed to load images to the bootstrap cluster: %s", err)
}

clusterProxy := framework.NewClusterProxy("bootstrap", kubeconfigPath, scheme)
clusterProxy := capie2eframework.NewClusterProxy("bootstrap", kubeconfigPath, scheme)
Expect(clusterProxy).NotTo(BeNil(), "Failed to get a bootstrap cluster proxy")
return clusterProvider, clusterProxy
}

func initBootstrapCluster(
bootstrapClusterProxy framework.ClusterProxy,
bootstrapClusterProxy capie2eframework.ClusterProxy,
config *clusterctl.E2EConfig,
clusterctlConfig, artifactFolder string,
) {
Expand All @@ -260,6 +261,18 @@ func initBootstrapCluster(
clusterctl.InitManagementClusterAndWatchControllerLogsInput{
ClusterProxy: bootstrapClusterProxy,
ClusterctlConfigPath: clusterctlConfig,
CoreProvider: config.GetProviderLatestVersionsByContract(
"*",
framework.CoreProvider(config),
)[0],
BootstrapProviders: config.GetProviderLatestVersionsByContract(
"*",
framework.BootstrapProviders(config)...,
),
ControlPlaneProviders: config.GetProviderLatestVersionsByContract(
"*",
framework.ControlPlaneProviders(config)...,
),
InfrastructureProviders: config.GetProviderLatestVersionsByContract(
"*",
config.InfrastructureProviders()...),
Expand All @@ -280,7 +293,7 @@ func initBootstrapCluster(

func tearDown(
bootstrapClusterProvider capibootstrap.ClusterProvider,
bootstrapClusterProxy framework.ClusterProxy,
bootstrapClusterProxy capie2eframework.ClusterProxy,
) {
if bootstrapClusterProxy != nil {
bootstrapClusterProxy.Dispose(context.TODO())
Expand Down
38 changes: 38 additions & 0 deletions test/e2e/framework/provider_utils.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
//go:build e2e

// Copyright 2024 Nutanix. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package framework

import (
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
)

// The functions in this file are used to get the provider names from the e2e config
// based on the provider type. They are adaptations of the functions that already exist
// on the E2EConfig type for other provider types, but unfortunately the existing functions
// do not include core, bootstrap, and control plane providers.

// CoreProvider returns the name of the core provider declared in the e2e
// config. The e2e config is expected to contain exactly one core provider
// entry; if none is present this will panic with an index-out-of-range —
// NOTE(review): assumes callers always run with a core provider configured,
// confirm against the e2e config files.
func CoreProvider(cfg *clusterctl.E2EConfig) string {
	coreProviders := getProviders(cfg, clusterctlv1.CoreProviderType)
	return coreProviders[0]
}

// BootstrapProviders returns the names of all bootstrap providers declared
// in the e2e config. The result may be empty if none are configured.
func BootstrapProviders(cfg *clusterctl.E2EConfig) []string {
	names := getProviders(cfg, clusterctlv1.BootstrapProviderType)
	return names
}

// ControlPlaneProviders returns the names of all control-plane providers
// declared in the e2e config. The result may be empty if none are configured.
func ControlPlaneProviders(cfg *clusterctl.E2EConfig) []string {
	names := getProviders(cfg, clusterctlv1.ControlPlaneProviderType)
	return names
}

// getProviders filters the e2e config's provider list down to the names of
// the providers whose type matches t. It always returns a non-nil slice so
// callers can expand it variadically without a nil check.
func getProviders(cfg *clusterctl.E2EConfig, t clusterctlv1.ProviderType) []string {
	// Hoist the type conversion out of the loop; it is invariant.
	wanted := string(t)
	names := []string{}
	for i := range cfg.Providers {
		if cfg.Providers[i].Type == wanted {
			names = append(names, cfg.Providers[i].Name)
		}
	}
	return names
}
80 changes: 50 additions & 30 deletions test/e2e/framework/self_hosted.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ import (
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
capie2e "sigs.k8s.io/cluster-api/test/e2e"
"sigs.k8s.io/cluster-api/test/framework"
capie2eframework "sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/bootstrap"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
Expand All @@ -32,7 +32,7 @@ import (
type SelfHostedSpecInput struct {
E2EConfig *clusterctl.E2EConfig
ClusterctlConfigPath string
BootstrapClusterProxy framework.ClusterProxy
BootstrapClusterProxy capie2eframework.ClusterProxy
ArtifactFolder string
SkipCleanup bool
ControlPlaneWaiters clusterctl.ControlPlaneWaiters
Expand Down Expand Up @@ -67,7 +67,7 @@ type SelfHostedSpecInput struct {
WorkerMachineCount *int64

// PostClusterMoved is a function that is called after the cluster is moved to self-hosted.
PostClusterMoved func(proxy framework.ClusterProxy, cluster *clusterv1.Cluster)
PostClusterMoved func(proxy capie2eframework.ClusterProxy, cluster *clusterv1.Cluster)
}

// SelfHostedSpec implements a test that verifies Cluster API creating a cluster, pivoting to a self-hosted cluster.
Expand All @@ -79,7 +79,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
cancelWatches context.CancelFunc
clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult

selfHostedClusterProxy framework.ClusterProxy
selfHostedClusterProxy capie2eframework.ClusterProxy
selfHostedNamespace *corev1.Namespace
selfHostedCancelWatches context.CancelFunc
selfHostedCluster *clusterv1.Cluster
Expand Down Expand Up @@ -212,13 +212,13 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
ctx,
cluster.Namespace,
cluster.Name,
framework.WithMachineLogCollector(input.BootstrapClusterProxy.GetLogCollector()),
capie2eframework.WithMachineLogCollector(input.BootstrapClusterProxy.GetLogCollector()),
)

capie2e.Byf("Creating a namespace for hosting the %s test spec", specName)
selfHostedNamespace, selfHostedCancelWatches = framework.CreateNamespaceAndWatchEvents(
selfHostedNamespace, selfHostedCancelWatches = capie2eframework.CreateNamespaceAndWatchEvents(
ctx,
framework.CreateNamespaceAndWatchEventsInput{
capie2eframework.CreateNamespaceAndWatchEventsInput{
Creator: selfHostedClusterProxy.GetClient(),
ClientSet: selfHostedClusterProxy.GetClientSet(),
Name: namespace.Name,
Expand All @@ -234,12 +234,29 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
clusterctl.InitManagementClusterAndWatchControllerLogs(
watchesCtx,
clusterctl.InitManagementClusterAndWatchControllerLogsInput{
ClusterProxy: selfHostedClusterProxy,
ClusterctlConfigPath: input.ClusterctlConfigPath,
InfrastructureProviders: input.E2EConfig.InfrastructureProviders(),
IPAMProviders: input.E2EConfig.IPAMProviders(),
RuntimeExtensionProviders: input.E2EConfig.RuntimeExtensionProviders(),
AddonProviders: input.E2EConfig.AddonProviders(),
ClusterProxy: selfHostedClusterProxy,
ClusterctlConfigPath: input.ClusterctlConfigPath,
CoreProvider: input.E2EConfig.GetProviderLatestVersionsByContract(
"*",
CoreProvider(input.E2EConfig),
)[0],
BootstrapProviders: input.E2EConfig.GetProviderLatestVersionsByContract(
"*",
BootstrapProviders(input.E2EConfig)...,
),
ControlPlaneProviders: input.E2EConfig.GetProviderLatestVersionsByContract(
"*",
ControlPlaneProviders(input.E2EConfig)...,
),
InfrastructureProviders: input.E2EConfig.GetProviderLatestVersionsByContract(
"*",
input.E2EConfig.InfrastructureProviders()...),
AddonProviders: input.E2EConfig.GetProviderLatestVersionsByContract(
"*",
input.E2EConfig.AddonProviders()...),
RuntimeExtensionProviders: input.E2EConfig.GetProviderLatestVersionsByContract(
"*",
input.E2EConfig.RuntimeExtensionProviders()...),
LogFolder: filepath.Join(
input.ArtifactFolder,
"clusters",
Expand Down Expand Up @@ -274,9 +291,9 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
})

By("Waiting for the cluster to be reconciled after moving to self hosted")
selfHostedCluster = framework.DiscoveryAndWaitForCluster(
selfHostedCluster = capie2eframework.DiscoveryAndWaitForCluster(
ctx,
framework.DiscoveryAndWaitForClusterInput{
capie2eframework.DiscoveryAndWaitForClusterInput{
Getter: selfHostedClusterProxy.GetClient(),
Namespace: selfHostedNamespace.Name,
Name: cluster.Name,
Expand Down Expand Up @@ -335,9 +352,9 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
})

By("Waiting for the cluster to be reconciled after moving back to bootstrap")
clusterResources.Cluster = framework.DiscoveryAndWaitForCluster(
clusterResources.Cluster = capie2eframework.DiscoveryAndWaitForCluster(
ctx,
framework.DiscoveryAndWaitForClusterInput{
capie2eframework.DiscoveryAndWaitForClusterInput{
Getter: input.BootstrapClusterProxy.GetClient(),
Namespace: namespace.Name,
Name: clusterResources.Cluster.Name,
Expand Down Expand Up @@ -377,13 +394,13 @@ func hasProvider(ctx context.Context, c client.Client, providerName string) bool
func setupSpecNamespace(
ctx context.Context,
specName string,
clusterProxy framework.ClusterProxy,
clusterProxy capie2eframework.ClusterProxy,
artifactFolder string,
) (*corev1.Namespace, context.CancelFunc) {
capie2e.Byf("Creating a namespace for hosting the %q test spec", specName)
namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(
namespace, cancelWatches := capie2eframework.CreateNamespaceAndWatchEvents(
ctx,
framework.CreateNamespaceAndWatchEventsInput{
capie2eframework.CreateNamespaceAndWatchEventsInput{
Creator: clusterProxy.GetClient(),
ClientSet: clusterProxy.GetClientSet(),
Name: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
Expand All @@ -397,7 +414,7 @@ func setupSpecNamespace(
// dumpAllResources dumps all the resources in the spec namespace and the workload cluster.
func dumpAllResources(
ctx context.Context,
clusterProxy framework.ClusterProxy,
clusterProxy capie2eframework.ClusterProxy,
artifactFolder string,
namespace *corev1.Namespace,
cluster *clusterv1.Cluster,
Expand All @@ -415,7 +432,7 @@ func dumpAllResources(
capie2e.Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name)

// Dump all Cluster API related resources to artifacts.
framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
capie2eframework.DumpAllResources(ctx, capie2eframework.DumpAllResourcesInput{
Lister: clusterProxy.GetClient(),
Namespace: namespace.Name,
LogPath: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName(), "resources"),
Expand All @@ -424,12 +441,12 @@ func dumpAllResources(
// If the cluster still exists, dump pods and nodes of the workload cluster.
if err := clusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(cluster), &clusterv1.Cluster{}); err == nil {
capie2e.Byf("Dumping Pods and Nodes of Cluster %s", klog.KObj(cluster))
framework.DumpResourcesForCluster(ctx, framework.DumpResourcesForClusterInput{
capie2eframework.DumpResourcesForCluster(ctx, capie2eframework.DumpResourcesForClusterInput{
Lister: clusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name).
GetClient(),
Cluster: cluster,
LogPath: filepath.Join(artifactFolder, "clusters", cluster.Name, "resources"),
Resources: []framework.DumpNamespaceAndGVK{
Resources: []capie2eframework.DumpNamespaceAndGVK{
{
GVK: schema.GroupVersionKind{
Version: corev1.SchemeGroupVersion.Version,
Expand All @@ -451,7 +468,7 @@ func dumpAllResources(
func dumpSpecResourcesAndCleanup(
ctx context.Context,
specName string,
clusterProxy framework.ClusterProxy,
clusterProxy capie2eframework.ClusterProxy,
artifactFolder string,
namespace *corev1.Namespace,
cancelWatches context.CancelFunc,
Expand All @@ -467,13 +484,16 @@ func dumpSpecResourcesAndCleanup(
// While https://github.com/kubernetes-sigs/cluster-api/issues/2955 is addressed in future iterations, there is a
// chance that cluster variable is not set even if the cluster exists, so we are calling DeleteAllClustersAndWait
// instead of DeleteClusterAndWait
framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
Client: clusterProxy.GetClient(),
Namespace: namespace.Name,
}, intervalsGetter(specName, "wait-delete-cluster")...)
capie2eframework.DeleteAllClustersAndWait(
ctx,
capie2eframework.DeleteAllClustersAndWaitInput{
Client: clusterProxy.GetClient(),
Namespace: namespace.Name,
},
intervalsGetter(specName, "wait-delete-cluster")...)

capie2e.Byf("Deleting namespace used for hosting the %q test spec", specName)
framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
capie2eframework.DeleteNamespace(ctx, capie2eframework.DeleteNamespaceInput{
Deleter: clusterProxy.GetClient(),
Name: namespace.Name,
})
Expand Down
Loading