
Commit 6e9e8e3

test: add legacy test
This change adds a new EKS e2e test to ensure that we don't break the original way of using AWSManagedControlPlane for both the infrastructure and control plane. We can consider removing this test at some point in the future. Also includes a few minor changes as a result of review.

Signed-off-by: Richard Case <[email protected]>
1 parent f70fa6c commit 6e9e8e3

10 files changed: +152 −68 lines changed

api/v1beta2/awsmanagedcluster_types.go

Lines changed: 0 additions & 13 deletions
@@ -38,9 +38,6 @@ type AWSManagedClusterStatus struct {
 	// FailureDomains specifies a list fo available availability zones that can be used
 	// +optional
 	FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
-
-	// Conditions provide observations of the operational state of AWSManagedCluster.
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -69,16 +66,6 @@ type AWSManagedClusterList struct {
 	Items []AWSManagedCluster `json:"items"`
 }
 
-// GetConditions returns the observations of the operational state of the AWSManagedCluster resource.
-func (r *AWSManagedCluster) GetConditions() clusterv1.Conditions {
-	return r.Status.Conditions
-}
-
-// SetConditions sets the underlying service state of the AWSManagedCluster to the predescribed clusterv1.Conditions.
-func (r *AWSManagedCluster) SetConditions(conditions clusterv1.Conditions) {
-	r.Status.Conditions = conditions
-}
-
 func init() {
 	SchemeBuilder.Register(&AWSManagedCluster{}, &AWSManagedClusterList{})
 }
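
Removing GetConditions and SetConditions is what actually detaches AWSManagedCluster from Cluster API's generic conditions machinery: those two methods are how a resource satisfies the conditions utility interfaces. For reference, a minimal sketch of that interface pair, paraphrased from sigs.k8s.io/cluster-api/util/conditions (check the vendored copy for the exact definitions):

// Paraphrased sketch of the interfaces in sigs.k8s.io/cluster-api/util/conditions.
// Without GetConditions/SetConditions, AWSManagedCluster no longer implements
// Setter, so the conditions helpers can no longer operate on it.
package conditions

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Getter is implemented by Cluster API objects that expose conditions.
type Getter interface {
	client.Object

	// GetConditions returns the list of conditions for the object.
	GetConditions() clusterv1.Conditions
}

// Setter is implemented by objects whose conditions can be updated in place.
type Setter interface {
	Getter

	// SetConditions replaces the object's conditions with the given list.
	SetConditions(clusterv1.Conditions)
}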

api/v1beta2/zz_generated.deepcopy.go

Lines changed: 0 additions & 7 deletions
Some generated files are not rendered by default.

config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml

Lines changed: 0 additions & 46 deletions
@@ -72,52 +72,6 @@ spec:
     status:
       description: AWSManagedClusterStatus defines the observed state of AWSManagedCluster
       properties:
-        conditions:
-          description: Conditions provide observations of the operational state
-            of AWSManagedCluster.
-          items:
-            description: Condition defines an observation of a Cluster API resource
-              operational state.
-            properties:
-              lastTransitionTime:
-                description: Last time the condition transitioned from one status
-                  to another. This should be when the underlying condition changed.
-                  If that is not known, then using the time when the API field
-                  changed is acceptable.
-                format: date-time
-                type: string
-              message:
-                description: A human readable message indicating details about
-                  the transition. This field may be empty.
-                type: string
-              reason:
-                description: The reason for the condition's last transition
-                  in CamelCase. The specific API may choose whether or not this
-                  field is considered a guaranteed API. This field may not be
-                  empty.
-                type: string
-              severity:
-                description: Severity provides an explicit classification of
-                  Reason code, so the users or machines can immediately understand
-                  the current situation and act accordingly. The Severity field
-                  MUST be set only when Status=False.
-                type: string
-              status:
-                description: Status of the condition, one of True, False, Unknown.
-                type: string
-              type:
-                description: Type of condition in CamelCase or in foo.example.com/CamelCase.
-                  Many .condition.type values are consistent across resources
-                  like Available, but because arbitrary conditions can be useful
-                  (see .node.status.conditions), the ability to deconflict is
-                  important.
-                type: string
-            required:
-            - lastTransitionTime
-            - status
-            - type
-            type: object
-          type: array
         failureDomains:
           additionalProperties:
             description: FailureDomainSpec is the Schema for Cluster API failure

docs/book/src/topics/scale-from-0.md

Lines changed: 1 addition & 1 deletion
@@ -125,7 +125,7 @@ spec:
   name: "managed-cluster-control-plane"
 ---
 kind: AWSManagedCluster
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
 metadata:
   name: "managed-cluster"
 spec: {}

main.go

Lines changed: 1 addition & 1 deletion
@@ -359,7 +359,7 @@ func setupEKSReconcilersAndWebhooks(ctx context.Context, mgr ctrl.Manager, awsSe
 		Recorder:         mgr.GetEventRecorderFor("awsmanagedcluster-controller"),
 		WatchFilterValue: watchFilterValue,
 	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: true}); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "AWSCluster")
+		setupLog.Error(err, "unable to create controller", "controller", "AWSManagedCluster")
 		os.Exit(1)
 	}
 

test/e2e/data/e2e_eks_conf.yaml

Lines changed: 2 additions & 0 deletions
@@ -112,6 +112,8 @@ providers:
       targetName: "cluster-template-eks-managedmachinepool.yaml"
     - sourcePath: "./eks/cluster-template-eks-ipv6-cluster.yaml"
       targetName: "cluster-template-eks-ipv6-cluster.yaml"
+    - sourcePath: "./eks/cluster-template-eks-control-plane-only-legacy.yaml"
+      targetName: "cluster-template-eks-control-plane-only-legacy.yaml"
 
 variables:
   KUBERNETES_VERSION: "v1.22.9"
test/e2e/data/eks/cluster-template-eks-control-plane-only-legacy.yaml

Lines changed: 29 additions & 0 deletions
@@ -0,0 +1,29 @@
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: "${CLUSTER_NAME}"
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks: ["192.168.0.0/16"]
+  infrastructureRef:
+    kind: AWSManagedControlPlane
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+    name: "${CLUSTER_NAME}-control-plane"
+  controlPlaneRef:
+    kind: AWSManagedControlPlane
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+    name: "${CLUSTER_NAME}-control-plane"
+---
+kind: AWSManagedControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta2
+metadata:
+  name: "${CLUSTER_NAME}-control-plane"
+spec:
+  region: "${AWS_REGION}"
+  sshKeyName: "${AWS_SSH_KEY_NAME}"
+  version: "${KUBERNETES_VERSION}"
+  identityRef:
+    kind: AWSClusterStaticIdentity
+    name: e2e-account
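
The ${VAR} placeholders in this template are substituted by clusterctl from the e2e config variables before the manifests are applied. As a rough illustration only (this is not clusterctl's implementation, and the variable values below are made-up examples, but the effect is equivalent to expanding over a variable map):

// Illustrative sketch: mimic clusterctl's ${VAR} substitution with os.Expand.
package main

import (
	"fmt"
	"os"
)

func main() {
	// Made-up example values; the real ones come from e2e_eks_conf.yaml and flags.
	vars := map[string]string{
		"CLUSTER_NAME":       "eks-nodes-ab12cd",
		"AWS_REGION":         "eu-west-1",
		"AWS_SSH_KEY_NAME":   "e2e-key",
		"KUBERNETES_VERSION": "v1.22.9",
	}

	template := `region: "${AWS_REGION}"
version: "${KUBERNETES_VERSION}"
name: "${CLUSTER_NAME}-control-plane"`

	// os.Expand replaces each ${KEY} using the mapping function.
	rendered := os.Expand(template, func(key string) string { return vars[key] })
	fmt.Println(rendered)
}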
test/e2e/suites/managed/eks_legacy_test.go

Lines changed: 112 additions & 0 deletions
@@ -0,0 +1,112 @@
+//go:build e2e
+// +build e2e
+
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package managed
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+
+	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+	"sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared"
+	"sigs.k8s.io/cluster-api/test/framework"
+	"sigs.k8s.io/cluster-api/util"
+)
+
+// Legacy EKS e2e test. This test has been added after re-introducing AWSManagedCluster to ensure that we don't break
+// the scenario where we used AWSManagedControlPlane for both the infra cluster and control plane. This test
+// can be removed in the future when we have given people sufficient time to stop using the old model.
+var _ = ginkgo.Describe("[managed] [legacy] EKS cluster tests - single kind", func() {
+	var (
+		namespace   *corev1.Namespace
+		ctx         context.Context
+		specName    = "eks-nodes"
+		clusterName string
+	)
+
+	shared.ConditionalIt(runLegacyTests, "should create a cluster and add nodes using single kind", func() {
+		ginkgo.By("should have a valid test configuration")
+		Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil")
+		Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
+		Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion))
+		Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.CNIAddonVersion))
+		Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.CorednsAddonVersion))
+		Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubeproxyAddonVersion))
+
+		ctx = context.TODO()
+		namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx)
+		clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
+		eksClusterName := getEKSClusterName(namespace.Name, clusterName)
+
+		ginkgo.By("default iam role should exist")
+		VerifyRoleExistsAndOwned(ekscontrolplanev1.DefaultEKSControlPlaneRole, eksClusterName, false, e2eCtx.BootstrapUserAWSSession)
+
+		ginkgo.By("should create an EKS control plane")
+		ManagedClusterSpec(ctx, func() ManagedClusterSpecInput {
+			return ManagedClusterSpecInput{
+				E2EConfig:                e2eCtx.E2EConfig,
+				ConfigClusterFn:          defaultConfigCluster,
+				BootstrapClusterProxy:    e2eCtx.Environment.BootstrapClusterProxy,
+				AWSSession:               e2eCtx.BootstrapUserAWSSession,
+				Namespace:                namespace,
+				ClusterName:              clusterName,
+				Flavour:                  EKSControlPlaneOnlyLegacyFlavor,
+				ControlPlaneMachineCount: 1, // NOTE: this cannot be zero as clusterctl returns an error
+				WorkerMachineCount:       0,
+			}
+		})
+
+		ginkgo.By("should create a managed node pool and scale")
+		MachinePoolSpec(ctx, func() MachinePoolSpecInput {
+			return MachinePoolSpecInput{
+				E2EConfig:             e2eCtx.E2EConfig,
+				ConfigClusterFn:       defaultConfigCluster,
+				BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
+				AWSSession:            e2eCtx.BootstrapUserAWSSession,
+				Namespace:             namespace,
+				ClusterName:           clusterName,
+				IncludeScaling:        false,
+				Cleanup:               true,
+				ManagedMachinePool:    true,
+				Flavor:                EKSManagedMachinePoolOnlyFlavor,
+			}
+		})
+
+		shared.Byf("getting cluster with name %s", clusterName)
+		cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
+			Getter:    e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
+			Namespace: namespace.Name,
+			Name:      clusterName,
+		})
+		Expect(cluster).NotTo(BeNil(), "couldn't find CAPI cluster")
+
+		framework.DeleteCluster(ctx, framework.DeleteClusterInput{
+			Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
+			Cluster: cluster,
+		})
+		framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{
+			Getter:  e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
+			Cluster: cluster,
+		}, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...)
+	})
+})

test/e2e/suites/managed/helpers.go

Lines changed: 1 addition & 0 deletions
@@ -47,6 +47,7 @@ const (
 	EKSManagedMachinePoolWithLaunchTemplateOnlyFlavor = "eks-managed-machinepool-with-launch-template-only"
 	EKSMachinePoolOnlyFlavor                          = "eks-machinepool-only"
 	EKSIPv6ClusterFlavor                              = "eks-ipv6-cluster"
+	EKSControlPlaneOnlyLegacyFlavor                   = "eks-control-plane-only-legacy"
 )
 
 type DefaultConfigClusterFn func(clusterName, namespace string) clusterctl.ConfigClusterInput
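
This flavor constant lines up with the targetName added to e2e_eks_conf.yaml because the Cluster API e2e framework resolves a flavor to a template named cluster-template-<flavor>.yaml. A sketch of that naming convention; templateForFlavor below is a hypothetical helper for illustration, not a function from the repo:

// Illustrative sketch of the clusterctl template-naming convention that links
// EKSControlPlaneOnlyLegacyFlavor ("eks-control-plane-only-legacy") to the
// generated cluster-template-eks-control-plane-only-legacy.yaml artifact.
package main

import (
	"fmt"
	"path/filepath"
)

// templateForFlavor mirrors the convention: an empty flavor maps to
// cluster-template.yaml, anything else to cluster-template-<flavor>.yaml.
func templateForFlavor(artifactDir, flavor string) string {
	name := "cluster-template.yaml"
	if flavor != "" {
		name = fmt.Sprintf("cluster-template-%s.yaml", flavor)
	}
	return filepath.Join(artifactDir, name)
}

func main() {
	fmt.Println(templateForFlavor("repo/infrastructure-aws", "eks-control-plane-only-legacy"))
	// Output: repo/infrastructure-aws/cluster-template-eks-control-plane-only-legacy.yaml
}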

test/e2e/suites/managed/managed_suite_test.go

Lines changed: 6 additions & 0 deletions
@@ -39,6 +39,7 @@ var (
 	e2eCtx           *shared.E2EContext
 	skipUpgradeTests bool
 	skipGeneralTests bool
+	skipLegacyTests  bool
 )
 
 func init() {
@@ -47,6 +48,7 @@ func init() {
 	shared.CreateDefaultFlags(e2eCtx)
 	flag.BoolVar(&skipGeneralTests, "skip-eks-general-tests", false, "if true, the general EKS tests will be skipped")
 	flag.BoolVar(&skipUpgradeTests, "skip-eks-upgrade-tests", false, "if true, the EKS upgrade tests will be skipped")
+	flag.BoolVar(&skipLegacyTests, "skip-eks-legacy-tests", false, "if true, the EKS legacy tests will be skipped")
 }
 
 func TestE2E(t *testing.T) {
@@ -77,6 +79,10 @@ func runUpgradeTests() bool {
 	return !skipUpgradeTests
 }
 
+func runLegacyTests() bool {
+	return !skipLegacyTests
+}
+
 func initScheme() *runtime.Scheme {
 	sc := shared.DefaultScheme()
 	_ = expinfrav1.AddToScheme(sc)
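
runLegacyTests is the predicate handed to shared.ConditionalIt in the new spec. The helper itself is not part of this diff; a plausible sketch, assuming it simply wraps ginkgo.It and skips the body when the predicate returns false:

// Plausible sketch of shared.ConditionalIt (its implementation is not shown in
// this commit): register the spec, but skip at runtime unless predicate() is true.
package shared

import "github.com/onsi/ginkgo"

// ConditionalIt lets a suite gate individual specs behind flags such as
// -skip-eks-legacy-tests without unregistering them.
func ConditionalIt(predicate func() bool, text string, body func()) bool {
	return ginkgo.It(text, func() {
		if !predicate() {
			ginkgo.Skip("skipping spec: " + text)
		}
		body()
	})
}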
