diff --git a/api/v1beta2/awsmanagedcluster_types.go b/api/v1beta2/awsmanagedcluster_types.go
new file mode 100644
index 0000000000..db82c2e6d9
--- /dev/null
+++ b/api/v1beta2/awsmanagedcluster_types.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+// AWSManagedClusterSpec defines the desired state of AWSManagedCluster
+type AWSManagedClusterSpec struct {
+	// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
+	// +optional
+	ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
+}
+
+// AWSManagedClusterStatus defines the observed state of AWSManagedCluster
+type AWSManagedClusterStatus struct {
+	// Ready is true when the AWSManagedControlPlane has an API server URL.
+	// +optional
+	Ready bool `json:"ready,omitempty"`
+
+	// FailureDomains specifies a list of available availability zones that can be used.
+	// +optional
+	FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=awsmanagedclusters,scope=Namespaced,categories=cluster-api,shortName=awsmc
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSManagedCluster belongs"
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes"
+// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="API Endpoint",priority=1
+
+// AWSManagedCluster is the Schema for the awsmanagedclusters API
+type AWSManagedCluster struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   AWSManagedClusterSpec   `json:"spec,omitempty"`
+	Status AWSManagedClusterStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// AWSManagedClusterList contains a list of AWSManagedCluster.
+type AWSManagedClusterList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []AWSManagedCluster `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&AWSManagedCluster{}, &AWSManagedClusterList{})
+}
diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go
index b4edb00b36..edb4601af4 100644
--- a/api/v1beta2/zz_generated.deepcopy.go
+++ b/api/v1beta2/zz_generated.deepcopy.go
@@ -894,6 +894,103 @@ func (in *AWSMachineTemplateStatus) DeepCopy() *AWSMachineTemplateStatus {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSManagedCluster) DeepCopyInto(out *AWSManagedCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedCluster. +func (in *AWSManagedCluster) DeepCopy() *AWSManagedCluster { + if in == nil { + return nil + } + out := new(AWSManagedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSManagedCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedClusterList) DeepCopyInto(out *AWSManagedClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSManagedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterList. +func (in *AWSManagedClusterList) DeepCopy() *AWSManagedClusterList { + if in == nil { + return nil + } + out := new(AWSManagedClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSManagedClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedClusterSpec) DeepCopyInto(out *AWSManagedClusterSpec) { + *out = *in + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterSpec. +func (in *AWSManagedClusterSpec) DeepCopy() *AWSManagedClusterSpec { + if in == nil { + return nil + } + out := new(AWSManagedClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedClusterStatus) DeepCopyInto(out *AWSManagedClusterStatus) { + *out = *in + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make(v1beta1.FailureDomains, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterStatus. +func (in *AWSManagedClusterStatus) DeepCopy() *AWSManagedClusterStatus { + if in == nil { + return nil + } + out := new(AWSManagedClusterStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) { *out = *in diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml index ae449c5787..022e24b1eb 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 + controller-gen.kubebuilder.io/version: v0.10.0 creationTimestamp: null name: awsmanagedclusters.infrastructure.cluster.x-k8s.io spec: @@ -28,80 +28,11 @@ spec: jsonPath: .status.ready name: Ready type: string - - description: AWS VPC the control plane is using - jsonPath: .spec.networkSpec.vpc.id - name: VPC - type: string - description: API Endpoint jsonPath: .spec.controlPlaneEndpoint.host name: Endpoint priority: 1 type: string - name: v1beta1 - schema: - openAPIV3Schema: - description: AWSManagedCluster is the Schema for the awsmanagedclusters API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: AWSManagedClusterSpec defines the desired state of AWSManagedCluster - properties: - controlPlaneEndpoint: - description: ControlPlaneEndpoint represents the endpoint used to - communicate with the control plane. - properties: - host: - description: The hostname on which the API server is serving. - type: string - port: - description: The port on which the API server is serving. - format: int32 - type: integer - required: - - host - - port - type: object - type: object - status: - description: AWSManagedClusterStatus defines the observed state of AWSManagedCluster - properties: - failureDomains: - additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. It allows controllers to understand how many failure - domains a cluster can optionally span across. - properties: - attributes: - additionalProperties: - type: string - description: Attributes is a free form map of attributes an - infrastructure provider might use or require. - type: object - controlPlane: - description: ControlPlane determines if this failure domain - is suitable for use by control plane machines. - type: boolean - type: object - description: FailureDomains specifies a list fo available availability - zones that can be used - type: object - ready: - description: Ready is when the AWSManagedControlPlane has a API server - URL. 
- type: boolean - type: object - type: object name: v1beta2 schema: openAPIV3Schema: diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 504b4a9a06..ef3db43caa 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -18,6 +18,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_awsclustercontrolleridentities.yaml - bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml - bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +- bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml - bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml - bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml # +kubebuilder:scaffold:crdkustomizeresource @@ -45,6 +46,7 @@ patchesStrategicMerge: - patches/cainjection_in_awsclusterroleidentities.yaml - patches/cainjection_in_awsclustertemplates.yaml - patches/cainjection_in_awsmanagedcontrolplanes.yaml +- patches/cainjection_in_awsmanagedclusters.yaml - patches/cainjection_in_eksconfigs.yaml - patches/cainjection_in_eksconfigtemplates.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch diff --git a/config/crd/patches/cainjection_in_awsmanagedclusters.yaml b/config/crd/patches/cainjection_in_awsmanagedclusters.yaml new file mode 100644 index 0000000000..8da71de7a3 --- /dev/null +++ b/config/crd/patches/cainjection_in_awsmanagedclusters.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: awsmanagedclusters.infrastructure.cluster.x-k8s.io diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 536de44ae5..99b0d47b68 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -266,6 +266,35 @@ rules: - get - patch - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmanagedclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmanagedclusters + - awsmanagedclusters/status + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmanagedclusters/status + verbs: + - get + - patch + - update - apiGroups: - infrastructure.cluster.x-k8s.io resources: diff --git a/controllers/awsmanagedcluster_controller.go b/controllers/awsmanagedcluster_controller.go new file mode 100644 index 0000000000..99d9423272 --- /dev/null +++ b/controllers/awsmanagedcluster_controller.go @@ -0,0 +1,199 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" +) + +// AWSManagedClusterReconciler reconciles AWSManagedCluster. +type AWSManagedClusterReconciler struct { + client.Client + Recorder record.EventRecorder + WatchFilterValue string +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes;awsmanagedcontrolplanes/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete + +func (r *AWSManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + + // Fetch the AWSManagedCluster instance + awsManagedCluster := &infrav1.AWSManagedCluster{} + err := r.Get(ctx, req.NamespacedName, awsManagedCluster) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Fetch the Cluster. + cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedCluster.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if cluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return reconcile.Result{}, nil + } + + if annotations.IsPaused(cluster, awsManagedCluster) { + log.Info("AWSManagedCluster or linked Cluster is marked as paused. 
Won't reconcile") + return reconcile.Result{}, nil + } + + log = log.WithValues("cluster", cluster.Name) + + controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} + controlPlaneRef := types.NamespacedName{ + Name: cluster.Spec.ControlPlaneRef.Name, + Namespace: cluster.Spec.ControlPlaneRef.Namespace, + } + + if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get control plane ref: %w", err) + } + + log = log.WithValues("controlPlane", controlPlaneRef.Name) + + patchHelper, err := patch.NewHelper(awsManagedCluster, r.Client) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to init patch helper: %w", err) + } + + // Set the values from the managed control plane + awsManagedCluster.Status.Ready = true + awsManagedCluster.Spec.ControlPlaneEndpoint = controlPlane.Spec.ControlPlaneEndpoint + awsManagedCluster.Status.FailureDomains = controlPlane.Status.FailureDomains + + if err := patchHelper.Patch(ctx, awsManagedCluster); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to patch AWSManagedCluster: %w", err) + } + + log.Info("Successfully reconciled AWSManagedCluster") + + return reconcile.Result{}, nil +} + +func (r *AWSManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + log := logger.FromContext(ctx) + + awsManagedCluster := &infrav1.AWSManagedCluster{} + + controller, err := ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(awsManagedCluster). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + Build(r) + + if err != nil { + return fmt.Errorf("error creating controller: %w", err) + } + + // Add a watch for clusterv1.Cluster unpaise + if err = controller.Watch( + &source.Kind{Type: &clusterv1.Cluster{}}, + handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("AWSManagedCluster"), mgr.GetClient(), &infrav1.AWSManagedCluster{})), + predicates.ClusterUnpaused(log.GetLogger()), + ); err != nil { + return fmt.Errorf("failed adding a watch for ready clusters: %w", err) + } + + // Add a watch for AWSManagedControlPlane + if err = controller.Watch( + &source.Kind{Type: &ekscontrolplanev1.AWSManagedControlPlane{}}, + handler.EnqueueRequestsFromMapFunc(r.managedControlPlaneToManagedCluster(ctx, log)), + ); err != nil { + return fmt.Errorf("failed adding watch on AWSManagedControlPlane: %w", err) + } + + return nil +} + +func (r *AWSManagedClusterReconciler) managedControlPlaneToManagedCluster(ctx context.Context, log *logger.Logger) handler.MapFunc { + return func(o client.Object) []ctrl.Request { + awsManagedControlPlane, ok := o.(*ekscontrolplanev1.AWSManagedControlPlane) + if !ok { + log.Error(errors.Errorf("expected an AWSManagedControlPlane, got %T instead", o), "failed to map AWSManagedControlPlane") + return nil + } + + log := log.WithValues("objectMapper", "awsmcpTomc", "awsmanagedcontrolplane", klog.KRef(awsManagedControlPlane.Namespace, awsManagedControlPlane.Name)) + + if !awsManagedControlPlane.ObjectMeta.DeletionTimestamp.IsZero() { + log.Info("AWSManagedControlPlane has a deletion timestamp, skipping mapping") + return nil + } + + if awsManagedControlPlane.Spec.ControlPlaneEndpoint.IsZero() { + log.Debug("AWSManagedControlPlane has no control plane endpoint, skipping mapping") + return nil + } + + cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedControlPlane.ObjectMeta) + if err != nil { 
+ log.Error(err, "failed to get owning cluster") + return nil + } + if cluster == nil { + log.Info("no owning cluster, skipping mapping") + return nil + } + + managedClusterRef := cluster.Spec.InfrastructureRef + if managedClusterRef == nil || managedClusterRef.Kind != "AWSManagedCluster" { + log.Info("InfrastructureRef is nil or not AWSManagedCluster, skipping mapping") + return nil + } + + return []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: managedClusterRef.Name, + Namespace: managedClusterRef.Namespace, + }, + }, + } + } +} diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go index d74cbce5c0..e82891c344 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go @@ -19,10 +19,12 @@ package controllers import ( "context" "fmt" + "strings" "time" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" @@ -59,6 +61,8 @@ const ( // deleteRequeueAfter is how long to wait before checking again to see if the control plane still // has dependencies during deletion. deleteRequeueAfter = 20 * time.Second + + awsManagedControlPlaneKind = "AWSManagedControlPlane" ) var defaultEKSSecurityGroupRoles = []infrav1.SecurityGroupRole{ @@ -112,6 +116,13 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, return fmt.Errorf("failed adding a watch for ready clusters: %w", err) } + if err = c.Watch( + &source.Kind{Type: &infrav1.AWSManagedCluster{}}, + handler.EnqueueRequestsFromMapFunc(r.managedClusterToManagedControlPlane(ctx, log)), + ); err != nil { + return fmt.Errorf("failed adding a watch for AWSManagedCluster") + } + return nil } @@ -125,6 +136,7 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes/status,verbs=get;update;patch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclusterroleidentities;awsclusterstaticidentities;awsclustercontrolleridentities,verbs=get;list;watch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters;awsmanagedclusters/status,verbs=get;list;watch // Reconcile will reconcile AWSManagedControlPlane Resources. 
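+// NOTE: in addition to mapping Cluster events via ClusterToAWSManagedControlPlane, this controller
+// also watches AWSManagedCluster objects (see managedClusterToManagedControlPlane below) so that the
+// control plane is re-queued when its paired infrastructure object changes.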
func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) { @@ -159,7 +171,7 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct Client: r.Client, Cluster: cluster, ControlPlane: awsControlPlane, - ControllerName: "awsmanagedcontrolplane", + ControllerName: strings.ToLower(awsManagedControlPlaneKind), EnableIAM: r.EnableIAM, AllowAdditionalRoles: r.AllowAdditionalRoles, Endpoints: r.Endpoints, @@ -213,6 +225,17 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, managedScope *scope.ManagedControlPlaneScope) (res ctrl.Result, reterr error) { managedScope.Info("Reconciling AWSManagedControlPlane") + // TODO (richardcase): we can remove the if check here in the future when we have + // allowed enough time for users to move away from using the single kind for + // infrastructureRef and controlplaneRef. + if managedScope.Cluster.Spec.InfrastructureRef.Kind != awsManagedControlPlaneKind { + // Wait for the cluster infrastructure to be ready before creating machines + if !managedScope.Cluster.Status.InfrastructureReady { + managedScope.Info("Cluster infrastructure is not ready yet") + return ctrl.Result{}, nil + } + } + awsManagedControlPlane := managedScope.ControlPlane controllerutil.AddFinalizer(managedScope.ControlPlane, ekscontrolplanev1.ManagedControlPlaneFinalizer) @@ -346,7 +369,7 @@ func (r *AWSManagedControlPlaneReconciler) ClusterToAWSManagedControlPlane(o cli } controlPlaneRef := c.Spec.ControlPlaneRef - if controlPlaneRef != nil && controlPlaneRef.Kind == "AWSManagedControlPlane" { + if controlPlaneRef != nil && controlPlaneRef.Kind == awsManagedControlPlaneKind { return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}} } @@ -392,3 +415,43 @@ func (r *AWSManagedControlPlaneReconciler) dependencyCount(ctx context.Context, return dependencies, nil } + +func (r *AWSManagedControlPlaneReconciler) managedClusterToManagedControlPlane(ctx context.Context, log *logger.Logger) handler.MapFunc { + return func(o client.Object) []ctrl.Request { + awsManagedCluster, ok := o.(*infrav1.AWSManagedCluster) + if !ok { + log.Error(fmt.Errorf("expected a AWSManagedCluster but got a %T", o), "Expected AWSManagedCluster") + return nil + } + + if !awsManagedCluster.ObjectMeta.DeletionTimestamp.IsZero() { + log.Debug("AWSManagedCluster has a deletion timestamp, skipping mapping") + return nil + } + + cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedCluster.ObjectMeta) + if err != nil { + log.Error(err, "failed to get owning cluster") + return nil + } + if cluster == nil { + log.Debug("Owning cluster not set on AWSManagedCluster, skipping mapping") + return nil + } + + controlPlaneRef := cluster.Spec.ControlPlaneRef + if controlPlaneRef == nil || controlPlaneRef.Kind != awsManagedControlPlaneKind { + log.Debug("ControlPlaneRef is nil or not AWSManagedControlPlane, skipping mapping") + return nil + } + + return []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: controlPlaneRef.Name, + Namespace: controlPlaneRef.Namespace, + }, + }, + } + } +} diff --git a/docs/book/src/topics/scale-from-0.md b/docs/book/src/topics/scale-from-0.md index b579a76049..f69763874c 100644 --- a/docs/book/src/topics/scale-from-0.md +++ b/docs/book/src/topics/scale-from-0.md @@ -69,7 +69,7 @@ If you are using a service account to 
access it, you also have an option to defi autoscaler's repository. The second one is the workload cluster. It needs both because the MachineDeployment is in the control-plane while the actual node and pods are in the workload cluster. -Therefor, you have to install cluster-autoscaler into the _control-plane_ cluster. +Therefore, you have to install cluster-autoscaler into the _control-plane_ cluster. I have a handy script to launch autoscaler which looks like this: @@ -116,14 +116,20 @@ metadata: name: "managed-cluster" spec: infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 - name: "managed-cluster-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "managed-cluster" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta1 name: "managed-cluster-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "managed-cluster" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta1 metadata: diff --git a/main.go b/main.go index ea2689d0df..279890af86 100644 --- a/main.go +++ b/main.go @@ -353,6 +353,16 @@ func setupEKSReconcilersAndWebhooks(ctx context.Context, mgr ctrl.Manager, awsSe os.Exit(1) } + setupLog.Debug("enabling EKS managed cluster controller") + if err := (&controllers.AWSManagedClusterReconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor("awsmanagedcluster-controller"), + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: true}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AWSManagedCluster") + os.Exit(1) + } + if feature.Gates.Enabled(feature.EKSFargate) { setupLog.Debug("enabling EKS fargate profile controller") if err := (&expcontrollers.AWSFargateProfileReconciler{ diff --git a/templates/cluster-template-eks-fargate.yaml b/templates/cluster-template-eks-fargate.yaml index 27de533485..c9dca2b49d 100644 --- a/templates/cluster-template-eks-fargate.yaml +++ b/templates/cluster-template-eks-fargate.yaml @@ -8,14 +8,20 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 - name: "${CLUSTER_NAME}-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: diff --git a/templates/cluster-template-eks-ipv6.yaml b/templates/cluster-template-eks-ipv6.yaml index 9369135ca6..7a6dfa262b 100644 --- a/templates/cluster-template-eks-ipv6.yaml +++ b/templates/cluster-template-eks-ipv6.yaml @@ -8,14 +8,20 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 - name: "${CLUSTER_NAME}-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: 
"${CLUSTER_NAME}-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: diff --git a/templates/cluster-template-eks-machinepool.yaml b/templates/cluster-template-eks-machinepool.yaml index 22bb09821a..9ae1e6dce4 100644 --- a/templates/cluster-template-eks-machinepool.yaml +++ b/templates/cluster-template-eks-machinepool.yaml @@ -8,14 +8,20 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 - name: "${CLUSTER_NAME}-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: diff --git a/templates/cluster-template-eks-managedmachinepool-gpu.yaml b/templates/cluster-template-eks-managedmachinepool-gpu.yaml index d90afcdcb7..adfe109ca2 100644 --- a/templates/cluster-template-eks-managedmachinepool-gpu.yaml +++ b/templates/cluster-template-eks-managedmachinepool-gpu.yaml @@ -10,14 +10,20 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 - name: "${CLUSTER_NAME}-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: diff --git a/templates/cluster-template-eks-managedmachinepool-vpccni.yaml b/templates/cluster-template-eks-managedmachinepool-vpccni.yaml index 6ac40cb8aa..bae62e1113 100644 --- a/templates/cluster-template-eks-managedmachinepool-vpccni.yaml +++ b/templates/cluster-template-eks-managedmachinepool-vpccni.yaml @@ -8,14 +8,20 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 - name: "${CLUSTER_NAME}-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: diff --git a/templates/cluster-template-eks-managedmachinepool.yaml b/templates/cluster-template-eks-managedmachinepool.yaml index c739b2835d..1db30a2c6f 100644 --- a/templates/cluster-template-eks-managedmachinepool.yaml +++ b/templates/cluster-template-eks-managedmachinepool.yaml @@ -8,14 +8,20 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: 
controlplane.cluster.x-k8s.io/v1beta2 - name: "${CLUSTER_NAME}-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: diff --git a/templates/cluster-template-eks.yaml b/templates/cluster-template-eks.yaml index 928258752a..033ddde7f6 100644 --- a/templates/cluster-template-eks.yaml +++ b/templates/cluster-template-eks.yaml @@ -8,14 +8,20 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 - name: "${CLUSTER_NAME}-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: diff --git a/test/e2e/data/e2e_eks_conf.yaml b/test/e2e/data/e2e_eks_conf.yaml index 8338226886..d070789898 100644 --- a/test/e2e/data/e2e_eks_conf.yaml +++ b/test/e2e/data/e2e_eks_conf.yaml @@ -112,6 +112,8 @@ providers: targetName: "cluster-template-eks-managedmachinepool.yaml" - sourcePath: "./eks/cluster-template-eks-ipv6-cluster.yaml" targetName: "cluster-template-eks-ipv6-cluster.yaml" + - sourcePath: "./eks/cluster-template-eks-control-plane-only-legacy.yaml" + targetName: "cluster-template-eks-control-plane-only-legacy.yaml" variables: KUBERNETES_VERSION: "v1.22.9" diff --git a/test/e2e/data/eks/cluster-template-eks-control-plane-only-legacy.yaml b/test/e2e/data/eks/cluster-template-eks-control-plane-only-legacy.yaml new file mode 100644 index 0000000000..fc060e5624 --- /dev/null +++ b/test/e2e/data/eks/cluster-template-eks-control-plane-only-legacy.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + kind: AWSManagedControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}-control-plane" + controlPlaneRef: + kind: AWSManagedControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}-control-plane" +--- +kind: AWSManagedControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + region: "${AWS_REGION}" + sshKeyName: "${AWS_SSH_KEY_NAME}" + version: "${KUBERNETES_VERSION}" + identityRef: + kind: AWSClusterStaticIdentity + name: e2e-account diff --git a/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml b/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml index da771fd9ab..ad793240de 100644 --- a/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml +++ b/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml @@ -8,14 +8,20 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: 
controlplane.cluster.x-k8s.io/v1beta2 - name: "${CLUSTER_NAME}-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: diff --git a/test/e2e/data/eks/cluster-template-eks-control-plane-only.yaml b/test/e2e/data/eks/cluster-template-eks-control-plane-only.yaml index fc060e5624..d7750c617d 100644 --- a/test/e2e/data/eks/cluster-template-eks-control-plane-only.yaml +++ b/test/e2e/data/eks/cluster-template-eks-control-plane-only.yaml @@ -8,14 +8,20 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 - name: "${CLUSTER_NAME}-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: diff --git a/test/e2e/data/eks/cluster-template-eks-ipv6-cluster.yaml b/test/e2e/data/eks/cluster-template-eks-ipv6-cluster.yaml index 0ce1f75623..e2697c0200 100644 --- a/test/e2e/data/eks/cluster-template-eks-ipv6-cluster.yaml +++ b/test/e2e/data/eks/cluster-template-eks-ipv6-cluster.yaml @@ -8,14 +8,20 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 - name: "${CLUSTER_NAME}-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: diff --git a/test/e2e/data/eks/cluster-template-eks-managedmachinepool.yaml b/test/e2e/data/eks/cluster-template-eks-managedmachinepool.yaml index c739b2835d..1db30a2c6f 100644 --- a/test/e2e/data/eks/cluster-template-eks-managedmachinepool.yaml +++ b/test/e2e/data/eks/cluster-template-eks-managedmachinepool.yaml @@ -8,14 +8,20 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - kind: AWSManagedControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 - name: "${CLUSTER_NAME}-control-plane" + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: diff --git a/test/e2e/shared/context.go b/test/e2e/shared/context.go index 
553586cd06..5e2e15b5b5 100644 --- a/test/e2e/shared/context.go +++ b/test/e2e/shared/context.go @@ -89,6 +89,8 @@ type Settings struct { SkipCloudFormationCreation bool // SkipCloudFormationDeletion prevents the deletion of the AWS CloudFormation stack. SkipCloudFormationDeletion bool + // SkipQuotas will skip requesting quotas for aws services. + SkipQuotas bool // number of ginkgo nodes to use for kubetest. GinkgoNodes int // time in s before kubetest spec is marked as slow. diff --git a/test/e2e/shared/defaults.go b/test/e2e/shared/defaults.go index b31070f7be..1d62e72f9b 100644 --- a/test/e2e/shared/defaults.go +++ b/test/e2e/shared/defaults.go @@ -208,6 +208,7 @@ func CreateDefaultFlags(ctx *E2EContext) { flag.BoolVar(&ctx.Settings.SkipCleanup, "skip-cleanup", false, "if true, the resource cleanup after tests will be skipped") flag.BoolVar(&ctx.Settings.SkipCloudFormationDeletion, "skip-cloudformation-deletion", false, "if true, an AWS CloudFormation stack will not be deleted") flag.BoolVar(&ctx.Settings.SkipCloudFormationCreation, "skip-cloudformation-creation", false, "if true, an AWS CloudFormation stack will not be created") + flag.BoolVar(&ctx.Settings.SkipQuotas, "skip-quotas", false, "if true, the requesting of quotas for aws services will be skipped") flag.StringVar(&ctx.Settings.DataFolder, "data-folder", "", "path to the data folder") flag.StringVar(&ctx.Settings.SourceTemplate, "source-template", "infrastructure-aws/generated/cluster-template.yaml", "path to the data folder") } diff --git a/test/e2e/shared/suite.go b/test/e2e/shared/suite.go index 37143bc76d..8b0f92d87a 100644 --- a/test/e2e/shared/suite.go +++ b/test/e2e/shared/suite.go @@ -156,10 +156,12 @@ func Node1BeforeSuite(e2eCtx *E2EContext) []byte { base64EncodedCredentials := encodeCredentials(e2eCtx.Environment.BootstrapAccessKey, boostrapTemplate.Spec.Region) SetEnvVar("AWS_B64ENCODED_CREDENTIALS", base64EncodedCredentials, true) - By("Writing AWS service quotas to a file for parallel tests") - quotas := EnsureServiceQuotas(e2eCtx.BootstrapUserAWSSession) - WriteResourceQuotesToFile(ResourceQuotaFilePath, quotas) - WriteResourceQuotesToFile(path.Join(e2eCtx.Settings.ArtifactFolder, "initial-resource-quotas.yaml"), quotas) + if !e2eCtx.Settings.SkipQuotas { + By("Writing AWS service quotas to a file for parallel tests") + quotas := EnsureServiceQuotas(e2eCtx.BootstrapUserAWSSession) + WriteResourceQuotesToFile(ResourceQuotaFilePath, quotas) + WriteResourceQuotesToFile(path.Join(e2eCtx.Settings.ArtifactFolder, "initial-resource-quotas.yaml"), quotas) + } e2eCtx.Settings.InstanceVCPU, err = strconv.Atoi(e2eCtx.E2EConfig.GetVariable(InstanceVcpu)) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/suites/managed/eks_legacy_test.go b/test/e2e/suites/managed/eks_legacy_test.go new file mode 100644 index 0000000000..7dfd0ace50 --- /dev/null +++ b/test/e2e/suites/managed/eks_legacy_test.go @@ -0,0 +1,112 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package managed + +import ( + "context" + "fmt" + + "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/util" +) + +// Legacy EKS e2e test. This test has been added after re-introducing AWSManagedCluster to ensure that we don't break +// the scenario where we used AWSManagedControlPlane for both the infra cluster and control plane. This test +// can be removed in the future when we have given people sufficient time to stop using the old model. +var _ = ginkgo.Describe("[managed] [legacy] EKS cluster tests - single kind", func() { + var ( + namespace *corev1.Namespace + ctx context.Context + specName = "eks-nodes" + clusterName string + ) + + shared.ConditionalIt(runLegacyTests, "should create a cluster and add nodes using single kind", func() { + ginkgo.By("should have a valid test configuration") + Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil") + Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName) + Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion)) + Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.CNIAddonVersion)) + Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.CorednsAddonVersion)) + Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubeproxyAddonVersion)) + + ctx = context.TODO() + namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx) + clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6)) + eksClusterName := getEKSClusterName(namespace.Name, clusterName) + + ginkgo.By("default iam role should exist") + VerifyRoleExistsAndOwned(ekscontrolplanev1.DefaultEKSControlPlaneRole, eksClusterName, false, e2eCtx.BootstrapUserAWSSession) + + ginkgo.By("should create an EKS control plane") + ManagedClusterSpec(ctx, func() ManagedClusterSpecInput { + return ManagedClusterSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + ConfigClusterFn: defaultConfigCluster, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + AWSSession: e2eCtx.BootstrapUserAWSSession, + Namespace: namespace, + ClusterName: clusterName, + Flavour: EKSControlPlaneOnlyLegacyFlavor, + ControlPlaneMachineCount: 1, //NOTE: this cannot be zero as clusterctl returns an error + WorkerMachineCount: 0, + } + }) + + ginkgo.By("should create a managed node pool and scale") + MachinePoolSpec(ctx, func() MachinePoolSpecInput { + return MachinePoolSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + ConfigClusterFn: defaultConfigCluster, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + AWSSession: e2eCtx.BootstrapUserAWSSession, + Namespace: namespace, + ClusterName: clusterName, + IncludeScaling: false, + Cleanup: true, + ManagedMachinePool: true, + Flavor: EKSManagedMachinePoolOnlyFlavor, + } + }) + + shared.Byf("getting cluster with name %s", clusterName) + cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ + Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Namespace: namespace.Name, + Name: clusterName, + }) + Expect(cluster).NotTo(BeNil(), "couldn't find CAPI cluster") + + framework.DeleteCluster(ctx, framework.DeleteClusterInput{ + Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Cluster: cluster, 
+ }) + framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{ + Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Cluster: cluster, + }, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...) + }) +}) diff --git a/test/e2e/suites/managed/helpers.go b/test/e2e/suites/managed/helpers.go index ca74eb514f..8863a05eb3 100644 --- a/test/e2e/suites/managed/helpers.go +++ b/test/e2e/suites/managed/helpers.go @@ -47,6 +47,7 @@ const ( EKSManagedMachinePoolWithLaunchTemplateOnlyFlavor = "eks-managed-machinepool-with-launch-template-only" EKSMachinePoolOnlyFlavor = "eks-machinepool-only" EKSIPv6ClusterFlavor = "eks-ipv6-cluster" + EKSControlPlaneOnlyLegacyFlavor = "eks-control-plane-only-legacy" ) type DefaultConfigClusterFn func(clusterName, namespace string) clusterctl.ConfigClusterInput diff --git a/test/e2e/suites/managed/managed_suite_test.go b/test/e2e/suites/managed/managed_suite_test.go index 10b87de1d3..c5180bb307 100644 --- a/test/e2e/suites/managed/managed_suite_test.go +++ b/test/e2e/suites/managed/managed_suite_test.go @@ -39,6 +39,7 @@ var ( e2eCtx *shared.E2EContext skipUpgradeTests bool skipGeneralTests bool + skipLegacyTests bool ) func init() { @@ -47,6 +48,7 @@ func init() { shared.CreateDefaultFlags(e2eCtx) flag.BoolVar(&skipGeneralTests, "skip-eks-general-tests", false, "if true, the general EKS tests will be skipped") flag.BoolVar(&skipUpgradeTests, "skip-eks-upgrade-tests", false, "if true, the EKS upgrade tests will be skipped") + flag.BoolVar(&skipLegacyTests, "skip-eks-legacy-tests", false, "if true, the EKS legacy tests will be skipped") } func TestE2E(t *testing.T) { @@ -77,6 +79,10 @@ func runUpgradeTests() bool { return !skipUpgradeTests } +func runLegacyTests() bool { + return !skipLegacyTests +} + func initScheme() *runtime.Scheme { sc := shared.DefaultScheme() _ = expinfrav1.AddToScheme(sc)
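As a sanity check for reviewers, below is a minimal unit-test sketch (not part of this change) showing how the new AWSManagedClusterReconciler behaviour could be exercised with controller-runtime's fake client. The object names, namespace, endpoint value, and the test file itself are illustrative assumptions; the PR itself only adds the e2e coverage shown above.

package controllers_test

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
	"sigs.k8s.io/cluster-api-provider-aws/v2/controllers"
	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
)

// TestAWSManagedClusterMirrorsControlPlane checks that the reconciler copies the API endpoint
// from the AWSManagedControlPlane onto the AWSManagedCluster and marks it ready.
func TestAWSManagedClusterMirrorsControlPlane(t *testing.T) {
	scheme := runtime.NewScheme()
	_ = clusterv1.AddToScheme(scheme)
	_ = infrav1.AddToScheme(scheme)
	_ = ekscontrolplanev1.AddToScheme(scheme)

	// An owning Cluster whose controlPlaneRef points at the managed control plane.
	cluster := &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "managed-cluster", Namespace: "default"},
		Spec: clusterv1.ClusterSpec{
			ControlPlaneRef: &corev1.ObjectReference{
				Kind:      "AWSManagedControlPlane",
				Name:      "managed-cluster-control-plane",
				Namespace: "default",
			},
		},
	}
	// The control plane already has an endpoint, as it would after EKS cluster creation.
	controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{
		ObjectMeta: metav1.ObjectMeta{Name: "managed-cluster-control-plane", Namespace: "default"},
		Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{
			ControlPlaneEndpoint: clusterv1.APIEndpoint{Host: "example.eks.amazonaws.com", Port: 443},
		},
	}
	// The infrastructure object owned by the Cluster; the reconciler fills in its spec and status.
	managedCluster := &infrav1.AWSManagedCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "managed-cluster",
			Namespace: "default",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: clusterv1.GroupVersion.String(),
				Kind:       "Cluster",
				Name:       "managed-cluster",
			}},
		},
	}

	c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cluster, controlPlane, managedCluster).Build()
	r := &controllers.AWSManagedClusterReconciler{Client: c}

	if _, err := r.Reconcile(context.Background(), ctrl.Request{
		NamespacedName: client.ObjectKeyFromObject(managedCluster),
	}); err != nil {
		t.Fatalf("unexpected reconcile error: %v", err)
	}

	updated := &infrav1.AWSManagedCluster{}
	if err := c.Get(context.Background(), client.ObjectKeyFromObject(managedCluster), updated); err != nil {
		t.Fatalf("failed to get AWSManagedCluster: %v", err)
	}
	if !updated.Status.Ready || updated.Spec.ControlPlaneEndpoint.Host != "example.eks.amazonaws.com" {
		t.Fatalf("expected endpoint to be mirrored and status ready, got %+v", updated)
	}
}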