Skip to content
This repository was archived by the owner on Jul 30, 2021. It is now read-only.

🏃Document tests with minor clean ups #209

Merged
merged 1 commit into from
Sep 5, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions controllers/kubeadmconfig_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,7 @@ func (r *KubeadmConfigReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re
ctx := context.Background()
log := r.Log.WithValues("kubeadmconfig", req.NamespacedName)

// Lookup the kubeadm config
config := &bootstrapv1.KubeadmConfig{}
if err := r.Get(ctx, req.NamespacedName, config); err != nil {
if apierrors.IsNotFound(err) {
Expand All @@ -105,6 +106,7 @@ func (r *KubeadmConfigReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re
return ctrl.Result{}, nil
}

// Look up the Machine that owns this KubeadmConfig if there is one
machine, err := util.GetOwnerMachine(ctx, r.Client, config.ObjectMeta)
if err != nil {
log.Error(err, "could not get owner machine")
Expand All @@ -118,15 +120,18 @@ func (r *KubeadmConfigReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re

// Ignore machines that already have bootstrap data
if machine.Spec.Bootstrap.Data != nil {
// TODO: mark the config as ready?
return ctrl.Result{}, nil
}

// Lookup the cluster the machine is associated with
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
if err != nil {
log.Error(err, "could not get cluster by machine metadata")
return ctrl.Result{}, err
}

// Wait patiently for the infrastructure to be ready
if !cluster.Status.InfrastructureReady {
log.Info("Infrastructure is not ready, waiting until ready.")
return ctrl.Result{}, nil
Expand Down
65 changes: 0 additions & 65 deletions controllers/kubeadmconfig_controller_reconciler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,74 +58,9 @@ var _ = Describe("KubeadmConfigReconciler", func() {
Expect(err).To(Succeed())
Expect(result.Requeue).To(BeFalse())
})
/*
When apimachinery decodes into a typed struct, the decoder strips the TypeMeta from the object;
the theory at the time being that because it was a typed object, you knew its API version, group, and kind.
in fact this leads to errors with k8sClient, because the object loses its GVK, and this causes r.Status().Patch to fail
with "the server could not find the requested resource (patch kubeadmconfigs.bootstrap.cluster.x-k8s.io control-plane-config)"

There's a WIP PR to k/k to fix this.
After that merges, we can implement more behavioral tests

It("should process only control plane machines when infrastructure is ready but control plane is not", func() {
cluster := newCluster("cluster2")
Expect(k8sClient.Create(context.Background(), cluster)).To(Succeed())
cluster.Status.InfrastructureReady = true
Expect(k8sClient.Status().Update(context.Background(), cluster)).To(Succeed())

controlplaneMachine := newMachine(cluster, "control-plane")
controlplaneMachine.ObjectMeta.Labels[clusterv1alpha2.MachineControlPlaneLabelName] = "true"
Expect(k8sClient.Create(context.Background(), controlplaneMachine)).To(Succeed())

controlplaneConfig := newKubeadmConfig(controlplaneMachine, "control-plane-config")
controlplaneConfig.Spec.ClusterConfiguration = &kubeadmv1beta1.ClusterConfiguration{}
controlplaneConfig.Spec.InitConfiguration = &kubeadmv1beta1.InitConfiguration{}
Expect(k8sClient.Create(context.Background(), controlplaneConfig)).To(Succeed())

workerMachine := newMachine(cluster, "worker")
Expect(k8sClient.Create(context.Background(), workerMachine)).To(Succeed())

workerConfig := newKubeadmConfig(workerMachine, "worker-config")
Expect(k8sClient.Create(context.Background(), workerConfig)).To(Succeed())

reconciler := KubeadmConfigReconciler{
Log: log.Log,
Client: k8sClient,
}

By("Calling reconcile on a config corresponding to worker node should requeue")
resultWorker, err := reconciler.Reconcile(ctrl.Request{
NamespacedName: types.NamespacedName{
Namespace: "default",
Name: "worker-config",
},
})
Expect(err).To(Succeed())
Expect(resultWorker.Requeue).To(BeFalse())
Expect(resultWorker.RequeueAfter).To(Equal(30 * time.Second))

By("Calling reconcile on a config corresponding to a control plane node should create BootstrapData")
resultControlPlane, err := reconciler.Reconcile(ctrl.Request{
NamespacedName: types.NamespacedName{
Namespace: "default",
Name: "control-plane-config",
},
})
Expect(err).To(Succeed())
Expect(resultControlPlane.Requeue).To(BeFalse())
Expect(resultControlPlane.RequeueAfter).To(BeZero())

controlplaneConfigAfter, err := getKubeadmConfig(k8sClient, "control-plane-config")
Expect(err).To(Succeed())
Expect(controlplaneConfigAfter.Status.Ready).To(BeTrue())
Expect(controlplaneConfigAfter.Status.BootstrapData).NotTo(BeEmpty())
})
*/
})
})

// test utils

// getKubeadmConfig returns a KubeadmConfig object from the cluster
func getKubeadmConfig(c client.Client, name string) (*bootstrapv1.KubeadmConfig, error) {
ctx := context.Background()
Expand Down
Loading