Commit 801f5ef

Simplify bastion resource initialisation
Firstly, we remove the resource reconcile calls from the cluster flow before calling reconcileNormal/reconcileDelete, because the guards around them and various other guards throughout the code are heavily inter-dependent and hard to reason about. Instead, we push them to the places where we:

* know they are required
* know we are sufficiently initialised that they can work

We resolve references at the top of reconcileBastion. We know the cluster has been initialised at this point, so we don't need to guard against it. This also means that it is always called when entering that function, so we don't need to guard against it not having been called during first cluster initialisation. We also force that function to re-reconcile if it calls deleteBastion(), because deleteBastion() removes the bastion status. Because we reconcile again, we always know that the status is set.

We also add an explicit call to resource reconcile in the reconcileDelete flow. This is the only place where we now need a 'weird' guard against the cluster network not having been set, and we add a comment about that appropriate to its weirdness.
1 parent cc78607 commit 801f5ef
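
For orientation, here is a condensed sketch of the new reconcileBastion entry flow described above. It is paraphrased from the diff below with simplified stand-in types (the real controller uses infrav1.OpenStackCluster, scope.WithLogger and ctrl.Result), so treat it as an illustration of the control flow rather than the controller code itself:

```go
package main

import "fmt"

// Simplified stand-in types; the real controller operates on
// infrav1.OpenStackCluster, scope.WithLogger and ctrl.Result.
type bastionSpec struct{ Enabled bool }

type bastionStatus struct{ ID string }

type clusterState struct {
	Bastion       *bastionSpec
	BastionStatus *bastionStatus
}

type result struct{}

// Stand-in for the new resolveBastionResources helper: reports whether
// resolving referenced/dependent bastion resources changed the status.
func resolveBastionResources(c *clusterState) (bool, error) {
	return false, nil
}

// Stand-in for deleteBastion: it clears the bastion status.
func deleteBastion(c *clusterState) error {
	c.BastionStatus = nil
	return nil
}

// reconcileBastion mirrors the new entry flow: resolve first, requeue if the
// status changed, delete (and requeue) when the bastion is disabled, and only
// then continue. A nil result means "nothing to report, carry on".
func reconcileBastion(c *clusterState) (*result, error) {
	changed, err := resolveBastionResources(c)
	if err != nil {
		return nil, err
	}
	if changed {
		// Status changed: persist it and reconcile again.
		return &result{}, nil
	}

	if c.Bastion == nil || !c.Bastion.Enabled {
		if c.BastionStatus != nil {
			if err := deleteBastion(c); err != nil {
				return nil, err
			}
			// deleteBastion cleared the status: reconcile again.
			return &result{}, nil
		}
		// Nothing to do.
		return nil, nil
	}

	// ... create/adopt the bastion instance, ports and floating IP ...
	return nil, nil
}

func main() {
	res, err := reconcileBastion(&clusterState{Bastion: &bastionSpec{Enabled: false}})
	fmt.Println(res, err)
}
```

In reconcileNormal, a nil result means "continue with the rest of the cluster reconcile", while a non-nil result is returned to the caller so the status change is persisted before the next pass.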

2 files changed: 100 additions & 61 deletions

controllers/openstackcluster_controller.go

Lines changed: 97 additions & 58 deletions
@@ -24,6 +24,7 @@ import (
     "time"
 
     "github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
+    "github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
     "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
     corev1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -123,30 +124,6 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
     }
     scope := scope.NewWithLogger(clientScope, log)
 
-    // Resolve and store referenced & dependent resources for the bastion
-    if openStackCluster.Spec.Bastion != nil && openStackCluster.Spec.Bastion.Enabled {
-        if openStackCluster.Status.Bastion == nil {
-            openStackCluster.Status.Bastion = &infrav1.BastionStatus{}
-        }
-        changed, err := compute.ResolveReferencedMachineResources(scope, openStackCluster, &openStackCluster.Spec.Bastion.Instance, &openStackCluster.Status.Bastion.ReferencedResources)
-        if err != nil {
-            return reconcile.Result{}, err
-        }
-        if changed {
-            // If the referenced resources have changed, we need to update the OpenStackCluster status now.
-            return reconcile.Result{}, nil
-        }
-
-        changed, err = compute.ResolveDependentBastionResources(scope, openStackCluster, bastionName(cluster.Name))
-        if err != nil {
-            return reconcile.Result{}, err
-        }
-        if changed {
-            // If the dependent resources have changed, we need to update the OpenStackCluster status now.
-            return reconcile.Result{}, nil
-        }
-    }
-
     // Handle deleted clusters
     if !openStackCluster.DeletionTimestamp.IsZero() {
         return r.reconcileDelete(ctx, scope, cluster, openStackCluster)
@@ -173,8 +150,17 @@ func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, scope
         return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
     }
 
-    if err := deleteBastion(scope, cluster, openStackCluster); err != nil {
-        return reconcile.Result{}, err
+    // A bastion may have been created if cluster initialisation previously reached populating the network status
+    // We attempt to delete it even if no status was written, just in case
+    if openStackCluster.Status.Network != nil {
+        // Attempt to resolve bastion resources before delete. We don't need to worry about starting if the resources have changed on update.
+        if _, err := resolveBastionResources(scope, openStackCluster); err != nil {
+            return reconcile.Result{}, err
+        }
+
+        if err := deleteBastion(scope, cluster, openStackCluster); err != nil {
+            return reconcile.Result{}, err
+        }
     }
 
     networkingService, err := networking.NewService(scope)
@@ -234,6 +220,32 @@ func contains(arr []string, target string) bool {
     return false
 }
 
+func resolveBastionResources(scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster) (bool, error) {
+    if openStackCluster.Spec.Bastion != nil && openStackCluster.Spec.Bastion.Enabled {
+        if openStackCluster.Status.Bastion == nil {
+            openStackCluster.Status.Bastion = &infrav1.BastionStatus{}
+        }
+        changed, err := compute.ResolveReferencedMachineResources(scope, openStackCluster, &openStackCluster.Spec.Bastion.Instance, &openStackCluster.Status.Bastion.ReferencedResources)
+        if err != nil {
+            return false, err
+        }
+        if changed {
+            // If the referenced resources have changed, we need to update the OpenStackCluster status now.
+            return true, nil
+        }
+
+        changed, err = compute.ResolveDependentBastionResources(scope, openStackCluster, bastionName(openStackCluster.Name))
+        if err != nil {
+            return false, err
+        }
+        if changed {
+            // If the dependent resources have changed, we need to update the OpenStackCluster status now.
+            return true, nil
+        }
+    }
+    return false, nil
+}
+
 func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
     scope.Logger().Info("Deleting Bastion")
 
@@ -307,7 +319,7 @@ func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStac
         openStackCluster.Status.Bastion.DependentResources.Ports = nil
     }
 
-    scope.Logger().Info("Deleted Bastion for cluster %s", cluster.Name)
+    scope.Logger().Info("Deleted Bastion")
 
     openStackCluster.Status.Bastion = nil
     delete(openStackCluster.ObjectMeta.Annotations, BastionInstanceHashAnnotation)
@@ -335,8 +347,11 @@ func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openSt
     }
 
     result, err := reconcileBastion(scope, cluster, openStackCluster)
-    if err != nil || !reflect.DeepEqual(result, reconcile.Result{}) {
-        return result, err
+    if err != nil {
+        return reconcile.Result{}, err
+    }
+    if result != nil {
+        return *result, nil
     }
 
     availabilityZones, err := computeService.GetAvailabilityZones()
@@ -366,102 +381,126 @@ func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openSt
     return reconcile.Result{}, nil
 }
 
-func reconcileBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
-    scope.Logger().Info("Reconciling Bastion")
+func reconcileBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (*ctrl.Result, error) {
+    scope.Logger().V(4).Info("Reconciling Bastion")
 
-    if openStackCluster.Spec.Bastion == nil || !openStackCluster.Spec.Bastion.Enabled {
-        return reconcile.Result{}, deleteBastion(scope, cluster, openStackCluster)
+    changed, err := resolveBastionResources(scope, openStackCluster)
+    if err != nil {
+        return nil, err
+    }
+    if changed {
+        return &reconcile.Result{}, nil
     }
 
-    // If ports options aren't in the status, we'll re-trigger the reconcile to get them
-    // via adopting the referenced resources.
-    if len(openStackCluster.Status.Bastion.ReferencedResources.Ports) == 0 {
-        return reconcile.Result{}, nil
+    // No Bastion defined
+    if openStackCluster.Spec.Bastion == nil || !openStackCluster.Spec.Bastion.Enabled {
+        // Delete any existing bastion
+        if openStackCluster.Status.Bastion != nil {
+            if err := deleteBastion(scope, cluster, openStackCluster); err != nil {
+                return nil, err
+            }
+            // Reconcile again before continuing
+            return &reconcile.Result{}, nil
+        }
+
+        // Otherwise nothing to do
+        return nil, nil
     }
 
     computeService, err := compute.NewService(scope)
     if err != nil {
-        return reconcile.Result{}, err
+        return nil, err
     }
 
     networkingService, err := networking.NewService(scope)
     if err != nil {
-        return reconcile.Result{}, err
+        return nil, err
     }
 
     instanceSpec, err := bastionToInstanceSpec(openStackCluster, cluster)
     if err != nil {
-        return reconcile.Result{}, err
+        return nil, err
     }
     clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name)
+
     bastionHash, err := compute.HashInstanceSpec(instanceSpec)
     if err != nil {
-        return reconcile.Result{}, fmt.Errorf("failed computing bastion hash from instance spec: %w", err)
+        return nil, fmt.Errorf("failed computing bastion hash from instance spec: %w", err)
     }
     if bastionHashHasChanged(bastionHash, openStackCluster.ObjectMeta.Annotations) {
         if err := deleteBastion(scope, cluster, openStackCluster); err != nil {
-            return ctrl.Result{}, err
+            return nil, err
         }
+
+        // Add the new annotation and reconcile again before continuing
+        annotations.AddAnnotations(openStackCluster, map[string]string{BastionInstanceHashAnnotation: bastionHash})
+        return &reconcile.Result{}, nil
     }
 
     err = getOrCreateBastionPorts(openStackCluster, networkingService, cluster.Name)
     if err != nil {
         handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to get or create ports for bastion: %w", err))
-        return ctrl.Result{}, fmt.Errorf("failed to get or create ports for bastion: %w", err)
+        return nil, fmt.Errorf("failed to get or create ports for bastion: %w", err)
     }
     bastionPortIDs := GetPortIDs(openStackCluster.Status.Bastion.DependentResources.Ports)
 
     var instanceStatus *compute.InstanceStatus
     if openStackCluster.Status.Bastion != nil && openStackCluster.Status.Bastion.ID != "" {
         if instanceStatus, err = computeService.GetInstanceStatus(openStackCluster.Status.Bastion.ID); err != nil {
-            return reconcile.Result{}, err
+            return nil, err
         }
     }
     if instanceStatus == nil {
         // Check if there is an existing instance with bastion name, in case where bastion ID would not have been properly stored in cluster status
         if instanceStatus, err = computeService.GetInstanceStatusByName(openStackCluster, instanceSpec.Name); err != nil {
-            return reconcile.Result{}, err
+            return nil, err
         }
     }
     if instanceStatus == nil {
         instanceStatus, err = computeService.CreateInstance(openStackCluster, instanceSpec, bastionPortIDs)
         if err != nil {
-            return reconcile.Result{}, fmt.Errorf("failed to create bastion: %w", err)
+            return nil, fmt.Errorf("failed to create bastion: %w", err)
         }
     }
 
     // Save hash & status as soon as we know we have an instance
     instanceStatus.UpdateBastionStatus(openStackCluster)
-    annotations.AddAnnotations(openStackCluster, map[string]string{BastionInstanceHashAnnotation: bastionHash})
 
     // Make sure that bastion instance has a valid state
     switch instanceStatus.State() {
     case infrav1.InstanceStateError:
-        return ctrl.Result{}, fmt.Errorf("failed to reconcile bastion, instance state is ERROR")
+        return nil, fmt.Errorf("failed to reconcile bastion, instance state is ERROR")
     case infrav1.InstanceStateBuild, infrav1.InstanceStateUndefined:
         scope.Logger().Info("Waiting for bastion instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State())
-        return ctrl.Result{RequeueAfter: waitForBuildingInstanceToReconcile}, nil
+        return &reconcile.Result{RequeueAfter: waitForBuildingInstanceToReconcile}, nil
     case infrav1.InstanceStateDeleted:
-        // This should normally be handled by deleteBastion
-        openStackCluster.Status.Bastion = nil
-        return ctrl.Result{}, nil
+        // Not clear why this would happen, so try to clean everything up before reconciling again
+        if err := deleteBastion(scope, cluster, openStackCluster); err != nil {
+            return nil, err
+        }
+        return &reconcile.Result{}, nil
     }
 
     port, err := computeService.GetManagementPort(openStackCluster, instanceStatus)
     if err != nil {
         err = fmt.Errorf("getting management port for bastion: %w", err)
         handleUpdateOSCError(openStackCluster, err)
-        return ctrl.Result{}, err
+        return nil, err
     }
+
+    return bastionAddFloatingIP(openStackCluster, clusterName, port, networkingService)
+}
+
+func bastionAddFloatingIP(openStackCluster *infrav1.OpenStackCluster, clusterName string, port *ports.Port, networkingService *networking.Service) (*reconcile.Result, error) {
     fp, err := networkingService.GetFloatingIPByPortID(port.ID)
     if err != nil {
         handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to get or create floating IP for bastion: %w", err))
-        return ctrl.Result{}, fmt.Errorf("failed to get floating IP for bastion port: %w", err)
+        return nil, fmt.Errorf("failed to get floating IP for bastion port: %w", err)
     }
     if fp != nil {
         // Floating IP is already attached to bastion, no need to proceed
         openStackCluster.Status.Bastion.FloatingIP = fp.FloatingIP
-        return ctrl.Result{}, nil
+        return nil, nil
     }
 
     var floatingIP *string
@@ -477,17 +516,17 @@ func reconcileBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openS
     fp, err = networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterName, floatingIP)
     if err != nil {
         handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to get or create floating IP for bastion: %w", err))
-        return ctrl.Result{}, fmt.Errorf("failed to get or create floating IP for bastion: %w", err)
+        return nil, fmt.Errorf("failed to get or create floating IP for bastion: %w", err)
     }
     openStackCluster.Status.Bastion.FloatingIP = fp.FloatingIP
 
     err = networkingService.AssociateFloatingIP(openStackCluster, fp, port.ID)
     if err != nil {
         handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to associate floating IP with bastion: %w", err))
-        return ctrl.Result{}, fmt.Errorf("failed to associate floating IP with bastion: %w", err)
+        return nil, fmt.Errorf("failed to associate floating IP with bastion: %w", err)
     }
 
-    return ctrl.Result{}, nil
+    return nil, nil
 }
 
 func bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*compute.InstanceSpec, error) {

controllers/openstackcluster_controller_test.go

Lines changed: 3 additions & 3 deletions
@@ -302,7 +302,7 @@ var _ = Describe("OpenStackCluster controller", func() {
             },
         }))
         Expect(err).To(BeNil())
-        Expect(res).To(Equal(reconcile.Result{}))
+        Expect(res).To(BeNil())
     })
     It("should adopt an existing bastion Floating IP if even if its uuid is not stored in status", func() {
         testCluster.SetName("requeue-bastion")
@@ -387,7 +387,7 @@ var _ = Describe("OpenStackCluster controller", func() {
             },
         }))
         Expect(err).To(BeNil())
-        Expect(res).To(Equal(reconcile.Result{}))
+        Expect(res).To(BeNil())
     })
     It("should requeue until bastion becomes active", func() {
         testCluster.SetName("requeue-bastion")
@@ -466,7 +466,7 @@ var _ = Describe("OpenStackCluster controller", func() {
             },
         }))
         Expect(err).To(BeNil())
-        Expect(res).To(Equal(reconcile.Result{RequeueAfter: waitForBuildingInstanceToReconcile}))
+        Expect(res).To(Equal(&reconcile.Result{RequeueAfter: waitForBuildingInstanceToReconcile}))
     })
     It("should delete an existing bastion even if its uuid is not stored in status", func() {
         testCluster.SetName("delete-existing-bastion")
