Commit d9d0d6f (parent a873934)
WIP - move ports out of instance

12 files changed: +791 -97 lines changed

api/v1alpha7/conditions_consts.go
Lines changed: 6 additions & 0 deletions

@@ -22,6 +22,10 @@ const (
 	// InstanceReadyCondition reports on current status of the OpenStack instance. Ready indicates the instance is in a Running state.
 	InstanceReadyCondition clusterv1.ConditionType = "InstanceReady"
 
+	// PortsReadyCondition reports on the current status of the ports. Ready indicates that all ports have been created
+	// successfully and ready to be attached to the instance.
+	PortsReadyCondition clusterv1.ConditionType = "PortsReady"
+
 	// WaitingForClusterInfrastructureReason used when machine is waiting for cluster infrastructure to be ready before proceeding.
 	WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure"
 	// WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding.
@@ -42,6 +46,8 @@ const (
 	InstanceDeleteFailedReason = "InstanceDeleteFailed"
 	// OpenstackErrorReason used when there is an error communicating with OpenStack.
 	OpenStackErrorReason = "OpenStackError"
+	// PortsCreateFailedReason used when creating the ports failed.
+	PortsCreateFailedReason = "PortsCreateFailed"
 )
 
 const (
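The new constants are consumed through the cluster-api conditions helpers, as the controller changes further down show. A minimal sketch of the intended usage, assuming it sits in the same api/v1alpha7 package; the helper function itself is illustrative and not part of the commit:

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// markPortsCondition is a hypothetical helper mirroring what the machine
// controller in this commit does: mark PortsReady true once the ports exist,
// or false with PortsCreateFailedReason when creation failed.
func markPortsCondition(openStackMachine *OpenStackMachine, createErr error) {
	if createErr != nil {
		conditions.MarkFalse(openStackMachine, PortsReadyCondition, PortsCreateFailedReason, clusterv1.ConditionSeverityError, createErr.Error())
		return
	}
	conditions.MarkTrue(openStackMachine, PortsReadyCondition)
}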

api/v1alpha7/openstackmachine_types.go
Lines changed: 4 additions & 0 deletions

@@ -113,6 +113,10 @@ type OpenStackMachineStatus struct {
 
 	FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"`
 
+	// PortsStatus is the status of the ports associated with the machine.
+	// +optional
+	PortsStatus []PortStatus `json:"portsStatus,omitempty"`
+
 	// FailureMessage will be set in the event that there is a terminal problem
 	// reconciling the Machine and will contain a more verbose string suitable
 	// for logging and human consumption.

api/v1alpha7/types.go
Lines changed: 6 additions & 0 deletions

@@ -128,6 +128,12 @@ type PortOpts struct {
 	ValueSpecs []ValueSpec `json:"valueSpecs,omitempty"`
 }
 
+type PortStatus struct {
+	// ID is the unique identifier of the port.
+	// +optional
+	ID string `json:"id,omitempty"`
+}
+
 type BindingProfile struct {
 	// OVSHWOffload enables or disables the OVS hardware offload feature.
 	OVSHWOffload bool `json:"ovsHWOffload,omitempty"`
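PortStatus currently records only the port ID. Together with the new OpenStackMachineStatus.PortsStatus field above, this is what the machine controller later flattens into the plain list of IDs handed to CreateInstance. A small sketch of that flattening under the assumption it sits next to the type; the helper name is illustrative, as the commit does this loop inline in the controller:

// portIDsFromStatus collects the Neutron port IDs recorded in status so they
// can be passed to the compute service.
func portIDsFromStatus(portsStatus []PortStatus) []string {
	ids := make([]string, len(portsStatus))
	for i, port := range portsStatus {
		ids[i] = port.ID
	}
	return ids
}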

controllers/openstackcluster_controller.go
Lines changed: 1 addition & 1 deletion

@@ -349,7 +349,7 @@ func reconcileBastion(scope scope.Scope, cluster *clusterv1.Cluster, openStackCl
 		}
 	}
 
-	instanceStatus, err = computeService.CreateInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name)
+	instanceStatus, err = computeService.CreateInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name, []string{})
 	if err != nil {
 		return fmt.Errorf("failed to reconcile bastion: %w", err)
 	}

controllers/openstackmachine_controller.go
Lines changed: 77 additions & 23 deletions

@@ -328,9 +328,24 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
 		return ctrl.Result{}, err
 	}
 
-	instanceStatus, err := r.getOrCreate(scope.Logger(), cluster, openStackCluster, machine, openStackMachine, computeService, userData)
+	managedSecurityGroups := getManagedSecurityGroups(openStackCluster, machine, openStackMachine)
+	instanceTags := getInstanceTags(openStackMachine, openStackCluster)
+	portsStatus, err := r.getOrCreatePorts(scope.Logger(), clusterName, openStackCluster, openStackMachine.Spec.Ports, openStackMachine.Spec.Trunk, managedSecurityGroups, instanceTags, openStackMachine.Name, openStackMachine, networkingService)
 	if err != nil {
-		// Conditions set in getOrCreate
+		// Conditions set in getOrCreatePorts
+		return ctrl.Result{}, err
+	}
+	// TODO(emilien) do we want to deal with Ports Status like we do for the nova instance? Might be expensive...
+	// In the meantime, we consider that ports are ready if they are created.
+	conditions.MarkTrue(openStackMachine, infrav1.PortsReadyCondition)
+	portIDs := make([]string, len(*portsStatus))
+	for i, portStatus := range *portsStatus {
+		portIDs[i] = portStatus.ID
+	}
+
+	instanceStatus, err := r.getOrCreateInstance(scope.Logger(), cluster, openStackCluster, machine, openStackMachine, computeService, userData, portIDs)
+	if err != nil {
+		// Conditions set in getOrCreateInstance
 		return ctrl.Result{}, err
 	}
 
@@ -430,7 +445,33 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
 	return ctrl.Result{}, nil
 }
 
-func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string) (*compute.InstanceStatus, error) {
+func (r *OpenStackMachineReconciler) getOrCreatePorts(logger logr.Logger, clusterName string, openStackCluster *infrav1.OpenStackCluster, machinePorts []infrav1.PortOpts, trunkEnabled bool, securityGroups []infrav1.SecurityGroupFilter, instanceTags []string, instanceName string, openStackMachine *infrav1.OpenStackMachine, networkingService *networking.Service) (*[]infrav1.PortStatus, error) {
+	// TODO(emilien) implement portsStatus where we check if the machine has ports created and part of the OpenStackMachine.Status
+	// Check `GetInstanceStatusByName` as it's done for instances. For each port found in Status, we might want get the port name and verify there is no duplicate.
+	// If Status is empty, we create the ports and add them to the Status.
+	// If Status is not empty, we try to adopt the existing ports by checking if the ports are still there and if not, we create them and add them to the Status.
+
+	// For now, we create the ports.
+	var portsStatus *[]infrav1.PortStatus
+	var err error
+
+	if openStackMachine.Status.PortsStatus == nil {
+		logger.Info("Creating ports for OpenStackMachine", "name", openStackMachine.Name)
+		portsStatus, err = networkingService.CreatePorts(openStackMachine, clusterName, openStackCluster, machinePorts, trunkEnabled, securityGroups, instanceTags, instanceName)
+		if err != nil {
+			// Add Conditions
+			conditions.MarkFalse(openStackMachine, infrav1.PortsReadyCondition, infrav1.PortsCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
+			return nil, fmt.Errorf("create ports: %w", err)
+		}
+	}
+
+	// TODO(emilien) maybe we want to set PortsStatus in OpenStackMachineStatus here instead of in port.go?
+	// Major difference of doing it in port.go is that we can add ports one by one into the status instead of all at once in the controller.
+
+	return portsStatus, nil
+}
+
+func (r *OpenStackMachineReconciler) getOrCreateInstance(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string, portIDs []string) (*compute.InstanceStatus, error) {
 	instanceStatus, err := computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name)
 	if err != nil {
 		logger.Info("Unable to get OpenStack instance", "name", openStackMachine.Name)
@@ -441,7 +482,7 @@ func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *cl
 	if instanceStatus == nil {
 		instanceSpec := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData)
 		logger.Info("Machine does not exist, creating Machine", "name", openStackMachine.Name)
-		instanceStatus, err = computeService.CreateInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name)
+		instanceStatus, err = computeService.CreateInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name, portIDs)
 		if err != nil {
 			conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
 			return nil, fmt.Errorf("create OpenStack instance: %w", err)
@@ -472,6 +513,19 @@ func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *
 		instanceSpec.FailureDomain = *machine.Spec.FailureDomain
 	}
 
+	instanceSpec.Tags = getInstanceTags(openStackMachine, openStackCluster)
+
+	instanceSpec.SecurityGroups = getManagedSecurityGroups(openStackCluster, machine, openStackMachine)
+
+	instanceSpec.Ports = openStackMachine.Spec.Ports
+
+	return &instanceSpec
+}
+
+// getInstanceTags returns the tags that should be applied to the instance.
+// The tags are a combination of the tags specified on the OpenStackMachine and
+// the ones specified on the OpenStackCluster.
+func getInstanceTags(openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) []string {
 	machineTags := []string{}
 
 	// Append machine specific tags
@@ -494,31 +548,31 @@ func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *
 	}
 	machineTags = deduplicate(machineTags)
 
-	instanceSpec.Tags = machineTags
+	return machineTags
+}
 
-	instanceSpec.SecurityGroups = openStackMachine.Spec.SecurityGroups
-	if openStackCluster.Spec.ManagedSecurityGroups {
-		var managedSecurityGroup string
-		if util.IsControlPlaneMachine(machine) {
-			if openStackCluster.Status.ControlPlaneSecurityGroup != nil {
-				managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID
-			}
-		} else {
-			if openStackCluster.Status.WorkerSecurityGroup != nil {
-				managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID
-			}
+// getManagedSecurityGroups returns a combination of OpenStackMachine.Spec.SecurityGroups
+// and the security group managed by the OpenStackCluster whether it's a control plane or a worker machine.
+func getManagedSecurityGroups(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) []infrav1.SecurityGroupFilter {
+	machineSpecSecurityGroups := openStackMachine.Spec.SecurityGroups
+	var managedSecurityGroup string
+	if util.IsControlPlaneMachine(machine) {
+		if openStackCluster.Status.ControlPlaneSecurityGroup != nil {
+			managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID
 		}
-
-		if managedSecurityGroup != "" {
-			instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupFilter{
-				ID: managedSecurityGroup,
-			})
+	} else {
+		if openStackCluster.Status.WorkerSecurityGroup != nil {
+			managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID
 		}
 	}
 
-	instanceSpec.Ports = openStackMachine.Spec.Ports
+	if managedSecurityGroup != "" {
+		machineSpecSecurityGroups = append(machineSpecSecurityGroups, infrav1.SecurityGroupFilter{
+			ID: managedSecurityGroup,
+		})
+	}
 
-	return &instanceSpec
+	return machineSpecSecurityGroups
 }
 
 func (r *OpenStackMachineReconciler) reconcileLoadBalancerMember(scope scope.Scope, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, instanceNS *compute.InstanceNetworkStatus, clusterName string) error {
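getOrCreatePorts delegates the actual creation to a new networkingService.CreatePorts, whose implementation is not part of this hunk. Going only by the call site and return type above, the networking service would need to expose something with roughly the following shape. This is a sketch; the first parameter is written as runtime.Object by analogy with GetOrCreatePort and may differ in the committed code:

import (
	"k8s.io/apimachinery/pkg/runtime"

	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7"
)

// Inferred from the getOrCreatePorts call above; the committed method lives in
// pkg/cloud/services/networking and is not shown in this section.
type portsCreator interface {
	CreatePorts(eventObject runtime.Object, clusterName string, openStackCluster *infrav1.OpenStackCluster,
		desiredPorts []infrav1.PortOpts, trunkEnabled bool, securityGroups []infrav1.SecurityGroupFilter,
		instanceTags []string, instanceName string) (*[]infrav1.PortStatus, error)
}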

controllers/suite_test.go
Lines changed: 1 addition & 1 deletion

@@ -151,7 +151,7 @@ var _ = Describe("When calling getOrCreate", func() {
 	openStackMachine := &infrav1.OpenStackMachine{}
 
 	mockScopeFactory.ComputeClient.EXPECT().ListServers(gomock.Any()).Return(nil, errors.New("Test error when listing servers"))
-	instanceStatus, err := reconsiler.getOrCreate(logger, cluster, openStackCluster, machine, openStackMachine, computeService, "")
+	instanceStatus, err := reconsiler.getOrCreateInstance(logger, cluster, openStackCluster, machine, openStackMachine, computeService, "", []string{})
 	Expect(err).To(HaveOccurred())
 	Expect(instanceStatus).To(BeNil())
 	conditions := openStackMachine.GetConditions()

pkg/clients/networking.go
Lines changed: 5 additions & 0 deletions

@@ -36,6 +36,11 @@ import (
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/metrics"
 )
 
+// PortExt is the base gophercloud Port with extensions used by PortStatus.
+type PortExt struct {
+	ports.Port
+}
+
 type NetworkClient interface {
 	ListFloatingIP(opts floatingips.ListOptsBuilder) ([]floatingips.FloatingIP, error)
 	CreateFloatingIP(opts floatingips.CreateOptsBuilder) (*floatingips.FloatingIP, error)
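PortExt gives the clients package a place to hang gophercloud port extensions without leaking the raw ports.Port type to callers; for now it only embeds the base struct. A hypothetical conversion helper, not in this commit, showing how it relates to the new infrav1.PortStatus, which keeps just the ID:

import (
	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7"
	"sigs.k8s.io/cluster-api-provider-openstack/pkg/clients"
)

// portStatusOf maps a created port to the slim status representation stored on
// the OpenStackMachine. Name and location of this helper are assumptions.
func portStatusOf(port clients.PortExt) infrav1.PortStatus {
	return infrav1.PortStatus{ID: port.ID}
}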

pkg/cloud/services/compute/instance.go
Lines changed: 37 additions & 29 deletions

@@ -90,7 +90,6 @@ func (s *Service) normalizePortTarget(port *infrav1.PortOpts, openStackCluster *
 			if fixedIP.Subnet == nil {
 				continue
 			}
-
 			subnet, err := networkingService.GetSubnetByFilter(fixedIP.Subnet)
 			if err != nil {
 				// Multiple matches might be ok later when we restrict matches to a single network
@@ -221,8 +220,8 @@ func (s *Service) constructPorts(openStackCluster *infrav1.OpenStackCluster, ins
 	return ports, nil
 }
 
-func (s *Service) CreateInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string) (*InstanceStatus, error) {
-	return s.createInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus)
+func (s *Service) CreateInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, portIDs []string) (*InstanceStatus, error) {
+	return s.createInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus, portIDs)
 }
 
 func (s *Service) getAndValidateFlavor(flavorName string) (*flavors.Flavor, error) {
@@ -234,7 +233,7 @@ func (s *Service) getAndValidateFlavor(flavorName string) (*flavors.Flavor, erro
 	return f, nil
 }
 
-func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration) (*InstanceStatus, error) {
+func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration, portIDs []string) (*InstanceStatus, error) {
 	var server *clients.ServerExt
 	portList := []servers.Network{}
 
@@ -254,41 +253,50 @@ func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluste
 			return
 		}
 
+		// TODO(emilien) we probably want to move that in port management
 		if err := s.deletePorts(eventObject, portList); err != nil {
 			s.scope.Logger().V(4).Error(err, "Failed to clean up ports after failure")
 		}
 	}()
 
-	ports, err := s.constructPorts(openStackCluster, instanceSpec)
-	if err != nil {
-		return nil, err
-	}
-
-	networkingService, err := s.getNetworkingService()
-	if err != nil {
-		return nil, err
-	}
-
-	securityGroups, err := networkingService.GetSecurityGroups(instanceSpec.SecurityGroups)
-	if err != nil {
-		return nil, fmt.Errorf("error getting security groups: %v", err)
-	}
-
-	for i := range ports {
-		portOpts := &ports[i]
-		iTags := []string{}
-		if len(instanceSpec.Tags) > 0 {
-			iTags = instanceSpec.Tags
+	if len(portIDs) == 0 {
+		ports, err := s.constructPorts(openStackCluster, instanceSpec)
+		if err != nil {
+			return nil, err
 		}
-		portName := networking.GetPortName(instanceSpec.Name, portOpts, i)
-		port, err := networkingService.GetOrCreatePort(eventObject, clusterName, portName, portOpts, securityGroups, iTags)
+
+		networkingService, err := s.getNetworkingService()
 		if err != nil {
 			return nil, err
 		}
 
-		portList = append(portList, servers.Network{
-			Port: port.ID,
-		})
+		securityGroups, err := networkingService.GetSecurityGroups(instanceSpec.SecurityGroups)
+		if err != nil {
+			return nil, fmt.Errorf("error getting security groups: %v", err)
+		}
+
+		for i := range ports {
+			portOpts := &ports[i]
+			iTags := []string{}
+			if len(instanceSpec.Tags) > 0 {
+				iTags = instanceSpec.Tags
+			}
+			portName := networking.GetPortName(instanceSpec.Name, portOpts, i)
+			port, err := networkingService.GetOrCreatePort(eventObject, clusterName, portName, portOpts, securityGroups, iTags)
+			if err != nil {
+				return nil, err
+			}
+
+			portList = append(portList, servers.Network{
+				Port: port.ID,
			})
+		}
+	} else {
+		for _, portID := range portIDs {
+			portList = append(portList, servers.Network{
				Port: portID,
			})
		}
 	}
 
 	instanceCreateTimeout := getTimeout("CLUSTER_API_OPENSTACK_INSTANCE_CREATE_TIMEOUT", timeoutInstanceCreate)
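For callers the change is purely the extra argument: pass the IDs of ports created up front and createInstanceImpl skips its own port construction; pass an empty slice, as the bastion and the existing tests do, and the old behaviour is preserved. A hedged caller-side sketch with assumed helper and variable names:

import (
	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7"
	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute"
)

// createServer illustrates the two call patterns enabled by the new portIDs
// parameter; it is not part of the commit.
func createServer(computeService *compute.Service, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster, spec *compute.InstanceSpec, clusterName string, portIDs []string) (*compute.InstanceStatus, error) {
	// Non-empty portIDs: the ports were already created by the machine
	// controller and are simply attached to the server request.
	// Empty portIDs: CreateInstance constructs the ports from spec.Ports,
	// keeping the pre-existing behaviour (bastion, tests).
	return computeService.CreateInstance(openStackMachine, openStackCluster, spec, clusterName, portIDs)
}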

pkg/cloud/services/compute/instance_test.go
Lines changed: 1 addition & 43 deletions

@@ -48,7 +48,6 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/clients"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock"
-	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
 )
 
@@ -77,47 +76,6 @@ func (m *gomegaMockMatcher) Matches(x interface{}) bool {
 	return success
 }
 
-func Test_getPortName(t *testing.T) {
-	type args struct {
-		instanceName string
-		opts         *infrav1.PortOpts
-		netIndex     int
-	}
-	tests := []struct {
-		name string
-		args args
-		want string
-	}{
-		{
-			name: "with nil PortOpts",
-			args: args{"test-1-instance", nil, 2},
-			want: "test-1-instance-2",
-		},
-		{
-			name: "with PortOpts name suffix",
-			args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo"}, 4},
-			want: "test-1-instance-foo",
-		},
-		{
-			name: "without PortOpts name suffix",
-			args: args{"test-1-instance", &infrav1.PortOpts{}, 4},
-			want: "test-1-instance-4",
-		},
-		{
-			name: "with PortOpts name suffix",
-			args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo2", Network: &infrav1.NetworkFilter{ID: "bar"}, DisablePortSecurity: pointer.Bool(true)}, 4},
-			want: "test-1-instance-foo2",
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			if got := networking.GetPortName(tt.args.instanceName, tt.args.opts, tt.args.netIndex); got != tt.want {
-				t.Errorf("getPortName() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
-
 func TestService_getImageID(t *testing.T) {
 	const imageIDA = "ce96e584-7ebc-46d6-9e55-987d72e3806c"
 	const imageIDB = "8f536889-5198-42d7-8314-cb78f4f4755c"
@@ -976,7 +934,7 @@ func TestService_ReconcileInstance(t *testing.T) {
 	}
 
 	// Call CreateInstance with a reduced retry interval to speed up the test
-	_, err = s.createInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond)
+	_, err = s.createInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond, []string{})
 	if (err != nil) != tt.wantErr {
 		t.Errorf("Service.CreateInstance() error = %v, wantErr %v", err, tt.wantErr)
 		return
