diff --git a/charts/cluster-api-runtime-extensions-nutanix/templates/cluster-autoscaler/manifests/helm-addon-installation.yaml b/charts/cluster-api-runtime-extensions-nutanix/templates/cluster-autoscaler/manifests/helm-addon-installation.yaml index eea0ee103..d4d21bac9 100644 --- a/charts/cluster-api-runtime-extensions-nutanix/templates/cluster-autoscaler/manifests/helm-addon-installation.yaml +++ b/charts/cluster-api-runtime-extensions-nutanix/templates/cluster-autoscaler/manifests/helm-addon-installation.yaml @@ -9,7 +9,7 @@ metadata: data: values.yaml: |- --- - fullnameOverride: "cluster-autoscaler-{{ `{{ .Cluster.metadata.name }}` }}" + fullnameOverride: "cluster-autoscaler-{{ `{{ .Cluster.Name }}` }}" cloudProvider: clusterapi @@ -24,19 +24,19 @@ data: # Limit a single cluster-autoscaler Deployment to a single Cluster. autoDiscovery: - clusterName: "{{ `{{ .Cluster.metadata.name }}` }}" + clusterName: "{{ `{{ .Cluster.Name }}` }}" # The controller failed with an RBAC error trying to watch CAPI objects at the cluster scope without this. labels: - - namespace: "{{ `{{ .Cluster.metadata.namespace }}` }}" + - namespace: "{{ `{{ .Cluster.Namespace }}` }}" - clusterAPIConfigMapsNamespace: "{{ `{{ .Cluster.metadata.namespace }}` }}" + clusterAPIConfigMapsNamespace: "{{ `{{ .Cluster.Namespace }}` }}" # For workload clusters it is not possible to use the in-cluster client. # To simplify the configuration, use the admin kubeconfig generated by CAPI for all clusters. 
clusterAPIMode: kubeconfig-incluster clusterAPIWorkloadKubeconfigPath: /cluster/kubeconfig extraVolumeSecrets: kubeconfig: - name: "{{ `{{ .Cluster.metadata.name }}` }}-kubeconfig" + name: "{{ `{{ .Cluster.Name }}` }}-kubeconfig" mountPath: /cluster readOnly: true items: diff --git a/pkg/handlers/generic/lifecycle/clusterautoscaler/strategy_crs.go b/pkg/handlers/generic/lifecycle/clusterautoscaler/strategy_crs.go index ee592da3e..56000b30b 100644 --- a/pkg/handlers/generic/lifecycle/clusterautoscaler/strategy_crs.go +++ b/pkg/handlers/generic/lifecycle/clusterautoscaler/strategy_crs.go @@ -68,7 +68,7 @@ func (s crsStrategy) apply( cluster := &req.Cluster - data := templateData(defaultCM.Data, cluster.Name, cluster.Namespace) + data := templateData(cluster, defaultCM.Data) cm := &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ APIVersion: corev1.SchemeGroupVersion.String(), diff --git a/pkg/handlers/generic/lifecycle/clusterautoscaler/strategy_helmaddon.go b/pkg/handlers/generic/lifecycle/clusterautoscaler/strategy_helmaddon.go index 772e08824..b7fc5a551 100644 --- a/pkg/handlers/generic/lifecycle/clusterautoscaler/strategy_helmaddon.go +++ b/pkg/handlers/generic/lifecycle/clusterautoscaler/strategy_helmaddon.go @@ -73,6 +73,14 @@ func (s helmAddonStrategy) apply( return err } + // Cannot rely directly on Cluster.metadata.Name and Cluster.metadata.Namespace values + // because the selected Cluster will always be the management cluster. + // By templating the values, we will have a unique Deployment name for each cluster. 
+ values, err = templateValues(&req.Cluster, values) + if err != nil { + return fmt.Errorf("failed to template Helm values read from ConfigMap: %w", err) + } + hcp := &caaphv1.HelmChartProxy{ TypeMeta: metav1.TypeMeta{ APIVersion: caaphv1.GroupVersion.String(), diff --git a/pkg/handlers/generic/lifecycle/clusterautoscaler/template.go b/pkg/handlers/generic/lifecycle/clusterautoscaler/template.go index ae278fa7f..2b0341efa 100644 --- a/pkg/handlers/generic/lifecycle/clusterautoscaler/template.go +++ b/pkg/handlers/generic/lifecycle/clusterautoscaler/template.go @@ -3,20 +3,50 @@ package clusterautoscaler -import "strings" +import ( + "bytes" + "fmt" + "strings" + "text/template" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) const ( nameTemplate = "tmpl-clustername-tmpl" namespaceTemplate = "tmpl-clusternamespace-tmpl" ) -// templateData replaces templates 'tmpl-clustername-tmpl' and 'tmpl-clusternamespace-tmpl' in data -// with clusterName and clusterNamespace. -func templateData(data map[string]string, clusterName, clusterNamespace string) map[string]string { +// templateData replaces 'tmpl-clustername-tmpl' and 'tmpl-clusternamespace-tmpl' in data. +func templateData(cluster *clusterv1.Cluster, data map[string]string) map[string]string { templated := make(map[string]string, len(data)) for k, v := range data { - r := strings.NewReplacer(nameTemplate, clusterName, namespaceTemplate, clusterNamespace) + r := strings.NewReplacer(nameTemplate, cluster.Name, namespaceTemplate, cluster.Namespace) templated[k] = r.Replace(v) } return templated } + +// templateValues replaces Cluster.Name and Cluster.Namespace in Helm values text. 
+func templateValues(cluster *clusterv1.Cluster, text string) (string, error) { + clusterAutoscalerTemplate, err := template.New("").Parse(text) + if err != nil { + return "", fmt.Errorf("failed to parse template: %w", err) + } + + type input struct { + Cluster *clusterv1.Cluster + } + + templateInput := input{ + Cluster: cluster, + } + + var b bytes.Buffer + err = clusterAutoscalerTemplate.Execute(&b, templateInput) + if err != nil { + return "", fmt.Errorf("failed setting target Cluster name and namespace in template: %w", err) + } + + return b.String(), nil +} diff --git a/pkg/handlers/generic/lifecycle/clusterautoscaler/template_test.go b/pkg/handlers/generic/lifecycle/clusterautoscaler/template_test.go new file mode 100644 index 000000000..1e95e45ec --- /dev/null +++ b/pkg/handlers/generic/lifecycle/clusterautoscaler/template_test.go @@ -0,0 +1,267 @@ +// Copyright 2024 Nutanix. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package clusterautoscaler + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +func Test_templateData(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1.Cluster + data map[string]string + want map[string]string + }{ + { + name: "template data", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-namespace", + }, + }, + data: map[string]string{ + mapKey: testDeployment, + }, + want: map[string]string{ + mapKey: templatedDeployment, + }, + }, + { + name: "no data to template", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-namespace", + }, + }, + data: map[string]string{ + mapKey: templatedDeployment, + }, + want: map[string]string{ + mapKey: templatedDeployment, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := 
templateData(tt.cluster, tt.data) + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_templateValues(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1.Cluster + text string + want string + }{ + { + name: "template values", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-namespace", + }, + }, + text: testValues, + want: templatedValues, + }, + { + name: "no values to template", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-namespace", + }, + }, + text: templatedValues, + want: templatedValues, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := templateValues(tt.cluster, tt.text) + assert.NoError(t, err) + assert.Equal(t, tt.want, got) + }) + } +} + +const ( + mapKey = "deployment.yaml" + + testDeployment = `--- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cluster-autoscaler-tmpl-clustername-tmpl + namespace: tmpl-clusternamespace-tmpl + spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/instance: cluster-autoscaler-tmpl-clustername-tmpl + app.kubernetes.io/name: clusterapi-cluster-autoscaler + template: + metadata: + labels: + app.kubernetes.io/instance: cluster-autoscaler-tmpl-clustername-tmpl + app.kubernetes.io/name: clusterapi-cluster-autoscaler + spec: + containers: + - command: + - ./cluster-autoscaler + - --cloud-provider=clusterapi + - --namespace=tmpl-clusternamespace-tmpl + - --node-group-auto-discovery=clusterapi:clusterName=tmpl-clustername-tmpl + - --kubeconfig=/cluster/kubeconfig + - --clusterapi-cloud-config-authoritative + - --enforce-node-group-min-size=true + - --logtostderr=true + - --stderrthreshold=info + - --v=4 + name: clusterapi-cluster-autoscaler + volumeMounts: + - mountPath: /cluster + name: kubeconfig + readOnly: true + serviceAccountName: cluster-autoscaler-tmpl-clustername-tmpl + volumes: + - 
name: kubeconfig + secret: + items: + - key: value + path: kubeconfig + secretName: tmpl-clustername-tmpl-kubeconfig` + + templatedDeployment = `--- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: cluster-autoscaler-test-cluster + namespace: test-namespace + spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/instance: cluster-autoscaler-test-cluster + app.kubernetes.io/name: clusterapi-cluster-autoscaler + template: + metadata: + labels: + app.kubernetes.io/instance: cluster-autoscaler-test-cluster + app.kubernetes.io/name: clusterapi-cluster-autoscaler + spec: + containers: + - command: + - ./cluster-autoscaler + - --cloud-provider=clusterapi + - --namespace=test-namespace + - --node-group-auto-discovery=clusterapi:clusterName=test-cluster + - --kubeconfig=/cluster/kubeconfig + - --clusterapi-cloud-config-authoritative + - --enforce-node-group-min-size=true + - --logtostderr=true + - --stderrthreshold=info + - --v=4 + name: clusterapi-cluster-autoscaler + volumeMounts: + - mountPath: /cluster + name: kubeconfig + readOnly: true + serviceAccountName: cluster-autoscaler-test-cluster + volumes: + - name: kubeconfig + secret: + items: + - key: value + path: kubeconfig + secretName: test-cluster-kubeconfig` + + testValues = ` --- + fullnameOverride: "cluster-autoscaler-{{ .Cluster.Name }}" + + cloudProvider: clusterapi + + # Always trigger a scale-out if replicas are less than the min. + extraArgs: + enforce-node-group-min-size: true + + # Enable it to run in a 1 Node cluster. + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + + # Limit a single cluster-autoscaler Deployment to a single Cluster. + autoDiscovery: + clusterName: "{{ .Cluster.Name }}" + # The controller failed with an RBAC error trying to watch CAPI objects at the cluster scope without this. 
+ labels: + - namespace: "{{ .Cluster.Namespace }}" + + clusterAPIConfigMapsNamespace: "{{ .Cluster.Namespace }}" + # For workload clusters it is not possible to use the in-cluster client. + # To simplify the configuration, use the admin kubeconfig generated by CAPI for all clusters. + clusterAPIMode: kubeconfig-incluster + clusterAPIWorkloadKubeconfigPath: /cluster/kubeconfig + extraVolumeSecrets: + kubeconfig: + name: "{{ .Cluster.Name }}-kubeconfig" + mountPath: /cluster + readOnly: true + items: + - key: value + path: kubeconfig + rbac: + # Create a Role instead of a ClusterRoles to update cluster-api objects + clusterScoped: false` + + templatedValues = ` --- + fullnameOverride: "cluster-autoscaler-test-cluster" + + cloudProvider: clusterapi + + # Always trigger a scale-out if replicas are less than the min. + extraArgs: + enforce-node-group-min-size: true + + # Enable it to run in a 1 Node cluster. + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + + # Limit a single cluster-autoscaler Deployment to a single Cluster. + autoDiscovery: + clusterName: "test-cluster" + # The controller failed with an RBAC error trying to watch CAPI objects at the cluster scope without this. + labels: + - namespace: "test-namespace" + + clusterAPIConfigMapsNamespace: "test-namespace" + # For workload clusters it is not possible to use the in-cluster client. + # To simplify the configuration, use the admin kubeconfig generated by CAPI for all clusters. + clusterAPIMode: kubeconfig-incluster + clusterAPIWorkloadKubeconfigPath: /cluster/kubeconfig + extraVolumeSecrets: + kubeconfig: + name: "test-cluster-kubeconfig" + mountPath: /cluster + readOnly: true + items: + - key: value + path: kubeconfig + rbac: + # Create a Role instead of a ClusterRoles to update cluster-api objects + clusterScoped: false` +)