diff --git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go index 11d1321f8..001e4d8c4 100644 --- a/api/v1alpha1/common_types.go +++ b/api/v1alpha1/common_types.go @@ -5,6 +5,8 @@ package v1alpha1 const ( APIServerPort = 6443 + + VirtualIPProviderKubeVIP = "KubeVIP" ) // ObjectMeta is metadata that all persisted resources must have, which includes all objects @@ -37,4 +39,16 @@ type ControlPlaneEndpointSpec struct { // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 Port int32 `json:"port"` + + // Configuration for the virtual IP provider. + // +optional + VirtualIPSpec *ControlPlaneVirtualIPSpec `json:"virtualIP,omitempty"` +} + +type ControlPlaneVirtualIPSpec struct { + // Virtual IP provider to deploy. + // +kubebuilder:validation:Enum=KubeVIP + // +kubebuilder:default=KubeVIP + // +optional + Provider string `json:"provider,omitempty"` } diff --git a/api/v1alpha1/crds/caren.nutanix.com_nutanixclusterconfigs.yaml b/api/v1alpha1/crds/caren.nutanix.com_nutanixclusterconfigs.yaml index 1d7b4c9aa..632aa9193 100644 --- a/api/v1alpha1/crds/caren.nutanix.com_nutanixclusterconfigs.yaml +++ b/api/v1alpha1/crds/caren.nutanix.com_nutanixclusterconfigs.yaml @@ -476,6 +476,16 @@ spec: maximum: 65535 minimum: 1 type: integer + virtualIP: + description: Configuration for the virtual IP provider. + properties: + provider: + default: KubeVIP + description: Virtual IP provider to deploy. + enum: + - KubeVIP + type: string + type: object required: - host - port diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index d447db035..9986f1357 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -469,6 +469,11 @@ func (in *ClusterAutoscaler) DeepCopy() *ClusterAutoscaler { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ControlPlaneEndpointSpec) DeepCopyInto(out *ControlPlaneEndpointSpec) { *out = *in + if in.VirtualIPSpec != nil { + in, out := &in.VirtualIPSpec, &out.VirtualIPSpec + *out = new(ControlPlaneVirtualIPSpec) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneEndpointSpec. @@ -481,6 +486,21 @@ func (in *ControlPlaneEndpointSpec) DeepCopy() *ControlPlaneEndpointSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneVirtualIPSpec) DeepCopyInto(out *ControlPlaneVirtualIPSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneVirtualIPSpec. +func (in *ControlPlaneVirtualIPSpec) DeepCopy() *ControlPlaneVirtualIPSpec { + if in == nil { + return nil + } + out := new(ControlPlaneVirtualIPSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DefaultStorage) DeepCopyInto(out *DefaultStorage) { *out = *in @@ -1021,7 +1041,7 @@ func (in *NutanixResourceIdentifier) DeepCopy() *NutanixResourceIdentifier { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NutanixSpec) DeepCopyInto(out *NutanixSpec) { *out = *in - out.ControlPlaneEndpoint = in.ControlPlaneEndpoint + in.ControlPlaneEndpoint.DeepCopyInto(&out.ControlPlaneEndpoint) in.PrismCentralEndpoint.DeepCopyInto(&out.PrismCentralEndpoint) } diff --git a/charts/cluster-api-runtime-extensions-nutanix/README.md b/charts/cluster-api-runtime-extensions-nutanix/README.md index c5f12c14f..4c63dd9bd 100644 --- a/charts/cluster-api-runtime-extensions-nutanix/README.md +++ b/charts/cluster-api-runtime-extensions-nutanix/README.md @@ -62,6 +62,8 @@ A Helm chart for cluster-api-runtime-extensions-nutanix | hooks.nfd.crsStrategy.defaultInstallationConfigMap.name | string | `"node-feature-discovery"` | | | hooks.nfd.helmAddonStrategy.defaultValueTemplateConfigMap.create | bool | `true` | | | hooks.nfd.helmAddonStrategy.defaultValueTemplateConfigMap.name | string | `"default-nfd-helm-values-template"` | | +| hooks.virtualIP.kubeVip.defaultTemplateConfigMap.create | bool | `true` | | +| hooks.virtualIP.kubeVip.defaultTemplateConfigMap.name | string | `"default-kube-vip-template"` | | | image.pullPolicy | string | `"IfNotPresent"` | | | image.repository | string | `"ghcr.io/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix"` | | | image.tag | string | `""` | | diff --git a/charts/cluster-api-runtime-extensions-nutanix/defaultclusterclasses/nutanix-cluster-class.yaml b/charts/cluster-api-runtime-extensions-nutanix/defaultclusterclasses/nutanix-cluster-class.yaml index 9071330d4..6bc9d2f6d 100644 --- a/charts/cluster-api-runtime-extensions-nutanix/defaultclusterclasses/nutanix-cluster-class.yaml +++ b/charts/cluster-api-runtime-extensions-nutanix/defaultclusterclasses/nutanix-cluster-class.yaml @@ -132,72 +132,7 @@ spec: scheduler: extraArgs: tls-cipher-suites: 
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 - files: - - content: | - apiVersion: v1 - kind: Pod - metadata: - name: kube-vip - namespace: kube-system - spec: - containers: - - name: kube-vip - image: ghcr.io/kube-vip/kube-vip:v0.6.4 - imagePullPolicy: IfNotPresent - args: - - manager - env: - - name: vip_arp - value: "true" - - name: address - value: "control_plane_endpoint_ip" - - name: port - value: "control_plane_endpoint_port" - - name: vip_cidr - value: "32" - - name: cp_enable - value: "true" - - name: cp_namespace - value: kube-system - - name: vip_ddns - value: "false" - - name: vip_leaderelection - value: "true" - - name: vip_leaseduration - value: "15" - - name: vip_renewdeadline - value: "10" - - name: vip_retryperiod - value: "2" - - name: svc_enable - value: "false" - - name: lb_enable - value: "false" - - name: enableServicesElection - value: "false" - securityContext: - capabilities: - add: - - NET_ADMIN - - SYS_TIME - - NET_RAW - volumeMounts: - - mountPath: /etc/kubernetes/admin.conf - name: kubeconfig - resources: {} - hostNetwork: true - hostAliases: - - hostnames: - - kubernetes - ip: 127.0.0.1 - volumes: - - name: kubeconfig - hostPath: - type: FileOrCreate - path: /etc/kubernetes/admin.conf - status: {} - owner: root:root - path: /etc/kubernetes/manifests/kube-vip.yaml + files: [] initConfiguration: nodeRegistration: kubeletExtraArgs: @@ -212,14 +147,6 @@ spec: tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 postKubeadmCommands: - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc - - | - 
KUBERNETES_VERSION_NO_V=${KUBERNETES_VERSION#v} - VERSION_TO_COMPARE=1.29.0 - if [ "$(printf '%s\n' "$KUBERNETES_VERSION_NO_V" "$VERSION_TO_COMPARE" | sort -V | head -n1)" != "$KUBERNETES_VERSION_NO_V" ]; then - if [ -f /run/kubeadm/kubeadm.yaml ]; then - sed -i 's#path: /etc/kubernetes/super-admin.conf#path: /etc/kubernetes/admin.conf#' /etc/kubernetes/manifests/kube-vip.yaml; - fi - fi - echo "after kubeadm call" > /var/log/postkubeadm.log preKubeadmCommands: - echo "before kubeadm call" > /var/log/prekubeadm.log @@ -228,14 +155,6 @@ spec: - echo "127.0.0.1 localhost" >>/etc/hosts - echo "127.0.0.1 kubernetes" >>/etc/hosts - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts - - | - KUBERNETES_VERSION_NO_V=${KUBERNETES_VERSION#v} - VERSION_TO_COMPARE=1.29.0 - if [ "$(printf '%s\n' "$KUBERNETES_VERSION_NO_V" "$VERSION_TO_COMPARE" | sort -V | head -n1)" != "$KUBERNETES_VERSION_NO_V" ]; then - if [ -f /run/kubeadm/kubeadm.yaml ]; then - sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' /etc/kubernetes/manifests/kube-vip.yaml; - fi - fi useExperimentalRetryJoin: true verbosity: 10 --- diff --git a/charts/cluster-api-runtime-extensions-nutanix/templates/virtual-ip/kube-vip/manifests/kube-vip-configmap.yaml b/charts/cluster-api-runtime-extensions-nutanix/templates/virtual-ip/kube-vip/manifests/kube-vip-configmap.yaml new file mode 100644 index 000000000..e7c871df3 --- /dev/null +++ b/charts/cluster-api-runtime-extensions-nutanix/templates/virtual-ip/kube-vip/manifests/kube-vip-configmap.yaml @@ -0,0 +1,75 @@ +# Copyright 2023 D2iQ, Inc. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +#================================================================= +# DO NOT EDIT THIS FILE +# IT HAS BEEN GENERATED BY /hack/addons/update-kube-vip-manifests.sh +#================================================================= +{{- if .Values.hooks.virtualIP.kubeVip.defaultTemplateConfigMap.create }} +apiVersion: v1 +data: + kube-vip.yaml: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: '{{ `{{ .ControlPlaneEndpoint.Port }}` }}' + - name: vip_nodename + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: vip_cidr + value: "32" + - name: dns_mode + value: first + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_leaderelection + value: "true" + - name: vip_leasename + value: plndr-cp-lock + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: '{{ `{{ .ControlPlaneEndpoint.Host }}` }}' + image: ghcr.io/kube-vip/kube-vip:v0.8.0 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig +kind: ConfigMap +metadata: + creationTimestamp: null + name: '{{ .Values.hooks.virtualIP.kubeVip.defaultTemplateConfigMap.name }}' +{{- end -}} diff --git a/charts/cluster-api-runtime-extensions-nutanix/values.yaml b/charts/cluster-api-runtime-extensions-nutanix/values.yaml index 3a8fd4999..dfc59c36c 100644 --- a/charts/cluster-api-runtime-extensions-nutanix/values.yaml +++ b/charts/cluster-api-runtime-extensions-nutanix/values.yaml @@ -72,6 +72,12 @@ 
hooks: create: true name: default-cluster-autoscaler-helm-values-template + virtualIP: + kubeVip: + defaultTemplateConfigMap: + create: true + name: default-kube-vip-template + helmAddonsConfigMap: default-helm-addons-config deployDefaultClusterClasses: true diff --git a/docs/content/customization/nutanix/control-plane-endpoint.md b/docs/content/customization/nutanix/control-plane-endpoint.md index 5c3bdbb75..f28c15441 100644 --- a/docs/content/customization/nutanix/control-plane-endpoint.md +++ b/docs/content/customization/nutanix/control-plane-endpoint.md @@ -22,6 +22,7 @@ spec: controlPlaneEndpoint: host: x.x.x.x port: 6443 + virtualIP: {} ``` Applying this configuration will result in the following value being set: @@ -36,3 +37,34 @@ spec: host: x.x.x.x port: 6443 ``` + +- `KubeadmControlPlaneTemplate` + +```yaml + spec: + kubeadmConfigSpec: + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-vip + namespace: kube-system + spec: + ... + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + permissions: "0600" + postKubeadmCommands: + # Only added for clusters version >=v1.29.0 + - |- + if [ -f /run/kubeadm/kubeadm.yaml ]; then + sed -i 's#path: /etc/kubernetes/super-admin.conf#path: ... + fi + preKubeadmCommands: + # Only added for clusters version >=v1.29.0 + - |- + if [ -f /run/kubeadm/kubeadm.yaml ]; then + sed -i 's#path: /etc/kubernetes/admin.conf#path: ... 
+ fi +``` diff --git a/examples/capi-quick-start/nutanix-cluster-calico-crs.yaml b/examples/capi-quick-start/nutanix-cluster-calico-crs.yaml index e16c167b1..da302a77c 100644 --- a/examples/capi-quick-start/nutanix-cluster-calico-crs.yaml +++ b/examples/capi-quick-start/nutanix-cluster-calico-crs.yaml @@ -112,6 +112,8 @@ spec: controlPlaneEndpoint: host: ${CONTROL_PLANE_ENDPOINT_IP} port: 6443 + virtualIP: + provider: KubeVIP prismCentralEndpoint: credentials: name: ${CLUSTER_NAME}-pc-creds diff --git a/examples/capi-quick-start/nutanix-cluster-calico-helm-addon.yaml b/examples/capi-quick-start/nutanix-cluster-calico-helm-addon.yaml index fe6c69dbc..be7a6ab65 100644 --- a/examples/capi-quick-start/nutanix-cluster-calico-helm-addon.yaml +++ b/examples/capi-quick-start/nutanix-cluster-calico-helm-addon.yaml @@ -112,6 +112,8 @@ spec: controlPlaneEndpoint: host: ${CONTROL_PLANE_ENDPOINT_IP} port: 6443 + virtualIP: + provider: KubeVIP prismCentralEndpoint: credentials: name: ${CLUSTER_NAME}-pc-creds diff --git a/examples/capi-quick-start/nutanix-cluster-cilium-crs.yaml b/examples/capi-quick-start/nutanix-cluster-cilium-crs.yaml index 1cc60ccf6..26851f032 100644 --- a/examples/capi-quick-start/nutanix-cluster-cilium-crs.yaml +++ b/examples/capi-quick-start/nutanix-cluster-cilium-crs.yaml @@ -112,6 +112,8 @@ spec: controlPlaneEndpoint: host: ${CONTROL_PLANE_ENDPOINT_IP} port: 6443 + virtualIP: + provider: KubeVIP prismCentralEndpoint: credentials: name: ${CLUSTER_NAME}-pc-creds diff --git a/examples/capi-quick-start/nutanix-cluster-cilium-helm-addon.yaml b/examples/capi-quick-start/nutanix-cluster-cilium-helm-addon.yaml index d111adffa..f50018bf1 100644 --- a/examples/capi-quick-start/nutanix-cluster-cilium-helm-addon.yaml +++ b/examples/capi-quick-start/nutanix-cluster-cilium-helm-addon.yaml @@ -112,6 +112,8 @@ spec: controlPlaneEndpoint: host: ${CONTROL_PLANE_ENDPOINT_IP} port: 6443 + virtualIP: + provider: KubeVIP prismCentralEndpoint: credentials: name: 
${CLUSTER_NAME}-pc-creds diff --git a/hack/addons/update-kube-vip-manifests.sh b/hack/addons/update-kube-vip-manifests.sh new file mode 100755 index 000000000..6646ba1f0 --- /dev/null +++ b/hack/addons/update-kube-vip-manifests.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +set -euo pipefail +IFS=$'\n\t' + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +readonly SCRIPT_DIR + +# shellcheck source=hack/common.sh +source "${SCRIPT_DIR}/../common.sh" + +if [ -z "${KUBE_VIP_VERSION:-}" ]; then + echo "Missing argument: KUBE_VIP_VERSION" + exit 1 +fi + +ASSETS_DIR="$(mktemp -d -p "${TMPDIR:-/tmp}")" +readonly ASSETS_DIR +trap_add "rm -rf ${ASSETS_DIR}" EXIT + +readonly FILE_NAME="kube-vip.yaml" + +docker container run --rm ghcr.io/kube-vip/kube-vip:"${KUBE_VIP_VERSION}" \ + manifest pod \ + --arp \ + --address='{{ `{{ .ControlPlaneEndpoint.Host }}` }}' \ + --port=-99999 \ + --controlplane \ + --leaderElection \ + --leaseDuration=15 \ + --leaseRenewDuration=10 \ + --leaseRetry=2 \ + --prometheusHTTPServer='' | + gojq --yaml-input --yaml-output 'del(.metadata.creationTimestamp, .status) | .spec.containers[].imagePullPolicy |= "IfNotPresent"' | + sed "s/\"-99999\"/'{{ \`{{ .ControlPlaneEndpoint.Port }}\` }}'/" >"${ASSETS_DIR}/${FILE_NAME}" + +kubectl create configmap "{{ .Values.hooks.virtualIP.kubeVip.defaultTemplateConfigMap.name }}" --dry-run=client --output yaml \ + --from-file "${ASSETS_DIR}/${FILE_NAME}" \ + >"${ASSETS_DIR}/kube-vip-configmap.yaml" + +# add warning not to edit file directly +cat <"${GIT_REPO_ROOT}/charts/cluster-api-runtime-extensions-nutanix/templates/virtual-ip/kube-vip/manifests/kube-vip-configmap.yaml" +$(cat "${GIT_REPO_ROOT}/hack/license-header.yaml.txt") + +#================================================================= +# DO NOT EDIT THIS FILE +# IT HAS BEEN GENERATED BY /hack/addons/update-kube-vip-manifests.sh +#================================================================= +{{- if 
.Values.hooks.virtualIP.kubeVip.defaultTemplateConfigMap.create }} +$(cat "${ASSETS_DIR}/kube-vip-configmap.yaml") +{{- end -}} +EOF diff --git a/hack/examples/bases/nutanix/clusterclass/kustomization.yaml.tmpl b/hack/examples/bases/nutanix/clusterclass/kustomization.yaml.tmpl index e837f403a..32333961b 100644 --- a/hack/examples/bases/nutanix/clusterclass/kustomization.yaml.tmpl +++ b/hack/examples/bases/nutanix/clusterclass/kustomization.yaml.tmpl @@ -30,12 +30,36 @@ patches: - op: "remove" path: "/spec/variables" +# Delete the certSANs from the template +# They will be added by the handler. - target: kind: KubeadmControlPlaneTemplate patch: |- - op: "remove" path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/certSANs" +# Delete the kube-vip file. +# Will be templated and added back in the handler if enabled. +- target: + kind: KubeadmControlPlaneTemplate + patch: |- + - op: test + path: /spec/template/spec/kubeadmConfigSpec/files/0/path + value: "/etc/kubernetes/manifests/kube-vip.yaml" + - op: "remove" + path: "/spec/template/spec/kubeadmConfigSpec/files/0" + +# Delete the kube-vip related pre and postKubeadmCommands. +# Will be added back in the handler if enabled. +# If the index of these changes upstream this will need to change, but will show up as a git diff. +- target: + kind: KubeadmControlPlaneTemplate + patch: |- + - op: "remove" + path: "/spec/template/spec/kubeadmConfigSpec/preKubeadmCommands/6" + - op: "remove" + path: "/spec/template/spec/kubeadmConfigSpec/postKubeadmCommands/1" + # FIXME: Debug why some of the patches are needed. # When the handler runs, it sends back multiple patches for individual fields. # But CAPI fails applying them because of missing value. 
diff --git a/hack/examples/patches/nutanix/initialize-variables.yaml b/hack/examples/patches/nutanix/initialize-variables.yaml index 25f970e4e..ccc760002 100644 --- a/hack/examples/patches/nutanix/initialize-variables.yaml +++ b/hack/examples/patches/nutanix/initialize-variables.yaml @@ -10,6 +10,9 @@ controlPlaneEndpoint: host: ${CONTROL_PLANE_ENDPOINT_IP} port: 6443 + virtualIP: { + provider: KubeVIP + } prismCentralEndpoint: url: https://${NUTANIX_ENDPOINT}:9440 insecure: ${NUTANIX_INSECURE} diff --git a/make/addons.mk b/make/addons.mk index 1fa50d21d..ac87fc612 100644 --- a/make/addons.mk +++ b/make/addons.mk @@ -17,8 +17,10 @@ export AWS_CCM_CHART_VERSION_128 := 0.0.8 export NUTANIX_CCM_CHART_VERSION := 0.3.3 +export KUBE_VIP_VERSION := v0.8.0 + .PHONY: addons.sync -addons.sync: $(addprefix update-addon.,calico cilium nfd cluster-autoscaler aws-ebs-csi aws-ccm.127 aws-ccm.128) +addons.sync: $(addprefix update-addon.,calico cilium nfd cluster-autoscaler aws-ebs-csi aws-ccm.127 aws-ccm.128 kube-vip) .PHONY: update-addon.calico update-addon.calico: ; $(info $(M) updating calico manifests) @@ -44,6 +46,10 @@ update-addon.aws-ebs-csi: ; $(info $(M) updating aws ebs csi manifests) update-addon.aws-ccm.%: ; $(info $(M) updating aws ccm $* manifests) ./hack/addons/update-aws-ccm.sh $(AWS_CCM_VERSION_$*) $(AWS_CCM_CHART_VERSION_$*) +.PHONY: update-addon.kube-vip +update-addon.kube-vip: ; $(info $(M) updating kube-vip manifests) + ./hack/addons/update-kube-vip-manifests.sh + .PHONY: generate-helm-configmap generate-helm-configmap: go run hack/tools/helm-cm/main.go -kustomize-directory="./hack/addons/kustomize" -output-file="./charts/cluster-api-runtime-extensions-nutanix/templates/helm-config.yaml" diff --git a/pkg/handlers/generic/mutation/controlplanevirtualip/inject.go b/pkg/handlers/generic/mutation/controlplanevirtualip/inject.go new file mode 100644 index 000000000..abb354b71 --- /dev/null +++ b/pkg/handlers/generic/mutation/controlplanevirtualip/inject.go @@ -0,0 
+1,195 @@ +// Copyright 2023 D2iQ, Inc. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package controlplanevirtualip + +import ( + "context" + "fmt" + + "github.com/spf13/pflag" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/v1alpha1" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers/mutation" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/patches" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/patches/selectors" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/variables" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation/controlplanevirtualip/providers" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/options" +) + +const ( + // VariableName is the external patch variable name. 
+ VariableName = "controlPlaneEndpoint" +) + +type Config struct { + *options.GlobalOptions + + defaultKubeVIPConfigMapName string +} + +func (c *Config) AddFlags(prefix string, flags *pflag.FlagSet) { + flags.StringVar( + &c.defaultKubeVIPConfigMapName, + prefix+".default-kube-vip-template-configmap-name", + "default-kube-vip-template", + "default ConfigMap name that holds the kube-vip template used for the control-plane virtual IP", + ) +} + +type ControlPlaneVirtualIP struct { + client client.Reader + config *Config + + variableName string + variableFieldPath []string +} + +// NewControlPlaneVirtualIP is different from other generic handlers. +// It requires variableName and variableFieldPath to be passed from another provider specific handler. +// The code is here to be shared across different providers. +func NewControlPlaneVirtualIP( + cl client.Reader, + config *Config, + variableName string, + variableFieldPath ...string, +) *ControlPlaneVirtualIP { + return &ControlPlaneVirtualIP{ + client: cl, + config: config, + variableName: variableName, + variableFieldPath: variableFieldPath, + } +} + +func (h *ControlPlaneVirtualIP) Mutate( + ctx context.Context, + obj *unstructured.Unstructured, + vars map[string]apiextensionsv1.JSON, + holderRef runtimehooksv1.HolderReference, + _ client.ObjectKey, + clusterGetter mutation.ClusterGetter, +) error { + log := ctrl.LoggerFrom(ctx).WithValues( + "holderRef", holderRef, + ) + + controlPlaneEndpointVar, err := variables.Get[v1alpha1.ControlPlaneEndpointSpec]( + vars, + h.variableName, + h.variableFieldPath..., + ) + if err != nil { + if variables.IsNotFoundError(err) { + log.V(5).Info("ControlPlaneEndpoint variable not defined") + return nil + } + return err + } + + log = log.WithValues( + "variableName", + h.variableName, + "variableFieldPath", + h.variableFieldPath, + "variableValue", + controlPlaneEndpointVar, + ) + + if controlPlaneEndpointVar.VirtualIPSpec == nil { + log.V(5).Info("ControlPlane VirtualIP not set") + 
return nil + } + + cluster, err := clusterGetter(ctx) + if err != nil { + log.Error( + err, + "failed to get cluster from ControlPlaneVirtualIP mutation handler", + ) + return err + } + + var virtualIPProvider providers.Provider + // only kube-vip is supported, but more providers can be added in the future + if controlPlaneEndpointVar.VirtualIPSpec.Provider == v1alpha1.VirtualIPProviderKubeVIP { + virtualIPProvider = providers.NewKubeVIPFromConfigMapProvider( + h.client, + h.config.defaultKubeVIPConfigMapName, + h.config.DefaultsNamespace(), + ) + } + + return patches.MutateIfApplicable( + obj, + vars, + &holderRef, + selectors.ControlPlane(), + log, + func(obj *controlplanev1.KubeadmControlPlaneTemplate) error { + virtualIPProviderFile, getFileErr := virtualIPProvider.GetFile( + ctx, + controlPlaneEndpointVar, + ) + if getFileErr != nil { + return getFileErr + } + + log.WithValues( + "patchedObjectKind", obj.GetObjectKind().GroupVersionKind().String(), + "patchedObjectName", client.ObjectKeyFromObject(obj), + ).Info(fmt.Sprintf( + "adding %s static Pod file to control plane kubeadm config spec", + virtualIPProvider.Name(), + )) + obj.Spec.Template.Spec.KubeadmConfigSpec.Files = append( + obj.Spec.Template.Spec.KubeadmConfigSpec.Files, + *virtualIPProviderFile, + ) + + preKubeadmCommands, postKubeadmCommands, getCommandsErr := virtualIPProvider.GetCommands( + cluster, + ) + if getCommandsErr != nil { + return getCommandsErr + } + + if len(preKubeadmCommands) > 0 { + log.WithValues( + "patchedObjectKind", obj.GetObjectKind().GroupVersionKind().String(), + "patchedObjectName", client.ObjectKeyFromObject(obj), + ).Info(fmt.Sprintf( + "adding %s preKubeadmCommands to control plane kubeadm config spec", + virtualIPProvider.Name(), + )) + obj.Spec.Template.Spec.KubeadmConfigSpec.PreKubeadmCommands = append( + obj.Spec.Template.Spec.KubeadmConfigSpec.PreKubeadmCommands, + preKubeadmCommands..., + ) + } + + if len(postKubeadmCommands) > 0 { + log.WithValues( + 
"patchedObjectKind", obj.GetObjectKind().GroupVersionKind().String(), + "patchedObjectName", client.ObjectKeyFromObject(obj), + ).Info(fmt.Sprintf( + "adding %s postKubeadmCommands to control plane kubeadm config spec", + virtualIPProvider.Name(), + )) + obj.Spec.Template.Spec.KubeadmConfigSpec.PostKubeadmCommands = append( + obj.Spec.Template.Spec.KubeadmConfigSpec.PostKubeadmCommands, + postKubeadmCommands..., + ) + } + + return nil + }, + ) +} diff --git a/pkg/handlers/generic/mutation/controlplanevirtualip/inject_test.go b/pkg/handlers/generic/mutation/controlplanevirtualip/inject_test.go new file mode 100644 index 000000000..092b6ecd2 --- /dev/null +++ b/pkg/handlers/generic/mutation/controlplanevirtualip/inject_test.go @@ -0,0 +1,264 @@ +// Copyright 2023 D2iQ, Inc. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package controlplanevirtualip + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/v1alpha1" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers/mutation" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/testutils/capitest" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/testutils/capitest/request" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/clusterconfig" + virtuialipproviders 
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation/controlplanevirtualip/providers" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/options" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/test/helpers" +) + +func TestControlPlaneEndpointPatch(t *testing.T) { + gomega.RegisterFailHandler(Fail) + RunSpecs(t, "ControlPlane virtual IP suite") +} + +var _ = Describe("Generate ControlPlane virtual IP patches", func() { + testDefs := []struct { + capitest.PatchTestDef + virtualIPTemplate string + cluster *clusterv1.Cluster + }{ + { + PatchTestDef: capitest.PatchTestDef{ + Name: "host and port should be templated in a new file and no pre/post commands", + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + clusterconfig.MetaVariableName, + v1alpha1.ControlPlaneEndpointSpec{ + Host: "10.20.100.10", + Port: 6443, + VirtualIPSpec: &v1alpha1.ControlPlaneVirtualIPSpec{ + Provider: v1alpha1.VirtualIPProviderKubeVIP, + }, + }, + VariableName, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem(""), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElements( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "content", + gomega.ContainSubstring("value: \"10.20.100.10\""), + ), + gomega.HaveKeyWithValue( + "content", + gomega.ContainSubstring("value: \"6443\""), + ), + gomega.HaveKey("owner"), + gomega.HaveKeyWithValue( + "path", + gomega.ContainSubstring("kube-vip"), + ), + gomega.HaveKey("permissions"), + ), + ), + }, + }, + UnexpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/preKubeadmCommands", + ValueMatcher: gomega.ContainElements( + virtuialipproviders.KubeVIPPreKubeadmCommands, + ), + }, + { + Operation: "add", + 
Path: "/spec/template/spec/kubeadmConfigSpec/postKubeadmCommands", + ValueMatcher: gomega.ContainElements( + virtuialipproviders.KubeVIPPostKubeadmCommands, + ), + }, + }, + }, + virtualIPTemplate: validKubeVIPTemplate, + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: request.ClusterName, + Namespace: metav1.NamespaceDefault, + }, + Spec: clusterv1.ClusterSpec{ + Topology: &clusterv1.Topology{ + Version: "v1.28.100", + }, + }, + }, + }, + { + PatchTestDef: capitest.PatchTestDef{ + Name: "host and port should be templated in a new file with pre/post commands", + Vars: []runtimehooksv1.Variable{ + capitest.VariableWithValue( + clusterconfig.MetaVariableName, + v1alpha1.ControlPlaneEndpointSpec{ + Host: "10.20.100.10", + Port: 6443, + VirtualIPSpec: &v1alpha1.ControlPlaneVirtualIPSpec{ + Provider: v1alpha1.VirtualIPProviderKubeVIP, + }, + }, + VariableName, + ), + }, + RequestItem: request.NewKubeadmControlPlaneTemplateRequestItem( + "", + ), + ExpectedPatchMatchers: []capitest.JSONPatchMatcher{ + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/files", + ValueMatcher: gomega.ContainElements( + gomega.SatisfyAll( + gomega.HaveKeyWithValue( + "content", + gomega.ContainSubstring("value: \"10.20.100.10\""), + ), + gomega.HaveKeyWithValue( + "content", + gomega.ContainSubstring("value: \"6443\""), + ), + gomega.HaveKey("owner"), + gomega.HaveKeyWithValue( + "path", + gomega.ContainSubstring("kube-vip"), + ), + gomega.HaveKey("permissions"), + ), + ), + }, + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/preKubeadmCommands", + ValueMatcher: gomega.ContainElements( + virtuialipproviders.KubeVIPPreKubeadmCommands, + ), + }, + { + Operation: "add", + Path: "/spec/template/spec/kubeadmConfigSpec/postKubeadmCommands", + ValueMatcher: gomega.ContainElements( + virtuialipproviders.KubeVIPPostKubeadmCommands, + ), + }, + }, + }, + virtualIPTemplate: validKubeVIPTemplate, + cluster: &clusterv1.Cluster{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: request.ClusterName, + Namespace: metav1.NamespaceDefault, + }, + Spec: clusterv1.ClusterSpec{ + Topology: &clusterv1.Topology{ + Version: "v1.29.0", + }, + }, + }, + }, + } + + // create test node for each case + for idx := range testDefs { + tt := testDefs[idx] + It(tt.Name, func() { + clientScheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(clientScheme)) + utilruntime.Must(clusterv1.AddToScheme(clientScheme)) + // Always initialize the testEnv variable in the closure. + // This will allow ginkgo to initialize testEnv variable during test execution time. + testEnv := helpers.TestEnv + // use direct client instead of controller client. This will allow the patch handler to read k8s object + // that are written by the tests. + // Test cases writes credentials secret that the mutator handler reads. + // Using direct client will enable reading it immediately. + client, err := testEnv.GetK8sClientWithScheme(clientScheme) + gomega.Expect(err).To(gomega.BeNil()) + // setup a test ConfigMap to be used by the handler + cm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: corev1.NamespaceDefault, + GenerateName: "virtualip-test-", + }, + Data: map[string]string{ + "data": tt.virtualIPTemplate, + }, + } + err = client.Create(context.Background(), cm) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + if tt.cluster != nil { + err = client.Create(context.Background(), tt.cluster) + gomega.Expect(err).To(gomega.BeNil()) + defer func() { + err = client.Delete(context.Background(), tt.cluster) + gomega.Expect(err).To(gomega.BeNil()) + }() + } + + cfg := &Config{ + GlobalOptions: options.NewGlobalOptions(), + defaultKubeVIPConfigMapName: cm.Name, + } + patchGenerator := func() mutation.GeneratePatches { + return mutation.NewMetaGeneratePatchesHandler( + "", + client, + NewControlPlaneVirtualIP(client, 
cfg, clusterconfig.MetaVariableName, VariableName), + ).(mutation.GeneratePatches) + } + + capitest.AssertGeneratePatches(GinkgoT(), patchGenerator, &tt.PatchTestDef) + }) + } +}) + +var validKubeVIPTemplate = ` +apiVersion: v1 +kind: Pod +metadata: + name: kube-vip + namespace: kube-system +spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v1.1.1 + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "{{ .ControlPlaneEndpoint.Host }}" + - name: port + value: "{{ .ControlPlaneEndpoint.Port }}" +` diff --git a/pkg/handlers/generic/mutation/controlplanevirtualip/providers/kubevip.go b/pkg/handlers/generic/mutation/controlplanevirtualip/providers/kubevip.go new file mode 100644 index 000000000..1a41a7704 --- /dev/null +++ b/pkg/handlers/generic/mutation/controlplanevirtualip/providers/kubevip.go @@ -0,0 +1,157 @@ +// Copyright 2023 D2iQ, Inc. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package providers + +import ( + "context" + "fmt" + + "github.com/blang/semver/v4" + corev1 "k8s.io/api/core/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/v1alpha1" +) + +var ( + //nolint:lll // for readability prefer to keep the long line + KubeVIPPreKubeadmCommands = []string{`if [ -f /run/kubeadm/kubeadm.yaml ]; then + sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' /etc/kubernetes/manifests/kube-vip.yaml; +fi`} + //nolint:lll // for readability prefer to keep the long line + KubeVIPPostKubeadmCommands = []string{`if [ -f /run/kubeadm/kubeadm.yaml ]; then + sed -i 's#path: /etc/kubernetes/super-admin.conf#path: /etc/kubernetes/admin.conf#' /etc/kubernetes/manifests/kube-vip.yaml; +fi`} +) + +type 
kubeVIPFromConfigMapProvider struct { + client client.Reader + + configMapKey client.ObjectKey +} + +func NewKubeVIPFromConfigMapProvider( + cl client.Reader, + name, namespace string, +) *kubeVIPFromConfigMapProvider { + return &kubeVIPFromConfigMapProvider{ + client: cl, + configMapKey: client.ObjectKey{ + Name: name, + Namespace: namespace, + }, + } +} + +func (p *kubeVIPFromConfigMapProvider) Name() string { + return "kube-vip" +} + +// GetFile reads the kube-vip template from the ConfigMap +// and returns the content as a File, templating the required variables. +func (p *kubeVIPFromConfigMapProvider) GetFile( + ctx context.Context, + spec v1alpha1.ControlPlaneEndpointSpec, +) (*bootstrapv1.File, error) { + data, err := getTemplateFromConfigMap(ctx, p.client, p.configMapKey) + if err != nil { + return nil, fmt.Errorf("failed getting template data: %w", err) + } + + kubeVIPStaticPod, err := templateValues(spec, data) + if err != nil { + return nil, fmt.Errorf("failed templating static Pod: %w", err) + } + + return &bootstrapv1.File{ + Content: kubeVIPStaticPod, + Owner: kubeVIPFileOwner, + Path: kubeVIPFilePath, + Permissions: kubeVIPFilePermissions, + }, nil +} + +// +//nolint:gocritic // No need for named return values +func (p *kubeVIPFromConfigMapProvider) GetCommands( + cluster *clusterv1.Cluster, +) ([]string, []string, error) { + // The kube-vip static Pod uses admin.conf on the host to connect to the API server. + // But, starting with Kubernetes 1.29, admin.conf first gets created with no RBAC permissions. + // At the same time, 'kubeadm init' command waits for the API server to be reachable on the kube-vip IP. + // And since the kube-vip Pod is crashlooping with a permissions error, 'kubeadm init' fails. + // To work around this: + // 1. return a preKubeadmCommand to change the kube-vip Pod to use the new super-admin.conf file. + // 2.
return a postKubeadmCommand to change the kube-vip Pod back to use admin.conf, + // after kubeadm has assigned it the necessary RBAC permissions. + // + // See https://github.com/kube-vip/kube-vip/issues/684 + needCommands, err := needHackCommands(cluster) + if err != nil { + return nil, nil, fmt.Errorf("failed to determine if kube-vip commands are needed: %w", err) + } + if !needCommands { + return nil, nil, nil + } + + return KubeVIPPreKubeadmCommands, KubeVIPPostKubeadmCommands, nil +} + +type multipleKeysError struct { + configMapKey client.ObjectKey +} + +func (e multipleKeysError) Error() string { + return fmt.Sprintf( + "found multiple keys in ConfigMap %q, when only 1 is expected", + e.configMapKey, + ) +} + +type emptyValuesError struct { + configMapKey client.ObjectKey +} + +func (e emptyValuesError) Error() string { + return fmt.Sprintf( + "could not find any keys with non-empty values in ConfigMap %q", + e.configMapKey, + ) +} + +func getTemplateFromConfigMap( + ctx context.Context, + cl client.Reader, + configMapKey client.ObjectKey, +) (string, error) { + configMap := &corev1.ConfigMap{} + err := cl.Get(ctx, configMapKey, configMap) + if err != nil { + return "", fmt.Errorf("failed to get template ConfigMap %q: %w", configMapKey, err) + } + + if len(configMap.Data) > 1 { + return "", multipleKeysError{configMapKey: configMapKey} + } + + // at this point there should only be 1 key in the ConfigMap, return on the first non-empty value + for _, data := range configMap.Data { + if data != "" { + return data, nil + } + } + + return "", emptyValuesError{configMapKey: configMapKey} +} + +func needHackCommands(cluster *clusterv1.Cluster) (bool, error) { + version, err := semver.ParseTolerant(cluster.Spec.Topology.Version) + if err != nil { + return false, fmt.Errorf("failed to parse version from cluster: %w", err) + } + + return version.Minor >= 29, nil +} diff --git a/pkg/handlers/generic/mutation/controlplanevirtualip/providers/kubevip_test.go
b/pkg/handlers/generic/mutation/controlplanevirtualip/providers/kubevip_test.go new file mode 100644 index 000000000..9ddf97e85 --- /dev/null +++ b/pkg/handlers/generic/mutation/controlplanevirtualip/providers/kubevip_test.go @@ -0,0 +1,192 @@ +// Copyright 2023 D2iQ, Inc. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package providers + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/v1alpha1" +) + +func Test_GetFile(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + controlPlaneEndpointSpec v1alpha1.ControlPlaneEndpointSpec + configMap *corev1.ConfigMap + expectedContent string + expectedErr error + }{ + { + name: "should return templated data with both host and port", + controlPlaneEndpointSpec: v1alpha1.ControlPlaneEndpointSpec{ + Host: "10.20.100.10", + Port: 6443, + }, + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-kube-vip-template", + Namespace: "default", + }, + Data: map[string]string{ + "data": validKubeVIPTemplate, + }, + }, + expectedContent: expectedKubeVIPPod, + }, + } + + for idx := range tests { + tt := tests[idx] // Capture range variable + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + fakeClient := fake.NewClientBuilder().WithObjects(tt.configMap).Build() + + provider := kubeVIPFromConfigMapProvider{ + client: fakeClient, + configMapKey: client.ObjectKeyFromObject(tt.configMap), + } + + file, err := provider.GetFile(context.TODO(), tt.controlPlaneEndpointSpec) + require.Equal(t, tt.expectedErr, err) + assert.Equal(t, tt.expectedContent, file.Content) + assert.NotEmpty(t, file.Path) + assert.NotEmpty(t, file.Owner) + 
assert.NotEmpty(t, file.Permissions) + }) + } +} + +func Test_getTemplateFromConfigMap(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + configMap *corev1.ConfigMap + expectedData string + expectedErr error + }{ + { + name: "should return data from the only key", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-kube-vip-template", + Namespace: "default", + }, + Data: map[string]string{ + "data": "kube-vip-template", + }, + }, + expectedData: "kube-vip-template", + }, + { + name: "should fail with multipleKeysError", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-kube-vip-template", + Namespace: "default", + }, + Data: map[string]string{ + "data": "kube-vip-template", + "unexpected-key": "unexpected-value", + }, + }, + expectedErr: multipleKeysError{ + configMapKey: client.ObjectKey{ + Name: "default-kube-vip-template", + Namespace: "default", + }, + }, + }, + { + name: "should fail with emptyValuesError", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-kube-vip-template", + Namespace: "default", + }, + Data: map[string]string{ + "data": "", + }, + }, + expectedErr: emptyValuesError{ + configMapKey: client.ObjectKey{ + Name: "default-kube-vip-template", + Namespace: "default", + }, + }, + }, + } + + for idx := range tests { + tt := tests[idx] // Capture range variable + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + fakeClient := fake.NewClientBuilder().WithObjects(tt.configMap).Build() + + data, err := getTemplateFromConfigMap( + context.TODO(), + fakeClient, + client.ObjectKeyFromObject(tt.configMap), + ) + require.Equal(t, tt.expectedErr, err) + assert.Equal(t, tt.expectedData, data) + }) + } +} + +var ( + validKubeVIPTemplate = ` +apiVersion: v1 +kind: Pod +metadata: + name: kube-vip + namespace: kube-system +spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v1.1.1 + imagePullPolicy: IfNotPresent + args: + - 
manager + env: + - name: vip_arp + value: "true" + - name: address + value: "{{ .ControlPlaneEndpoint.Host }}" + - name: port + value: "{{ .ControlPlaneEndpoint.Port }}" +` + + expectedKubeVIPPod = ` +apiVersion: v1 +kind: Pod +metadata: + name: kube-vip + namespace: kube-system +spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v1.1.1 + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "10.20.100.10" + - name: port + value: "6443" +` +) diff --git a/pkg/handlers/generic/mutation/controlplanevirtualip/providers/providers.go b/pkg/handlers/generic/mutation/controlplanevirtualip/providers/providers.go new file mode 100644 index 000000000..4582af176 --- /dev/null +++ b/pkg/handlers/generic/mutation/controlplanevirtualip/providers/providers.go @@ -0,0 +1,55 @@ +// Copyright 2023 D2iQ, Inc. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package providers + +import ( + "bytes" + "context" + "fmt" + "text/template" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/v1alpha1" +) + +const ( + kubeVIPFileOwner = "root:root" + kubeVIPFilePath = "/etc/kubernetes/manifests/kube-vip.yaml" + kubeVIPFilePermissions = "0600" +) + +// Provider is an interface for getting the kube-vip static Pod as a file. 
+type Provider interface { + Name() string + GetFile(ctx context.Context, spec v1alpha1.ControlPlaneEndpointSpec) (*bootstrapv1.File, error) + GetCommands(cluster *clusterv1.Cluster) ([]string, []string, error) +} + +func templateValues( + controlPlaneEndpoint v1alpha1.ControlPlaneEndpointSpec, + text string, +) (string, error) { + kubeVIPTemplate, err := template.New("").Parse(text) + if err != nil { + return "", fmt.Errorf("failed to parse template: %w", err) + } + + type input struct { + ControlPlaneEndpoint v1alpha1.ControlPlaneEndpointSpec + } + + templateInput := input{ + ControlPlaneEndpoint: controlPlaneEndpoint, + } + + var b bytes.Buffer + err = kubeVIPTemplate.Execute(&b, templateInput) + if err != nil { + return "", fmt.Errorf("failed setting API endpoint configuration in template: %w", err) + } + + return b.String(), nil +} diff --git a/pkg/handlers/nutanix/handlers.go b/pkg/handlers/nutanix/handlers.go index 73825f376..c6d3f1f55 100644 --- a/pkg/handlers/nutanix/handlers.go +++ b/pkg/handlers/nutanix/handlers.go @@ -8,27 +8,35 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation/controlplanevirtualip" nutanixclusterconfig "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/nutanix/clusterconfig" nutanixmutation "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/nutanix/mutation" nutanixworkerconfig "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/nutanix/workerconfig" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/options" ) -type Handlers struct{} +type Handlers struct { + // kubeVIPConfig holds the configuration for the kube-vip 
control-plane virtual IP. + controlPlaneVirtualIPConfig *controlplanevirtualip.Config +} func New( - _ *options.GlobalOptions, + globalOptions *options.GlobalOptions, ) *Handlers { - return &Handlers{} + return &Handlers{ + controlPlaneVirtualIPConfig: &controlplanevirtualip.Config{GlobalOptions: globalOptions}, + } } func (h *Handlers) AllHandlers(mgr manager.Manager) []handlers.Named { return []handlers.Named{ nutanixclusterconfig.NewVariable(), nutanixworkerconfig.NewVariable(), - nutanixmutation.MetaPatchHandler(mgr), + nutanixmutation.MetaPatchHandler(mgr, h.controlPlaneVirtualIPConfig), nutanixmutation.MetaWorkerPatchHandler(mgr), } } -func (h *Handlers) AddFlags(_ *pflag.FlagSet) {} +func (h *Handlers) AddFlags(flagSet *pflag.FlagSet) { + h.controlPlaneVirtualIPConfig.AddFlags("nutanix", flagSet) +} diff --git a/pkg/handlers/nutanix/mutation/controlplaneendpoint/inject.go b/pkg/handlers/nutanix/mutation/controlplaneendpoint/inject.go index 447058336..faee91aea 100644 --- a/pkg/handlers/nutanix/mutation/controlplaneendpoint/inject.go +++ b/pkg/handlers/nutanix/mutation/controlplaneendpoint/inject.go @@ -5,12 +5,10 @@ package controlplaneendpoint import ( "context" - "fmt" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -86,33 +84,6 @@ func (h *nutanixControlPlaneEndpoint) Mutate( controlPlaneEndpointVar, ) - if err := patches.MutateIfApplicable( - obj, - vars, - &holderRef, - selectors.ControlPlane(), - log, - func(obj *controlplanev1.KubeadmControlPlaneTemplate) error { - commands := []string{ - fmt.Sprintf("sed -i 's/control_plane_endpoint_ip/%s/g' /etc/kubernetes/manifests/kube-vip.yaml", - 
controlPlaneEndpointVar.Host), - fmt.Sprintf("sed -i 's/control_plane_endpoint_port/%d/g' /etc/kubernetes/manifests/kube-vip.yaml", - controlPlaneEndpointVar.Port), - } - log.WithValues( - "patchedObjectKind", obj.GetObjectKind().GroupVersionKind().String(), - "patchedObjectName", client.ObjectKeyFromObject(obj), - ).Info("adding PreKubeadmCommands to control plane kubeadm config spec") - obj.Spec.Template.Spec.KubeadmConfigSpec.PreKubeadmCommands = append( - obj.Spec.Template.Spec.KubeadmConfigSpec.PreKubeadmCommands, - commands..., - ) - return nil - }, - ); err != nil { - return err - } - return patches.MutateIfApplicable( obj, vars, diff --git a/pkg/handlers/nutanix/mutation/controlplanevirtualip/inject.go b/pkg/handlers/nutanix/mutation/controlplanevirtualip/inject.go new file mode 100644 index 000000000..06df67dd2 --- /dev/null +++ b/pkg/handlers/nutanix/mutation/controlplanevirtualip/inject.go @@ -0,0 +1,25 @@ +// Copyright 2023 D2iQ, Inc. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package controlplanevirtualip + +import ( + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/v1alpha1" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/clusterconfig" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation/controlplanevirtualip" +) + +func NewPatch( + cl ctrlclient.Client, + cfg *controlplanevirtualip.Config, +) *controlplanevirtualip.ControlPlaneVirtualIP { + return controlplanevirtualip.NewControlPlaneVirtualIP( + cl, + cfg, + clusterconfig.MetaVariableName, + v1alpha1.NutanixVariableName, + controlplanevirtualip.VariableName, + ) +} diff --git a/pkg/handlers/nutanix/mutation/metapatch_handler.go b/pkg/handlers/nutanix/mutation/metapatch_handler.go index a0d26af09..d05d3edb8 100644 --- 
a/pkg/handlers/nutanix/mutation/metapatch_handler.go +++ b/pkg/handlers/nutanix/mutation/metapatch_handler.go @@ -9,16 +9,19 @@ import ( "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers/mutation" genericmutation "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation" + "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/generic/mutation/controlplanevirtualip" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/nutanix/mutation/controlplaneendpoint" + nutanixcontrolplanevirtualip "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/nutanix/mutation/controlplanevirtualip" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/nutanix/mutation/machinedetails" "github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/nutanix/mutation/prismcentralendpoint" ) // MetaPatchHandler returns a meta patch handler for mutating CAPX clusters. 
-func MetaPatchHandler(mgr manager.Manager) handlers.Named { +func MetaPatchHandler(mgr manager.Manager, cfg *controlplanevirtualip.Config) handlers.Named { patchHandlers := append( []mutation.MetaMutator{ controlplaneendpoint.NewPatch(), + nutanixcontrolplanevirtualip.NewPatch(mgr.GetClient(), cfg), prismcentralendpoint.NewPatch(), machinedetails.NewControlPlanePatch(), }, diff --git a/pkg/handlers/options/global.go b/pkg/handlers/options/global.go index 6f1de11a9..0ec8be183 100644 --- a/pkg/handlers/options/global.go +++ b/pkg/handlers/options/global.go @@ -9,7 +9,9 @@ import ( ) func NewGlobalOptions() *GlobalOptions { - return &GlobalOptions{} + return &GlobalOptions{ + defaultsNamespace: corev1.NamespaceDefault, + } } type GlobalOptions struct { @@ -21,7 +23,7 @@ func (o *GlobalOptions) AddFlags(flags *pflag.FlagSet) { flags.StringVar( &o.defaultsNamespace, "defaults-namespace", - corev1.NamespaceDefault, + o.defaultsNamespace, "namespace for default configurations", ) flags.StringVar(