
Commit 8c4adbd
Author: Naadir Jeewa

Support Kubernetes 1.18

Adds and modifies e2e tests to test the scenario.
Makefile updated for local testing.

Signed-off-by: Naadir Jeewa <[email protected]>

1 parent: 7cda00b

17 files changed: +440 −170 lines

Diff for: .dockerignore (+2)

@@ -15,3 +15,5 @@ Tiltfile
 test/infrastructure/docker/e2e/logs/**
 **/config/**/*.yaml
 _artifacts
+Makefile
+**/Makefile
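Excluding the root Makefile and any nested Makefiles from the Docker build context is presumably housekeeping for the local-testing workflow introduced below: Makefile edits no longer enter, or needlessly invalidate, the provider image builds.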

Diff for: Makefile (+26)

@@ -86,6 +86,8 @@ TAG ?= dev
 ARCH ?= amd64
 ALL_ARCH = amd64 arm arm64 ppc64le s390x
 
+GINKGO_NODES ?= 1
+
 # Allow overriding the imagePullPolicy
 PULL_POLICY ?= Always
 
@@ -132,6 +134,22 @@ test-capd-e2e: ## Rebuild the docker provider and run the capd-e2e tests
 	$(MAKE) -C test/infrastructure/docker docker-build REGISTRY=gcr.io/k8s-staging-capi-docker
 	$(MAKE) -C test/infrastructure/docker run-e2e
 
+E2E_FLAGS := REGISTRY=gcr.io/k8s-staging-cluster-api TAG=dev ARCH=amd64 PULL_POLICY=IfNotPresent GINKGO_FOCUS="$(GINKGO_FOCUS)" GINKGO_NODES=$(GINKGO_NODES) GINKGO_NOCOLOR=false E2E_CONF_FILE=config/docker-dev.yaml ARTIFACTS=$(abspath _artifacts) SKIP_RESOURCE_CLEANUP=false USE_EXISTING_CLUSTER=false
+
+.PHONY: e2e
+e2e: ## Run clusterctl e2e
+	mkdir -p _artifacts
+	$(E2E_FLAGS) $(MAKE) docker-build
+	$(E2E_FLAGS) $(MAKE) -C test/infrastructure/docker docker-build
+	docker pull quay.io/jetstack/cert-manager-cainjector:v0.11.0
+	docker pull quay.io/jetstack/cert-manager-webhook:v0.11.0
+	docker pull quay.io/jetstack/cert-manager-controller:v0.11.0
+	$(E2E_FLAGS) $(MAKE) -C test/e2e run
+
+.PHONY: clean-e2e
+clean-e2e: clean-manifests ## Clean-up after clusterctl e2e
+	rm -rf _artifacts
+
 ## --------------------------------------
 ## Binaries
 ## --------------------------------------
@@ -201,6 +219,9 @@ lint-full: $(GOLANGCI_LINT) ## Run slower linters to detect possible issues
 	cd $(E2E_FRAMEWORK_DIR); $(GOLANGCI_LINT) run -v --fast=false
 	cd $(CAPD_DIR); $(GOLANGCI_LINT) run -v --fast=false
 
+apidiff: $(GO_APIDIFF) ## Check for API differences
+	$(GO_APIDIFF) $(shell git rev-parse origin/master) --print-compatible
+
 ## --------------------------------------
 ## Generate / Manifests
 ## --------------------------------------
@@ -529,6 +550,11 @@ clean-book: ## Remove all generated GitBook files
 clean-bindata: ## Remove bindata generated folder
 	rm -rf $(GOBINDATA_CLUSTERCTL_DIR)/manifest
 
+.PHONY: clean-manifests ## Reset manifests in config directories back to master
+clean-manifests:
+	@read -p "WARNING: This will reset all config directories to local master. Press [ENTER] to continue."
+	git checkout master config bootstrap/kubeadm/config controlplane/kubeadm/config test/infrastructure/docker/config
+
 .PHONY: format-tiltfile
 format-tiltfile: ## Format Tiltfile
 	./hack/verify-starlark.sh fix
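The new `e2e` target bundles a full local run: it builds the core and CAPD images under the same `E2E_FLAGS` overrides, pre-pulls the cert-manager images, then hands off to `test/e2e`. Since `GINKGO_FOCUS` and `GINKGO_NODES` are plain Make variables, a focused parallel run should be possible with an invocation like `make e2e GINKGO_FOCUS="KCP" GINKGO_NODES=2` (shown as an illustration, not taken from the commit). Note that `clean-e2e` depends on `clean-manifests`, which deliberately prompts before resetting the config directories via `git checkout master`.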

Diff for: controlplane/kubeadm/controllers/controller.go (+5)

@@ -254,6 +254,11 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 		return ctrl.Result{Requeue: true}, nil
 	}
 
+	// Ensure kubeadm role bindings for v1.18+
+	if err := workloadCluster.AllowBootstrapTokensToGetNodes(ctx); err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to set role and role binding for kubeadm")
+	}
+
 	// Update kube-proxy daemonset.
 	if err := workloadCluster.UpdateKubeProxyImageInfo(ctx, kcp); err != nil {
 		logger.Error(err, "failed to update kube-proxy daemonset")
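Calling AllowBootstrapTokensToGetNodes on every reconcile, rather than only during upgrades, makes the RBAC setup self-healing: the EnsureResource helper introduced later in this commit treats an already-existing ClusterRole or ClusterRoleBinding as a no-op, so repeated reconciles are cheap and existing clusters converge without a version change.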

Diff for: controlplane/kubeadm/controllers/fakes_test.go (+4)

@@ -78,6 +78,10 @@ func (f fakeWorkloadCluster) ClusterStatus(_ context.Context) (internal.ClusterS
 	return f.Status, nil
 }
 
+func (f fakeWorkloadCluster) AllowBootstrapTokensToGetNodes(ctx context.Context) error {
+	return nil
+}
+
 func (f fakeWorkloadCluster) ReconcileKubeletRBACRole(ctx context.Context, version semver.Version) error {
 	return nil
 }
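The fake needs only a no-op implementation: adding AllowBootstrapTokensToGetNodes to the WorkloadCluster interface (see the workload_cluster.go diff below) would otherwise leave fakeWorkloadCluster failing to compile in the controller tests.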

Diff for: controlplane/kubeadm/controllers/upgrade.go (+6)

@@ -57,6 +57,12 @@ func (r *KubeadmControlPlaneReconciler) upgradeControlPlane(
 		return ctrl.Result{}, errors.Wrap(err, "failed to reconcile the remote kubelet RBAC binding")
 	}
 
+	// Ensure kubeadm cluster role & bindings for v1.18+
+	// as per https://github.com/kubernetes/kubernetes/commit/b117a928a6c3f650931bdac02a41fca6680548c4
+	if err := workloadCluster.AllowBootstrapTokensToGetNodes(ctx); err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to set role and role binding for kubeadm")
+	}
+
 	if err := workloadCluster.UpdateKubernetesVersionInKubeadmConfigMap(ctx, parsedVersion); err != nil {
 		return ctrl.Result{}, errors.Wrap(err, "failed to update the kubernetes version in the kubeadm config map")
 	}
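With this, the RBAC objects are ensured on both paths: the main reconcile loop covers new and already-running clusters, while upgradeControlPlane covers the upgrade window in which v1.18+ kubeadm joins first require bootstrap tokens to be able to get nodes, per the upstream kubeadm commit linked in the comment.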

Diff for: controlplane/kubeadm/internal/workload_cluster.go (+1 −78)

@@ -31,7 +31,6 @@ import (
 	"github.com/pkg/errors"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
-	rbacv1 "k8s.io/api/rbac/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
@@ -73,6 +72,7 @@ type WorkloadCluster interface {
 	RemoveMachineFromKubeadmConfigMap(ctx context.Context, machine *clusterv1.Machine) error
 	RemoveNodeFromKubeadmConfigMap(ctx context.Context, nodeName string) error
 	ForwardEtcdLeadership(ctx context.Context, machine *clusterv1.Machine, leaderCandidate *clusterv1.Machine) error
+	AllowBootstrapTokensToGetNodes(ctx context.Context) error
 
 	// State recovery tasks.
 	ReconcileEtcdMembers(ctx context.Context) error
@@ -251,83 +251,6 @@ func (w *Workload) RemoveNodeFromKubeadmConfigMap(ctx context.Context, name stri
 	return nil
 }
 
-// ReconcileKubeletRBACBinding will create a RoleBinding for the new kubelet version during upgrades.
-// If the role binding already exists this function is a no-op.
-func (w *Workload) ReconcileKubeletRBACBinding(ctx context.Context, version semver.Version) error {
-	roleName := fmt.Sprintf("kubeadm:kubelet-config-%d.%d", version.Major, version.Minor)
-	roleBinding := &rbacv1.RoleBinding{}
-	err := w.Client.Get(ctx, ctrlclient.ObjectKey{Name: roleName, Namespace: metav1.NamespaceSystem}, roleBinding)
-	if err != nil && !apierrors.IsNotFound(err) {
-		return errors.Wrapf(err, "failed to determine if kubelet config rbac role binding %q already exists", roleName)
-	} else if err == nil {
-		// The required role binding already exists, nothing left to do
-		return nil
-	}
-
-	newRoleBinding := &rbacv1.RoleBinding{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      roleName,
-			Namespace: metav1.NamespaceSystem,
-		},
-		Subjects: []rbacv1.Subject{
-			{
-				APIGroup: "rbac.authorization.k8s.io",
-				Kind:     "Group",
-				Name:     "system:nodes",
-			},
-			{
-				APIGroup: "rbac.authorization.k8s.io",
-				Kind:     "Group",
-				Name:     "system:bootstrappers:kubeadm:default-node-token",
-			},
-		},
-		RoleRef: rbacv1.RoleRef{
-			APIGroup: "rbac.authorization.k8s.io",
-			Kind:     "Role",
-			Name:     roleName,
-		},
-	}
-	if err := w.Client.Create(ctx, newRoleBinding); err != nil && !apierrors.IsAlreadyExists(err) {
-		return errors.Wrapf(err, "failed to create kubelet rbac role binding %q", roleName)
-	}
-
-	return nil
-}
-
-// ReconcileKubeletRBACRole will create a Role for the new kubelet version during upgrades.
-// If the role already exists this function is a no-op.
-func (w *Workload) ReconcileKubeletRBACRole(ctx context.Context, version semver.Version) error {
-	majorMinor := fmt.Sprintf("%d.%d", version.Major, version.Minor)
-	roleName := fmt.Sprintf("kubeadm:kubelet-config-%s", majorMinor)
-	role := &rbacv1.Role{}
-	if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Name: roleName, Namespace: metav1.NamespaceSystem}, role); err != nil && !apierrors.IsNotFound(err) {
-		return errors.Wrapf(err, "failed to determine if kubelet config rbac role %q already exists", roleName)
-	} else if err == nil {
-		// The required role already exists, nothing left to do
-		return nil
-	}
-
-	newRole := &rbacv1.Role{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      roleName,
-			Namespace: metav1.NamespaceSystem,
-		},
-		Rules: []rbacv1.PolicyRule{
-			{
-				Verbs:         []string{"get"},
-				APIGroups:     []string{""},
-				Resources:     []string{"configmaps"},
-				ResourceNames: []string{fmt.Sprintf("kubelet-config-%s", majorMinor)},
-			},
-		},
-	}
-	if err := w.Client.Create(ctx, newRole); err != nil && !apierrors.IsAlreadyExists(err) {
-		return errors.Wrapf(err, "failed to create kubelet rbac role %q", roleName)
-	}
-
-	return nil
-}
-
 // ClusterStatus holds stats information about the cluster.
 type ClusterStatus struct {
 	// Nodes are a total count of nodes
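The −78 lines here are not lost functionality: ReconcileKubeletRBACRole and ReconcileKubeletRBACBinding move out of this file and are reimplemented on top of the generic EnsureResource helper in the new file below, which is also why the rbacv1 import can be dropped.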

Diff for: controlplane/kubeadm/internal/workload_cluster_etcd.go (+3)

@@ -249,6 +249,9 @@ func (w *Workload) ForwardEtcdLeadership(ctx context.Context, machine *clusterv1
 	if leaderCandidate == nil {
 		return errors.New("leader candidate cannot be nil")
 	}
+	if leaderCandidate.Status.NodeRef == nil {
+		return errors.New("leader has no node reference")
+	}
 
 	nodes, err := w.getControlPlaneNodes(ctx)
 	if err != nil {
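The extra guard turns a nil NodeRef on the leader candidate into an explicit error, instead of letting ForwardEtcdLeadership proceed with a Machine that cannot yet be matched to a control plane node.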

Diff for: controlplane/kubeadm/internal/workload_cluster_etcd_test.go (+8)

@@ -339,6 +339,14 @@ func TestForwardEtcdLeadership(t *testing.T) {
 			leaderCandidate: nil,
 			expectErr:       true,
 		},
+		{
+			name:    "returns an error if the leader candidate's noderef is nil",
+			machine: defaultMachine(),
+			leaderCandidate: defaultMachine(func(m *clusterv1.Machine) {
+				m.Status.NodeRef = nil
+			}),
+			expectErr: true,
+		},
 		{
 			name:    "returns an error if it can't retrieve the list of control plane nodes",
 			machine: defaultMachine(),
Diff for: new file in controlplane/kubeadm/internal (+163)

@@ -0,0 +1,163 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/blang/semver"
+	"github.com/pkg/errors"
+	rbac "k8s.io/api/rbac/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const (
+	// NodeBootstrapTokenAuthGroup specifies which group a Node Bootstrap Token should be authenticated in
+	NodeBootstrapTokenAuthGroup = "system:bootstrappers:kubeadm:default-node-token"
+
+	// GetNodesClusterRoleName defines the name of the ClusterRole and ClusterRoleBinding to get nodes
+	GetNodesClusterRoleName = "kubeadm:get-nodes"
+
+	// NodesGroup defines the well-known group for all nodes.
+	NodesGroup = "system:nodes"
+
+	// KubeletConfigMapRolePrefix defines base kubelet configuration ConfigMap role prefix.
+	KubeletConfigMapRolePrefix = "kubeadm:"
+
+	// KubeletConfigMapName defines base kubelet configuration ConfigMap name.
+	KubeletConfigMapName = "kubelet-config-%d.%d"
+)
+
+// EnsureResource creates a resource if the target resource doesn't exist. If the resource exists already, this function will ignore the resource instead.
+func (w *Workload) EnsureResource(ctx context.Context, obj runtime.Object) error {
+	testObj := obj.DeepCopyObject()
+	key, err := ctrlclient.ObjectKeyFromObject(obj)
+	if err != nil {
+		return errors.Wrap(err, "unable to derive key for resource")
+	}
+	if err := w.Client.Get(ctx, key, testObj); err != nil && !apierrors.IsNotFound(err) {
+		return errors.Wrapf(err, "failed to determine if resource %s/%s already exists", key.Namespace, key.Name)
+	} else if err == nil {
+		// If object already exists, nothing left to do
+		return nil
+	}
+	if err := w.Client.Create(ctx, obj); err != nil {
+		if !apierrors.IsAlreadyExists(err) {
+			return errors.Wrapf(err, "unable to create resource %s/%s on workload cluster", key.Namespace, key.Name)
+		}
+	}
+	return nil
+}
+
+// AllowBootstrapTokensToGetNodes creates RBAC rules to allow Node Bootstrap Tokens to list nodes
+func (w *Workload) AllowBootstrapTokensToGetNodes(ctx context.Context) error {
+	if err := w.EnsureResource(ctx, &rbac.ClusterRole{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      GetNodesClusterRoleName,
+			Namespace: metav1.NamespaceSystem,
+		},
+		Rules: []rbac.PolicyRule{
+			{
+				Verbs:     []string{"get"},
+				APIGroups: []string{""},
+				Resources: []string{"nodes"},
+			},
+		},
+	}); err != nil {
+		return err
+	}
+
+	return w.EnsureResource(ctx, &rbac.ClusterRoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      GetNodesClusterRoleName,
+			Namespace: metav1.NamespaceSystem,
+		},
+		RoleRef: rbac.RoleRef{
+			APIGroup: rbac.GroupName,
+			Kind:     "ClusterRole",
+			Name:     GetNodesClusterRoleName,
+		},
+		Subjects: []rbac.Subject{
+			{
+				Kind: rbac.GroupKind,
+				Name: NodeBootstrapTokenAuthGroup,
+			},
+		},
+	})
+}
+
+func generateKubeletConfigName(version semver.Version) string {
+	return fmt.Sprintf(KubeletConfigMapName, version.Major, version.Minor)
+}
+
+func generateKubeletConfigRoleName(version semver.Version) string {
+	return KubeletConfigMapRolePrefix + generateKubeletConfigName(version)
+}
+
+// ReconcileKubeletRBACBinding will create a RoleBinding for the new kubelet version during upgrades.
+// If the role binding already exists this function is a no-op.
+func (w *Workload) ReconcileKubeletRBACBinding(ctx context.Context, version semver.Version) error {
+	roleName := generateKubeletConfigRoleName(version)
+	return w.EnsureResource(ctx, &rbac.RoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: metav1.NamespaceSystem,
+			Name:      roleName,
+		},
+		Subjects: []rbacv1.Subject{
+			{
+				APIGroup: rbac.GroupName,
+				Kind:     rbac.GroupKind,
+				Name:     NodesGroup,
+			},
+			{
+				APIGroup: rbac.GroupName,
+				Kind:     rbac.GroupKind,
+				Name:     NodeBootstrapTokenAuthGroup,
+			},
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: rbac.GroupName,
+			Kind:     "Role",
+			Name:     roleName,
+		},
+	})
+
+}
+
+// ReconcileKubeletRBACRole will create a Role for the new kubelet version during upgrades.
+// If the role already exists this function is a no-op.
+func (w *Workload) ReconcileKubeletRBACRole(ctx context.Context, version semver.Version) error {
+	return w.EnsureResource(ctx, &rbacv1.Role{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      generateKubeletConfigRoleName(version),
+			Namespace: metav1.NamespaceSystem,
+		},
+		Rules: []rbacv1.PolicyRule{
+			{
+				Verbs:         []string{"get"},
+				APIGroups:     []string{""},
+				Resources:     []string{"configmaps"},
+				ResourceNames: []string{generateKubeletConfigName(version)},
+			},
+		},
+	})
+}
