Commit 6b482f5

[installer] Add EKS installer test
1 parent ba85e5b commit 6b482f5

File tree

8 files changed (+469, -10 lines)


.werft/eks-installer-tests.yaml

Lines changed: 82 additions & 0 deletions
@@ -0,0 +1,82 @@
# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/eks-installer-tests.yaml -a debug=true`
pod:
  serviceAccount: werft
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: dev/workload
                operator: In
                values:
                  - "builds"
  securityContext:
    runAsUser: 0
  volumes:
    - name: sh-playground-sa-perm
      secret:
        secretName: sh-playground-sa-perm
    - name: sh-playground-dns-perm
      secret:
        secretName: sh-playground-dns-perm
  containers:
    - name: nightly-test
      image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:cw-werft-cred.0
      workingDir: /workspace
      imagePullPolicy: Always
      volumeMounts:
        - name: sh-playground-sa-perm
          mountPath: /mnt/secrets/sh-playground-sa-perm
        - name: sh-playground-dns-perm # this sa is used for the DNS management
          mountPath: /mnt/secrets/sh-playground-dns-perm
      env:
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              name: aws-credentials
              key: aws-access-key
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              name: aws-credentials
              key: aws-secret-key
        - name: AWS_REGION
          valueFrom:
            secretKeyRef:
              name: aws-credentials
              key: aws-region
        - name: WERFT_HOST
          value: "werft.werft.svc.cluster.local:7777"
        - name: GOOGLE_APPLICATION_CREDENTIALS
          value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
        - name: WERFT_K8S_NAMESPACE
          value: "werft"
        - name: WERFT_K8S_LABEL
          value: "component=werft"
        - name: TF_VAR_sa_creds
          value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
        - name: TF_VAR_dns_sa_creds
          value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
        - name: NODENAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
      command:
        - bash
        - -c
        - |
          sleep 1
          set -Eeuo pipefail

          sudo chown -R gitpod:gitpod /workspace
          sudo apt update && apt install gettext-base

          export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)

          (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
          printf '{{ toJson . }}' > context.json

          npx ts-node .werft/installer-tests.ts "STANDARD_EKS_TEST"
# The bit below makes this a cron job
# plugins:
#   cron: "15 3 * * *"

.werft/installer-tests.ts

Lines changed: 9 additions & 0 deletions
@@ -39,6 +39,11 @@ const INFRA_PHASES: { [name: string]: InfraConfig } = {
         makeTarget: "aks-standard-cluster",
         description: "Creating an aks cluster(azure)",
     },
+    STANDARD_EKS_CLUSTER: {
+        phase: "create-std-eks-cluster",
+        makeTarget: "eks-standard-cluster",
+        description: "Creating an EKS cluster with 1 nodepool each for workspace and server",
+    },
     CERT_MANAGER: {
         phase: "setup-cert-manager",
         makeTarget: "cert-manager",
@@ -174,6 +179,10 @@ const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
             "DESTROY",
         ],
     },
+    STANDARD_EKS_TEST: {
+        DESCRIPTION: "Create an EKS cluster",
+        PHASES: ["STANDARD_EKS_CLUSTER", "DESTROY"],
+    },
     STANDARD_K3S_PREVIEW: {
         DESCRIPTION: "Create a SH Gitpod preview environment on a K3s cluster, created on a GCP instance",
         PHASES: [
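
The two additions above plug into lookup tables that the rest of installer-tests.ts consumes: INFRA_PHASES maps a phase name to a make target, and TEST_CONFIGURATIONS maps a test name to an ordered list of phases. The driver that walks these tables is not part of this diff; the sketch below only illustrates how such a configuration is typically consumed, with assumed InfraConfig/TestConfig shapes, an assumed make-target invocation, and a placeholder DESTROY phase (the real file may differ).

    import { execSync } from "child_process";

    // Shapes implied by the diff above; illustrative only, not copied from the real file.
    interface InfraConfig {
        phase: string;
        makeTarget: string;
        description: string;
    }
    interface TestConfig {
        DESCRIPTION: string;
        PHASES: string[];
    }

    const INFRA_PHASES: { [name: string]: InfraConfig } = {
        STANDARD_EKS_CLUSTER: {
            phase: "create-std-eks-cluster",
            makeTarget: "eks-standard-cluster",
            description: "Creating an EKS cluster with 1 nodepool each for workspace and server",
        },
        // The real DESTROY phase is defined elsewhere in installer-tests.ts; this entry is a placeholder.
        DESTROY: { phase: "destroy", makeTarget: "cleanup", description: "Tear the test infrastructure down" },
    };

    const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
        STANDARD_EKS_TEST: { DESCRIPTION: "Create an EKS cluster", PHASES: ["STANDARD_EKS_CLUSTER", "DESTROY"] },
    };

    // Hypothetical driver: resolve the test named on the CLI and run its phases in order,
    // mirroring the job's `npx ts-node .werft/installer-tests.ts "STANDARD_EKS_TEST"` invocation.
    function run(testName: string): void {
        const config = TEST_CONFIGURATIONS[testName];
        if (!config) {
            throw new Error(`unknown test configuration: ${testName}`);
        }
        console.log(config.DESCRIPTION);
        for (const phaseName of config.PHASES) {
            const { phase, makeTarget, description } = INFRA_PHASES[phaseName];
            console.log(`${phase}: ${description}`);
            execSync(`make ${makeTarget}`, { stdio: "inherit" }); // each phase maps to a make target
        }
    }

    run(process.argv[2] ?? "STANDARD_EKS_TEST");
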
Lines changed: 232 additions & 0 deletions
@@ -0,0 +1,232 @@
terraform {
  required_providers {
    kubectl = {
      source  = "gavinbunney/kubectl"
      version = ">= 1.7.0"
    }
    aws = {
      version = " ~> 3.0"
      source  = "registry.terraform.io/hashicorp/aws"
    }
  }
}

resource "aws_iam_role" "eks_cluster" {
  depends_on = [data.aws_subnet_ids.subnet_ids]
  name       = "iam-${var.cluster_name}"

  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "eks.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}

resource "aws_iam_role_policy_attachment" "AmazonEKSClusterPolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
  role       = aws_iam_role.eks_cluster.name
}

resource "aws_iam_role_policy_attachment" "AmazonEKSServicePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
  role       = aws_iam_role.eks_cluster.name
}

resource "aws_eks_cluster" "aws_eks" {
  name     = var.cluster_name
  role_arn = aws_iam_role.eks_cluster.arn

  vpc_config {
    subnet_ids = data.aws_subnet_ids.subnet_ids.ids
  }

  tags = {
    Name = "EKS_tuto"
  }

  depends_on = [
    aws_iam_role.eks_cluster,
  ]
}

data "aws_eks_cluster" "cluster" {
  depends_on = [
    aws_eks_cluster.aws_eks,
  ]
  name = resource.aws_eks_cluster.aws_eks.id
}

data "aws_eks_cluster_auth" "cluster" {
  depends_on = [
    aws_eks_cluster.aws_eks,
  ]
  name = resource.aws_eks_cluster.aws_eks.id
}

resource "aws_iam_role" "eks_nodes" {
  name = "iam-ng-${var.cluster_name}"

  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}

resource "aws_iam_role_policy_attachment" "AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.eks_nodes.name
}

resource "aws_iam_role_policy_attachment" "AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.eks_nodes.name
}

locals {
  map_roles = <<ROLES
- rolearn: ${aws_iam_role.eks_nodes.arn}
  username: system:node:{{EC2PrivateDNSName}}
  groups:
    - system:bootstrappers
    - system:nodes
ROLES
}

resource "aws_iam_role_policy_attachment" "AmazonSSMManagedInstanceCore" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
  role       = aws_iam_role.eks_nodes.name
}

resource "aws_iam_role_policy_attachment" "AmazonEC2ContainerRegistryReadOnly" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = aws_iam_role.eks_nodes.name
}

resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilderECRContainerBuilds" {
  policy_arn = "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilderECRContainerBuilds"
  role       = aws_iam_role.eks_nodes.name
}

resource "aws_launch_template" "eks" {
  name                   = "${var.cluster_name}-template"
  update_default_version = true
  block_device_mappings {
    device_name = "/dev/sda1"
    ebs {
      volume_size = 100
    }
  }
  credit_specification {
    cpu_credits = "standard"
  }
  ebs_optimized = true
  # AMI generated with packer (is private)
  image_id = "ami-0f08b4b1a4fd3ebe3"
  network_interfaces {
    associate_public_ip_address = false
  }
}

resource "aws_eks_node_group" "workspace" {
  cluster_name    = aws_eks_cluster.aws_eks.name
  node_group_name = "ngw-${var.cluster_name}"
  node_role_arn   = aws_iam_role.eks_nodes.arn
  subnet_ids      = data.aws_subnet_ids.subnet_ids.ids
  instance_types  = ["m6i.2xlarge"]
  labels = {
    "gitpod.io/workload_workspace_services" = true
    "gitpod.io/workload_workspace_regular"  = true
    "gitpod.io/workload_workspace_headless" = true
  }

  scaling_config {
    desired_size = 1
    max_size     = 10
    min_size     = 1
  }

  # Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
  # Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
  depends_on = [
    resource.aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore,
    resource.aws_iam_role_policy_attachment.AmazonEC2ContainerRegistryReadOnly,
    resource.aws_iam_role_policy_attachment.EC2InstanceProfileForImageBuilderECRContainerBuilds,
  ]

  launch_template {
    id      = resource.aws_launch_template.eks.id
    version = aws_launch_template.eks.latest_version
  }
}

resource "aws_eks_node_group" "services" {
  cluster_name    = aws_eks_cluster.aws_eks.name
  node_group_name = "ngs-${var.cluster_name}"
  node_role_arn   = aws_iam_role.eks_nodes.arn
  subnet_ids      = data.aws_subnet_ids.subnet_ids.ids
  instance_types  = ["m6i.xlarge"]
  labels = {
    "gitpod.io/workload_meta" = true
    "gitpod.io/workload_ide"  = true
  }

  scaling_config {
    desired_size = 1
    max_size     = 10
    min_size     = 1
  }

  # Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
  # Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
  depends_on = [
    resource.aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore,
    resource.aws_iam_role_policy_attachment.AmazonEC2ContainerRegistryReadOnly,
    resource.aws_iam_role_policy_attachment.EC2InstanceProfileForImageBuilderECRContainerBuilds,
  ]

  launch_template {
    id      = resource.aws_launch_template.eks.id
    version = aws_launch_template.eks.latest_version
  }
}

provider "kubectl" {
  host                   = resource.aws_eks_cluster.aws_eks.endpoint
  cluster_ca_certificate = base64decode(resource.aws_eks_cluster.aws_eks.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.cluster.token
  config_path            = var.kubeconfig
}

output "host" {
  value = resource.aws_eks_cluster.aws_eks.endpoint
}

output "ca" {
  sensitive = true
  value     = base64decode(resource.aws_eks_cluster.aws_eks.certificate_authority[0].data)
}

output "token" {
  sensitive = true
  value     = data.aws_eks_cluster_auth.cluster.token
}
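
The host, ca, and token outputs together form the contract a caller needs to reach the new cluster. As a hedged illustration (not part of this commit), a harness on the TypeScript side could read them with the standard `terraform output -json` command:

    import { execSync } from "child_process";

    // Illustrative only: read the outputs declared above after `terraform apply`.
    const outputs = JSON.parse(execSync("terraform output -json", { encoding: "utf8" }));
    const host: string = outputs.host.value;   // cluster endpoint
    const ca: string = outputs.ca.value;       // decoded cluster CA certificate
    const token: string = outputs.token.value; // short-lived EKS auth token

    console.log(`EKS endpoint: ${host}`);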
