Commit 518ee22

[installer] Add EKS installer test

1 parent: ba85e5b
File tree

8 files changed: +456 -10 lines

.werft/eks-installer-tests.yaml

Lines changed: 85 additions & 0 deletions
@@ -0,0 +1,85 @@
# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/eks-installer-tests.yaml -a debug=true`
pod:
  serviceAccount: werft
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: dev/workload
                operator: In
                values:
                  - "builds"
  securityContext:
    runAsUser: 0
  volumes:
    - name: sh-playground-sa-perm
      secret:
        secretName: sh-playground-sa-perm
    - name: sh-playground-dns-perm
      secret:
        secretName: sh-playground-dns-perm
    - name: sh-aks-perm
      secret:
        secretName: aks-credentials
  containers:
    - name: nightly-test
      image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:cw-werft-cred.0
      workingDir: /workspace
      imagePullPolicy: Always
      volumeMounts:
        - name: sh-playground-sa-perm
          mountPath: /mnt/secrets/sh-playground-sa-perm
        - name: sh-playground-dns-perm # this sa is used for the DNS management
          mountPath: /mnt/secrets/sh-playground-dns-perm
      env:
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              name: aws-credentials
              key: aws-access-key
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              name: aws-credentials
              key: aws-secret-key
        - name: AWS_REGION
          valueFrom:
            secretKeyRef:
              name: aws-credentials
              key: aws-region
        - name: WERFT_HOST
          value: "werft.werft.svc.cluster.local:7777"
        - name: GOOGLE_APPLICATION_CREDENTIALS
          value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
        - name: WERFT_K8S_NAMESPACE
          value: "werft"
        - name: WERFT_K8S_LABEL
          value: "component=werft"
        - name: TF_VAR_sa_creds
          value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
        - name: TF_VAR_dns_sa_creds
          value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
        - name: NODENAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
      command:
        - bash
        - -c
        - |
          sleep 1
          set -Eeuo pipefail

          sudo chown -R gitpod:gitpod /workspace
          sudo apt update && apt install gettext-base

          export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)

          (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
          printf '{{ toJson . }}' > context.json

          npx ts-node .werft/installer-tests.ts "STANDARD_EKS_TEST"
# The bit below makes this a cron job
# plugins:
#   cron: "15 3 * * *"

.werft/installer-tests.ts

Lines changed: 22 additions & 0 deletions
@@ -39,6 +39,11 @@ const INFRA_PHASES: { [name: string]: InfraConfig } = {
         makeTarget: "aks-standard-cluster",
         description: "Creating an aks cluster(azure)",
     },
+    STANDARD_EKS_CLUSTER: {
+        phase: "create-std-eks-cluster",
+        makeTarget: "eks-standard-cluster",
+        description: "Creating an EKS cluster with 1 nodepool each for workspace and server",
+    },
     CERT_MANAGER: {
         phase: "setup-cert-manager",
         makeTarget: "cert-manager",
@@ -174,6 +179,23 @@ const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
             "DESTROY",
         ],
     },
+    STANDARD_EKS_TEST: {
+        DESCRIPTION: "Create an EKS cluster",
+        PHASES: [
+            "STANDARD_EKS_CLUSTER",
+            "CERT_MANAGER",
+            // TODO phases are:
+            // 1) register domains in AWS, associate with route53
+            // 2) add the associated NS record to GCP (since we use the gitpod-self-hosted.com domain)
+            // 3) create cluster issuer with route53 as solver
+            "GENERATE_KOTS_CONFIG",
+            "INSTALL_GITPOD",
+            // "CHECK_INSTALLATION",
+            // "RUN_INTEGRATION_TESTS",
+            "RESULTS",
+            "DESTROY",
+        ],
+    },
     STANDARD_K3S_PREVIEW: {
         DESCRIPTION: "Create a SH Gitpod preview environment on a K3s cluster, created on a GCP instance",
         PHASES: [
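Taken together, the two tables describe the test as data: TEST_CONFIGURATIONS maps a test name to an ordered list of phase names, and INFRA_PHASES maps each phase name to a werft phase, a make target, and a description. A minimal sketch of how a driver could walk that data, assuming each phase is executed roughly as `make <makeTarget>` (the harness's actual execution path is not part of this diff):

// Illustrative phase driver (an assumption, not the harness's real execution code).
import { execSync } from "child_process";

interface InfraConfig {
    phase: string;
    makeTarget: string;
    description: string;
}

// Stand-in for the INFRA_PHASES entry this commit adds.
const INFRA_PHASES: { [name: string]: InfraConfig } = {
    STANDARD_EKS_CLUSTER: {
        phase: "create-std-eks-cluster",
        makeTarget: "eks-standard-cluster",
        description: "Creating an EKS cluster with 1 nodepool each for workspace and server",
    },
};

// e.g. TEST_CONFIGURATIONS["STANDARD_EKS_TEST"].PHASES, trimmed to the entry defined above.
const phases = ["STANDARD_EKS_CLUSTER"];

for (const name of phases) {
    const { phase, makeTarget, description } = INFRA_PHASES[name];
    console.log(`${phase}: ${description}`);
    // The make invocation and working directory are assumptions for illustration.
    execSync(`make ${makeTarget}`, { stdio: "inherit" });
}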
Lines changed: 203 additions & 0 deletions
@@ -0,0 +1,203 @@
terraform {
  required_providers {
    kubectl = {
      source  = "gavinbunney/kubectl"
      version = ">= 1.7.0"
    }
    aws = {
      version = " ~> 3.0"
      source  = "registry.terraform.io/hashicorp/aws"
    }
  }
}

resource "aws_iam_role" "eks_cluster" {
  depends_on = [data.aws_subnet_ids.subnet_ids]
  name       = "iam-${var.cluster_name}"

  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "eks.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}

resource "aws_iam_role_policy_attachment" "AmazonEKSClusterPolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
  role       = aws_iam_role.eks_cluster.name
}

resource "aws_iam_role_policy_attachment" "AmazonEKSServicePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
  role       = aws_iam_role.eks_cluster.name
}

resource "aws_eks_cluster" "aws_eks" {
  name     = var.cluster_name
  role_arn = aws_iam_role.eks_cluster.arn

  vpc_config {
    subnet_ids = data.aws_subnet_ids.subnet_ids.ids
  }

  tags = {
    Name = "EKS_tuto"
  }

  depends_on = [
    aws_iam_role.eks_cluster,
  ]
}

data "aws_eks_cluster" "cluster" {
  depends_on = [
    aws_eks_cluster.aws_eks,
  ]
  name = resource.aws_eks_cluster.aws_eks.id
}

data "aws_eks_cluster_auth" "cluster" {
  depends_on = [
    aws_eks_cluster.aws_eks,
  ]
  name = resource.aws_eks_cluster.aws_eks.id
}

resource "aws_iam_role" "eks_nodes" {
  name = "iam-ng-${var.cluster_name}"

  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}

resource "aws_iam_role_policy_attachment" "AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.eks_nodes.name
}

resource "aws_iam_role_policy_attachment" "AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.eks_nodes.name
}

locals {
  map_roles = <<ROLES
    - rolearn: ${aws_iam_role.eks_nodes.arn}
      username: system:node:{{EC2PrivateDNSName}}
      groups:
        - system:bootstrappers
        - system:nodes
ROLES
}

resource "aws_iam_role_policy_attachment" "AmazonSSMManagedInstanceCore" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
  role       = aws_iam_role.eks_nodes.name
}

resource "aws_iam_role_policy_attachment" "AmazonEC2ContainerRegistryReadOnly" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = aws_iam_role.eks_nodes.name
}

resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilderECRContainerBuilds" {
  policy_arn = "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilderECRContainerBuilds"
  role       = aws_iam_role.eks_nodes.name
}

resource "aws_launch_template" "eks" {
  name                   = "${var.cluster_name}-template"
  update_default_version = true
  block_device_mappings {
    device_name = "/dev/sda1"
    ebs {
      volume_size = 100
    }
  }
  credit_specification {
    cpu_credits = "standard"
  }
  ebs_optimized = true
  # AMI generated with packer (is private)
  image_id = "ami-0f08b4b1a4fd3ebe3"
  network_interfaces {
    associate_public_ip_address = false
  }
}

resource "aws_eks_node_group" "workspace" {
  cluster_name    = aws_eks_cluster.aws_eks.name
  node_group_name = "ngw-${var.cluster_name}"
  node_role_arn   = aws_iam_role.eks_nodes.arn
  subnet_ids      = data.aws_subnet_ids.subnet_ids.ids
  instance_types  = ["m6i.2xlarge"]
  labels = {
    "gitpod.io/workload_workspace_services" = true
    "gitpod.io/workload_workspace_regular"  = true
    "gitpod.io/workload_workspace_headless" = true
    "gitpod.io/workload_meta"               = true
    "gitpod.io/workload_ide"                = true
  }

  scaling_config {
    desired_size = 1
    max_size     = 10
    min_size     = 1
  }

  # Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
  # Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
  depends_on = [
    resource.aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore,
    resource.aws_iam_role_policy_attachment.AmazonEC2ContainerRegistryReadOnly,
    resource.aws_iam_role_policy_attachment.EC2InstanceProfileForImageBuilderECRContainerBuilds,
  ]

  launch_template {
    id      = resource.aws_launch_template.eks.id
    version = aws_launch_template.eks.latest_version
  }
}

provider "kubectl" {
  host                   = resource.aws_eks_cluster.aws_eks.endpoint
  cluster_ca_certificate = base64decode(resource.aws_eks_cluster.aws_eks.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.cluster.token
  config_path            = var.kubeconfig
}

output "host" {
  value = resource.aws_eks_cluster.aws_eks.endpoint
}

output "ca" {
  sensitive = true
  value     = base64decode(resource.aws_eks_cluster.aws_eks.certificate_authority[0].data)
}

output "token" {
  sensitive = true
  value     = data.aws_eks_cluster_auth.cluster.token
}
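The Terraform above only provisions the cluster and node group; the install and test phases that follow still need cluster credentials. One way the host, ca and token outputs could be consumed is to assemble a kubeconfig, sketched below under the assumptions that Terraform >= 0.14 is available (for `terraform output -raw`) and that the command runs in the module's directory; none of this helper code is part of the commit:

// Hypothetical helper (not part of this commit): turn the Terraform outputs above into a
// kubeconfig that later install/test phases could point kubectl at.
import { execSync } from "child_process";
import { writeFileSync } from "fs";

function tfOutput(name: string): string {
    // Assumes Terraform >= 0.14 and that the state lives in the current directory.
    return execSync(`terraform output -raw ${name}`, { encoding: "utf-8" }).trim();
}

export function writeKubeconfig(path: string): void {
    const kubeconfig = {
        apiVersion: "v1",
        kind: "Config",
        clusters: [{
            name: "eks",
            cluster: {
                server: tfOutput("host"),
                // the "ca" output is the decoded PEM, so re-encode it for this field
                "certificate-authority-data": Buffer.from(tfOutput("ca")).toString("base64"),
            },
        }],
        users: [{ name: "eks", user: { token: tfOutput("token") } }],
        contexts: [{ name: "eks", context: { cluster: "eks", user: "eks" } }],
        "current-context": "eks",
    };
    // kubectl accepts JSON kubeconfig files as well as YAML
    writeFileSync(path, JSON.stringify(kubeconfig, null, 2));
}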
