[installer] Add EKS installer test #10709

Merged

merged 3 commits on Jul 1, 2022

Changes from 2 commits
88 changes: 88 additions & 0 deletions .werft/eks-installer-tests.yaml
@@ -0,0 +1,88 @@
# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/eks-installer-tests.yaml -a debug=true`
pod:
serviceAccount: werft
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: dev/workload
operator: In
values:
- "builds"
securityContext:
runAsUser: 0
volumes:
- name: sh-playground-sa-perm
secret:
secretName: sh-playground-sa-perm
- name: sh-playground-dns-perm
secret:
secretName: sh-playground-dns-perm
- name: sh-aks-perm
secret:
secretName: aks-credentials
containers:
- name: nightly-test
image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:cw-werft-cred.0
workingDir: /workspace
imagePullPolicy: Always
volumeMounts:
- name: sh-playground-sa-perm
mountPath: /mnt/secrets/sh-playground-sa-perm
- name: sh-playground-dns-perm # this sa is used for the DNS management
mountPath: /mnt/secrets/sh-playground-dns-perm
env:
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-credentials
key: aws-access-key
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-credentials
key: aws-secret-key
- name: AWS_REGION
valueFrom:
secretKeyRef:
name: aws-credentials
key: aws-region
- name: WERFT_HOST
value: "werft.werft.svc.cluster.local:7777"
- name: GOOGLE_APPLICATION_CREDENTIALS
value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
- name: WERFT_K8S_NAMESPACE
value: "werft"
- name: WERFT_K8S_LABEL
value: "component=werft"
- name: TF_VAR_sa_creds
value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
- name: TF_VAR_dns_sa_creds
value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
command:
- bash
- -c
- |
sleep 1
set -Eeuo pipefail

sudo chown -R gitpod:gitpod /workspace
sudo apt update && sudo apt install -y gettext-base

export TF_VAR_TEST_ID="$(echo $RANDOM | md5sum | head -c 5; echo)"

(cd .werft && yarn install && mv node_modules ..) | werft log slice prep
printf '{{ toJson . }}' > context.json
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install

npx ts-node .werft/installer-tests.ts "STANDARD_EKS_TEST"
# Uncommenting the plugin config below turns this into a cron job
# plugins:
# cron: "15 3 * * *"
70 changes: 50 additions & 20 deletions .werft/installer-tests.ts
@@ -35,6 +35,7 @@ const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
"STANDARD_GKE_CLUSTER",
"CERT_MANAGER",
"GCP_MANAGED_DNS",
"CLUSTER_ISSUER",
"GENERATE_KOTS_CONFIG",
"INSTALL_GITPOD",
"CHECK_INSTALLATION",
@@ -49,6 +50,7 @@ const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
PHASES: [
"STANDARD_GKE_CLUSTER",
"CERT_MANAGER",
"CLUSTER_ISSUER",
"GCP_MANAGED_DNS",
"GENERATE_KOTS_CONFIG",
"INSTALL_GITPOD",
@@ -66,11 +68,12 @@ const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
PHASES: [
"STANDARD_K3S_CLUSTER_ON_GCP",
"CERT_MANAGER",
"CLUSTER_ISSUER",
"GENERATE_KOTS_CONFIG",
"INSTALL_GITPOD",
"RESULTS",
"CHECK_INSTALLATION",
"RUN_INTEGRATION_TESTS",
"RESULTS",
"DESTROY",
],
},
@@ -80,6 +83,7 @@ const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
PHASES: [
"STANDARD_K3S_CLUSTER_ON_GCP",
"CERT_MANAGER",
"CLUSTER_ISSUER",
"GENERATE_KOTS_CONFIG",
"INSTALL_GITPOD",
"CHECK_INSTALLATION",
@@ -92,8 +96,8 @@ const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
PHASES: [
"STANDARD_AKS_CLUSTER",
"CERT_MANAGER",
"AZURE_ISSUER",
"AZURE_EXTERNALDNS",
"CLUSTER_ISSUER",
"EXTERNALDNS",
"ADD_NS_RECORD",
"GENERATE_KOTS_CONFIG",
"INSTALL_GITPOD",
@@ -103,6 +107,23 @@ const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
"DESTROY",
],
},
STANDARD_EKS_TEST: {
CLOUD: "aws",
DESCRIPTION: "Create an EKS cluster",
PHASES: [
"STANDARD_EKS_CLUSTER",
"CERT_MANAGER",
"EXTERNALDNS",
"CLUSTER_ISSUER",
"ADD_NS_RECORD",
"GENERATE_KOTS_CONFIG",
"RESULTS",
"INSTALL_GITPOD",
"CHECK_INSTALLATION",
"RUN_INTEGRATION_TESTS",
"DESTROY",
],
},
};

const config: TestConfig = TEST_CONFIGURATIONS[testConfig];
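Note: `testConfig` is the configuration name the werft job passes on the command line (`npx ts-node .werft/installer-tests.ts "STANDARD_EKS_TEST"` in the YAML above). A minimal sketch of that hand-off, assuming plain argv parsing; the actual parsing sits outside the visible hunks:

    // Assumption: the config name arrives via process.argv, and the
    // `cloud` variable used in INFRA_PHASES below is derived from the
    // selected configuration's CLOUD field.
    const testConfigName: string = process.argv[2];
    const cloud: string = TEST_CONFIGURATIONS[testConfigName].CLOUD; // "aws" for STANDARD_EKS_TEST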
@@ -128,6 +149,11 @@ const INFRA_PHASES: { [name: string]: InfraConfig } = {
makeTarget: "aks-standard-cluster",
description: "Creating an aks cluster(azure)",
},
STANDARD_EKS_CLUSTER: {
phase: "create-std-eks-cluster",
makeTarget: "eks-standard-cluster",
description: "Creating a EKS cluster with 1 nodepool each for workspace and server",
},
CERT_MANAGER: {
phase: "setup-cert-manager",
makeTarget: "cert-manager",
@@ -146,19 +172,19 @@ const INFRA_PHASES: { [name: string]: InfraConfig } = {
)} db=${randomize("db", cloud)}`,
description: `Generate KOTS Config file`,
},
AZURE_ISSUER: {
phase: "setup-azure-cluster-issuer",
makeTarget: "azure-issuer",
description: "Deploys ClusterIssuer for azure",
CLUSTER_ISSUER: {
phase: "setup-cluster-issuer",
makeTarget: `cluster-issuer cloud=${cloud}`,
description: `Deploys ClusterIssuer for ${cloud}`,
},
AZURE_EXTERNALDNS: {
phase: "azure-external-dns",
makeTarget: "azure-external-dns",
description: "Deploys external-dns with azure provider",
EXTERNALDNS: {
phase: "external-dns",
makeTarget: `external-dns cloud=${cloud}`,
description: `Deploys external-dns with ${cloud} provider`,
},
ADD_NS_RECORD: {
phase: "add-ns-record",
makeTarget: "add-ns-record",
makeTarget: `add-ns-record cloud=${cloud}`,
description: "Adds NS record for subdomain under gitpod-self-hosted.com",
},
INSTALL_GITPOD_IGNORE_PREFLIGHTS: {
@@ -189,7 +215,7 @@
},
DESTROY: {
phase: "destroy",
makeTarget: "cleanup",
makeTarget: `cleanup cloud=${cloud}`,
description: "Destroy the created infrastucture",
},
RESULTS: {
Expand Down Expand Up @@ -224,23 +250,24 @@ export async function installerTests(config: TestConfig) {
}

function callMakeTargets(phase: string, description: string, makeTarget: string) {
werft.phase(phase, `${description}`);
werft.log(phase, `calling ${makeTarget}`);
werft.phase(phase, description);

const response = exec(`make -C ${makefilePath} ${makeTarget}`, {
slice: "call-make-target",
slice: phase,
dontCheckRc: true,
});

if (response.code) {
console.error(`Error: ${response.stderr}`);
werft.fail(phase, "Operation failed");
} else {
werft.log(phase, response.stdout.toString());
werft.done(phase);
return response.code;
}

werft.log(phase, response.stdout.toString());
werft.done(phase);

return response.code;

}
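As a usage sketch, the STANDARD_EKS_CLUSTER entry from INFRA_PHASES above would end up being invoked as:

    // Runs `make -C ${makefilePath} eks-standard-cluster` and streams its
    // output into the "create-std-eks-cluster" werft slice.
    callMakeTargets(
        "create-std-eks-cluster",
        "Creating an EKS cluster with 1 nodepool each for workspace and server",
        "eks-standard-cluster",
    );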

function randomize(resource: string, platform: string): string {
@@ -254,7 +281,10 @@ function cleanup() {
const phase = "destroy-infrastructure";
werft.phase(phase, "Destroying all the created resources");

const response = exec(`make -C ${makefilePath} cleanup`, { slice: "run-terrafrom-destroy", dontCheckRc: true });
const response = exec(`make -C ${makefilePath} cleanup cloud=${cloud}`, {
slice: "run-terrafrom-destroy",
Contributor:

Nitpick:

Suggested change
- slice: "run-terrafrom-destroy",
+ slice: "run-terraform-destroy",

This slice value is only used to identify logs/log groups within werft, correct?

Contributor (Author):

correct!
dontCheckRc: true,
});

// if the destroy command fail, we check if any resources are pending to be removed
// if nothing is yet to be cleaned, we return with success
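Taken together: a TestConfig lists PHASES, each phase name indexes INFRA_PHASES, and each entry runs through callMakeTargets. A simplified sketch of that driver loop, assuming only the shapes visible in this diff (the real installerTests body is collapsed above):

    // Plausible shape only, not the exact implementation: run phases in
    // order and stop on the first non-zero return code; teardown is
    // handled by the DESTROY phase or the cleanup() fallback.
    export async function installerTests(config: TestConfig) {
        for (const phaseName of config.PHASES) {
            const infra = INFRA_PHASES[phaseName];
            const rc = callMakeTargets(infra.phase, infra.description, infra.makeTarget);
            if (rc) {
                break;
            }
        }
    }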
34 changes: 26 additions & 8 deletions install/infra/terraform/aks/output.tf
@@ -36,14 +36,32 @@ output "external_dns_secrets" {
}

output "external_dns_settings" {
value = {
provider = "azure"
"azure.resourceGroup" = azurerm_resource_group.gitpod.name
"azure.subscriptionId" = data.azurerm_client_config.current.subscription_id
"azure.tenantId" = data.azurerm_client_config.current.tenant_id
"azure.useManagedIdentityExtension" = true
"azure.userAssignedIdentityID" = azurerm_kubernetes_cluster.k8s.kubelet_identity.0.client_id
}
value = [
{
"name": "provider",
"value": "azure"
},
{
"name": "azure.resourceGroup",
"value": azurerm_resource_group.gitpod.name,
},
{
"name": "azure.subscriptionId",
"value": data.azurerm_client_config.current.subscription_id,
},
{
"name": "azure.tenantId",
"value": data.azurerm_client_config.current.tenant_id,
},
{
"name": "azure.useManagedIdentityExtension",
"value": true
},
{
"name": "azure.userAssignedIdentityID",
"value": azurerm_kubernetes_cluster.k8s.kubelet_identity.0.client_id
},
]
}

output "k8s_connection" {
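In TypeScript terms, this changes the external_dns_settings output from a flat map to an ordered list of name/value pairs, presumably so consumers can handle the settings uniformly across clouds (illustrative types only, not part of this PR):

    // Old shape: a single object keyed by setting name.
    type ExternalDnsSettingsOld = { [key: string]: string | boolean };
    // New shape: an array of { name, value } entries.
    type ExternalDnsSettingsNew = Array<{ name: string; value: string | boolean }>;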
40 changes: 40 additions & 0 deletions install/infra/terraform/eks/database.tf
@@ -0,0 +1,40 @@
resource "aws_db_subnet_group" "gitpod_subnets" {
name = "db-sg-${var.cluster_name}"
subnet_ids = [module.vpc.public_subnets[2], module.vpc.public_subnets[3]]
}

resource "aws_security_group" "rdssg" {
name = "dh-sg-${var.cluster_name}"
vpc_id = module.vpc.vpc_id

ingress {
from_port = 0
to_port = 3306
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}

egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}

resource "aws_db_instance" "gitpod" {
allocated_storage = 10
max_allocated_storage = 100
engine = "mysql"
engine_version = "5.7"
instance_class = "db.t3.micro"
vpc_security_group_ids = [ aws_security_group.rdssg.id ]
identifier = "db-${var.cluster_name}"
name = "gitpod"
username = "gitpod"
password = "gitpod-qwat"
parameter_group_name = "default.mysql5.7"
db_subnet_group_name = aws_db_subnet_group.gitpod_subnets.name
skip_final_snapshot = true
publicly_accessible = true
}