Commit 362124e

Add opt-in to use Harvester k3s cluster for development

mads-hartmann authored and roboquat committed
1 parent 286510e · commit 362124e

File tree: 4 files changed, +117 −49 lines

.werft/build.ts

Lines changed: 31 additions & 6 deletions

@@ -37,6 +37,7 @@ Tracing.initialize()
         werft = new Werft("build")
     })
     .then(() => build(context, version))
+    .then(() => VM.stopKubectlPortForwards())
     .then(() => werft.endAllSpans())
     .catch((err) => {
         werft.rootSpan.setStatus({
@@ -52,6 +53,8 @@ Tracing.initialize()
         // Explicitly not using process.exit as we need to flush tracing, see tracing.js
         process.exitCode = 1
     }
+
+    VM.stopKubectlPortForwards()
 })

 // Werft phases
@@ -77,7 +80,11 @@ const installerSlices = {
 }

 const vmSlices = {
-    BOOT_VM: 'Booting VM'
+    BOOT_VM: 'Booting VM',
+    START_KUBECTL_PORT_FORWARDS: 'Start kubectl port forwards',
+    COPY_CERT_MANAGER_RESOURCES: 'Copy CertManager resources from core-dev',
+    INSTALL_LETS_ENCRYPT_ISSUER: 'Install Lets Encrypt issuer',
+    KUBECONFIG: 'Getting kubeconfig'
 }

 export function parseVersion(context) {
@@ -272,7 +279,7 @@ export async function build(context, version) {

     const destname = version.split(".")[0];
     const namespace = `staging-${destname}`;
-    const domain = `${destname}.staging.gitpod-dev.com`;
+    const domain = withVM ? `${destname}.preview.gitpod-dev.com` : `${destname}.staging.gitpod-dev.com`;
     const monitoringDomain = `${destname}.preview.gitpod-dev.com`;
     const url = `https://${domain}`;
     const deploymentConfig: DeploymentConfig = {
@@ -293,18 +300,36 @@ export async function build(context, version) {
     if (withVM) {
         werft.phase(phases.VM, "Start VM");

-        if (!VM.vmExists({ name: destname })) {
+        werft.log(vmSlices.COPY_CERT_MANAGER_RESOURCES, 'Copy over CertManager resources from core-dev')
+        exec(`kubectl get secret clouddns-dns01-solver-svc-acct -n certmanager -o yaml | sed 's/namespace: certmanager/namespace: cert-manager/g' > clouddns-dns01-solver-svc-acct.yaml`, { slice: vmSlices.COPY_CERT_MANAGER_RESOURCES })
+        exec(`kubectl get clusterissuer letsencrypt-issuer-gitpod-core-dev -o yaml | sed 's/letsencrypt-issuer-gitpod-core-dev/letsencrypt-issuer/g' > letsencrypt-issuer.yaml`, { slice: vmSlices.COPY_CERT_MANAGER_RESOURCES })
+
+        const existingVM = VM.vmExists({ name: destname })
+        if (!existingVM) {
             werft.log(vmSlices.BOOT_VM, 'Starting VM')
             VM.startVM({ name: destname })
         } else {
             werft.log(vmSlices.BOOT_VM, 'VM already exists')
         }

         werft.log(vmSlices.BOOT_VM, 'Waiting for VM to be ready')
-        VM.waitForVM({ name: destname, timeoutMS: 1000 * 60 * 3 })
+        VM.waitForVM({ name: destname, timeoutMS: 1000 * 60 * 3, slice: vmSlices.BOOT_VM })

-        werft.done(phases.VM)
-        return
+        werft.log(vmSlices.START_KUBECTL_PORT_FORWARDS, 'Starting SSH port forwarding')
+        VM.startSSHProxy({ name: destname, slice: vmSlices.START_KUBECTL_PORT_FORWARDS })
+
+        werft.log(vmSlices.START_KUBECTL_PORT_FORWARDS, 'Starting Kube API port forwarding')
+        VM.startKubeAPIProxy({ name: destname, slice: vmSlices.START_KUBECTL_PORT_FORWARDS })
+
+        werft.log(vmSlices.KUBECONFIG, 'Copying k3s kubeconfig')
+        VM.copyk3sKubeconfig({ path: 'k3s.yml', timeoutMS: 1000 * 60 * 3, slice: vmSlices.KUBECONFIG })
+        // NOTE: This was a quick hack to override the existing kubeconfig so all future kubectl commands use the k3s cluster.
+        // We might want to keep both kubeconfigs around and be explicit about which one we're using.
+        exec(`mv k3s.yml /home/gitpod/.kube/config`)
+
+        if (!existingVM) {
+            exec(`kubectl apply -f clouddns-dns01-solver-svc-acct.yaml -f letsencrypt-issuer.yaml`, { slice: vmSlices.INSTALL_LETS_ENCRYPT_ISSUER, dontCheckRc: true })
+        }
     }

     werft.phase(phases.PREDEPLOY, "Checking for existing installations...");
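
As the inline NOTE says, a cleaner alternative to overwriting /home/gitpod/.kube/config would be to keep both kubeconfigs and select one explicitly per command. A minimal sketch of that idea, assuming the same exec helper from .werft/util/shell; the k3sKubectl wrapper is hypothetical and not part of this commit:

import { exec } from './util/shell';

// Hypothetical wrapper: leave /home/gitpod/.kube/config pointing at core-dev
// and route k3s commands through the copied kubeconfig instead.
function k3sKubectl(args: string, slice: string) {
    return exec(`kubectl --kubeconfig k3s.yml ${args}`, { slice })
}

// e.g. k3sKubectl(`apply -f letsencrypt-issuer.yaml`, vmSlices.INSTALL_LETS_ENCRYPT_ISSUER)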

.werft/build.yaml

Lines changed: 11 additions & 0 deletions

@@ -36,6 +36,9 @@ pod:
       - name: harvester-kubeconfig
         secret:
           secretName: harvester-kubeconfig
+      - name: harvester-vm-ssh-keys
+        secret:
+          secretName: harvester-vm-ssh-keys
       # - name: deploy-key
       #   secret:
       #     secretName: deploy-key
@@ -82,6 +85,8 @@ pod:
           readOnly: false
         - name: harvester-kubeconfig
           mountPath: /mnt/secrets/harvester-kubeconfig
+        - name: harvester-vm-ssh-keys
+          mountPath: /mnt/secrets/harvester-vm-ssh-keys
         # - name: deploy-key
         #   mountPath: /mnt/secrets/deploy-key
         #   readOnly: true
@@ -163,6 +168,12 @@ pod:
         export DOCKER_HOST=tcp://$NODENAME:2475
         sudo chown -R gitpod:gitpod /workspace

+        mkdir /workspace/.ssh
+        cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa /workspace/.ssh/id_rsa_harvester_vm
+        cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa.pub /workspace/.ssh/id_rsa_harvester_vm.pub
+        sudo chmod 600 /workspace/.ssh/id_rsa_harvester_vm
+        sudo chmod 644 /workspace/.ssh/id_rsa_harvester_vm.pub
+
         (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
         printf '{{ toJson . }}' > context.json
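
The keys copied above are what the SSH helpers in .werft/vm/vm.ts (further down) rely on; the chmod 600 matters because ssh refuses private keys that are readable by other users. A minimal connectivity check, sketched under the assumption that startSSHProxy has already forwarded the VM's port 22 to 127.0.0.1; checkVMSSHAccess is a hypothetical helper, not part of this commit:

import { exec } from './util/shell';

// Hypothetical smoke test: confirm the VM accepts the copied private key.
export function checkVMSSHAccess(): boolean {
    const result = exec(
        `ssh -i /workspace/.ssh/id_rsa_harvester_vm -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no [email protected] 'true'`,
        { silent: true, dontCheckRc: true }
    )
    return result.code == 0
}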

.werft/vm/manifests.ts

Lines changed: 21 additions & 29 deletions

@@ -15,10 +15,9 @@ type VirtualMachineManifestArguments = {
     vmName: string
     namespace: string
     claimName: string,
-    userDataSecretName: string
 }

-export function VirtualMachineManifest({ vmName, namespace, claimName, userDataSecretName }: VirtualMachineManifestArguments) {
+export function VirtualMachineManifest({ vmName, namespace, claimName }: VirtualMachineManifestArguments) {
     return `
 apiVersion: kubevirt.io/v1
 type: kubevirt.io.virtualmachine
@@ -46,7 +45,7 @@ spec:
       machine:
         type: q35
       cpu:
-        cores: 1
+        cores: 4
         sockets: 1
         threads: 1
       devices:
@@ -64,8 +63,8 @@ spec:
             bus: virtio
       resources:
         limits:
-          memory: 2Gi
-          cpu: 1
+          memory: 8Gi
+          cpu: 4
       evictionStrategy: LiveMigrate
   networks:
     - pod: {}
@@ -76,10 +75,23 @@ spec:
           claimName: ${claimName}
     - name: cloudinitdisk
       cloudInitNoCloud:
-        networkDataSecretRef:
-          name: ${userDataSecretName}
-        secretRef:
-          name: ${userDataSecretName}
+        userData: |-
+          #cloud-config
+          users:
+            - name: ubuntu
+              sudo: "ALL=(ALL) NOPASSWD: ALL"
+              ssh_authorized_keys:
+                - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC/aB/HYsb56V0NBOEab6j33v3LIxRiGqG4fmidAryAXevLyTANJPF8m44KSzSQg7AI7PMy6egxQp/JqH2b+3z1cItWuHZSU+klsKNuf5HxK7AOrND3ahbejZfyYewtKFQ3X9rv5Sk8TAR5gw5oPbkTR61jiLa58Sw7UkhLm2EDguGASb6mBal8iboiF8Wpl8QIvPmJaGIOY2YwXLepwFA3S3kVqW88eh2WFmjTMre5ASLguYNkHXjyb/TuhVFzAvphzpl84RAaEyjKYnk45fh4xRXx+oKqlfKRJJ/Owxa7SmGO+/4rWb3chdnpodHeu7XjERmjYLY+r46sf6n6ySgEht1xAWjMb1uqZqkDx+fDDsjFSeaN3ncX6HSoDOrphFmXYSwaMpZ8v67A791fuUPrMLC+YMckhTuX2g4i3XUdumIWvhaMvKhy/JRRMsfUH0h+KAkBLI6tn5ozoXiQhgM4SAE5HsMr6CydSIzab0yY3sq0avmZgeoc78+8PKPkZG1zRMEspV/hKKBC8hq7nm0bu4IgzuEIYHowOD8svqA0ufhDWxTt6A4Jo0xDzhFyKme7KfmW7SIhpejf3T1Wlf+QINs1hURr8LSOZEyY2SzYmAoQ49N0SSPb5xyG44cptpKcj0WCAJjBJoZqz0F5x9TjJ8XToB5obyJfRHD1JjxoMQ== [email protected]
+          chpasswd:
+            list: |
+              ubuntu:ubuntu
+            expire: False
+          runcmd:
+            - curl -sfL https://get.k3s.io | sh -
+            - sleep 10
+            - kubectl label nodes ${vmName} gitpod.io/workload_meta=true gitpod.io/workload_ide=true gitpod.io/workload_workspace_services=true gitpod.io/workload_workspace_regular=true gitpod.io/workload_workspace_headless=true gitpod.io/workspace_0=true gitpod.io/workspace_1=true gitpod.io/workspace_2=true
+            - kubectl create ns certs
+            - kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.6.1/cert-manager.yaml
 `
 }

@@ -123,23 +135,3 @@ type UserDataSecretManifestOptions = {
     namespace: string,
     secretName: string
 }
-
-export function UserDataSecretManifest({ namespace, secretName }: UserDataSecretManifestOptions) {
-    const userdata = Buffer.from(`#cloud-config
-users:
-  - name: ubuntu
-    lock_passwd: false
-    sudo: "ALL=(ALL) NOPASSWD: ALL"
-    passwd: "$6$exDY1mhS4KUYCE/2$zmn9ToZwTKLhCw.b4/b.ZRTIZM30JZ4QrOQ2aOXJ8yk96xpcCof0kxKwuX1kqLG/ygbJ1f8wxED22bTL4F46P0"`).toString("base64")
-    return `
-apiVersion: v1
-type: secret
-kind: Secret
-data:
-  networkdata: ""
-  userdata: ${userdata}
-metadata:
-  name: ${secretName}
-  namespace: ${namespace}
-`
-}
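
The inlined cloud-init config installs k3s on first boot and labels the node so Gitpod workloads can schedule onto it. Once the kubeconfig has been copied out (see copyk3sKubeconfig in vm.ts below), a check like the following could confirm the boot script finished; verifyNodeLabels is a hypothetical sketch, not part of this commit:

import { exec } from '../util/shell';

// Hypothetical sanity check: assert the k3s node carries the expected labels.
export function verifyNodeLabels(vmName: string) {
    const labels = exec(`kubectl --kubeconfig k3s.yml get node ${vmName} -o jsonpath="{.metadata.labels}"`, { silent: true }).stdout
    if (!labels.includes("gitpod.io/workload_meta")) {
        throw new Error(`Node ${vmName} is missing the gitpod.io workload labels; cloud-init may still be running`)
    }
}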

.werft/vm/vm.ts

Lines changed: 54 additions & 14 deletions

@@ -1,4 +1,5 @@
 import { exec } from '../util/shell';
+import { getGlobalWerftInstance } from '../util/werft';

 import * as Manifests from './manifests'

@@ -21,27 +22,18 @@ EOF
 */
 export function startVM(options: { name: string }) {
     const namespace = `preview-${options.name}`
-    const userDataSecretName = `userdata-${options.name}`

     kubectlApplyManifest(
         Manifests.NamespaceManifest({
             namespace
         })
     )

-    kubectlApplyManifest(
-        Manifests.UserDataSecretManifest({
-            namespace,
-            secretName: userDataSecretName,
-        })
-    )
-
     kubectlApplyManifest(
         Manifests.VirtualMachineManifest({
             namespace,
             vmName: options.name,
-            claimName: `${options.name}-${Date.now()}`,
-            userDataSecretName
+            claimName: `${options.name}-${Date.now()}`
         }),
         { validate: false }
     )
@@ -68,12 +60,13 @@ export function vmExists(options: { name: string }) {
 * Wait until the VM Instance reaches the Running status.
 * If the VM Instance doesn't reach Running before the timeoutMS it will throw an Error.
 */
-export function waitForVM(options: { name: string, timeoutMS: number }) {
+export function waitForVM(options: { name: string, timeoutMS: number, slice: string }) {
+    const werft = getGlobalWerftInstance()
     const namespace = `preview-${options.name}`
     const startTime = Date.now()
     while (true) {

-        const status = exec(`kubectl --kubeconfig ${KUBECONFIG_PATH} -n ${namespace} get vmi ${options.name} -o jsonpath="{.status.phase}"`, { silent: true }).stdout.trim()
+        const status = exec(`kubectl --kubeconfig ${KUBECONFIG_PATH} -n ${namespace} get vmi ${options.name} -o jsonpath="{.status.phase}"`, { silent: true, slice: options.slice }).stdout.trim()

         if (status == "Running") {
             return
@@ -84,8 +77,55 @@ export function waitForVM(options: { name: string, timeoutMS: number }) {
             throw new Error("VM didn't reach Running status before the timeout")
         }

-        console.log(`VM is not yet running. Current status is ${status}. Sleeping 5 seconds`)
-        exec('sleep 5', { silent: true })
+        werft.log(options.slice, `VM is not yet running. Current status is ${status}. Sleeping 5 seconds`)
+        exec('sleep 5', { silent: true, slice: options.slice })
+    }
+}
+
+/**
+ * Copies the k3s kubeconfig out of the VM and places it at `path`.
+ * If it doesn't manage to do so before the timeout it will throw an Error.
+ */
+export function copyk3sKubeconfig(options: { path: string, timeoutMS: number, slice: string }) {
+    const werft = getGlobalWerftInstance()
+    const startTime = Date.now()
+    while (true) {
+
+        const status = exec(`ssh -i /workspace/.ssh/id_rsa_harvester_vm [email protected] -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no 'sudo cat /etc/rancher/k3s/k3s.yaml' > ${options.path}`, { silent: true, dontCheckRc: true, slice: options.slice })
+
+        if (status.code == 0) {
+            return
+        }
+
+        const elapsedTimeMs = Date.now() - startTime
+        if (elapsedTimeMs > options.timeoutMS) {
+            throw new Error(`Wasn't able to copy out the kubeconfig before the timeout. Exit code ${status.code}. Stderr: ${status.stderr}. Stdout: ${status.stdout}`)
+        }
+
+        werft.log(options.slice, `Wasn't able to copy out kubeconfig yet. Sleeping 5 seconds`)
+        exec('sleep 5', { silent: true, slice: options.slice })
     }
 }

+/**
+ * Proxy 127.0.0.1:22 to :22 in the VM through the k8s service
+ */
+export function startSSHProxy(options: { name: string, slice: string }) {
+    const namespace = `preview-${options.name}`
+    exec(`sudo kubectl --kubeconfig=${KUBECONFIG_PATH} -n ${namespace} port-forward service/proxy 22:22`, { async: true, silent: true, slice: options.slice })
+}
+
+/**
+ * Proxy 127.0.0.1:6443 to :6443 in the VM through the k8s service
+ */
+export function startKubeAPIProxy(options: { name: string, slice: string }) {
+    const namespace = `preview-${options.name}`
+    exec(`sudo kubectl --kubeconfig=${KUBECONFIG_PATH} -n ${namespace} port-forward service/proxy 6443:6443`, { async: true, silent: true, slice: options.slice })
+}
+
+/**
+ * Terminates all running kubectl proxies
+ */
+export function stopKubectlPortForwards() {
+    exec(`sudo killall kubectl || true`)
 }
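
waitForVM and copyk3sKubeconfig share the same poll/sleep/timeout shape. If a third waiter is ever needed, the loop could be factored out; a sketch of such a helper, assuming the existing exec and getGlobalWerftInstance utilities (pollUntil is hypothetical, not part of this commit):

import { exec } from '../util/shell';
import { getGlobalWerftInstance } from '../util/werft';

// Hypothetical helper: retry `check` every 5 seconds until it succeeds or
// timeoutMS elapses, logging progress to the given werft slice.
function pollUntil(options: { timeoutMS: number, slice: string, description: string, check: () => boolean }) {
    const werft = getGlobalWerftInstance()
    const startTime = Date.now()
    while (!options.check()) {
        if (Date.now() - startTime > options.timeoutMS) {
            throw new Error(`${options.description} didn't succeed before the timeout`)
        }
        werft.log(options.slice, `${options.description} not done yet. Sleeping 5 seconds`)
        exec('sleep 5', { silent: true, slice: options.slice })
    }
}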
