@@ -63,7 +63,6 @@ export async function deployToPreviewEnvironment(werft: Werft, jobConfig: JobCon
         withPayment,
         withObservability,
         installEELicense,
-        withHelm,
         workspaceFeatureFlags,
         dynamicCPULimits,
         storage
@@ -182,41 +181,8 @@ export async function deployToPreviewEnvironment(werft: Werft, jobConfig: JobCon
             .finally(() => werft.done(sliceID));
     }
 
-    werft.phase(phases.PREDEPLOY, "Checking for existing installations...");
-    // the context namespace is not set at this point
-    const deploymentKubeconfig = withVM ? PREVIEW_K3S_KUBECONFIG_PATH : CORE_DEV_KUBECONFIG_PATH;
-    const hasGitpodHelmInstall = exec(`helm --kubeconfig ${deploymentKubeconfig} status ${helmInstallName} -n ${deploymentConfig.namespace}`, { slice: "check for Helm install", dontCheckRc: true }).code === 0;
-    const hasGitpodInstallerInstall = exec(`kubectl --kubeconfig ${deploymentKubeconfig} get configmap gitpod-app -n ${deploymentConfig.namespace}`, { slice: "check for Installer install", dontCheckRc: true }).code === 0;
-    werft.log("result of installation checks", `has Helm install: ${hasGitpodHelmInstall}, has Installer install: ${hasGitpodInstallerInstall}`);
-
-    if (withHelm) {
-        werft.log("using Helm", "with-helm was specified.");
-        // you want helm, but left behind a Gitpod Installer installation, force a clean slate
-        if (hasGitpodInstallerInstall && !deploymentConfig.cleanSlateDeployment) {
-            werft.log("warning!", "with-helm was specified, there's an Installer install, but, `with-clean-slate-deployment=false`, forcing to true.");
-            deploymentConfig.cleanSlateDeployment = true;
-        }
-        werft.done(phases.PREDEPLOY);
-        werft.phase(phases.DEPLOY, "deploying")
-        await deployToDevWithHelm(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
-    } // scenario: you pushed code to an existing preview environment built with Helm, and didn't with-clean-slate-deployment=true'
-    else if (hasGitpodHelmInstall && !deploymentConfig.cleanSlateDeployment) {
-        werft.log("using Helm", "with-helm was not specified, but, a Helm installation exists, and this is not a clean slate deployment.");
-        werft.log("tip", "Set 'with-clean-slate-deployment=true' if you wish to remove the Helm install and use the Installer.");
-        werft.done(phases.PREDEPLOY);
-        werft.phase(phases.DEPLOY, "deploying to dev with Helm");
-        await deployToDevWithHelm(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
-    } else {
-        // you get here if
-        // ...it's a new install with no flag overrides or
-        // ...it's an existing install and a Helm install doesn't exist or
-        // ...you have a prexisting Helm install, set 'with-clean-slate-deployment=true', but did not specifiy 'with-helm=true'
-        // Why? The installer is supposed to be a default so we all dog-food it.
-        // But, its new, so this may help folks transition with less issues.
-        werft.done(phases.PREDEPLOY);
-        werft.phase(phases.DEPLOY, "deploying to dev with Installer");
-        await deployToDevWithInstaller(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
-    }
+    werft.phase(phases.DEPLOY, "deploying to dev with Installer");
+    await deployToDevWithInstaller(werft, jobConfig, deploymentConfig, workspaceFeatureFlags, dynamicCPULimits, storage);
 }
 
 /*
@@ -357,213 +323,6 @@ async function deployToDevWithInstaller(werft: Werft, jobConfig: JobConfig, depl
     }
 }
 
-/*
- * Deploy a preview environment using Helm
- */
-async function deployToDevWithHelm(werft: Werft, jobConfig: JobConfig, deploymentConfig: DeploymentConfig, workspaceFeatureFlags: string[], dynamicCPULimits, storage) {
-    const { version, destname, namespace, domain, monitoringDomain, url } = deploymentConfig;
-    // find free ports
-    werft.log("find free ports", "Check for some free ports.");
-    const [wsdaemonPortMeta, registryNodePortMeta, nodeExporterPort] = await findFreeHostPorts([
-        { start: 10000, end: 11000 },
-        { start: 30000, end: 31000 },
-        { start: 31001, end: 32000 },
-    ], CORE_DEV_KUBECONFIG_PATH, metaEnv({ slice: "find free ports", silent: true }));
-    werft.log("find free ports",
-        `wsdaemonPortMeta: ${wsdaemonPortMeta}, registryNodePortMeta: ${registryNodePortMeta}, and nodeExporterPort ${nodeExporterPort}.`);
-    werft.done("find free ports");
-
-    // trigger certificate issuing
-    werft.log('certificate', "organizing a certificate for the preview environment...");
-    let namespaceRecreatedResolve = undefined;
-    let namespaceRecreatedPromise = new Promise((resolve) => {
-        namespaceRecreatedResolve = resolve;
-    });
-
-    try {
-        if (deploymentConfig.cleanSlateDeployment) {
-            // re-create namespace
-            await cleanStateEnv(metaEnv());
-        } else {
-            createNamespace(namespace, CORE_DEV_KUBECONFIG_PATH, metaEnv({ slice: 'prep' }));
-        }
-        // Now we want to execute further kubectl operations only in the created namespace
-        setKubectlContextNamespace(namespace, metaEnv({ slice: 'prep' }));
-
-        // trigger certificate issuing
-        werft.log('certificate', "organizing a certificate for the preview environment...");
-        await installMetaCertificates(werft, jobConfig.repository.branch, jobConfig.withVM, namespace, CORE_DEV_KUBECONFIG_PATH, 'certificate');
-        werft.done('certificate');
-        await addDNSRecord(werft, deploymentConfig.namespace, deploymentConfig.domain, false, CORE_DEV_KUBECONFIG_PATH)
-        werft.done('prep');
-    } catch (err) {
-        werft.fail('prep', err);
-    }
-
-    // core-dev specific section start
-    werft.log("secret", "copy secret into namespace")
-    try {
-        const auth = exec(`printf "%s" "_json_key:$(kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret ${IMAGE_PULL_SECRET_NAME} --namespace=keys -o yaml \
-        | yq r - data['.dockerconfigjson'] \
-        | base64 -d)" | base64 -w 0`, { silent: true }).stdout.trim();
-        fs.writeFileSync("chart/gcp-sa-registry-auth",
-            `{
-    "auths": {
-        "eu.gcr.io": {
-            "auth": "${auth}"
-        },
-        "europe-docker.pkg.dev": {
-            "auth": "${auth}"
-        }
-    }
-}`);
-        werft.done('secret');
-    } catch (err) {
-        werft.fail('secret', err);
-    }
-
-    werft.log("authProviders", "copy authProviders")
-    try {
-        exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret preview-envs-authproviders --namespace=keys -o yaml \
-            | yq r - data.authProviders \
-            | base64 -d -w 0 \
-            > authProviders`, { slice: "authProviders" });
-        exec(`yq merge --inplace .werft/jobs/build/helm/values.dev.yaml ./authProviders`, { slice: "authProviders" })
-        werft.done('authProviders');
-    } catch (err) {
-        werft.fail('authProviders', err);
-    }
-    // core-dev specific section end
-
-
-    // If observability is enabled, we want to deploy it before installing Gitpod itself.
-    // The reason behind it is because Gitpod components will start sending traces to a non-existent
-    // OpenTelemetry-collector otherwise.
-    werft.log(`observability`, "Running observability static checks.")
-    werft.log(`observability`, "Installing monitoring-satellite...")
-    if (deploymentConfig.withObservability) {
-        try {
-            const installMonitoringSatellite = new MonitoringSatelliteInstaller({
-                kubeconfigPath: CORE_DEV_KUBECONFIG_PATH,
-                branch: jobConfig.observability.branch,
-                satelliteNamespace: namespace,
-                clusterName: namespace,
-                nodeExporterPort: nodeExporterPort,
-                previewDomain: domain,
-                stackdriverServiceAccount: STACKDRIVER_SERVICEACCOUNT,
-                withVM: false,
-                werft: werft
-            });
-            await installMonitoringSatellite.install()
-        } catch (err) {
-            werft.fail('observability', err);
-        }
-    } else {
-        exec(`echo '"with-observability" annotation not set, skipping...'`, { slice: `observability` })
-        exec(`echo 'To deploy monitoring-satellite, please add "/werft with-observability" to your PR description.'`, { slice: `observability` })
-    }
-    werft.done('observability');
-
-    // deployment config
-    try {
-        shell.cd("/workspace/chart");
-        werft.log('helm', 'installing Gitpod');
-
-        const commonFlags = addDeploymentFlags();
-        installGitpod(commonFlags);
-
-        werft.log('helm', 'done');
-        werft.done('helm');
-    } catch (err) {
-        werft.fail('deploy', err);
-    } finally {
-        // produce the result independently of Helm succeding, so that in case Helm fails we still have the URL.
-        exec(`werft log result -d "dev installation" -c github-check-preview-env url ${url}/workspaces`);
-    }
-
-    function installGitpod(commonFlags: string) {
-        let flags = commonFlags
-        flags += ` --set components.wsDaemon.servicePort=${wsdaemonPortMeta}`;
-        flags += ` --set components.registryFacade.ports.registry.servicePort=${registryNodePortMeta}`;
-
-        const nodeAffinityValues = getNodeAffinities();
-
-        if (storage === "gcp") {
-            exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get secret gcp-sa-gitpod-dev-deployer -n werft -o yaml | yq d - metadata | yq w - metadata.name remote-storage-gcloud | kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} apply -f -`);
-            flags += ` -f ../.werft/jobs/build/helm/values.dev.gcp-storage.yaml`;
-        }
-
-        /* A hash is caclulated from the branch name and a subset of that string is parsed to a number x,
-            x mod the number of different nodepool-sets defined in the files listed in nodeAffinityValues
-            is used to generate a pseudo-random number that consistent as long as the branchname persists.
-            We use it to reduce the number of preview-environments accumulating on a singe nodepool.
-         */
-        const nodepoolIndex = getNodePoolIndex(namespace);
-
-        exec(`helm dependencies up`);
-        exec(`/usr/local/bin/helm3 --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} upgrade --install --timeout 10m -f ../.werft/jobs/build/helm/${nodeAffinityValues[nodepoolIndex]} -f ../.werft/jobs/build/helm/values.dev.yaml ${flags} ${helmInstallName} .`);
-    }
-
-    function addDeploymentFlags() {
-        let flags = ""
-        flags += ` --namespace ${namespace}`;
-        flags += ` --set components.imageBuilder.hostDindData=/mnt/disks/raid0/docker-${namespace}`;
-        flags += ` --set components.wsDaemon.hostWorkspaceArea=/mnt/disks/raid0/workspaces-${namespace}`;
-        flags += ` --set version=${version}`;
-        flags += ` --set hostname=${domain}`;
-        flags += ` --set devBranch=${destname}`;
-        workspaceFeatureFlags.forEach((f, i) => {
-            flags += ` --set components.server.defaultFeatureFlags[${i}]='${f}'`;
-        });
-        if (dynamicCPULimits) {
-            flags += ` -f ../.werft/jobs/build/helm/values.variant.cpuLimits.yaml`;
-        }
-        if ((deploymentConfig.analytics || "").startsWith("segment|")) {
-            flags += ` --set analytics.writer=segment`;
-            flags += ` --set analytics.segmentKey=${deploymentConfig.analytics!.substring("segment|".length)}`;
-        } else if (!!deploymentConfig.analytics) {
-            flags += ` --set analytics.writer=${deploymentConfig.analytics!}`;
-        }
-        if (deploymentConfig.withObservability) {
-            flags += ` -f ../.werft/jobs/build/helm/values.tracing.yaml`;
-        }
-        werft.log("helm", "extracting versions");
-        try {
-            exec(`docker run --rm eu.gcr.io/gitpod-core-dev/build/versions:${version} cat /versions.yaml | tee versions.yaml`);
-        } catch (err) {
-            werft.fail('helm', err);
-        }
-        const pathToVersions = `${shell.pwd().toString()}/versions.yaml`;
-        flags += ` -f ${pathToVersions}`;
-
-        if (deploymentConfig.installEELicense) {
-            // We're adding the license rather late just to prevent accidentially printing it.
-            // If anyone got ahold of the license not much would be lost, but hey, no need to plaster it on the walls.
-            flags += ` --set license=${fs.readFileSync('/mnt/secrets/gpsh-coredev/license').toString()}`
-        }
-        if (deploymentConfig.withPayment) {
-            flags += ` -f ../.werft/jobs/build/helm/values.payment.yaml`;
-            exec(`cp /mnt/secrets/payment-provider-config/providerOptions payment-core-dev-options.json`);
-            flags += ` --set payment.chargebee.providerOptionsFile=payment-core-dev-options.json`;
-            exec(`cp /mnt/secrets/payment-webhook-config/license payment-core-dev-webhook.json`);
-            flags += ` --set components.paymentEndpoint.webhookFile="payment-core-dev-webhook.json"`;
-        }
-        return flags;
-    }
-
-    async function cleanStateEnv(shellOpts: ExecOptions) {
-        await wipeAndRecreateNamespace(helmInstallName, namespace, CORE_DEV_KUBECONFIG_PATH, { ...shellOpts, slice: 'prep' });
-        // cleanup non-namespace objects
-        werft.log("predeploy cleanup", "removing old unnamespaced objects - this might take a while");
-        try {
-            await deleteNonNamespaceObjects(namespace, destname, CORE_DEV_KUBECONFIG_PATH, { ...shellOpts, slice: 'predeploy cleanup' });
-            werft.done('predeploy cleanup');
-        } catch (err) {
-            werft.fail('predeploy cleanup', err);
-        }
-    }
-}
-
 /* A hash is caclulated from the branch name and a subset of that string is parsed to a number x,
    x mod the number of different nodepool-sets defined in the files listed in nodeAffinityValues
    is used to generate a pseudo-random number that consistent as long as the branchname persists.