diff --git a/components/common-go/kubernetes/kubernetes.go b/components/common-go/kubernetes/kubernetes.go
index 07f69f99865b98..6060c5a112bdcf 100644
--- a/components/common-go/kubernetes/kubernetes.go
+++ b/components/common-go/kubernetes/kubernetes.go
@@ -171,3 +171,27 @@ func GetCondition(conds []metav1.Condition, tpe string) *metav1.Condition {
 	}
 	return nil
 }
+
+// ConditionPresentAndTrue returns whether a condition is present and its status set to True.
+func ConditionPresentAndTrue(cond []metav1.Condition, tpe string) bool {
+	for _, c := range cond {
+		if c.Type == tpe {
+			return c.Status == metav1.ConditionTrue
+		}
+	}
+	return false
+}
+
+// ConditionWithStatusAndReason returns whether a condition of the given type is present with the given status and reason.
+func ConditionWithStatusAndReason(cond []metav1.Condition, tpe string, status bool, reason string) bool {
+	st := metav1.ConditionFalse
+	if status {
+		st = metav1.ConditionTrue
+	}
+	for _, c := range cond {
+		if c.Type == tpe {
+			return c.Status == st && c.Reason == reason
+		}
+	}
+	return false
+}
diff --git a/components/ws-manager-mk2/controllers/status.go b/components/ws-manager-mk2/controllers/status.go
index 456210b7de3c90..4f1e4b6b918568 100644
--- a/components/ws-manager-mk2/controllers/status.go
+++ b/components/ws-manager-mk2/controllers/status.go
@@ -10,6 +10,7 @@ import (
 	"encoding/json"
 	"fmt"
 
+	wsk8s "github.com/gitpod-io/gitpod/common-go/kubernetes"
 	workspacev1 "github.com/gitpod-io/gitpod/ws-manager/api/crd/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -44,7 +45,7 @@ func updateWorkspaceStatus(ctx context.Context, workspace *workspacev1.Workspace
 		// continue below
 	default:
 		// This is exceptional - not sure what to do here. Probably fail the pod
-		workspace.Status.Conditions = AddUniqueCondition(workspace.Status.Conditions, metav1.Condition{
+		workspace.Status.Conditions = wsk8s.AddUniqueCondition(workspace.Status.Conditions, metav1.Condition{
 			Type:               string(workspacev1.WorkspaceConditionFailed),
 			Status:             metav1.ConditionTrue,
 			LastTransitionTime: metav1.Now(),
@@ -54,7 +55,7 @@ func updateWorkspaceStatus(ctx context.Context, workspace *workspacev1.Workspace
 		return nil
 	}
 
-	workspace.Status.Conditions = AddUniqueCondition(workspace.Status.Conditions, metav1.Condition{
+	workspace.Status.Conditions = wsk8s.AddUniqueCondition(workspace.Status.Conditions, metav1.Condition{
 		Type:               string(workspacev1.WorkspaceConditionDeployed),
 		Status:             metav1.ConditionTrue,
 		LastTransitionTime: metav1.Now(),
@@ -83,9 +84,9 @@ func updateWorkspaceStatus(ctx context.Context, workspace *workspacev1.Workspace
 		workspace.Status.Phase = *phase
 	}
 
-	if failure != "" && !conditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionFailed)) {
+	if failure != "" && !wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionFailed)) {
 		// workspaces can fail only once - once there is a failed condition set, stick with it
-		workspace.Status.Conditions = AddUniqueCondition(workspace.Status.Conditions, metav1.Condition{
+		workspace.Status.Conditions = wsk8s.AddUniqueCondition(workspace.Status.Conditions, metav1.Condition{
 			Type:               string(workspacev1.WorkspaceConditionFailed),
 			Status:             metav1.ConditionTrue,
 			LastTransitionTime: metav1.Now(),
@@ -105,9 +106,9 @@ func updateWorkspaceStatus(ctx context.Context, workspace *workspacev1.Workspace
 		}
 	}
 	if hasFinalizer {
-		if conditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionBackupComplete)) ||
-			conditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionBackupFailure)) ||
-			conditionWithStatusAndReson(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady), false, "InitializationFailure") {
+		if wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionBackupComplete)) ||
+			wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionBackupFailure)) ||
+			wsk8s.ConditionWithStatusAndReason(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady), false, "InitializationFailure") {
 			workspace.Status.Phase = workspacev1.WorkspacePhaseStopped
 		}
 
diff --git a/components/ws-manager-mk2/controllers/workspace_controller.go b/components/ws-manager-mk2/controllers/workspace_controller.go
index 97ded0cf4ce5e1..40a18072ad878d 100644
--- a/components/ws-manager-mk2/controllers/workspace_controller.go
+++ b/components/ws-manager-mk2/controllers/workspace_controller.go
@@ -16,6 +16,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
+	wsk8s "github.com/gitpod-io/gitpod/common-go/kubernetes"
 	config "github.com/gitpod-io/gitpod/ws-manager/api/config"
 	workspacev1 "github.com/gitpod-io/gitpod/ws-manager/api/crd/v1"
 	"github.com/prometheus/client_golang/prometheus"
@@ -176,7 +177,7 @@ func (r *WorkspaceReconciler) actOnStatus(ctx context.Context, workspace *worksp
 
 	switch {
 	// if there is a pod, and it's failed, delete it
-	case conditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionFailed)) && !isPodBeingDeleted(pod):
+	case wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionFailed)) && !isPodBeingDeleted(pod):
 		err := r.Client.Delete(ctx, pod)
 		if errors.IsNotFound(err) {
 			// pod is gone - nothing to do here
@@ -185,7 +186,7 @@ func (r *WorkspaceReconciler) actOnStatus(ctx context.Context, workspace *worksp
 		}
 
 	// if the pod was stopped by request, delete it
-	case conditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionStoppedByRequest)) && !isPodBeingDeleted(pod):
+	case wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionStoppedByRequest)) && !isPodBeingDeleted(pod):
 		err := r.Client.Delete(ctx, pod)
 		if errors.IsNotFound(err) {
 			// pod is gone - nothing to do here
@@ -194,7 +195,7 @@ func (r *WorkspaceReconciler) actOnStatus(ctx context.Context, workspace *worksp
 		}
 
 	// if the content initialization failed, delete the pod
-	case conditionWithStatusAndReson(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady), false, "InitializationFailure") && !isPodBeingDeleted(pod):
+	case wsk8s.ConditionWithStatusAndReason(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady), false, "InitializationFailure") && !isPodBeingDeleted(pod):
 		err := r.Client.Delete(ctx, pod)
 		if errors.IsNotFound(err) {
 			// pod is gone - nothing to do here
@@ -243,28 +244,28 @@ func (r *WorkspaceReconciler) updateMetrics(ctx context.Context, workspace *work
 		phase == workspacev1.WorkspacePhaseCreating ||
 		phase == workspacev1.WorkspacePhaseInitializing:
 
-		if conditionWithStatusAndReson(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady), false, "InitializationFailure") {
+		if wsk8s.ConditionWithStatusAndReason(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady), false, "InitializationFailure") {
 			r.metrics.countTotalRestoreFailures(&log, workspace)
 			r.metrics.countWorkspaceStartFailures(&log, workspace)
 		}
 
-		if conditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionFailed)) {
+		if wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionFailed)) {
 			r.metrics.countWorkspaceStartFailures(&log, workspace)
 		}
 
 	case phase == workspacev1.WorkspacePhaseRunning:
 		r.metrics.recordWorkspaceStartupTime(&log, workspace)
-		if conditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady)) {
+		if wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionContentReady)) {
 			r.metrics.countTotalRestores(&log, workspace)
 		}
 
 	case phase == workspacev1.WorkspacePhaseStopped:
-		if conditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionBackupFailure)) {
+		if wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionBackupFailure)) {
 			r.metrics.countTotalBackups(&log, workspace)
 			r.metrics.countTotalBackupFailures(&log, workspace)
 		}
 
-		if conditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionBackupComplete)) {
+		if wsk8s.ConditionPresentAndTrue(workspace.Status.Conditions, string(workspacev1.WorkspaceConditionBackupComplete)) {
 			r.metrics.countTotalBackups(&log, workspace)
 		}
 
@@ -276,24 +277,6 @@ func (r *WorkspaceReconciler) updateMetrics(ctx context.Context, workspace *work
 	r.metrics.rememberWorkspace(workspace)
 }
 
-func conditionPresentAndTrue(cond []metav1.Condition, tpe string) bool {
-	for _, c := range cond {
-		if c.Type == tpe {
-			return c.Status == metav1.ConditionTrue
-		}
-	}
-	return false
-}
-
-func conditionWithStatusAndReson(cond []metav1.Condition, tpe string, status bool, reason string) bool {
-	for _, c := range cond {
-		if c.Type == tpe {
-			return c.Type == tpe && c.Reason == reason
-		}
-	}
-	return false
-}
-
 var (
 	wsOwnerKey = ".metadata.controller"
 	apiGVStr   = workspacev1.GroupVersion.String()
@@ -326,18 +309,3 @@ func (r *WorkspaceReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		Owns(&corev1.Pod{}).
 		Complete(r)
 }
-
-func AddUniqueCondition(conds []metav1.Condition, cond metav1.Condition) []metav1.Condition {
-	if cond.Reason == "" {
-		cond.Reason = "unknown"
-	}
-
-	for i, c := range conds {
-		if c.Type == cond.Type {
-			conds[i] = cond
-			return conds
-		}
-	}
-
-	return append(conds, cond)
-}
diff --git a/components/ws-manager-mk2/service/manager.go b/components/ws-manager-mk2/service/manager.go
index 3ae573e3e5fa7d..9eaeae0862ce3b 100644
--- a/components/ws-manager-mk2/service/manager.go
+++ b/components/ws-manager-mk2/service/manager.go
@@ -342,10 +342,10 @@ func (wsm *WorkspaceManagerServer) MarkActive(ctx context.Context, req *wsmanapi
 	// We do however maintain the the "closed" flag as annotation on the workspace. This flag should not change
 	// very often and provides a better UX if it persists across ws-manager restarts.
 
-	isMarkedClosed := conditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionClosed))
+	isMarkedClosed := wsk8s.ConditionPresentAndTrue(ws.Status.Conditions, string(workspacev1.WorkspaceConditionClosed))
 	if req.Closed && !isMarkedClosed {
 		err = wsm.modifyWorkspace(ctx, req.Id, true, func(ws *workspacev1.Workspace) error {
-			ws.Status.Conditions = addUniqueCondition(ws.Status.Conditions, metav1.Condition{
+			ws.Status.Conditions = wsk8s.AddUniqueCondition(ws.Status.Conditions, metav1.Condition{
 				Type:               string(workspacev1.WorkspaceConditionClosed),
 				Status:             metav1.ConditionTrue,
 				LastTransitionTime: metav1.NewTime(now),
@@ -355,7 +355,7 @@ func (wsm *WorkspaceManagerServer) MarkActive(ctx context.Context, req *wsmanapi
 		})
 	} else if !req.Closed && isMarkedClosed {
 		err = wsm.modifyWorkspace(ctx, req.Id, true, func(ws *workspacev1.Workspace) error {
-			ws.Status.Conditions = addUniqueCondition(ws.Status.Conditions, metav1.Condition{
+			ws.Status.Conditions = wsk8s.AddUniqueCondition(ws.Status.Conditions, metav1.Condition{
 				Type:               string(workspacev1.WorkspaceConditionClosed),
 				Status:             metav1.ConditionFalse,
 				LastTransitionTime: metav1.NewTime(now),
@@ -375,7 +375,7 @@ func (wsm *WorkspaceManagerServer) MarkActive(ctx context.Context, req *wsmanapi
 	// If it's the first call: Mark the pod with FirstUserActivity condition.
 	if firstUserActivity == nil {
 		err := wsm.modifyWorkspace(ctx, req.Id, true, func(ws *workspacev1.Workspace) error {
-			ws.Status.Conditions = addUniqueCondition(ws.Status.Conditions, metav1.Condition{
+			ws.Status.Conditions = wsk8s.AddUniqueCondition(ws.Status.Conditions, metav1.Condition{
 				Type:               string(workspacev1.WorkspaceConditionFirstUserActivity),
 				Status:             metav1.ConditionTrue,
 				LastTransitionTime: metav1.NewTime(now),
@@ -970,23 +970,3 @@ func (m *workspaceMetrics) Describe(ch chan<- *prometheus.Desc) {
 func (m *workspaceMetrics) Collect(ch chan<- prometheus.Metric) {
 	m.totalStartsCounterVec.Collect(ch)
 }
-
-func addUniqueCondition(conds []metav1.Condition, cond metav1.Condition) []metav1.Condition {
-	for i, c := range conds {
-		if c.Type == cond.Type {
-			conds[i] = cond
-			return conds
-		}
-	}
-
-	return append(conds, cond)
-}
-
-func conditionPresentAndTrue(cond []metav1.Condition, tpe string) bool {
-	for _, c := range cond {
-		if c.Type == tpe {
-			return c.Status == metav1.ConditionTrue
-		}
-	}
-	return false
-}