Merged
6 changes: 4 additions & 2 deletions components/ws-manager-mk2/controllers/status.go
@@ -440,10 +440,12 @@ func (r *WorkspaceReconciler) extractFailure(ctx context.Context, ws *workspacev
 			if !ws.IsHeadless() {
 				return fmt.Sprintf("container %s completed; containers of a workspace pod are not supposed to do that", cs.Name), nil
 			}
-		} else if !isPodBeingDeleted(pod) && terminationState.ExitCode != containerUnknownExitCode {
+		} else if !isPodBeingDeleted(pod) && terminationState.ExitCode == containerUnknownExitCode {
Contributor (author):
In very old versions of Kubernetes, if there was a network issue with the node, the kubelet could not report pod status for a while, and the API server would mark the container status of the pod as "unknown". Our previous approach was therefore to treat this situation as a temporary problem and expect it to recover automatically.

But in recent versions, the API server does not change the status of the pod; instead, it modifies the status of the node to mark it as NotReady. Therefore, this "unknown" status is actually returned by containerd.
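For illustration (not part of the diff): a minimal, self-contained sketch of the kind of check this change introduces. The helper name, the standalone package, and the constant value below are assumptions for the sketch; the actual controller relies on the package's own `containerUnknownExitCode` and the `extractFailure` logic shown above.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// Assumed sentinel for this sketch; ws-manager-mk2 defines its own
// containerUnknownExitCode constant.
const containerUnknownExitCode = 255

// unknownTerminationFailure returns a failure message if any container in the
// pod terminated with the "unknown" exit code reported by containerd.
func unknownTerminationFailure(pod *corev1.Pod) (string, bool) {
	for _, cs := range pod.Status.ContainerStatuses {
		term := cs.State.Terminated
		if term == nil {
			term = cs.LastTerminationState.Terminated
		}
		if term == nil {
			continue
		}
		if term.ExitCode == containerUnknownExitCode {
			return fmt.Sprintf("workspace container %s terminated for an unknown reason: (%s) %s",
				cs.Name, term.Reason, term.Message), true
		}
	}
	return "", false
}

func main() {
	// Simulate what the new test does: a container whose last termination
	// state carries the unknown exit code.
	pod := &corev1.Pod{
		Status: corev1.PodStatus{
			ContainerStatuses: []corev1.ContainerStatus{{
				Name: "workspace",
				LastTerminationState: corev1.ContainerState{
					Terminated: &corev1.ContainerStateTerminated{ExitCode: containerUnknownExitCode},
				},
			}},
		},
	}
	if msg, failed := unknownTerminationFailure(pod); failed {
		fmt.Println(msg)
	}
}
```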

Contributor:

> In very old versions of Kubernetes

How old? Are you able to tell where the cut-off is?

Contributor:

Adding approval to unblock.

Please check that the K3s version used for gitpod.io is not impacted before removing the hold.

return fmt.Sprintf("workspace container %s terminated for an unknown reason: (%s) %s", cs.Name, terminationState.Reason, terminationState.Message), nil
} else if !isPodBeingDeleted(pod) {
// if a container is terminated and it wasn't because of either:
// - regular shutdown
// - the exit code "UNKNOWN" (which might be caused by an intermittent issue and is handled in extractStatusFromPod)
// - the exit code "UNKNOWN" (which might be caused by an intermittent issue
// - another known error
// then we report it as UNKNOWN
phase := workspacev1.WorkspacePhaseUnknown
@@ -218,6 +218,40 @@ var _ = Describe("WorkspaceController", func() {
 			})
 		})
 
+		It("should handle workspace failure with unknown exit code", func() {
+			ws := newWorkspace(uuid.NewString(), "default")
+			m := collectMetricCounts(wsMetrics, ws)
+			pod := createWorkspaceExpectPod(ws)
+
+			markReady(ws)
+
+			// Update Pod with failed exit status.
+			updateObjWithRetries(k8sClient, pod, true, func(pod *corev1.Pod) {
+				pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
+					LastTerminationState: corev1.ContainerState{
+						Terminated: &corev1.ContainerStateTerminated{
+							ExitCode: containerUnknownExitCode,
+						},
+					},
+				})
+			})
+
+			// Controller should detect container exit and add Failed condition.
+			expectConditionEventually(ws, string(workspacev1.WorkspaceConditionFailed), metav1.ConditionTrue, "")
+
+			expectFinalizerAndMarkBackupCompleted(ws, pod)
+
+			expectWorkspaceCleanup(ws, pod)
+
+			expectMetricsDelta(m, collectMetricCounts(wsMetrics, ws), metricCounts{
+				restores:      1,
+				startFailures: 0,
+				failures:      1,
+				stops:         map[StopReason]int{StopReasonFailed: 1},
+				backups:       1,
+			})
+		})
+
 		It("should clean up timed out workspaces", func() {
 			ws := newWorkspace(uuid.NewString(), "default")
 			m := collectMetricCounts(wsMetrics, ws)