Skip to content

Commit 9c254b4

Browse files
authored
[workspace-starter] Fix re-start issue in case ws-manager emits ALREADY_EXISTS (#19393)
1 parent b704c9c commit 9c254b4

File tree

1 file changed

+12
-0
lines changed

1 file changed

+12
-0
lines changed

components/server/src/workspace/workspace-starter.ts

Lines changed: 12 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -592,6 +592,16 @@ export class WorkspaceStarter {
592592
await new Promise((resolve) => setTimeout(resolve, INSTANCE_START_RETRY_INTERVAL_SECONDS * 1000));
593593
}
594594
} catch (err) {
595+
if (isGrpcError(err) && err.code === grpc.status.ALREADY_EXISTS) {
596+
// This might happen because of timing: When we did the "workspaceAlreadyExists" check above, the DB state was not updated yet.
597+
// But when calling ws-manager to start the workspace, it was already present.
598+
//
599+
// By returning we skip the current cycle and wait for the next run of the workspace-start-controller.
600+
// This gives ws-manager(-bridge) some time to emit(/digest) updates.
601+
log.info(logCtx, "workspace already exists, waiting for ws-manager to push new state", err);
602+
return;
603+
}
604+
595605
let reason: FailedInstanceStartReason = "startOnClusterFailed";
596606
if (isResourceExhaustedError(err)) {
597607
reason = "resourceExhausted";
@@ -731,6 +741,8 @@ export class WorkspaceStarter {
731741
throw err;
732742
} else if (isClusterMaintenanceError(err)) {
733743
throw err;
744+
} else if (isGrpcError(err) && err.code === grpc.status.ALREADY_EXISTS) {
745+
throw err;
734746
} else if ("code" in err && err.code !== grpc.status.OK && lastInstallation !== "") {
735747
log.error({ instanceId: instance.id }, "cannot start workspace on cluster, might retry", err, {
736748
cluster: lastInstallation,

0 commit comments

Comments (0)