
Commit 27e3251

refactor: improve pod status & watching (#1788)
1 parent 97bee82 · commit 27e3251


2 files changed: +144 -27 lines changed


desktop/src/views/Pro/Workspace/WorkspaceDetails.tsx

Lines changed: 121 additions & 2 deletions
@@ -31,6 +31,8 @@ import dayjs from "dayjs"
 import { ReactElement, ReactNode, cloneElement, useMemo } from "react"
 import { WorkspaceStatus } from "./WorkspaceStatus"
 import { ManagementV1DevPodWorkspaceInstanceKubernetesStatus } from "@loft-enterprise/client/gen/models/managementV1DevPodWorkspaceInstanceKubernetesStatus"
+import { ManagementV1DevPodWorkspaceInstancePodStatus, ManagementV1DevPodWorkspaceInstancePodStatusPhaseEnum } from "@loft-enterprise/client/gen/models/managementV1DevPodWorkspaceInstancePodStatus"
+import { ManagementV1DevPodWorkspaceInstancePersistentVolumeClaimStatus, ManagementV1DevPodWorkspaceInstancePersistentVolumeClaimStatusPhaseEnum } from "@loft-enterprise/client/gen/models/managementV1DevPodWorkspaceInstancePersistentVolumeClaimStatus"
 import { quantityToScalar } from "@kubernetes/client-node/dist/util"

 type TWorkspaceDetailsProps = Readonly<{
@@ -359,13 +361,13 @@ function KubernetesDetails({ status }: TKubernetesDetailsProps) {
      }
    }

-    if (!mainContainerResources.resources?.requests) {
+    if (!mainContainerResources.resources?.limits) {
      return Object.entries(mainContainerMetrics?.usage ?? {}).map(([type, quantity]) => {
        return getResourceDetails(type, undefined, quantity, undefined)
      })
    }

-    return Object.entries(mainContainerResources.resources?.requests ?? {}).map(
+    return Object.entries(mainContainerResources.resources?.limits ?? {}).map(
      ([type, quantity]) => {
        const used = indexedMetrics[type]
        let usagePercentage = calculateUsagePercentage(
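Note on the hunk above: usage percentages are now computed against the main container's limits rather than its requests. A rough standalone illustration follows (not code from this commit; the quantities and the plain division below are assumptions, the real component goes through calculateUsagePercentage and quantityToScalar).

// Illustration only: how the displayed percentage shifts when usage is divided
// by the container's limit instead of its request. Values are made up.
const usedMemoryBytes = 512 * 1024 ** 2 // metrics usage: 512Mi
const requestBytes = 1024 ** 3          // resources.requests.memory: 1Gi
const limitBytes = 2 * 1024 ** 3        // resources.limits.memory: 2Gi

const percentOfRequest = (usedMemoryBytes / requestBytes) * 100 // 50 (old behavior)
const percentOfLimit = (usedMemoryBytes / limitBytes) * 100     // 25 (new behavior)

console.log(`against request: ${percentOfRequest}%, against limit: ${percentOfLimit}%`)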
@@ -386,6 +388,9 @@ function KubernetesDetails({ status }: TKubernetesDetailsProps) {
         </StackedWorkspaceInfoDetail>
       )}

+      {status.podStatus && <PodStatus podStatus={status.podStatus} />}
+      {status.persistentVolumeClaimStatus && <PvcStatus pvcStatus={status.persistentVolumeClaimStatus} />}
+
       {resources.map((resource) => {
         return (
           <StackedWorkspaceInfoDetail
@@ -415,6 +420,120 @@ function KubernetesDetails({ status }: TKubernetesDetailsProps) {
   )
 }

+function PodStatus({ podStatus }: { podStatus: ManagementV1DevPodWorkspaceInstancePodStatus }) {
+  const phase = podStatus.phase
+  const phaseColor = {
+    [ManagementV1DevPodWorkspaceInstancePodStatusPhaseEnum.Pending]: "yellow.500",
+    [ManagementV1DevPodWorkspaceInstancePodStatusPhaseEnum.Running]: "",
+    [ManagementV1DevPodWorkspaceInstancePodStatusPhaseEnum.Succeeded]: "red.400",
+    [ManagementV1DevPodWorkspaceInstancePodStatusPhaseEnum.Failed]: "red.400",
+    [ManagementV1DevPodWorkspaceInstancePodStatusPhaseEnum.Unknown]: "red.400",
+  }
+
+  let reason = podStatus.reason
+  let message = podStatus.message
+  if (phase !== ManagementV1DevPodWorkspaceInstancePodStatusPhaseEnum.Running) {
+    // check container status first
+    const containerStatus = podStatus.containerStatuses?.find((container) => container.name === "devpod" && (container.state?.waiting?.reason || container.state?.terminated?.reason))
+    if (containerStatus) {
+      if (containerStatus.state?.waiting) {
+        reason = containerStatus.state.waiting.reason
+        message = containerStatus.state.waiting.message
+      } else if (containerStatus.state?.terminated) {
+        reason = containerStatus.state.terminated.reason
+        message = containerStatus.state.terminated.message
+        if (!containerStatus.state.terminated.message && containerStatus.state.terminated.exitCode != 0) {
+          message = "Exit code: " + containerStatus.state.terminated.exitCode
+        }
+      }
+    }
+
+    // check pod conditions
+    if (!reason && !message) {
+      const podCondition = podStatus.conditions?.find((condition) => condition.status === "False" && condition.reason)
+      if (podCondition) {
+        reason = podCondition.reason
+        message = podCondition.message
+      }
+    }
+
+    // try to find warning event
+    if (!reason && !message) {
+      const warningEvent = podStatus.events?.find((event) => event.type === "Warning")
+      if (warningEvent) {
+        reason = warningEvent.reason
+        message = warningEvent.message
+      }
+    }
+
+    // try to find normal event
+    if (!reason && !message) {
+      const normalEvent = podStatus.events?.find((event) => event.type === "Normal")
+      if (normalEvent) {
+        reason = normalEvent.reason
+        message = normalEvent.message
+      }
+    }
+  }
+
+  return (
+    <StackedWorkspaceInfoDetail icon={Dashboard} label={<Text>Pod</Text>}>
+      <Text color={phase ? phaseColor[phase] : "gray.500"}>
+        {phase === ManagementV1DevPodWorkspaceInstancePodStatusPhaseEnum.Running ? podStatus.phase : (
+          (reason && message) ? <Tooltip label={message}>
+            <Text>{podStatus.phase} ({reason})</Text>
+          </Tooltip> : (reason ? <Text>{podStatus.phase} ({reason})</Text> : podStatus.phase)
+        )}
+      </Text>
+    </StackedWorkspaceInfoDetail>
+  )
+}
+
+function PvcStatus({ pvcStatus }: { pvcStatus: ManagementV1DevPodWorkspaceInstancePersistentVolumeClaimStatus }) {
+  const phase = pvcStatus.phase
+  const phaseColor = {
+    [ManagementV1DevPodWorkspaceInstancePersistentVolumeClaimStatusPhaseEnum.Pending]: "yellow.500",
+    [ManagementV1DevPodWorkspaceInstancePersistentVolumeClaimStatusPhaseEnum.Bound]: "",
+    [ManagementV1DevPodWorkspaceInstancePersistentVolumeClaimStatusPhaseEnum.Lost]: "red.400",
+  }
+
+  let reason: string | undefined = ""
+  let message: string | undefined = ""
+  if (phase !== ManagementV1DevPodWorkspaceInstancePersistentVolumeClaimStatusPhaseEnum.Bound) {
+    reason = pvcStatus.conditions?.find((condition) => condition.status === "False")?.reason
+    message = pvcStatus.conditions?.find((condition) => condition.status === "False")?.message
+
+    // try to find warning event
+    if (!reason && !message) {
+      const warningEvent = pvcStatus.events?.find((event) => event.type === "Warning")
+      if (warningEvent) {
+        reason = warningEvent.reason
+        message = warningEvent.message
+      }
+    }
+
+    // try to find normal event
+    if (!reason && !message) {
+      const normalEvent = pvcStatus.events?.find((event) => event.type === "Normal")
+      if (normalEvent) {
+        reason = normalEvent.reason
+        message = normalEvent.message
+      }
+    }
+  }
+
+  return (
+    <StackedWorkspaceInfoDetail icon={Dashboard} label={<Text>Volume</Text>}>
+      <Text color={phase ? phaseColor[phase] : "gray.500"}>
+        {phase === ManagementV1DevPodWorkspaceInstancePersistentVolumeClaimStatusPhaseEnum.Bound ? pvcStatus.phase : ((reason && message) ? <Tooltip label={message}>
+          <Text>{pvcStatus.phase} ({reason})</Text>
+        </Tooltip> : reason ? <Text>{pvcStatus.phase} ({reason})</Text> : pvcStatus.phase)}
+      </Text>
+    </StackedWorkspaceInfoDetail>
+  )
+}
+
 const invalidQuantity = -1

 function quantityToScalarBigInt(quantity: string | number | undefined): bigint | number {
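For readers skimming the new PodStatus component above: a minimal standalone sketch (not part of this commit) of the reason/message fallback order it implements for non-running pods — the devpod container's waiting/terminated state, then a failing pod condition, then a Warning event, then a Normal event. In the component, the resolved reason is appended to the phase text and the message becomes the Tooltip label. The MockPodStatus type and resolveDisplayReason helper below are assumptions for illustration, not the generated client models, and the terminated-state branch is omitted for brevity.

// Simplified sketch of the fallback chain used by PodStatus (waiting state only).
type MockPodStatus = {
  phase?: string
  reason?: string
  message?: string
  containerStatuses?: { name?: string; state?: { waiting?: { reason?: string; message?: string } } }[]
  conditions?: { status?: string; reason?: string; message?: string }[]
  events?: { type?: string; reason?: string; message?: string }[]
}

function resolveDisplayReason(podStatus: MockPodStatus): { reason?: string; message?: string } {
  // start with whatever the pod status itself reports
  let reason = podStatus.reason
  let message = podStatus.message

  // 1. the devpod container's waiting state
  const waiting = podStatus.containerStatuses?.find(
    (c) => c.name === "devpod" && c.state?.waiting?.reason
  )?.state?.waiting
  if (waiting) {
    return { reason: waiting.reason, message: waiting.message }
  }

  // 2. a pod condition that is not satisfied
  if (!reason && !message) {
    const condition = podStatus.conditions?.find((c) => c.status === "False" && c.reason)
    if (condition) {
      return { reason: condition.reason, message: condition.message }
    }
  }

  // 3. a Warning event, then 4. a Normal event
  if (!reason && !message) {
    const event =
      podStatus.events?.find((e) => e.type === "Warning") ??
      podStatus.events?.find((e) => e.type === "Normal")
    if (event) {
      return { reason: event.reason, message: event.message }
    }
  }

  return { reason, message }
}

// Example: an image pull failure on a Pending pod surfaces as "Pending (ErrImagePull)"
// with the kubelet message available in the tooltip.
console.log(
  resolveDisplayReason({
    phase: "Pending",
    containerStatuses: [
      { name: "devpod", state: { waiting: { reason: "ErrImagePull", message: "failed to pull image" } } },
    ],
  })
)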

pkg/daemon/platform/workspace_watcher.go

Lines changed: 23 additions & 25 deletions
@@ -12,7 +12,6 @@ import (
     loftclient "github.com/loft-sh/api/v4/pkg/clientset/versioned"
     typedmanagementv1 "github.com/loft-sh/api/v4/pkg/clientset/versioned/typed/management/v1"
     informers "github.com/loft-sh/api/v4/pkg/informers/externalversions"
-    informermanagementv1 "github.com/loft-sh/api/v4/pkg/informers/externalversions/management/v1"
     "github.com/loft-sh/devpod/pkg/platform"
     "github.com/loft-sh/devpod/pkg/platform/client"
     "github.com/loft-sh/devpod/pkg/platform/project"
@@ -66,13 +65,11 @@ func startWorkspaceWatcher(ctx context.Context, config watchConfig, onChange cha
         return err
     }

-    factory := informers.NewSharedInformerFactoryWithOptions(clientset, time.Second*30,
+    factory := informers.NewSharedInformerFactoryWithOptions(clientset, time.Second*120,
         informers.WithNamespace(project.ProjectNamespace(config.Project)),
     )
     workspaceInformer := factory.Management().V1().DevPodWorkspaceInstances()
-
-    instanceStore := newStore(workspaceInformer, self, config.Context, config.OwnerFilter, config.TsClient, config.Log)
-
+    instanceStore := newStore(self, config.Context, config.OwnerFilter, config.TsClient, config.Log)
     _, err = workspaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
         AddFunc: func(obj interface{}) {
             instance, ok := obj.(*managementv1.DevPodWorkspaceInstance)
@@ -117,20 +114,22 @@ func startWorkspaceWatcher(ctx context.Context, config watchConfig, onChange cha

     stopCh := make(chan struct{})
     defer close(stopCh)
+
     go func() {
         defer func() {
             if err := recover(); err != nil {
                 config.Log.Errorf("panic in workspace watcher: %v\n%s", err, debug.Stack())
             }
         }()

-        config.Log.Debug("starting workspace watcher")
+        config.Log.Info("starting workspace watcher")
         factory.Start(stopCh)
         factory.WaitForCacheSync(stopCh)

         // Kick off initial message
         onChange(instanceStore.List())

+        // periodically collect workspace metrics
         instanceStore.collectWorkspaceMetrics(ctx, onChange)
     }()
     select {
@@ -143,7 +142,6 @@ func startWorkspaceWatcher(ctx context.Context, config watchConfig, onChange cha
 }

 type instanceStore struct {
-    informer    informermanagementv1.DevPodWorkspaceInstanceInformer
     self        *managementv1.Self
     context     string
     ownerFilter platform.OwnerFilter
@@ -179,9 +177,8 @@ type WorkspaceNetworkMetrics struct {
     Timestamp int64 `json:"timestamp,omitempty"`
 }

-func newStore(informer informermanagementv1.DevPodWorkspaceInstanceInformer, self *managementv1.Self, context string, ownerFilter platform.OwnerFilter, tsClient *tailscale.LocalClient, log log.Logger) *instanceStore {
+func newStore(self *managementv1.Self, context string, ownerFilter platform.OwnerFilter, tsClient *tailscale.LocalClient, log log.Logger) *instanceStore {
     return &instanceStore{
-        informer:    informer,
         self:        self,
         context:     context,
         instances:   map[string]*ProWorkspaceInstance{},
@@ -376,6 +373,22 @@ func (s *instanceStore) updateWorkspaceLatencies(ctx context.Context) {
     wg.Wait()
 }

+func getClientSet(config *rest.Config) (loftclient.Interface, error) {
+    clientset, err := loftclient.NewForConfig(config)
+    if err != nil {
+        return nil, err
+    }
+    mv1 := clientset.ManagementV1()
+    c := typedmanagementv1.New(&extendedRESTClient{Interface: mv1.RESTClient()})
+
+    return &extendedClientset{
+        Clientset:        clientset,
+        ManagementClient: c,
+    }, nil
+}
+
+var _ rest.Interface = (*extendedRESTClient)(nil)
+
 type extendedClientset struct {
     *loftclient.Clientset
     ManagementClient typedmanagementv1.ManagementV1Interface
@@ -385,8 +398,6 @@ func (c *extendedClientset) ManagementV1() typedmanagementv1.ManagementV1Interfa
     return c.ManagementClient
 }

-var _ rest.Interface = (*extendedRESTClient)(nil)
-
 type extendedRESTClient struct {
     rest.Interface
 }
@@ -395,20 +406,7 @@ func (e *extendedRESTClient) Get() *rest.Request {
     req := e.Interface.Get()
     // We need to pass this to the backend for more information on the management CRD status
     req.Param("extended", "true")
+    req.Param("resync", "10") // resync every 10 seconds in the watch request

     return req
 }
-
-func getClientSet(config *rest.Config) (loftclient.Interface, error) {
-    clientset, err := loftclient.NewForConfig(config)
-    if err != nil {
-        return nil, err
-    }
-    mv1 := clientset.ManagementV1()
-    c := typedmanagementv1.New(&extendedRESTClient{Interface: mv1.RESTClient()})
-
-    return &extendedClientset{
-        Clientset:        clientset,
-        ManagementClient: c,
-    }, nil
-}
