@@ -138,7 +138,7 @@ const (
 	housekeepingPeriod = time.Second * 2

 	// Period for performing eviction monitoring.
-	// TODO ensure this is in sync with internal cadvisor housekeeping.
+	// ensure this is kept in sync with internal cadvisor housekeeping.
 	evictionMonitoringPeriod = time.Second * 10

 	// The path in containers' filesystems where the hosts file is mounted.
@@ -440,9 +440,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	}
 	nodeLister := corelisters.NewNodeLister(nodeIndexer)

-	// TODO: get the real node object of ourself,
-	// and use the real node name and UID.
-	// TODO: what is namespace for node?
+	// construct a node reference used for events
 	nodeRef := &v1.ObjectReference{
 		Kind:      "Node",
 		Name:      string(nodeName),
@@ -948,8 +946,6 @@ type Kubelet struct {
 	streamingRuntime kubecontainer.StreamingRuntime

 	// Container runtime service (needed by container runtime Start()).
-	// TODO(CD): try to make this available without holding a reference in this
-	// struct. For example, by adding a getter to generic runtime.
 	runtimeService internalapi.RuntimeService

 	// reasonCache caches the failure reason of the last creation of all containers, which is
@@ -1069,7 +1065,6 @@ type Kubelet struct {
 	// maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
 	lastNodeUnschedulable bool

-	// TODO: think about moving this to be centralized in PodWorkers in follow-on.
 	// the list of handlers to call during pod admission.
 	admitHandlers lifecycle.PodAdmitHandlers

@@ -1275,7 +1270,6 @@ func (kl *Kubelet) initializeModules() error {
 func (kl *Kubelet) initializeRuntimeDependentModules() {
 	if err := kl.cadvisor.Start(); err != nil {
 		// Fail kubelet and rely on the babysitter to retry starting kubelet.
-		// TODO(random-liu): Add backoff logic in the babysitter
 		klog.Fatalf("Failed to start cAdvisor %v", err)
 	}

@@ -1680,7 +1674,6 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
 	podPair := kubecontainer.PodPair{APIPod: pod, RunningPod: &runningPod}

 	kl.podKiller.KillPod(&podPair)
-	// TODO: delete the mirror pod here?

 	// We leave the volume/directory cleanup to the periodic cleanup routine.
 	return nil
@@ -2003,8 +1996,6 @@ func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) {
 			kl.handleMirrorPod(pod, start)
 			continue
 		}
-		// TODO: Evaluate if we need to validate and reject updates.
-
 		mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
 		kl.dispatchWork(pod, kubetypes.SyncPodUpdate, mirrorPod, start)
 	}
@@ -2093,7 +2084,5 @@ func (kl *Kubelet) updateRuntimeUp() {
 		return
 	}
 	// Periodically log the whole runtime status for debugging.
-	// TODO(random-liu): Consider to send node event when optional
-	// condition is unmet.
 	klog.V(4).Infof("Container runtime status: %v", s)
 	networkReady := s.GetRuntimeCondition(kubecontainer.NetworkReady)
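For context on the NewMainKubelet hunk above: the nodeRef it builds is the object the kubelet attaches node-level events to. Below is a minimal sketch, not part of this commit, of how such a reference is typically fed to client-go's event recorder; the package name, helper function, and the "NodeReady" event are illustrative assumptions.

```go
package nodeevents

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// recordNodeEvent emits an event attached to the Node object, which is what a
// node reference like the kubelet's nodeRef enables. The events argument is a
// typed Events client, e.g. clientset.CoreV1().Events("").
func recordNodeEvent(events typedcorev1.EventInterface, nodeName string) {
	// Same shape as the reference built in NewMainKubelet.
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}

	broadcaster := record.NewBroadcaster()
	defer broadcaster.Shutdown()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: events})
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "kubelet", Host: nodeName})

	// Events recorded against nodeRef show up on the Node, e.g. in `kubectl describe node`.
	recorder.Eventf(nodeRef, v1.EventTypeNormal, "NodeReady", "Node %s is ready", nodeName)
}
```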