@@ -910,7 +910,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	if sysruntime.GOOS == "linux" {
 		// AppArmor is a Linux kernel security module and it does not support other operating systems.
 		klet.appArmorValidator = apparmor.NewValidator()
-		klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
+		klet.admitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
 	}

 	leaseDuration := time.Duration(kubeCfg.NodeLeaseDurationSeconds) * time.Second
@@ -1292,12 +1292,6 @@ type Kubelet struct {
 	// the list of handlers to call during pod admission.
 	admitHandlers lifecycle.PodAdmitHandlers

-	// softAdmithandlers are applied to the pod after it is admitted by the Kubelet, but before it is
-	// run. A pod rejected by a softAdmitHandler will be left in a Pending state indefinitely. If a
-	// rejected pod should not be recreated, or the scheduler is not aware of the rejection rule, the
-	// admission rule should be applied by a softAdmitHandler.
-	softAdmitHandlers lifecycle.PodAdmitHandlers
-
 	// the list of handlers to call during pod sync loop.
 	lifecycle.PodSyncLoopHandlers

@@ -1795,31 +1789,6 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType
 		return isTerminal, nil
 	}

-	// If the pod should not be running, we request the pod's containers be stopped. This is not the same
-	// as termination (we want to stop the pod, but potentially restart it later if soft admission allows
-	// it later). Set the status and phase appropriately
-	runnable := kl.canRunPod(pod)
-	if !runnable.Admit {
-		// Pod is not runnable; and update the Pod and Container statuses to why.
-		if apiPodStatus.Phase != v1.PodFailed && apiPodStatus.Phase != v1.PodSucceeded {
-			apiPodStatus.Phase = v1.PodPending
-		}
-		apiPodStatus.Reason = runnable.Reason
-		apiPodStatus.Message = runnable.Message
-		// Waiting containers are not creating.
-		const waitingReason = "Blocked"
-		for _, cs := range apiPodStatus.InitContainerStatuses {
-			if cs.State.Waiting != nil {
-				cs.State.Waiting.Reason = waitingReason
-			}
-		}
-		for _, cs := range apiPodStatus.ContainerStatuses {
-			if cs.State.Waiting != nil {
-				cs.State.Waiting.Reason = waitingReason
-			}
-		}
-	}
-
 	// Record the time it takes for the pod to become running
 	// since kubelet first saw the pod if firstSeenTime is set.
 	existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID)
@@ -1830,25 +1799,6 @@ func (kl *Kubelet) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType

 	kl.statusManager.SetPodStatus(pod, apiPodStatus)

-	// Pods that are not runnable must be stopped - return a typed error to the pod worker
-	if !runnable.Admit {
-		klog.V(2).InfoS("Pod is not runnable and must have running containers stopped", "pod", klog.KObj(pod), "podUID", pod.UID, "message", runnable.Message)
-		var syncErr error
-		p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus)
-		if err := kl.killPod(ctx, pod, p, nil); err != nil {
-			if !wait.Interrupted(err) {
-				kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err)
-				syncErr = fmt.Errorf("error killing pod: %w", err)
-				utilruntime.HandleError(syncErr)
-			}
-		} else {
-			// There was no error killing the pod, but the pod cannot be run.
-			// Return an error to signal that the sync loop should back off.
-			syncErr = fmt.Errorf("pod cannot be run: %v", runnable.Message)
-		}
-		return false, syncErr
-	}
-
 	// If the network plugin is not ready, only start the pod if it uses the host network
 	if err := kl.runtimeState.networkErrors(); err != nil && !kubecontainer.IsHostNetworkPod(pod) {
 		kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "%s: %v", NetworkNotReadyErrorMsg, err)
@@ -2332,20 +2282,6 @@ func (kl *Kubelet) canAdmitPod(pods []*v1.Pod, pod *v1.Pod) (bool, string, string)
 	return true, "", ""
 }

-func (kl *Kubelet) canRunPod(pod *v1.Pod) lifecycle.PodAdmitResult {
-	attrs := &lifecycle.PodAdmitAttributes{Pod: pod}
-	// Get "OtherPods". Rejected pods are failed, so only include admitted pods that are alive.
-	attrs.OtherPods = kl.GetActivePods()
-
-	for _, handler := range kl.softAdmitHandlers {
-		if result := handler.Admit(attrs); !result.Admit {
-			return result
-		}
-	}
-
-	return lifecycle.PodAdmitResult{Admit: true}
-}
-
 // syncLoop is the main loop for processing changes. It watches for changes from
 // three channels (file, apiserver, and http) and creates a union of them. For
 // any new change seen, will run a sync against desired state and running state. If
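
For readers unfamiliar with the admission plumbing this diff touches: both admitHandlers and the removed softAdmitHandlers are lists of lifecycle.PodAdmitHandler, so moving the AppArmor handler onto admitHandlers means a rejection now fails the pod at admission time instead of leaving it Pending, which is what the removed canRunPod/soft-admission path did. The sketch below is a minimal, self-contained illustration of that handler contract; the local Pod type and the denyPrivileged handler are invented stand-ins for the example and are not part of the kubelet code, and the lifecycle-shaped types only mirror the fields referenced in the diff.

package main

import "fmt"

// Pod is a stand-in for v1.Pod carrying only what the example needs.
type Pod struct {
	Name       string
	Privileged bool
}

// PodAdmitAttributes and PodAdmitResult mirror the shape of the kubelet
// lifecycle types referenced in the diff above.
type PodAdmitAttributes struct {
	Pod       *Pod
	OtherPods []*Pod
}

type PodAdmitResult struct {
	Admit   bool
	Reason  string
	Message string
}

// PodAdmitHandler is the contract each admit handler satisfies.
type PodAdmitHandler interface {
	Admit(attrs *PodAdmitAttributes) PodAdmitResult
}

// PodAdmitHandlers is an ordered handler list; the first rejection wins.
type PodAdmitHandlers []PodAdmitHandler

func (h *PodAdmitHandlers) AddPodAdmitHandler(a PodAdmitHandler) {
	*h = append(*h, a)
}

// denyPrivileged is a toy handler standing in for a validator such as the
// AppArmor admit handler registered in the diff.
type denyPrivileged struct{}

func (denyPrivileged) Admit(attrs *PodAdmitAttributes) PodAdmitResult {
	if attrs.Pod.Privileged {
		return PodAdmitResult{Admit: false, Reason: "Forbidden", Message: "privileged pods are not allowed"}
	}
	return PodAdmitResult{Admit: true}
}

func main() {
	var handlers PodAdmitHandlers
	handlers.AddPodAdmitHandler(denyPrivileged{})

	attrs := &PodAdmitAttributes{Pod: &Pod{Name: "demo", Privileged: true}}
	for _, h := range handlers {
		if result := h.Admit(attrs); !result.Admit {
			// On the hard admission path a rejection fails the pod outright,
			// instead of leaving it Pending as the removed soft path did.
			fmt.Printf("pod %s rejected: %s: %s\n", attrs.Pod.Name, result.Reason, result.Message)
			return
		}
	}
	fmt.Printf("pod %s admitted\n", attrs.Pod.Name)
}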