@@ -186,7 +186,7 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes(
 			// The pod can't be scheduled on the newly created node because of scheduling predicates.
 
 			// Check if node failed because of topology constraints.
-			if hasTopologyConstraintError(err) {
+			if isPodUsingHostNameTopologyKey(pod) && hasTopologyConstraintError(err) {
 				// If the pod can't be scheduled on the last node because of topology constraints, we can stop binpacking.
 				// The pod can't be scheduled on any new node either, because it has the same topology constraints.
 				nodeName, err := e.clusterSnapshot.SchedulePodOnAnyNodeMatching(pod, func(nodeInfo *framework.NodeInfo) bool {
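To make the tightened condition concrete, here is a minimal, self-contained sketch of the kind of pod the new guard targets (the package, skew value, and selector are assumptions for illustration, not taken from this patch): with topologyKey kubernetes.io/hostname, every node is its own spread domain, so a constraint the pod already violates on the newest empty node would be violated on any further identical node as well, and binpacking can stop early.

```go
package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A pod spread one-per-node: with the hostname key, each node is a
	// separate topology domain, so adding more identical nodes cannot
	// satisfy an already-violated constraint.
	pod := &apiv1.Pod{
		Spec: apiv1.PodSpec{
			TopologySpreadConstraints: []apiv1.TopologySpreadConstraint{{
				MaxSkew:           1, // assumed value for illustration
				TopologyKey:       apiv1.LabelHostname, // "kubernetes.io/hostname"
				WhenUnsatisfiable: apiv1.DoNotSchedule,
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": "web"}, // assumed selector
				},
			}},
		},
	}
	fmt.Println(pod.Spec.TopologySpreadConstraints[0].TopologyKey)
}
```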
@@ -273,6 +273,22 @@ func hasTopologyConstraintError(err clustersnapshot.SchedulingError) bool {
 	return slices.Contains(err.FailingPredicateReasons(), podtopologyspread.ErrReasonConstraintsNotMatch)
 }
 
+// isPodUsingHostNameTopologyKey returns true if the pod has any topology spread
+// constraint that uses the kubernetes.io/hostname topology key.
+func isPodUsingHostNameTopologyKey(pod *apiv1.Pod) bool {
+	if pod == nil || pod.Spec.TopologySpreadConstraints == nil {
+		return false
+	}
+
+	for _, constraint := range pod.Spec.TopologySpreadConstraints {
+		if constraint.TopologyKey == apiv1.LabelHostname {
+			return true
+		}
+	}
+
+	return false
+}
+
 func observeBinpackingHeterogeneity(podsEquivalenceGroups []PodEquivalenceGroup, nodeTemplate *framework.NodeInfo) {
 	node := nodeTemplate.Node()
 	var instanceType, cpuCount string
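A hedged usage sketch of the new helper follows; the test below is illustrative scaffolding, not part of this patch, and it assumes the helper lives in the estimator package alongside this code.

```go
package estimator

import (
	"testing"

	apiv1 "k8s.io/api/core/v1"
)

// Sketch of how the helper behaves on the interesting inputs.
func TestIsPodUsingHostNameTopologyKey(t *testing.T) {
	// A nil pod or a pod without constraints should report false.
	if isPodUsingHostNameTopologyKey(nil) || isPodUsingHostNameTopologyKey(&apiv1.Pod{}) {
		t.Error("expected false for nil pod or pod without topology spread constraints")
	}
	// A constraint keyed on something other than hostname should report false.
	zonal := &apiv1.Pod{Spec: apiv1.PodSpec{
		TopologySpreadConstraints: []apiv1.TopologySpreadConstraint{{TopologyKey: apiv1.LabelTopologyZone}},
	}}
	if isPodUsingHostNameTopologyKey(zonal) {
		t.Error("expected false for a zone-keyed constraint")
	}
	// Any constraint keyed on kubernetes.io/hostname should report true.
	perNode := &apiv1.Pod{Spec: apiv1.PodSpec{
		TopologySpreadConstraints: []apiv1.TopologySpreadConstraint{{TopologyKey: apiv1.LabelHostname}},
	}}
	if !isPodUsingHostNameTopologyKey(perNode) {
		t.Error("expected true for a hostname-keyed constraint")
	}
}
```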