Skip to content

Commit 7afd66b

Browse files
committed
WIP: Shard affinity for k8s workloads
1 parent 1252cb5 commit 7afd66b

File tree

4 files changed

+236
-36
lines changed

4 files changed

+236
-36
lines changed

nova/scheduler/filters/shard_filter.py

Lines changed: 101 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@
1919
from oslo_log import log as logging
2020

2121
import nova.conf
22+
from nova import context as nova_context
23+
from nova.objects.build_request import BuildRequest
24+
from nova.objects.instance import InstanceList
2225
from nova.scheduler import filters
2326
from nova.scheduler import utils
2427
from nova import utils as nova_utils
@@ -28,6 +31,9 @@
2831
CONF = nova.conf.CONF
2932

3033
_SERVICE_AUTH = None
34+
GARDENER_PREFIX = "kubernetes.io-cluster-shoot--garden--"
35+
KKS_PREFIX = "kubernikus:kluster"
36+
HANA_PREFIX = "hana_"
3137

3238

3339
class ShardFilter(filters.BaseHostFilter):
@@ -114,11 +120,62 @@ def _get_shards(self, project_id):
114120

115121
return self._PROJECT_SHARD_CACHE.get(project_id)
116122

117-
def host_passes(self, host_state, spec_obj):
123+
def _get_k8s_cluster_instances(self, spec_obj):
    """Return instances already belonging to the request's K8S cluster.

    If the instance being scheduled is part of a K8S cluster
    (identified by the tags/metadata its orchestrator sets), fetch
    every instance of the same project that is already part of that
    cluster. Returns an empty list when the request does not look
    like a K8S cluster node.
    """
    query_filters = self._k8s_instance_query_filter(spec_obj)
    if not query_filters:
        return []

    # Restrict the lookup to the requesting project.
    query_filters['project_id'] = spec_obj.project_id

    admin_ctxt = nova_context.get_admin_context()
    return InstanceList.get_by_filters(
        admin_ctxt, filters=query_filters,
        expected_attrs=['flavor', 'metadata', 'tags'])
138+
139+
def _k8s_instance_query_filter(self, spec_obj):
    """Build an InstanceList query filter identifying the K8S cluster.

    Kubernikus marks its nodes with a server tag, Gardener with
    metadata entries; whichever is found on the build request wins.
    Returns None when the instance is not recognizable as a K8S
    cluster node.
    """
    admin_ctxt = nova_context.get_admin_context()
    build_request = BuildRequest.get_by_instance_uuid(
        admin_ctxt, spec_obj.instance_uuid)

    # Kubernikus: the cluster is encoded in a tag on the server.
    for tag in build_request.tags:
        if tag.tag.startswith(KKS_PREFIX):
            return {'tags': [tag.tag]}

    # Gardener: the cluster is encoded in metadata entries.
    metadata_items = build_request.instance.metadata.items()
    gardener_meta = {key: value for key, value in metadata_items
                     if key.startswith(GARDENER_PREFIX)}
    if gardener_meta:
        return {'metadata': gardener_meta}

    return None
158+
159+
def filter_all(self, filter_obj_list, spec_obj):
    """Yield objects that pass the filter.

    Can be overridden in a subclass, if you need to base filtering
    decisions on all objects. Otherwise, one can just override
    _filter_one() to filter a single object.
    """
    # Only VMware
    if utils.is_non_vmware_spec(spec_obj):
        for obj in filter_obj_list:
            yield obj
        return

    # K8S orchestrators only create or delete nodes, so the cluster
    # affinity check is skipped for resize/migrate requests (see
    # _host_passes()). Don't do the cluster-member lookup in that
    # case either: it consults the BuildRequest, which only exists
    # while the instance is initially being created, so the lookup
    # is both useless and failure-prone for moves.
    k8s_instances = []
    if not utils.request_is_resize(spec_obj):
        k8s_instances = self._get_k8s_cluster_instances(spec_obj)

    for obj in filter_obj_list:
        if self._host_passes(obj, spec_obj, k8s_instances):
            yield obj
121177

178+
def _host_passes(self, host_state, spec_obj, k8s_instances):
122179
host_shard_aggrs = [aggr for aggr in host_state.aggregates
123180
if aggr.name.startswith(self._SHARD_PREFIX)]
124181

@@ -148,18 +205,58 @@ def host_passes(self, host_state, spec_obj):
148205
if self._ALL_SHARDS in shards:
149206
LOG.debug('project enabled for all shards %(project_shards)s.',
150207
{'project_shards': shards})
151-
return True
152208
elif host_shard_names & set(shards):
153209
LOG.debug('%(host_state)s shard %(host_shard)s found in project '
154210
'shards %(project_shards)s.',
155211
{'host_state': host_state,
156212
'host_shard': host_shard_names,
157213
'project_shards': shards})
158-
return True
159214
else:
160215
LOG.debug('%(host_state)s shard %(host_shard)s not found in '
161216
'project shards %(project_shards)s.',
162217
{'host_state': host_state,
163218
'host_shard': host_shard_names,
164219
'project_shards': shards})
165220
return False
221+
222+
if not utils.request_is_resize(spec_obj):
223+
# K8S orchestrators are only creating or deleting nodes,
224+
# therefore we shouldn't interfere with resize/migrate requests.
225+
return self._host_passes_k8s(host_state, host_shard_names,
226+
spec_obj, k8s_instances)
227+
228+
return True
229+
230+
def _host_passes_k8s(self, host_state, host_shard_names, spec_obj,
                     k8s_instances):
    """Instances of a K8S cluster must end up on the same shard.

    The K8S cluster is identified by the metadata or tags set
    by the orchestrator (Gardener or Kubernikus).

    :param host_state: HostState of the candidate host
    :param host_shard_names: set of shard aggregate names the
        candidate host belongs to
    :param spec_obj: the RequestSpec being scheduled
    :param k8s_instances: instances already part of the same cluster
    :returns: True if the host shares a shard with the relevant
        existing cluster members, or if there are none to match.
    """
    if not k8s_instances:
        # There are no instances in the cluster, yet.
        # We allow any shard for the first instance.
        return True

    def _is_hana(flavor):
        return flavor.name.startswith(HANA_PREFIX)

    def _is_same_category(instance, flavor):
        """Check whether instance is from the flavor's family."""
        if _is_hana(flavor):
            return _is_hana(instance.flavor)
        return True

    def _instance_matches(instance):
        # Only members in the requested AZ (if one was requested)
        # and of the same flavor family constrain the shard.
        if spec_obj.availability_zone:
            if (instance.availability_zone !=
                    spec_obj.availability_zone):
                return False
        return _is_same_category(instance, spec_obj.flavor)

    k8s_hosts = {i.host for i in k8s_instances if _instance_matches(i)}

    if not k8s_hosts:
        # All existing members are irrelevant for this request (e.g.
        # they live in another AZ). Without this guard every host
        # would be rejected and scheduling would fail; behave like
        # the empty-cluster case instead and allow any shard.
        return True

    return any(agg.name in host_shard_names and
               set(agg.hosts) & k8s_hosts
               for agg in host_state.aggregates)

nova/tests/functional/db/test_console_auth_token.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,11 +28,11 @@ def setUp(self):
2828
instance = objects.Instance(
2929
context=self.context,
3030
project_id=self.context.project_id,
31-
uuid=uuidsentinel.fake_instance)
31+
uuid=uuidsentinel.fake_build_req)
3232
instance.create()
3333
self.console = objects.ConsoleAuthToken(
3434
context=self.context,
35-
instance_uuid=uuidsentinel.fake_instance,
35+
instance_uuid=uuidsentinel.fake_build_req,
3636
console_type='fake-type',
3737
host='fake-host',
3838
port=1000,

0 commit comments

Comments
 (0)