
Commit 140b3b8

JohnGarbutt authored and melwitt committed
Enforce resource limits using oslo.limit
We now enforce limits on resources requested in the flavor. This includes instances, ram and cores. It also works for any resource class being requested via the chosen flavor, such as custom resource classes relating to Ironic resources.

Note that because disk resources can be limited, we need to know whether the instance is boot from volume or not. This has meant adding extra code to make sure we know that when enforcing the limits.

Follow-on patches will update the APIs to accurately report the limits being applied to instances, ram and cores.

blueprint unified-limits-nova

Change-Id: If1df93400dcbcb1d3aac0ade80ae5ecf6ce38d11
1 parent d984a6d commit 140b3b8
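
For context, oslo.limit enforcement follows a simple pattern: translate the request into per-resource deltas, then ask an Enforcer (backed by Keystone's registered and project limits) to check them, raising ProjectOverLimit on failure. The sketch below only illustrates that pattern under stated assumptions; the helper names and the exact delta layout are illustrative, not the code added in nova/limit/placement.py.

# Illustrative sketch of the oslo.limit enforcement pattern (assumed
# shapes; _flavor_deltas and enforce_flavor are not real nova functions).
from oslo_limit import exception as limit_exceptions
from oslo_limit import limit


def _flavor_deltas(flavor, is_bfv, count):
    """Translate a flavor into the resource deltas new servers need."""
    deltas = {
        'servers': count,
        'class:VCPU': flavor.vcpus * count,
        'class:MEMORY_MB': flavor.memory_mb * count,
    }
    # A boot-from-volume server consumes no local DISK_GB, which is why
    # the changes below need to work out whether the instance is BFV.
    if not is_bfv:
        deltas['class:DISK_GB'] = flavor.root_gb * count
    return deltas


def enforce_flavor(project_id, flavor, is_bfv, count, usage_callback):
    """Raise ProjectOverLimit if the project cannot fit the new servers."""
    enforcer = limit.Enforcer(usage_callback)
    try:
        enforcer.enforce(project_id, _flavor_deltas(flavor, is_bfv, count))
    except limit_exceptions.ProjectOverLimit:
        # Nova surfaces this to the API caller as HTTP 403.
        raise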

File tree: 7 files changed (+423 -27 lines)


nova/compute/api.py

Lines changed: 50 additions & 13 deletions
@@ -60,6 +60,7 @@
 from nova.i18n import _
 from nova.image import glance
 from nova.limit import local as local_limit
+from nova.limit import placement as placement_limits
 from nova.network import constants
 from nova.network import model as network_model
 from nova.network import neutron
@@ -1346,6 +1347,25 @@ def _provision_instances(
         # Check quotas
         num_instances = compute_utils.check_num_instances_quota(
             context, flavor, min_count, max_count)
+
+        # Find out whether or not we are a BFV instance
+        if block_device_mapping:
+            root = block_device_mapping.root_bdm()
+            is_bfv = bool(root and root.is_volume)
+        else:
+            # If we have no BDMs, we're clearly not BFV
+            is_bfv = False
+
+        # NOTE(johngarbutt) when unified limits not used, this just
+        # returns num_instances back again
+        # NOTE: If we want to enforce quota on port or cyborg resources in the
+        # future, this enforce call will need to move after we have populated
+        # the RequestSpec with all of the requested resources and use the real
+        # RequestSpec to get the overall resource usage of the instance.
+        num_instances = placement_limits.enforce_num_instances_and_flavor(
+            context, context.project_id, flavor,
+            is_bfv, min_count, num_instances)
+
         security_groups = security_group_api.populate_security_groups(
             security_groups)
         port_resource_requests = base_options.pop('port_resource_requests')
@@ -1388,14 +1408,7 @@ def _provision_instances(
                 security_groups=security_groups,
                 port_resource_requests=port_resource_requests,
                 request_level_params=req_lvl_params)
-
-            if block_device_mapping:
-                # Record whether or not we are a BFV instance
-                root = block_device_mapping.root_bdm()
-                req_spec.is_bfv = bool(root and root.is_volume)
-            else:
-                # If we have no BDMs, we're clearly not BFV
-                req_spec.is_bfv = False
+            req_spec.is_bfv = is_bfv

             # NOTE(danms): We need to record num_instances on the request
             # spec as this is how the conductor knows how many were in this
@@ -2660,6 +2673,9 @@ def restore(self, context, instance):
         project_id, user_id = quotas_obj.ids_from_instance(context, instance)
         compute_utils.check_num_instances_quota(context, flavor, 1, 1,
                 project_id=project_id, user_id=user_id)
+        is_bfv = compute_utils.is_volume_backed_instance(context, instance)
+        placement_limits.enforce_num_instances_and_flavor(context, project_id,
+                                                          flavor, is_bfv, 1, 1)

         self._record_action_start(context, instance, instance_actions.RESTORE)

@@ -3777,9 +3793,22 @@ def _validate_numa_rebuild(instance, image, flavor):
         # TODO(sean-k-mooney): add PCI NUMA affinity policy check.

     @staticmethod
-    def _check_quota_for_upsize(context, instance, current_flavor, new_flavor):
+    def _check_quota_for_upsize(context, instance, current_flavor,
+                                new_flavor, is_bfv, is_revert):
         project_id, user_id = quotas_obj.ids_from_instance(context,
                                                            instance)
+        # NOTE(johngarbutt) for resize, check for sum of existing usage
+        # plus the usage from new flavor, as it will be claimed in
+        # placement that way, even if there is no change in flavor
+        # But for revert resize, we are just removing claims in placement
+        # so we can ignore the quota check
+        if not is_revert:
+            placement_limits.enforce_num_instances_and_flavor(context,
+                                                              project_id,
+                                                              new_flavor,
+                                                              is_bfv, 1, 1)
+
+        # Old quota system only looks at the change in size.
         # Deltas will be empty if the resize is not an upsize.
         deltas = compute_utils.upsize_quota_delta(new_flavor,
                                                   current_flavor)
@@ -3821,8 +3850,11 @@ def revert_resize(self, context, instance):
             elevated, instance.uuid, 'finished')

         # If this is a resize down, a revert might go over quota.
+        reqspec = objects.RequestSpec.get_by_instance_uuid(
+            context, instance.uuid)
         self._check_quota_for_upsize(context, instance, instance.flavor,
-                                     instance.old_flavor)
+                                     instance.old_flavor, reqspec.is_bfv,
+                                     is_revert=True)

         # The AZ for the server may have changed when it was migrated so while
         # we are in the API and have access to the API DB, update the
@@ -3846,8 +3878,6 @@ def revert_resize(self, context, instance):
         # the scheduler will be using the wrong values. There's no need to do
         # this if the flavor hasn't changed though and we're migrating rather
         # than resizing.
-        reqspec = objects.RequestSpec.get_by_instance_uuid(
-            context, instance.uuid)
         if reqspec.flavor['id'] != instance.old_flavor['id']:
             reqspec.flavor = instance.old_flavor
             reqspec.numa_topology = hardware.numa_get_constraints(
@@ -4149,9 +4179,16 @@ def resize(self, context, instance, flavor_id=None, clean_shutdown=True,

         # ensure there is sufficient headroom for upsizes
         if flavor_id:
+            # Figure out if the instance is volume-backed but only if we didn't
+            # already figure that out above (avoid the extra db hit).
+            if volume_backed is None:
+                # TODO(johngarbutt) should we just use the request spec?
+                volume_backed = compute_utils.is_volume_backed_instance(
+                    context, instance)
             self._check_quota_for_upsize(context, instance,
                                          current_flavor,
-                                         new_flavor)
+                                         new_flavor, volume_backed,
+                                         is_revert=False)

         if not same_flavor:
             image = utils.get_image_from_system_metadata(
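
A note on where usage comes from: oslo.limit only compares deltas against limits; the project's current consumption has to be supplied by the service through a usage callback. The snippet below is a rough, assumed illustration of the shape such a callback takes; the two counting helpers are placeholders, not nova's real counting code (in nova, instance counts come from the API database and class:* usage comes from Placement).

# Rough illustration of an oslo.limit usage callback; the two counting
# helpers are placeholders, not nova functions.

def count_servers(project_id):
    # Placeholder: nova counts instances via its API database / cells.
    return 0


def count_resource_class(project_id, resource_class):
    # Placeholder: nova reads per-project usage from the Placement API.
    return 0


def usage_callback(project_id, resource_names):
    """Return the project's current usage for each requested resource."""
    usage = {}
    for name in resource_names:
        if name == 'servers':
            usage[name] = count_servers(project_id)
        elif name.startswith('class:'):
            usage[name] = count_resource_class(project_id,
                                               name[len('class:'):])
        else:
            usage[name] = 0
    return usage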

nova/compute/utils.py

Lines changed: 2 additions & 0 deletions
@@ -1110,6 +1110,8 @@ def check_num_instances_quota(
     deltas = {'instances': max_count, 'cores': req_cores, 'ram': req_ram}

     try:
+        # NOTE(johngarbutt) when using unified limits, this call is a
+        # no op, and as such, this function always returns max_count
         objects.Quotas.check_deltas(context, deltas,
                                     project_id, user_id=user_id,
                                     check_project_id=project_id,

nova/conductor/manager.py

Lines changed: 7 additions & 1 deletion
@@ -23,6 +23,7 @@

 from oslo_config import cfg
 from oslo_db import exception as db_exc
+from oslo_limit import exception as limit_exceptions
 from oslo_log import log as logging
 import oslo_messaging as messaging
 from oslo_serialization import jsonutils
@@ -45,6 +46,7 @@
 from nova import exception
 from nova.i18n import _
 from nova.image import glance
+from nova.limit import placement as placement_limits
 from nova import manager
 from nova.network import neutron
 from nova import notifications
@@ -1632,7 +1634,11 @@ def schedule_and_build_instances(self, context, build_requests,
                 compute_utils.check_num_instances_quota(
                     context, instance.flavor, 0, 0,
                     orig_num_req=len(build_requests))
-            except exception.TooManyInstances as exc:
+                placement_limits.enforce_num_instances_and_flavor(
+                    context, context.project_id, instance.flavor,
+                    request_specs[0].is_bfv, 0, 0)
+            except (exception.TooManyInstances,
+                    limit_exceptions.ProjectOverLimit) as exc:
                 with excutils.save_and_reraise_exception():
                     self._cleanup_build_artifacts(context, exc, instances,
                                                   build_requests,
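
The conductor hunk above is mostly about exception handling: the legacy DB-quota recheck and the new unified-limits recheck fail with different exception types, and both must trigger the same build-artifact cleanup. A minimal sketch of that pattern, with stand-in callables for the checks and the cleanup (the exception classes are the ones used in the diff):

# Minimal sketch of catching either quota system's failure; the check
# and cleanup callables are stand-ins, not nova code.
from oslo_limit import exception as limit_exceptions

from nova import exception


def recheck_quota(legacy_check, unified_check, cleanup):
    try:
        legacy_check()   # may raise exception.TooManyInstances
        unified_check()  # may raise limit_exceptions.ProjectOverLimit
    except (exception.TooManyInstances,
            limit_exceptions.ProjectOverLimit):
        # Either failure means the build cannot go ahead, so destroy the
        # build artifacts before re-raising for the API to report.
        cleanup()
        raise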
Lines changed: 217 additions & 0 deletions
@@ -0,0 +1,217 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_limit import fixture as limit_fixture
+from oslo_serialization import base64
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova import context as nova_context
+from nova.limit import local as local_limit
+from nova.objects import flavor as flavor_obj
+from nova.objects import instance_group as group_obj
+from nova.tests.functional.api import client
+from nova.tests.functional import integrated_helpers
+
+
+class UnifiedLimitsTest(integrated_helpers._IntegratedTestBase):
+
+    def setUp(self):
+        super(UnifiedLimitsTest, self).setUp()
+        # Use different project_ids for non-admin and admin.
+        self.api.project_id = 'fake'
+        self.admin_api.project_id = 'admin'
+
+        self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+        reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+                     local_limit.INJECTED_FILES: 5,
+                     local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+                     local_limit.INJECTED_FILES_PATH: 255,
+                     local_limit.KEY_PAIRS: 100,
+                     local_limit.SERVER_GROUPS: 10,
+                     local_limit.SERVER_GROUP_MEMBERS: 1,
+                     'servers': 4,
+                     'class:VCPU': 8,
+                     'class:MEMORY_MB': 32768,
+                     'class:DISK_GB': 250}
+        projlimits = {self.api.project_id: {'servers': 2,
+                                            'class:VCPU': 4,
+                                            'class:MEMORY_MB': 16384,
+                                            'class:DISK_GB': 100}}
+        self.useFixture(limit_fixture.LimitFixture(reglimits, projlimits))
+        self.ctx = nova_context.get_admin_context()
+
+    def _setup_services(self):
+        # Use driver with lots of resources so we don't get NoValidHost while
+        # testing quotas. Need to do this before services are started.
+        self.flags(compute_driver='fake.FakeDriver')
+        super(UnifiedLimitsTest, self)._setup_services()
+
+    def test_servers(self):
+        # First test the project limit using the non-admin project.
+        for i in range(2):
+            self._create_server(api=self.api)
+
+        # Attempt to create a third server should fail.
+        e = self.assertRaises(
+            client.OpenStackApiException, self._create_server, api=self.api)
+        self.assertEqual(403, e.response.status_code)
+        self.assertIn('servers', e.response.text)
+
+        # Then test the default limit using the admin project.
+        for i in range(4):
+            self._create_server(api=self.admin_api)
+
+        # Attempt to create a fifth server should fail.
+        e = self.assertRaises(
+            client.OpenStackApiException, self._create_server,
+            api=self.admin_api)
+        self.assertEqual(403, e.response.status_code)
+        self.assertIn('servers', e.response.text)
+
+    def test_vcpu(self):
+        # First test the project limit using the non-admin project.
+        # m1.large has vcpus=4 and our project limit is 4, should succeed.
+        flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.large')
+        self._create_server(api=self.api, flavor_id=flavor.flavorid)
+
+        # m1.small has vcpus=1, should fail because we are at quota.
+        flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.small')
+        e = self.assertRaises(
+            client.OpenStackApiException, self._create_server, api=self.api,
+            flavor_id=flavor.flavorid)
+        self.assertEqual(403, e.response.status_code)
+        self.assertIn('class:VCPU', e.response.text)
+
+        # Then test the default limit of 8 using the admin project.
+        flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.large')
+        for i in range(2):
+            self._create_server(api=self.admin_api, flavor_id=flavor.flavorid)
+
+        # Attempt to create another server with vcpus=1 should fail because we
+        # are at quota.
+        flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.small')
+        e = self.assertRaises(
+            client.OpenStackApiException, self._create_server,
+            api=self.admin_api, flavor_id=flavor.flavorid)
+        self.assertEqual(403, e.response.status_code)
+        self.assertIn('class:VCPU', e.response.text)
+
+    def test_memory_mb(self):
+        # First test the project limit using the non-admin project.
+        flavor = flavor_obj.Flavor(
+            context=self.ctx, memory_mb=16384, vcpus=1, root_gb=1,
+            flavorid='9', name='m1.custom')
+        flavor.create()
+        self._create_server(api=self.api, flavor_id=flavor.flavorid)
+
+        # Attempt to create another should fail as we are at quota.
+        e = self.assertRaises(
+            client.OpenStackApiException, self._create_server, api=self.api,
+            flavor_id=flavor.flavorid)
+        self.assertEqual(403, e.response.status_code)
+        self.assertIn('class:MEMORY_MB', e.response.text)
+
+        # Then test the default limit of 32768 using the admin project.
+        for i in range(2):
+            self._create_server(api=self.admin_api, flavor_id=flavor.flavorid)
+
+        # Attempt to create another server should fail because we are at quota.
+        e = self.assertRaises(
+            client.OpenStackApiException, self._create_server,
+            api=self.admin_api, flavor_id=flavor.flavorid)
+        self.assertEqual(403, e.response.status_code)
+        self.assertIn('class:MEMORY_MB', e.response.text)
+
+    def test_disk_gb(self):
+        # First test the project limit using the non-admin project.
+        flavor = flavor_obj.Flavor(
+            context=self.ctx, memory_mb=1, vcpus=1, root_gb=100,
+            flavorid='9', name='m1.custom')
+        flavor.create()
+        self._create_server(api=self.api, flavor_id=flavor.flavorid)
+
+        # Attempt to create another should fail as we are at quota.
+        e = self.assertRaises(
+            client.OpenStackApiException, self._create_server, api=self.api,
+            flavor_id=flavor.flavorid)
+        self.assertEqual(403, e.response.status_code)
+        self.assertIn('class:DISK_GB', e.response.text)
+
+        # Then test the default limit of 250 using the admin project.
+        for i in range(2):
+            self._create_server(api=self.admin_api, flavor_id=flavor.flavorid)
+
+        # Attempt to create another server should fail because we are at quota.
+        e = self.assertRaises(
+            client.OpenStackApiException, self._create_server,
+            api=self.admin_api, flavor_id=flavor.flavorid)
+        self.assertEqual(403, e.response.status_code)
+        self.assertIn('class:DISK_GB', e.response.text)
+
+    def test_no_injected_files(self):
+        self._create_server()
+
+    def test_max_injected_files(self):
+        # Quota is 5.
+        files = []
+        contents = base64.encode_as_text('some content')
+        for i in range(5):
+            files.append(('/my/path%d' % i, contents))
+        server = self._build_server()
+        personality = [
+            {'path': item[0], 'contents': item[1]} for item in files]
+        server['personality'] = personality
+        self.api.post_server({'server': server})
+
+    def test_max_injected_file_content_bytes(self):
+        # Quota is 10 * 1024
+        # Hm, apparently quota is checked against the base64 encoded string
+        # even though the api-ref claims the limit is for the decoded data.
+        # Subtract 3072 characters to account for that.
+        content = base64.encode_as_bytes(
+            ''.join(['a' for i in range(10 * 1024 - 3072)]))
+        server = self._build_server()
+        personality = [{'path': '/test/path', 'contents': content}]
+        server['personality'] = personality
+        self.api.post_server({'server': server})
+
+    def test_max_injected_file_path_bytes(self):
+        # Quota is 255.
+        path = ''.join(['a' for i in range(255)])
+        contents = base64.encode_as_text('some content')
+        server = self._build_server()
+        personality = [{'path': path, 'contents': contents}]
+        server['personality'] = personality
+        self.api.post_server({'server': server})
+
+    def test_server_group_members(self):
+        # Create a server group.
+        instance_group = group_obj.InstanceGroup(
+            self.ctx, policy="anti-affinity")
+        instance_group.name = "foo"
+        instance_group.project_id = self.ctx.project_id
+        instance_group.user_id = self.ctx.user_id
+        instance_group.uuid = uuids.instance_group
+        instance_group.create()
+
+        # Quota for server group members is 1.
+        server = self._build_server()
+        hints = {'group': uuids.instance_group}
+        req = {'server': server, 'os:scheduler_hints': hints}
+        self.admin_api.post_server(req)
+
+        # Attempt to create another server in the group should fail because we
+        # are at quota.
+        e = self.assertRaises(
+            client.OpenStackApiException, self.admin_api.post_server, req)
+        self.assertEqual(403, e.response.status_code)
+        self.assertIn('server_group_members', e.response.text)
