@@ -20,6 +20,7 @@
 import eventlet
 import functools
 import sys
+import typing as ty
 
 from keystoneauth1 import exceptions as ks_exc
 from oslo_config import cfg
@@ -48,6 +49,7 @@
 from nova.i18n import _
 from nova.image import glance
 from nova.limit import placement as placement_limits
+from nova.limit import utils as limit_utils
 from nova import manager
 from nova.network import neutron
 from nova import notifications
@@ -970,6 +972,33 @@ def _restrict_request_spec_to_cell(context, instance, request_spec):
                 objects.Destination(
                     cell=instance_mapping.cell_mapping))
 
+    def _recheck_quota(
+        self,
+        context: nova_context.RequestContext,
+        flavor: 'objects.Flavor',
+        request_spec: 'objects.RequestSpec',
+        orig_num_req: int,
+        project_id: ty.Optional[str] = None,
+        user_id: ty.Optional[str] = None
+    ) -> None:
+        # A quota "recheck" is a quota check that is performed *after* quota
+        # limited resources are consumed. It is meant to address race
+        # conditions where a request that was not over quota at the start of
+        # the request becomes over quota after resources (like database rows
+        # or placement allocations) are created. An example of this would be
+        # a large number of requests for the same resource for the same
+        # project sent simultaneously.
+        if CONF.quota.recheck_quota:
+            # The orig_num_req is the number of instances requested, which is
+            # the delta that was quota checked before resources were
+            # allocated. This is only used for the exception message if the
+            # recheck fails for lack of enough quota.
+            compute_utils.check_num_instances_quota(
+                context, flavor, 0, 0, project_id=project_id,
+                user_id=user_id, orig_num_req=orig_num_req)
+            placement_limits.enforce_num_instances_and_flavor(
+                context, project_id, flavor, request_spec.is_bfv, 0, 0)
+
     # TODO(mriedem): Make request_spec required in ComputeTaskAPI RPC v2.0.
     @targets_cell
     def unshelve_instance(self, context, instance, request_spec=None):
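The recheck pattern above is easiest to see in isolation. Below is a minimal, self-contained sketch of the idea, not Nova's real quota plumbing: QUOTA_LIMIT, check_quota, and friends are hypothetical stand-ins. Two racing requests can each pass the initial check (delta = N) before either records usage; a second check with delta = 0 after allocation sees the combined usage, so the loser detects the overrun and rolls back.

# Minimal sketch of the quota "recheck" pattern. All names here are
# hypothetical stand-ins, not Nova's real quota API.
import threading

QUOTA_LIMIT = 10
_usage = 0
_lock = threading.Lock()


class TooManyInstances(Exception):
    pass


def check_quota(delta):
    # Passes if current usage plus the requested delta fits the limit.
    # With delta=0 this validates usage that is already recorded, which
    # is exactly what a "recheck" does.
    if _usage + delta > QUOTA_LIMIT:
        raise TooManyInstances()


def allocate(num):
    global _usage
    with _lock:
        _usage += num


def create_instances(num):
    check_quota(num)    # initial check; may race with other requests
    allocate(num)       # resources (DB rows, allocations) created here
    try:
        check_quota(0)  # recheck: usage now includes every racer's rows
    except TooManyInstances:
        allocate(-num)  # roll back our own allocation before re-raising
        raise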
@@ -1055,6 +1084,30 @@ def safe_image_show(ctx, image_id):
                 host_lists = self._schedule_instances(context,
                         request_spec, [instance.uuid],
                         return_alternates=False)
+
+                # NOTE(melwitt): We recheck the quota after allocating the
+                # resources in placement, to prevent users from allocating
+                # more resources than their allowed quota in the event of a
+                # race. This is configurable because it can be expensive if
+                # strict quota limits are not required in a deployment.
+                try:
+                    # Quota should be checked for unshelve only if
+                    # resources are being counted in placement. Legacy
+                    # quotas continue to consume resources while
+                    # SHELVED_OFFLOADED and will not allocate any new
+                    # resources during unshelve.
+                    if (CONF.quota.count_usage_from_placement or
+                            limit_utils.use_unified_limits()):
+                        self._recheck_quota(
+                            context, instance.flavor, request_spec, 0,
+                            project_id=instance.project_id,
+                            user_id=instance.user_id)
+                except (exception.TooManyInstances,
+                        limit_exceptions.ProjectOverLimit):
+                    with excutils.save_and_reraise_exception():
+                        self.report_client.delete_allocation_for_instance(
+                            context, instance.uuid, force=True)
+
                 host_list = host_lists[0]
                 selection = host_list[0]
                 scheduler_utils.populate_filter_properties(
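The rollback above relies on oslo.utils' excutils.save_and_reraise_exception, which runs the cleanup inside its context and then re-raises the original exception with its traceback intact, so deleting the placement allocation cannot mask the quota error. A standalone sketch of the idiom (the cleanup function is a hypothetical stand-in for delete_allocation_for_instance):

from oslo_utils import excutils


def cleanup():
    # Hypothetical stand-in for deleting the placement allocation.
    print('rolling back allocation')


def do_work():
    try:
        raise ValueError('over quota')  # simulated recheck failure
    except ValueError:
        # cleanup() runs first, then the original ValueError is
        # re-raised automatically when the context manager exits.
        with excutils.save_and_reraise_exception():
            cleanup()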
@@ -1677,27 +1730,22 @@ def schedule_and_build_instances(self, context, build_requests,
                 instances.append(instance)
                 cell_mapping_cache[instance.uuid] = cell
 
-        # NOTE(melwitt): We recheck the quota after creating the
-        # objects to prevent users from allocating more resources
+        # NOTE(melwitt): We recheck the quota after allocating the
+        # resources to prevent users from allocating more resources
         # than their allowed quota in the event of a race. This is
         # configurable because it can be expensive if strict quota
         # limits are not required in a deployment.
-        if CONF.quota.recheck_quota:
-            try:
-                compute_utils.check_num_instances_quota(
-                    context, instance.flavor, 0, 0,
-                    orig_num_req=len(build_requests))
-                placement_limits.enforce_num_instances_and_flavor(
-                    context, context.project_id, instance.flavor,
-                    request_specs[0].is_bfv, 0, 0)
-            except (exception.TooManyInstances,
-                    limit_exceptions.ProjectOverLimit) as exc:
-                with excutils.save_and_reraise_exception():
-                    self._cleanup_build_artifacts(context, exc, instances,
-                                                  build_requests,
-                                                  request_specs,
-                                                  block_device_mapping, tags,
-                                                  cell_mapping_cache)
+        try:
+            self._recheck_quota(
+                context, instance.flavor, request_specs[0],
+                len(build_requests), project_id=instance.project_id,
+                user_id=instance.user_id)
+        except (exception.TooManyInstances,
+                limit_exceptions.ProjectOverLimit) as exc:
+            with excutils.save_and_reraise_exception():
+                self._cleanup_build_artifacts(
+                    context, exc, instances, build_requests, request_specs,
+                    block_device_mapping, tags, cell_mapping_cache)
 
         zipped = zip(build_requests, request_specs, host_lists, instances)
         for (build_request, request_spec, host_list, instance) in zipped:
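Since _recheck_quota never touches self, it can be exercised directly with mocks. A rough test sketch under that assumption (a real test would still need Nova's config and test fixtures, and relies on CONF.quota.recheck_quota defaulting to True); it verifies that both the legacy counting check and the unified-limits check run with a delta of 0:

from unittest import mock

from nova.conductor import manager as conductor_manager


@mock.patch('nova.conductor.manager.placement_limits.'
            'enforce_num_instances_and_flavor')
@mock.patch('nova.conductor.manager.compute_utils.check_num_instances_quota')
def test_recheck_quota(mock_check, mock_enforce):
    ctxt = mock.sentinel.context
    flavor = mock.sentinel.flavor
    request_spec = mock.Mock(is_bfv=False)

    # _recheck_quota does not use self, so a plain Mock suffices here.
    conductor_manager.ComputeTaskManager._recheck_quota(
        mock.Mock(), ctxt, flavor, request_spec, 2,
        project_id='proj', user_id='user')

    # Both checks run with min_count=0 and max_count=0 (recheck
    # semantics), and orig_num_req is threaded through for the error
    # message on failure.
    mock_check.assert_called_once_with(
        ctxt, flavor, 0, 0, project_id='proj', user_id='user',
        orig_num_req=2)
    mock_enforce.assert_called_once_with(
        ctxt, 'proj', flavor, False, 0, 0)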