
/nova/limit/placement.py

https://github.com/openstack/nova

# Copyright 2022 StackHPC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import typing as ty

import os_resource_classes as orc
from oslo_limit import exception as limit_exceptions
from oslo_limit import limit
from oslo_log import log as logging

import nova.conf
from nova import exception
from nova.limit import utils as limit_utils
from nova import objects
from nova import quota
from nova.scheduler.client import report
from nova.scheduler import utils

LOG = logging.getLogger(__name__)

CONF = nova.conf.CONF

# Cache to avoid repopulating ksa state
PLACEMENT_CLIENT = None

LEGACY_LIMITS = {
    "servers": "instances",
    "class:VCPU": "cores",
    "class:MEMORY_MB": "ram",
}
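
# LEGACY_LIMITS maps unified limit resource names (as registered in
# keystone) to the legacy quota names returned by the get_legacy_*
# helpers at the bottom of this module.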


def _get_placement_usages(
    context: 'nova.context.RequestContext', project_id: str
) -> ty.Dict[str, int]:
    global PLACEMENT_CLIENT
    if not PLACEMENT_CLIENT:
        PLACEMENT_CLIENT = report.SchedulerReportClient()
    return PLACEMENT_CLIENT.get_usages_counts_for_limits(context, project_id)
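
# The first call above constructs the SchedulerReportClient (and its
# keystoneauth session); later calls reuse the module-level client, which
# is what the "Cache to avoid repopulating ksa state" comment refers to.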


def _get_usage(
    context: 'nova.context.RequestContext',
    project_id: str,
    resource_names: ty.List[str],
) -> ty.Dict[str, int]:
    """Called by oslo_limit's enforcer"""
    if not limit_utils.use_unified_limits():
        raise NotImplementedError("Unified limits support is disabled")

    count_servers = False
    resource_classes = []

    for resource in resource_names:
        if resource == "servers":
            count_servers = True
            continue

        if not resource.startswith("class:"):
            raise ValueError("Unknown resource type: %s" % resource)

        # Temporarily strip the resource class prefix as placement does not
        # use it. Example: the limit resource 'class:VCPU' is reported as
        # 'VCPU' by placement. Use slicing rather than str.lstrip, which
        # strips a set of characters rather than a prefix.
        r_class = resource[len("class:"):]
        if r_class in orc.STANDARDS or orc.is_custom(r_class):
            resource_classes.append(r_class)
        else:
            raise ValueError("Unknown resource class: %s" % r_class)

    if not count_servers and len(resource_classes) == 0:
        raise ValueError("no resources to check")

    resource_counts = {}
    if count_servers:
        # TODO(melwitt): Change this to count servers from placement once
        # nova is using placement consumer types and is able to
        # differentiate between "instance" allocations vs "migration"
        # allocations.
        if not quota.is_qfd_populated(context):
            LOG.error('Must migrate all instance mappings before using '
                      'unified limits')
            raise ValueError("must first migrate instance mappings")
        mappings = objects.InstanceMappingList.get_counts(context, project_id)
        resource_counts['servers'] = mappings['project']['instances']

    try:
        usages = _get_placement_usages(context, project_id)
    except exception.UsagesRetrievalFailed as e:
        msg = ("Failed to retrieve usages from placement while enforcing "
               "%s quota limits." % ", ".join(resource_names))
        LOG.error(msg + " Error: " + str(e))
        raise exception.UsagesRetrievalFailed(msg)

    # Use legacy behavior VCPU = VCPU + PCPU if configured.
    if CONF.workarounds.unified_limits_count_pcpu_as_vcpu:
        # If PCPU is in resource_classes, that means it was specified in the
        # flavor explicitly. In that case, we expect it to have its own limit
        # registered and we should not fold it into VCPU.
        if orc.PCPU in usages and orc.PCPU not in resource_classes:
            usages[orc.VCPU] = (usages.get(orc.VCPU, 0) +
                                usages.get(orc.PCPU, 0))

    for resource_class in resource_classes:
        # Need to add back the resource class prefix that was stripped earlier
        resource_name = 'class:' + resource_class
        # Placement doesn't know about classes with zero usage,
        # so default to zero to tell oslo.limit the usage is zero
        resource_counts[resource_name] = usages.get(resource_class, 0)

    return resource_counts
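
# For illustration only (hypothetical values): for
# resource_names=["servers", "class:VCPU", "class:MEMORY_MB"], _get_usage
# returns something like:
#     {"servers": 2, "class:VCPU": 4, "class:MEMORY_MB": 8192}
# where "servers" comes from the instance mappings count and the "class:"
# entries come from placement usages.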


def _get_deltas_by_flavor(
    flavor: 'objects.Flavor', is_bfv: bool, count: int
) -> ty.Dict[str, int]:
    if flavor is None:
        raise ValueError("flavor")
    if count < 0:
        raise ValueError("count")

    # NOTE(johngarbutt): this skips bfv, port, and cyborg resources
    # but it still gives us better checks than before unified limits.
    # We need an instance in the DB to use the current is_bfv logic,
    # which doesn't work well for instances that don't yet have a uuid.
    deltas_from_flavor = utils.resources_for_limits(flavor, is_bfv)

    deltas = {"servers": count}
    for resource, amount in deltas_from_flavor.items():
        if amount != 0:
            deltas["class:%s" % resource] = amount * count
    return deltas
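
# For illustration only (hypothetical flavor): a non-BFV flavor with
# 2 VCPUs, 2048 MB of RAM and a 10 GB root disk requested with count=3
# would yield deltas along the lines of:
#     {"servers": 3, "class:VCPU": 6, "class:MEMORY_MB": 6144,
#      "class:DISK_GB": 30}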


def _get_enforcer(
    context: 'nova.context.RequestContext', project_id: str
) -> limit.Enforcer:
    # NOTE(johngarbutt) should we move context arg into oslo.limit?
    def callback(project_id, resource_names):
        return _get_usage(context, project_id, resource_names)

    return limit.Enforcer(callback)


def enforce_num_instances_and_flavor(
    context: 'nova.context.RequestContext',
    project_id: str,
    flavor: 'objects.Flavor',
    is_bfvm: bool,
    min_count: int,
    max_count: int,
    enforcer: ty.Optional[limit.Enforcer] = None
) -> int:
    """Return max instances possible, else raise TooManyInstances exception."""
    if not limit_utils.use_unified_limits():
        return max_count

    # Ensure the recursion will always complete
    if min_count < 0 or min_count > max_count:
        raise ValueError("invalid min_count")
    if max_count < 0:
        raise ValueError("invalid max_count")

    deltas = _get_deltas_by_flavor(flavor, is_bfvm, max_count)
    # Reuse the enforcer passed in by a recursive call, rather than
    # rebuilding it (and its usage callback) on every retry.
    enforcer = enforcer or _get_enforcer(context, project_id)
    try:
        enforcer.enforce(project_id, deltas)
    except limit_exceptions.ProjectOverLimit as e:
        # NOTE(johngarbutt) we can do better, but this is very simple
        LOG.debug("Limit check failed with count %s, retrying with count %s",
                  max_count, max_count - 1)
        try:
            return enforce_num_instances_and_flavor(context, project_id,
                                                    flavor, is_bfvm,
                                                    min_count, max_count - 1,
                                                    enforcer=enforcer)
        except ValueError:
            # Copy the *original* exception message to an OverQuota to
            # propagate to the API layer
            raise exception.TooManyInstances(str(e))

    # no problems with max_count, so we return max_count
    return max_count
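
# For illustration only (hypothetical limits and usage): with a "servers"
# limit of 10 and 8 servers already running, a request with min_count=1
# and max_count=5 retries 5 -> 4 -> 3 -> 2 and returns 2. If even
# min_count cannot fit, the min_count > max_count guard raises ValueError,
# which is converted into TooManyInstances for the API layer.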


def _convert_keys_to_legacy_name(new_dict):
    legacy = {}
    for new_name, old_name in LEGACY_LIMITS.items():
        # defensive in case oslo or keystone doesn't give us an answer
        legacy[old_name] = new_dict.get(new_name) or 0
    return legacy
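
# For illustration only (hypothetical values):
#     _convert_keys_to_legacy_name(
#         {"servers": 10, "class:VCPU": 20, "class:MEMORY_MB": 51200})
# returns {"instances": 10, "cores": 20, "ram": 51200}.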


def get_legacy_default_limits():
    enforcer = limit.Enforcer(lambda: None)
    new_limits = enforcer.get_registered_limits(LEGACY_LIMITS.keys())
    return _convert_keys_to_legacy_name(dict(new_limits))


def get_legacy_project_limits(project_id):
    enforcer = limit.Enforcer(lambda: None)
    new_limits = enforcer.get_project_limits(project_id, LEGACY_LIMITS.keys())
    return _convert_keys_to_legacy_name(dict(new_limits))


def get_legacy_counts(context, project_id):
    resource_names = list(LEGACY_LIMITS.keys())
    resource_names.sort()
    new_usage = _get_usage(context, project_id, resource_names)
    return _convert_keys_to_legacy_name(new_usage)
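
# For illustration only: a sketch of how the API layer is expected to call
# into this module (ctxt, flavor and the project id are hypothetical):
#
#     from nova.limit import placement as placement_limits
#
#     allowed = placement_limits.enforce_num_instances_and_flavor(
#         ctxt, "my-project", flavor, is_bfvm=False, min_count=1,
#         max_count=3)
#
# 'allowed' is the largest count between min_count and max_count that fits
# within the project's unified limits; TooManyInstances is raised if even
# min_count does not fit.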