OpenStack Compute (Nova)

host_manager.py

# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Manage hosts in the current zone.
"""

import collections
import functools
import time

try:
    from collections import UserDict as IterableUserDict  # Python 3
except ImportError:
    from UserDict import IterableUserDict  # Python 2

import iso8601
from oslo_log import log as logging
from oslo_utils import timeutils
import six

import nova.conf
from nova import context as context_module
from nova import exception
from nova import objects
from nova.pci import stats as pci_stats
from nova.scheduler import filters
from nova.scheduler import weights
from nova import utils
from nova.virt import hardware


CONF = nova.conf.CONF

LOG = logging.getLogger(__name__)

HOST_INSTANCE_SEMAPHORE = "host_instance"


class ReadOnlyDict(IterableUserDict):
    """A read-only dict."""
    def __init__(self, source=None):
        self.data = {}
        if source:
            self.data.update(source)

    def __setitem__(self, key, item):
        raise TypeError()

    def __delitem__(self, key):
        raise TypeError()

    def clear(self):
        raise TypeError()

    def pop(self, key, *args):
        raise TypeError()

    def popitem(self):
        raise TypeError()

    def update(self):
        raise TypeError()


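# The decorator below stamps HostState.updated with the current UTC time
# whenever a consume_from_request() call succeeds; on failure it only logs a
# warning, leaving the previous timestamp in place so a newer compute node
# record can still refresh the host state.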
@utils.expects_func_args('self', 'spec_obj')
def set_update_time_on_success(function):
    """Set the updated time of HostState when consuming succeeds."""
    @functools.wraps(function)
    def decorated_function(self, spec_obj):
        return_value = None
        try:
            return_value = function(self, spec_obj)
        except Exception as e:
            # Ignore exceptions raised from consume_from_request() so that
            # the instance boot fails later in the compute node's resource
            # claim instead; another suitable node may then be chosen during
            # a scheduling retry.
            LOG.warning("Selected host: %(host)s failed to consume from "
                        "instance. Error: %(error)s",
                        {'host': self.host, 'error': e})
        else:
            now = timeutils.utcnow()
            # NOTE(sbauza): Objects are UTC tz-aware by default
            self.updated = now.replace(tzinfo=iso8601.UTC)
        return return_value
    return decorated_function


class HostState(object):
    """Mutable and immutable information tracked for a host.
    This is an attempt to remove the ad-hoc data structures
    previously used and lock down access.
    """

    def __init__(self, host, node, cell_uuid):
        self.host = host
        self.nodename = node
        self.uuid = None
        self._lock_name = (host, node)

        # Mutable available resources.
        # These will change as resources are virtually "consumed".
        self.total_usable_ram_mb = 0
        self.total_usable_disk_gb = 0
        self.disk_mb_used = 0
        self.free_ram_mb = 0
        self.free_disk_mb = 0
        self.vcpus_total = 0
        self.vcpus_used = 0
        self.pci_stats = None
        self.numa_topology = None

        # Additional host information from the compute node stats:
        self.num_instances = 0
        self.num_io_ops = 0
        self.failed_builds = 0

        # Other information
        self.host_ip = None
        self.hypervisor_type = None
        self.hypervisor_version = None
        self.hypervisor_hostname = None
        self.cpu_info = None
        self.supported_instances = None

        # Resource oversubscription values for the compute host:
        self.limits = {}

        # Generic metrics from compute nodes
        self.metrics = None

        # List of aggregates the host belongs to
        self.aggregates = []

        # Instances on this host
        self.instances = {}

        # Allocation ratios for this host
        self.ram_allocation_ratio = None
        self.cpu_allocation_ratio = None
        self.disk_allocation_ratio = None

        # Host cell (v2) membership
        self.cell_uuid = cell_uuid

        self.updated = None

    def update(self, compute=None, service=None, aggregates=None,
               inst_dict=None):
        """Update all information about a host."""

        @utils.synchronized(self._lock_name)
        def _locked_update(self, compute, service, aggregates, inst_dict):
            # Scheduler API is inherently multi-threaded as every incoming RPC
            # message will be dispatched in its own green thread. So the
            # shared host state should be updated in a consistent way to make
            # sure its data is valid under concurrent write operations.
            if compute is not None:
                LOG.debug("Update host state from compute node: %s", compute)
                self._update_from_compute_node(compute)
            if aggregates is not None:
                LOG.debug("Update host state with aggregates: %s", aggregates)
                self.aggregates = aggregates
            if service is not None:
                LOG.debug("Update host state with service dict: %s", service)
                self.service = ReadOnlyDict(service)
            if inst_dict is not None:
                LOG.debug("Update host state with instances: %s",
                          list(inst_dict))
                self.instances = inst_dict

        return _locked_update(self, compute, service, aggregates, inst_dict)

    def _update_from_compute_node(self, compute):
        """Update information about a host from a ComputeNode object."""
        # NOTE(jichenjc): if the compute record has just been created but not
        # yet updated, some fields such as free_disk_gb can be None
        if 'free_disk_gb' not in compute or compute.free_disk_gb is None:
            LOG.debug('Ignoring compute node %s as its usage has not been '
                      'updated yet.', compute.uuid)
            return

        if (self.updated and compute.updated_at and
                self.updated > compute.updated_at):
            return
        all_ram_mb = compute.memory_mb

        self.uuid = compute.uuid

        # Assume the virtual size is all consumed by instances when qcow2
        # disks are used.
        free_gb = compute.free_disk_gb
        least_gb = compute.disk_available_least
        if least_gb is not None:
            if least_gb > free_gb:
                # this can occur when an instance in the database is not on
                # the host
                LOG.warning(
                    "Host %(hostname)s has more disk space than database "
                    "expected (%(physical)s GB > %(database)s GB)",
                    {'physical': least_gb, 'database': free_gb,
                     'hostname': compute.hypervisor_hostname})
            free_gb = min(least_gb, free_gb)
        free_disk_mb = free_gb * 1024

        self.disk_mb_used = compute.local_gb_used * 1024

        # NOTE(jogo) free_ram_mb can be negative
        self.free_ram_mb = compute.free_ram_mb
        self.total_usable_ram_mb = all_ram_mb
        self.total_usable_disk_gb = compute.local_gb
        self.free_disk_mb = free_disk_mb
        self.vcpus_total = compute.vcpus
        self.vcpus_used = compute.vcpus_used
        self.updated = compute.updated_at
        # the ComputeNode.numa_topology field is a StringField so deserialize
        self.numa_topology = objects.NUMATopology.obj_from_db_obj(
            compute.numa_topology) if compute.numa_topology else None
        self.pci_stats = pci_stats.PciDeviceStats(
            stats=compute.pci_device_pools)

        # All virt drivers report host_ip
        self.host_ip = compute.host_ip
        self.hypervisor_type = compute.hypervisor_type
        self.hypervisor_version = compute.hypervisor_version
        self.hypervisor_hostname = compute.hypervisor_hostname
        self.cpu_info = compute.cpu_info
        if compute.supported_hv_specs:
            self.supported_instances = [spec.to_list() for spec
                                        in compute.supported_hv_specs]
        else:
            self.supported_instances = []

        # Don't store stats directly in host_state to make sure these don't
        # overwrite any values, or get overwritten themselves. Store in self
        # so filters can schedule with them.
        self.stats = compute.stats or {}

        # Track number of instances on host
        self.num_instances = int(self.stats.get('num_instances', 0))

        self.num_io_ops = int(self.stats.get('io_workload', 0))

        # update metrics
        self.metrics = objects.MonitorMetricList.from_json(compute.metrics)

        # update allocation ratios given by the ComputeNode object
        self.cpu_allocation_ratio = compute.cpu_allocation_ratio
        self.ram_allocation_ratio = compute.ram_allocation_ratio
        self.disk_allocation_ratio = compute.disk_allocation_ratio

        # update failed_builds counter reported by the compute
        self.failed_builds = int(self.stats.get('failed_builds', 0))

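    # Note: consume_from_request() below is how the scheduler optimistically
    # charges this HostState for an instance it has just placed here, so that
    # subsequent selections within the same scheduling request already see the
    # reduced free RAM, disk and vCPUs before the compute node reports back.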
    def consume_from_request(self, spec_obj):
        """Incrementally update host state from a RequestSpec object."""

        @utils.synchronized(self._lock_name)
        @set_update_time_on_success
        def _locked(self, spec_obj):
            # Scheduler API is inherently multi-threaded as every incoming RPC
            # message will be dispatched in its own green thread. So the
            # shared host state should be consumed in a consistent way to make
            # sure its data is valid under concurrent write operations.
            self._locked_consume_from_request(spec_obj)

        return _locked(self, spec_obj)

    def _locked_consume_from_request(self, spec_obj):
        disk_mb = (spec_obj.root_gb +
                   spec_obj.ephemeral_gb) * 1024
        ram_mb = spec_obj.memory_mb
        vcpus = spec_obj.vcpus
        self.free_ram_mb -= ram_mb
        self.free_disk_mb -= disk_mb
        self.vcpus_used += vcpus

        # Track number of instances on host
        self.num_instances += 1

        pci_requests = spec_obj.pci_requests
        if pci_requests and self.pci_stats:
            pci_requests = pci_requests.requests
        else:
            pci_requests = None

        # Calculate the NUMA usage...
        if self.numa_topology and spec_obj.numa_topology:
            spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
                self.numa_topology, spec_obj.numa_topology,
                limits=self.limits.get('numa_topology'),
                pci_requests=pci_requests, pci_stats=self.pci_stats)

            self.numa_topology = hardware.numa_usage_from_instance_numa(
                self.numa_topology, spec_obj.numa_topology)

        # ...and the PCI usage
        if pci_requests:
            instance_cells = None
            if spec_obj.numa_topology:
                instance_cells = spec_obj.numa_topology.cells
            self.pci_stats.apply_requests(pci_requests, instance_cells)

        # NOTE(sbauza): By considering all cases when the scheduler is called
        # and when consume_from_request() is run, we can safely say that there
        # is always an IO operation because we want to move the instance
        self.num_io_ops += 1

    def __repr__(self):
        return ("(%(host)s, %(node)s) ram: %(free_ram)sMB "
                "disk: %(free_disk)sMB io_ops: %(num_io_ops)s "
                "instances: %(num_instances)s" %
                {'host': self.host, 'node': self.nodename,
                 'free_ram': self.free_ram_mb, 'free_disk': self.free_disk_mb,
                 'num_io_ops': self.num_io_ops,
                 'num_instances': self.num_instances})


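# HostManager is used by the scheduler driver: it caches cell, aggregate and
# (optionally) per-host instance information, and builds the HostState
# objects that the enabled filters and weighers operate on.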
class HostManager(object):
    """Base HostManager class."""

    # Can be overridden in a subclass
    def host_state_cls(self, host, node, cell, **kwargs):
        return HostState(host, node, cell)

    def __init__(self):
        self.refresh_cells_caches()
        self.filter_handler = filters.HostFilterHandler()
        filter_classes = self.filter_handler.get_matching_classes(
                CONF.filter_scheduler.available_filters)
        self.filter_cls_map = {cls.__name__: cls for cls in filter_classes}
        self.filter_obj_map = {}
        self.enabled_filters = self._choose_host_filters(self._load_filters())
        self.weight_handler = weights.HostWeightHandler()
        weigher_classes = self.weight_handler.get_matching_classes(
                CONF.filter_scheduler.weight_classes)
        self.weighers = [cls() for cls in weigher_classes]
        # Dict of aggregates keyed by their ID
        self.aggs_by_id = {}
        # Dict of set of aggregate IDs keyed by the name of the host belonging
        # to those aggregates
        self.host_aggregates_map = collections.defaultdict(set)
        self._init_aggregates()
        self.track_instance_changes = (
                CONF.filter_scheduler.track_instance_changes)
        # Dict of instances and status, keyed by host
        self._instance_info = {}
        if self.track_instance_changes:
            self._init_instance_info()

    def _load_filters(self):
        return CONF.filter_scheduler.enabled_filters

    def _init_aggregates(self):
        elevated = context_module.get_admin_context()
        aggs = objects.AggregateList.get_all(elevated)
        for agg in aggs:
            self.aggs_by_id[agg.id] = agg
            for host in agg.hosts:
                self.host_aggregates_map[host].add(agg.id)

    def update_aggregates(self, aggregates):
        """Updates internal HostManager information about aggregates."""
        if isinstance(aggregates, (list, objects.AggregateList)):
            for agg in aggregates:
                self._update_aggregate(agg)
        else:
            self._update_aggregate(aggregates)

    def _update_aggregate(self, aggregate):
        self.aggs_by_id[aggregate.id] = aggregate
        for host in aggregate.hosts:
            self.host_aggregates_map[host].add(aggregate.id)
        # Refreshing the mapping dict to remove all hosts that are no longer
        # part of the aggregate
        for host in self.host_aggregates_map:
            if (aggregate.id in self.host_aggregates_map[host] and
                    host not in aggregate.hosts):
                self.host_aggregates_map[host].remove(aggregate.id)

    def delete_aggregate(self, aggregate):
        """Deletes internal HostManager information about a specific
        aggregate.
        """
        if aggregate.id in self.aggs_by_id:
            del self.aggs_by_id[aggregate.id]
        for host in self.host_aggregates_map:
            if aggregate.id in self.host_aggregates_map[host]:
                self.host_aggregates_map[host].remove(aggregate.id)

    def _init_instance_info(self, computes_by_cell=None):
        """Creates the initial view of instances for all hosts.

        As this initial population of instance information may take some time,
        we don't wish to block the scheduler's startup while this completes.
        The async method allows us to simply mock out the
        _init_instance_info() method in tests.

        :param computes_by_cell: dict, keyed by cell, of compute node lists to
            populate instance info for; if None, compute nodes will be looked
            up in the database
        """
        def _async_init_instance_info(computes_by_cell):
            context = context_module.RequestContext()
            LOG.debug("START:_async_init_instance_info")
            self._instance_info = {}

            count = 0
            if not computes_by_cell:
                computes_by_cell = {}
                for cell in self.cells.values():
                    with context_module.target_cell(context, cell) as cctxt:
                        cell_cns = objects.ComputeNodeList.get_all(
                            cctxt).objects
                        computes_by_cell[cell] = cell_cns
                        count += len(cell_cns)

            LOG.debug("Total number of compute nodes: %s", count)

            for cell, compute_nodes in computes_by_cell.items():
                # Break the queries into batches of 10 to reduce the total
                # number of calls to the DB.
                batch_size = 10
                start_node = 0
                end_node = batch_size
                while start_node <= len(compute_nodes):
                    curr_nodes = compute_nodes[start_node:end_node]
                    start_node += batch_size
                    end_node += batch_size
                    filters = {"host": [curr_node.host
                                        for curr_node in curr_nodes],
                               "deleted": False}
                    with context_module.target_cell(context, cell) as cctxt:
                        result = objects.InstanceList.get_by_filters(
                            cctxt.elevated(), filters)
                    instances = result.objects
                    LOG.debug("Adding %s instances for hosts %s-%s",
                              len(instances), start_node, end_node)
                    for instance in instances:
                        host = instance.host
                        if host not in self._instance_info:
                            self._instance_info[host] = {"instances": {},
                                                         "updated": False}
                        inst_dict = self._instance_info[host]
                        inst_dict["instances"][instance.uuid] = instance
                    # Call sleep() to cooperatively yield
                    time.sleep(0)
            LOG.debug("END:_async_init_instance_info")

        # Run this async so that we don't block the scheduler start-up
        utils.spawn_n(_async_init_instance_info, computes_by_cell)

    def _choose_host_filters(self, filter_cls_names):
        """Since the caller may specify which filters to use we need
        to have an authoritative list of what is permissible. This
        function checks the filter names against a predefined set
        of acceptable filters.
        """
        if not isinstance(filter_cls_names, (list, tuple)):
            filter_cls_names = [filter_cls_names]

        good_filters = []
        bad_filters = []
        for filter_name in filter_cls_names:
            if filter_name not in self.filter_obj_map:
                if filter_name not in self.filter_cls_map:
                    bad_filters.append(filter_name)
                    continue
                filter_cls = self.filter_cls_map[filter_name]
                self.filter_obj_map[filter_name] = filter_cls()
            good_filters.append(self.filter_obj_map[filter_name])
        if bad_filters:
            msg = ", ".join(bad_filters)
            raise exception.SchedulerHostFilterNotFound(filter_name=msg)
        return good_filters

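    # get_filtered_hosts() first applies the request's ignore_hosts,
    # force_hosts/force_nodes and requested_destination constraints to the
    # candidate list, and only then runs the enabled filter chain. Filters
    # are skipped entirely when a host or node is forced, unless the internal
    # _nova_check_type scheduler hint is set.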
    def get_filtered_hosts(self, hosts, spec_obj, index=0):
        """Filter hosts and return only ones passing all filters."""

        def _strip_ignore_hosts(host_map, hosts_to_ignore):
            ignored_hosts = []
            for host in hosts_to_ignore:
                for (hostname, nodename) in list(host_map.keys()):
                    if host.lower() == hostname.lower():
                        del host_map[(hostname, nodename)]
                        ignored_hosts.append(host)
            ignored_hosts_str = ', '.join(ignored_hosts)
            LOG.info('Host filter ignoring hosts: %s', ignored_hosts_str)

        def _match_forced_hosts(host_map, hosts_to_force):
            forced_hosts = []
            lowered_hosts_to_force = [host.lower() for host in hosts_to_force]
            for (hostname, nodename) in list(host_map.keys()):
                if hostname.lower() not in lowered_hosts_to_force:
                    del host_map[(hostname, nodename)]
                else:
                    forced_hosts.append(hostname)
            if host_map:
                forced_hosts_str = ', '.join(forced_hosts)
                LOG.info('Host filter forcing available hosts to %s',
                         forced_hosts_str)
            else:
                forced_hosts_str = ', '.join(hosts_to_force)
                LOG.info("No hosts matched due to not matching "
                         "'force_hosts' value of '%s'", forced_hosts_str)

        def _match_forced_nodes(host_map, nodes_to_force):
            forced_nodes = []
            for (hostname, nodename) in list(host_map.keys()):
                if nodename not in nodes_to_force:
                    del host_map[(hostname, nodename)]
                else:
                    forced_nodes.append(nodename)
            if host_map:
                forced_nodes_str = ', '.join(forced_nodes)
                LOG.info('Host filter forcing available nodes to %s',
                         forced_nodes_str)
            else:
                forced_nodes_str = ', '.join(nodes_to_force)
                LOG.info("No nodes matched due to not matching "
                         "'force_nodes' value of '%s'", forced_nodes_str)

        def _get_hosts_matching_request(hosts, requested_destination):
            """Get hosts matching the requested destination.

            The requested destination object always has the host set, and may
            or may not have the node set. Starting with API microversion
            2.74, a host and/or node can also be specified when launching a
            server:

            - If a host (or a node, or both) is supplied and
              get_compute_nodes_by_host_or_node, called from the
              resources_from_request_spec function, returns a single node,
              the destination will have both host and node set.
            - If only a host is supplied and get_compute_nodes_by_host_or_node
              returns more than one node, the destination will only include
              the host.
            """
            (host, node) = (requested_destination.host,
                            requested_destination.node)
            if node:
                requested_nodes = [x for x in hosts
                                   if x.host == host and x.nodename == node]
            else:
                requested_nodes = [x for x in hosts
                                   if x.host == host]
            if requested_nodes:
                LOG.info('Host filter only checking host %(host)s and '
                         'node %(node)s', {'host': host, 'node': node})
            else:
                # NOTE(sbauza): The API level should prevent the user from
                # providing a wrong destination but let's make sure a wrong
                # destination doesn't trample the scheduler still.
                LOG.info('No hosts matched due to not matching requested '
                         'destination (%(host)s, %(node)s)',
                         {'host': host, 'node': node})
            return iter(requested_nodes)

        ignore_hosts = spec_obj.ignore_hosts or []
        force_hosts = spec_obj.force_hosts or []
        force_nodes = spec_obj.force_nodes or []
        requested_node = spec_obj.requested_destination

        if requested_node is not None and 'host' in requested_node:
            # NOTE(sbauza): Reduce a potentially long set of hosts as much as
            # possible to any requested destination nodes before passing the
            # list to the filters
            hosts = _get_hosts_matching_request(hosts, requested_node)

        if ignore_hosts or force_hosts or force_nodes:
            # NOTE(deva): we can't assume "host" is unique because
            #             one host may have many nodes.
            name_to_cls_map = {(x.host, x.nodename): x for x in hosts}
            if ignore_hosts:
                _strip_ignore_hosts(name_to_cls_map, ignore_hosts)
                if not name_to_cls_map:
                    return []
            # NOTE(deva): allow force_hosts and force_nodes independently
            if force_hosts:
                _match_forced_hosts(name_to_cls_map, force_hosts)
            if force_nodes:
                _match_forced_nodes(name_to_cls_map, force_nodes)
            check_type = ('scheduler_hints' in spec_obj and
                          spec_obj.scheduler_hints.get('_nova_check_type'))
            if not check_type and (force_hosts or force_nodes):
                # NOTE(deva,dansmith): Skip filters when forcing host or node
                # unless we've declared the internal check type flag, in which
                # case we're asking for a specific host and for filtering to
                # be done.
                if name_to_cls_map:
                    return name_to_cls_map.values()
                else:
                    return []
            hosts = six.itervalues(name_to_cls_map)

        return self.filter_handler.get_filtered_objects(self.enabled_filters,
                hosts, spec_obj, index)

    def get_weighed_hosts(self, hosts, spec_obj):
        """Weigh the hosts."""
        return self.weight_handler.get_weighed_objects(self.weighers,
                hosts, spec_obj)

    def _get_computes_for_cells(self, context, cells, compute_uuids=None):
        """Get a tuple of compute node and service information.

        :param context: request context
        :param cells: list of CellMapping objects
        :param compute_uuids: list of ComputeNode UUIDs. If this is None, all
            compute nodes from each specified cell will be returned, otherwise
            only the ComputeNode objects with a UUID in the list of UUIDs in
            any given cell is returned. If this is an empty list, the returned
            compute_nodes tuple item will be an empty dict.

        Returns a tuple (compute_nodes, services) where:
         - compute_nodes is a cell-uuid keyed dict of compute node lists
         - services is a dict of services indexed by hostname
        """

        def targeted_operation(cctxt):
            services = objects.ServiceList.get_by_binary(
                cctxt, 'nova-compute', include_disabled=True)
            if compute_uuids is None:
                return services, objects.ComputeNodeList.get_all(cctxt)
            else:
                return services, objects.ComputeNodeList.get_all_by_uuids(
                    cctxt, compute_uuids)

        timeout = context_module.CELL_TIMEOUT
        results = context_module.scatter_gather_cells(context, cells, timeout,
                                                      targeted_operation)
        compute_nodes = collections.defaultdict(list)
        services = {}
        for cell_uuid, result in results.items():
            if isinstance(result, Exception):
                LOG.warning('Failed to get computes for cell %s', cell_uuid)
            elif result is context_module.did_not_respond_sentinel:
                LOG.warning('Timeout getting computes for cell %s', cell_uuid)
            else:
                _services, _compute_nodes = result
                compute_nodes[cell_uuid].extend(_compute_nodes)
                services.update({service.host: service
                                 for service in _services})
        return compute_nodes, services

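    # Cells that raise an error or time out in scatter_gather_cells are
    # skipped with a warning above, so scheduling proceeds with partial
    # results from the cells that did respond.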
    def _get_cell_by_host(self, ctxt, host):
        '''Get CellMapping object of a cell the given host belongs to.'''
        try:
            host_mapping = objects.HostMapping.get_by_host(ctxt, host)
            return host_mapping.cell_mapping
        except exception.HostMappingNotFound:
            LOG.warning('No host-to-cell mapping found for selected '
                        'host %(host)s.', {'host': host})
            return

    def get_compute_nodes_by_host_or_node(self, ctxt, host, node, cell=None):
        '''Get compute nodes from given host or node'''
        def return_empty_list_for_not_found(func):
            def wrapper(*args, **kwargs):
                try:
                    ret = func(*args, **kwargs)
                except exception.NotFound:
                    ret = objects.ComputeNodeList()
                return ret
            return wrapper

        @return_empty_list_for_not_found
        def _get_by_host_and_node(ctxt):
            compute_node = objects.ComputeNode.get_by_host_and_nodename(
                ctxt, host, node)
            return objects.ComputeNodeList(objects=[compute_node])

        @return_empty_list_for_not_found
        def _get_by_host(ctxt):
            return objects.ComputeNodeList.get_all_by_host(ctxt, host)

        @return_empty_list_for_not_found
        def _get_by_node(ctxt):
            compute_node = objects.ComputeNode.get_by_nodename(ctxt, node)
            return objects.ComputeNodeList(objects=[compute_node])

        if host and node:
            target_fnc = _get_by_host_and_node
        elif host:
            target_fnc = _get_by_host
        else:
            target_fnc = _get_by_node

        if host and not cell:
            # optimization not to issue queries to every cell DB
            cell = self._get_cell_by_host(ctxt, host)

        cells = [cell] if cell else self.enabled_cells

        timeout = context_module.CELL_TIMEOUT
        nodes_by_cell = context_module.scatter_gather_cells(
            ctxt, cells, timeout, target_fnc)

        # Only one cell should have values for the compute nodes
        # so we get them here, or return an empty list if no cell
        # has a value
        nodes = next(
            (nodes for nodes in nodes_by_cell.values() if nodes),
            objects.ComputeNodeList())

        return nodes

    def refresh_cells_caches(self):
        # NOTE(tssurya): This function is called from the scheduler manager's
        # reset signal handler and also upon startup of the scheduler.
        context = context_module.RequestContext()
        temp_cells = objects.CellMappingList.get_all(context)
        # NOTE(tssurya): filtering cell0 from the list since it need
        # not be considered for scheduling.
        for c in temp_cells:
            if c.is_cell0():
                temp_cells.objects.remove(c)
                # once it's done, break for optimization
                break
        # NOTE(danms, tssurya): global dict, keyed by cell uuid, of cells
        # cached which will be refreshed every time a SIGHUP is sent to the
        # scheduler.
        self.cells = {cell.uuid: cell for cell in temp_cells}
        LOG.debug('Found %(count)i cells: %(cells)s',
                  {'count': len(self.cells),
                   'cells': ', '.join(self.cells)})
        # NOTE(tssurya): Global cache of only the enabled cells. This way
        # scheduling is limited only to the enabled cells. However this
        # cache will be refreshed every time a cell is disabled or enabled
        # or when a new cell is created as long as a SIGHUP signal is sent
        # to the scheduler.
        self.enabled_cells = [c for c in temp_cells if not c.disabled]
        # Filtering the disabled cells only for logging purposes.
        if LOG.isEnabledFor(logging.DEBUG):
            disabled_cells = [c for c in temp_cells if c.disabled]
            LOG.debug('Found %(count)i disabled cells: %(cells)s',
                      {'count': len(disabled_cells),
                       'cells': ', '.join(
                           [c.identity for c in disabled_cells])})
        # Dict, keyed by host name, to cell UUID to be used to look up the
        # cell a particular host is in (used with self.cells).
        self.host_to_cell_uuid = {}

    def get_host_states_by_uuids(self, context, compute_uuids, spec_obj):
        if not self.cells:
            LOG.warning("No cells were found")

        # Restrict to a single cell if and only if the request spec has a
        # requested cell and allow_cross_cell_move=False.
        if (spec_obj and 'requested_destination' in spec_obj and
                spec_obj.requested_destination and
                'cell' in spec_obj.requested_destination and
                not spec_obj.requested_destination.allow_cross_cell_move):
            only_cell = spec_obj.requested_destination.cell
        else:
            only_cell = None

        if only_cell:
            cells = [only_cell]
        else:
            cells = self.enabled_cells

        compute_nodes, services = self._get_computes_for_cells(
            context, cells, compute_uuids=compute_uuids)
        return self._get_host_states(context, compute_nodes, services)

    def _get_host_states(self, context, compute_nodes, services):
        """Returns a generator over HostStates given a list of computes.

        Also updates the HostStates internal mapping for the HostManager.
        """
        # Get resource usage across the available compute nodes:
        host_state_map = {}
        seen_nodes = set()
        for cell_uuid, computes in compute_nodes.items():
            for compute in computes:
                service = services.get(compute.host)

                if not service:
                    LOG.warning(
                        "No compute service record found for host %(host)s",
                        {'host': compute.host})
                    continue
                host = compute.host
                node = compute.hypervisor_hostname
                state_key = (host, node)
                host_state = host_state_map.get(state_key)
                if not host_state:
                    host_state = self.host_state_cls(host, node,
                                                     cell_uuid,
                                                     compute=compute)
                    host_state_map[state_key] = host_state
                # We force to update the aggregates info each time a
                # new request comes in, because some changes on the
                # aggregates could have been happening after setting
                # this field for the first time
                host_state.update(compute,
                                  dict(service),
                                  self._get_aggregates_info(host),
                                  self._get_instance_info(context, compute))

                seen_nodes.add(state_key)

        return (host_state_map[host] for host in seen_nodes)

    def _get_aggregates_info(self, host):
        return [self.aggs_by_id[agg_id] for agg_id in
                self.host_aggregates_map[host]]

    def _get_cell_mapping_for_host(self, context, host_name):
        """Finds the CellMapping for a particular host name

        Relies on a cache to quickly fetch the CellMapping if we have looked
        up this host before, otherwise gets the CellMapping via the
        HostMapping record for the given host name.

        :param context: nova auth request context
        :param host_name: compute service host name
        :returns: CellMapping object
        :raises: HostMappingNotFound if the host is not mapped to a cell
        """
        # Check to see if we have the host in our cache.
        if host_name in self.host_to_cell_uuid:
            cell_uuid = self.host_to_cell_uuid[host_name]
            if cell_uuid in self.cells:
                return self.cells[cell_uuid]
            # Something is wrong so log a warning and just fall through to
            # lookup the HostMapping.
            LOG.warning('Host %s is expected to be in cell %s but that cell '
                        'uuid was not found in our cache. The service may '
                        'need to be restarted to refresh the cache.',
                        host_name, cell_uuid)

        # We have not cached this host yet so get the HostMapping, cache the
        # result and return the CellMapping.
        hm = objects.HostMapping.get_by_host(context, host_name)
        cell_mapping = hm.cell_mapping
        self.host_to_cell_uuid[host_name] = cell_mapping.uuid
        return cell_mapping

    def _get_instances_by_host(self, context, host_name):
        try:
            cm = self._get_cell_mapping_for_host(context, host_name)
        except exception.HostMappingNotFound:
            # It's possible to hit this when the compute service first starts
            # up and casts to update_instance_info with an empty list but
            # before the host is mapped in the API database.
            LOG.info('Host mapping not found for host %s. Not tracking '
                     'instance info for this host.', host_name)
            return {}
        with context_module.target_cell(context, cm) as cctxt:
            uuids = objects.InstanceList.get_uuids_by_host(cctxt, host_name)
            # Putting the context in the otherwise fake Instance object at
            # least allows out of tree filters to lazy-load fields.
            return {uuid: objects.Instance(cctxt, uuid=uuid)
                    for uuid in uuids}

    def _get_instance_info(self, context, compute):
        """Gets the host instance info from the compute host.

        Some sites may disable ``track_instance_changes`` for performance or
        isolation reasons. In either of these cases, there will either be no
        information for the host, or the 'updated' value for that host dict
        will be False. In those cases, we need to grab the current
        InstanceList instead of relying on the version in _instance_info.
        """
        host_name = compute.host
        host_info = self._instance_info.get(host_name)
        if host_info and host_info.get("updated"):
            inst_dict = host_info["instances"]
        else:
            # Updates aren't flowing from nova-compute.
            inst_dict = self._get_instances_by_host(context, host_name)
        return inst_dict

    def _recreate_instance_info(self, context, host_name):
        """Get the InstanceList for the specified host, and store it in the
        _instance_info dict.
        """
        inst_dict = self._get_instances_by_host(context, host_name)
        host_info = self._instance_info[host_name] = {}
        host_info["instances"] = inst_dict
        host_info["updated"] = False

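    # The three methods below receive instance updates from the compute
    # nodes: full or incremental updates, single-instance deletions, and
    # periodic UUID sync checks. They share HOST_INSTANCE_SEMAPHORE so the
    # per-host view in _instance_info is always updated atomically.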
    @utils.synchronized(HOST_INSTANCE_SEMAPHORE)
    def update_instance_info(self, context, host_name, instance_info):
        """Receives an InstanceList object from a compute node.

        This method receives information from a compute node when it starts
        up, or when its instances have changed, and updates its view of hosts
        and instances with it.
        """
        host_info = self._instance_info.get(host_name)
        if host_info:
            inst_dict = host_info.get("instances")
            for instance in instance_info.objects:
                # Overwrite the entry (if any) with the new info.
                inst_dict[instance.uuid] = instance
            host_info["updated"] = True
        else:
            instances = instance_info.objects
            if len(instances) > 1:
                # This is a host sending its full instance list, so use it.
                host_info = self._instance_info[host_name] = {}
                host_info["instances"] = {instance.uuid: instance
                                          for instance in instances}
                host_info["updated"] = True
            else:
                self._recreate_instance_info(context, host_name)
                LOG.info("Received an update from an unknown host '%s'. "
                         "Re-created its InstanceList.", host_name)

    @utils.synchronized(HOST_INSTANCE_SEMAPHORE)
    def delete_instance_info(self, context, host_name, instance_uuid):
        """Receives the UUID from a compute node when one of its instances is
        terminated.

        The instance in the local view of the host's instances is removed.
        """
        host_info = self._instance_info.get(host_name)
        if host_info:
            inst_dict = host_info["instances"]
            # Remove the existing Instance object, if any
            inst_dict.pop(instance_uuid, None)
            host_info["updated"] = True
        else:
            self._recreate_instance_info(context, host_name)
            LOG.info("Received a delete update from an unknown host '%s'. "
                     "Re-created its InstanceList.", host_name)

    @utils.synchronized(HOST_INSTANCE_SEMAPHORE)
    def sync_instance_info(self, context, host_name, instance_uuids):
        """Receives the uuids of the instances on a host.

        This method is periodically called by the compute nodes, which send a
        list of all the UUID values for the instances on that node. This is
        used by the scheduler's HostManager to detect when its view of the
        compute node's instances is out of sync.
        """
        host_info = self._instance_info.get(host_name)
        if host_info:
            local_set = set(host_info["instances"].keys())
            compute_set = set(instance_uuids)
            if not local_set == compute_set:
                self._recreate_instance_info(context, host_name)
                LOG.info("The instance sync for host '%s' did not match. "
                         "Re-created its InstanceList.", host_name)
                return
            host_info["updated"] = True
            LOG.debug("Successfully synced instances from host '%s'.",
                      host_name)
        else:
            self._recreate_instance_info(context, host_name)
            LOG.info("Received a sync request from an unknown host '%s'. "
                     "Re-created its InstanceList.", host_name)