OpenStack Compute (Nova)

# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Handles database requests from other nova services."""

import contextlib
import copy
import functools

from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import versionutils
import six

from nova import availability_zones
from nova.compute import instance_actions
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute.utils import wrap_instance_event
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.conductor.tasks import migrate
from nova import context as nova_context
from nova.db import base
from nova import exception
from nova.i18n import _
from nova import image
from nova import manager
from nova import network
from nova import notifications
from nova import objects
from nova.objects import base as nova_object
from nova import profiler
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


def targets_cell(fn):
    """Wrap a method and automatically target the instance's cell.

    This decorates a method with signature func(self, context, instance, ...)
    and automatically targets the context with the instance's cell
    mapping. It does this by looking up the InstanceMapping.
    """
    @functools.wraps(fn)
    def wrapper(self, context, *args, **kwargs):
        instance = kwargs.get('instance') or args[0]
        try:
            im = objects.InstanceMapping.get_by_instance_uuid(
                context, instance.uuid)
        except exception.InstanceMappingNotFound:
            LOG.error('InstanceMapping not found, unable to target cell',
                      instance=instance)
            im = None
        else:
            LOG.debug('Targeting cell %(cell)s for conductor method %(meth)s',
                      {'cell': im.cell_mapping.identity,
                       'meth': fn.__name__})
            # NOTE(danms): Target our context to the cell for the rest of
            # this request, so that none of the subsequent code needs to
            # care about it.
            nova_context.set_target_cell(context, im.cell_mapping)
        return fn(self, context, *args, **kwargs)
    return wrapper
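
# Illustrative usage (not part of the upstream module): conductor methods
# that act on a single instance are decorated with @targets_cell so every
# database access in the method body hits that instance's cell, e.g.:
#
#     @targets_cell
#     def some_instance_method(self, context, instance, ...):
#         # context is now targeted at the instance's cell mapping
#         ...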


class ConductorManager(manager.Manager):
    """Mission: Conduct things.

    The methods in the base API for nova-conductor are various proxy
    operations performed on behalf of the nova-compute service running on
    compute nodes. Compute nodes are not allowed to directly access the
    database, so this set of methods allows them to get specific work done
    without locally accessing the database.

    The nova-conductor service also exposes an API in the 'compute_task'
    namespace. See the ComputeTaskManager class for details.
    """

    target = messaging.Target(version='3.0')

    def __init__(self, *args, **kwargs):
        super(ConductorManager, self).__init__(service_name='conductor',
                                               *args, **kwargs)
        self.compute_task_mgr = ComputeTaskManager()
        self.additional_endpoints.append(self.compute_task_mgr)

    # NOTE(hanlind): This can be removed in version 4.0 of the RPC API
    def provider_fw_rule_get_all(self, context):
        # NOTE(hanlind): Simulate an empty db result for compat reasons.
        return []

    def _object_dispatch(self, target, method, args, kwargs):
        """Dispatch a call to an object method.

        This ensures that object methods get called and any exception
        that is raised gets wrapped in an ExpectedException for forwarding
        back to the caller (without spamming the conductor logs).
        """
        try:
            # NOTE(danms): Keep the getattr inside the try block since
            # a missing method is really a client problem
            return getattr(target, method)(*args, **kwargs)
        except Exception:
            raise messaging.ExpectedException()

    def object_class_action_versions(self, context, objname, objmethod,
                                     object_versions, args, kwargs):
        objclass = nova_object.NovaObject.obj_class_from_name(
            objname, object_versions[objname])
        args = tuple([context] + list(args))
        result = self._object_dispatch(objclass, objmethod, args, kwargs)
        # NOTE(danms): The RPC layer will convert to primitives for us,
        # but in this case, we need to honor the version the client is
        # asking for, so we do it before returning here.
        # NOTE(hanlind): Do not convert older than requested objects,
        # see bug #1596119.
        if isinstance(result, nova_object.NovaObject):
            target_version = object_versions[objname]
            requested_version = versionutils.convert_version_to_tuple(
                target_version)
            actual_version = versionutils.convert_version_to_tuple(
                result.VERSION)
            do_backport = requested_version < actual_version
            other_major_version = requested_version[0] != actual_version[0]
            if do_backport or other_major_version:
                result = result.obj_to_primitive(
                    target_version=target_version,
                    version_manifest=object_versions)
        return result

    def object_action(self, context, objinst, objmethod, args, kwargs):
        """Perform an action on an object."""
        oldobj = objinst.obj_clone()
        result = self._object_dispatch(objinst, objmethod, args, kwargs)
        updates = dict()
        # NOTE(danms): Diff the object with the one passed to us and
        # generate a list of changes to forward back
        for name, field in objinst.fields.items():
            if not objinst.obj_attr_is_set(name):
                # Avoid demand-loading anything
                continue
            if (not oldobj.obj_attr_is_set(name) or
                    getattr(oldobj, name) != getattr(objinst, name)):
                updates[name] = field.to_primitive(objinst, name,
                                                   getattr(objinst, name))
        # This is safe since a field named this would conflict with the
        # method anyway
        updates['obj_what_changed'] = objinst.obj_what_changed()
        return updates, result

    def object_backport_versions(self, context, objinst, object_versions):
        target = object_versions[objinst.obj_name()]
        LOG.debug('Backporting %(obj)s to %(ver)s with versions %(manifest)s',
                  {'obj': objinst.obj_name(),
                   'ver': target,
                   'manifest': ','.join(
                       ['%s=%s' % (name, ver)
                        for name, ver in object_versions.items()])})
        return objinst.obj_to_primitive(target_version=target,
                                        version_manifest=object_versions)

    def reset(self):
        objects.Service.clear_min_version_cache()
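
# Illustrative note (not part of the upstream module): the object_* methods
# above back versioned-object remoting for nova-compute. A compute node that
# is not allowed to reach the database calls them over RPC; the conductor
# runs the object method locally (e.g. Instance.save()), returns the changed
# fields, and, when needed, backports the result to the object version the
# caller understands.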


@contextlib.contextmanager
def try_target_cell(context, cell):
    """If cell is not None call func with context.target_cell.

    This is a method to help during the transition period. Currently
    various mappings may not exist if a deployment has not migrated to
    cellsv2. If there is no mapping call the func as normal, otherwise
    call it in a target_cell context.
    """
    if cell:
        with nova_context.target_cell(context, cell) as cell_context:
            yield cell_context
    else:
        yield context


@contextlib.contextmanager
def obj_target_cell(obj, cell):
    """Run with object's context set to a specific cell"""
    with try_target_cell(obj._context, cell) as target:
        with obj.obj_alternate_context(target):
            yield target
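
# Illustrative usage (not part of the upstream module): these helpers are
# used below to run object operations against a specific cell database,
# e.g.:
#
#     with obj_target_cell(instance, cell) as cctxt:
#         instance.create()   # row is written to that cell's database
#
# try_target_cell() tolerates cell=None for deployments that have not yet
# migrated to cells v2.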


@profiler.trace_cls("rpc")
class ComputeTaskManager(base.Base):
    """Namespace for compute methods.

    This class presents an rpc API for nova-conductor under the 'compute_task'
    namespace. The methods here are compute operations that are invoked
    by the API service. These methods see the operation to completion, which
    may involve coordinating activities on multiple compute nodes.
    """

    target = messaging.Target(namespace='compute_task', version='1.17')

    def __init__(self):
        super(ComputeTaskManager, self).__init__()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.image_api = image.API()
        self.network_api = network.API()
        self.servicegroup_api = servicegroup.API()
        self.scheduler_client = scheduler_client.SchedulerClient()
        self.notifier = rpc.get_notifier('compute', CONF.host)

    def reset(self):
        LOG.info('Reloading compute RPC API')
        compute_rpcapi.LAST_VERSION = None
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()

    # TODO(tdurakov): remove `live` parameter here on compute task api RPC
    # version bump to 2.x
    @messaging.expected_exceptions(
        exception.NoValidHost,
        exception.ComputeServiceUnavailable,
        exception.ComputeHostNotFound,
        exception.InvalidHypervisorType,
        exception.InvalidCPUInfo,
        exception.UnableToMigrateToSelf,
        exception.DestinationHypervisorTooOld,
        exception.InvalidLocalStorage,
        exception.InvalidSharedStorage,
        exception.HypervisorUnavailable,
        exception.InstanceInvalidState,
        exception.MigrationPreCheckError,
        exception.MigrationPreCheckClientException,
        exception.LiveMigrationWithOldNovaNotSupported,
        exception.UnsupportedPolicyException)
    @targets_cell
    @wrap_instance_event(prefix='conductor')
    def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
                       flavor, block_migration, disk_over_commit,
                       reservations=None, clean_shutdown=True,
                       request_spec=None):
        if instance and not isinstance(instance, nova_object.NovaObject):
            # NOTE(danms): Until v2 of the RPC API, we need to tolerate
            # old-world instance objects here
            attrs = ['metadata', 'system_metadata', 'info_cache',
                     'security_groups']
            instance = objects.Instance._from_db_object(
                context, objects.Instance(), instance,
                expected_attrs=attrs)
        # NOTE: Remove this when we drop support for v1 of the RPC API
        if flavor and not isinstance(flavor, objects.Flavor):
            # Code downstream may expect extra_specs to be populated since it
            # is receiving an object, so lookup the flavor to ensure this.
            flavor = objects.Flavor.get_by_id(context, flavor['id'])
        if live and not rebuild and not flavor:
            self._live_migrate(context, instance, scheduler_hint,
                               block_migration, disk_over_commit,
                               request_spec)
        elif not live and not rebuild and flavor:
            instance_uuid = instance.uuid
            with compute_utils.EventReporter(context, 'cold_migrate',
                                             instance_uuid):
                self._cold_migrate(context, instance, flavor,
                                   scheduler_hint['filter_properties'],
                                   reservations, clean_shutdown,
                                   request_spec)
        else:
            raise NotImplementedError()

    def _cold_migrate(self, context, instance, flavor, filter_properties,
                      reservations, clean_shutdown, request_spec):
        image = utils.get_image_from_system_metadata(
            instance.system_metadata)
        # NOTE(sbauza): If a reschedule occurs during prep_resize(), then
        # it only provides the filter_properties legacy dict back to the
        # conductor with no RequestSpec part of the payload.
        if not request_spec:
            # Make sure we hydrate a new RequestSpec object with the new
            # flavor and not the nested one from the instance
            request_spec = objects.RequestSpec.from_components(
                context, instance.uuid, image,
                flavor, instance.numa_topology, instance.pci_requests,
                filter_properties, None, instance.availability_zone)
        else:
            # NOTE(sbauza): A resize means a new flavor, so we need to update
            # the original RequestSpec object to make sure the scheduler
            # verifies the right one and not the original flavor
            request_spec.flavor = flavor

        task = self._build_cold_migrate_task(context, instance, flavor,
                                             request_spec,
                                             reservations, clean_shutdown)
        # TODO(sbauza): Provide directly the RequestSpec object once
        # _set_vm_state_and_notify() accepts it
        legacy_spec = request_spec.to_legacy_request_spec_dict()
        try:
            task.execute()
        except exception.NoValidHost as ex:
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server',
                                          updates, ex, legacy_spec)

            # if the flavor IDs match, it's migrate; otherwise resize
            if flavor.id == instance.instance_type_id:
                msg = _("No valid host found for cold migrate")
            else:
                msg = _("No valid host found for resize")
            raise exception.NoValidHost(reason=msg)
        except exception.UnsupportedPolicyException as ex:
            with excutils.save_and_reraise_exception():
                vm_state = instance.vm_state
                if not vm_state:
                    vm_state = vm_states.ACTIVE
                updates = {'vm_state': vm_state, 'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, legacy_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': instance.vm_state,
                           'task_state': None}
                self._set_vm_state_and_notify(context, instance.uuid,
                                              'migrate_server',
                                              updates, ex, legacy_spec)
        # NOTE(sbauza): Make sure we persist the new flavor in case we had
        # a successful scheduler call if and only if nothing bad happened
        if request_spec.obj_what_changed():
            request_spec.save()

    def _set_vm_state_and_notify(self, context, instance_uuid, method,
                                 updates, ex, request_spec):
        scheduler_utils.set_vm_state_and_notify(
            context, instance_uuid, 'compute_task', method, updates,
            ex, request_spec)

    def _cleanup_allocated_networks(
            self, context, instance, requested_networks):
        try:
            # If we were told not to allocate networks let's save ourselves
            # the trouble of calling the network API.
            if not (requested_networks and requested_networks.no_allocate):
                self.network_api.deallocate_for_instance(
                    context, instance, requested_networks=requested_networks)
        except Exception:
            LOG.exception('Failed to deallocate networks', instance=instance)
            return

        instance.system_metadata['network_allocated'] = 'False'
        try:
            instance.save()
        except exception.InstanceNotFound:
            # NOTE: It's possible that we're cleaning up the networks
            # because the instance was deleted. If that's the case then this
            # exception will be raised by instance.save()
            pass

    @wrap_instance_event(prefix='conductor')
    def live_migrate_instance(self, context, instance, scheduler_hint,
                              block_migration, disk_over_commit,
                              request_spec):
        self._live_migrate(context, instance, scheduler_hint,
                           block_migration, disk_over_commit, request_spec)

    def _live_migrate(self, context, instance, scheduler_hint,
                      block_migration, disk_over_commit, request_spec):
        destination = scheduler_hint.get("host")

        def _set_vm_state(context, instance, ex, vm_state=None,
                          task_state=None):
            request_spec = {'instance_properties': {
                'uuid': instance.uuid, },
            }
            scheduler_utils.set_vm_state_and_notify(context,
                    instance.uuid,
                    'compute_task', 'migrate_server',
                    dict(vm_state=vm_state,
                         task_state=task_state,
                         expected_task_state=task_states.MIGRATING,),
                    ex, request_spec)

        migration = objects.Migration(context=context.elevated())
        migration.dest_compute = destination
        migration.status = 'accepted'
        migration.instance_uuid = instance.uuid
        migration.source_compute = instance.host
        migration.migration_type = 'live-migration'
        if instance.obj_attr_is_set('flavor'):
            migration.old_instance_type_id = instance.flavor.id
            migration.new_instance_type_id = instance.flavor.id
        else:
            migration.old_instance_type_id = instance.instance_type_id
            migration.new_instance_type_id = instance.instance_type_id
        migration.create()

        task = self._build_live_migrate_task(context, instance, destination,
                                             block_migration,
                                             disk_over_commit,
                                             migration, request_spec)
        try:
            task.execute()
        except (exception.NoValidHost,
                exception.ComputeHostNotFound,
                exception.ComputeServiceUnavailable,
                exception.InvalidHypervisorType,
                exception.InvalidCPUInfo,
                exception.UnableToMigrateToSelf,
                exception.DestinationHypervisorTooOld,
                exception.InvalidLocalStorage,
                exception.InvalidSharedStorage,
                exception.HypervisorUnavailable,
                exception.InstanceInvalidState,
                exception.MigrationPreCheckError,
                exception.MigrationPreCheckClientException,
                exception.LiveMigrationWithOldNovaNotSupported,
                exception.MigrationSchedulerRPCError) as ex:
            with excutils.save_and_reraise_exception():
                # TODO(johngarbutt) - eventually need instance actions here
                _set_vm_state(context, instance, ex, instance.vm_state)
                migration.status = 'error'
                migration.save()
        except Exception as ex:
            LOG.error('Migration of instance %(instance_id)s to host'
                      ' %(dest)s unexpectedly failed.',
                      {'instance_id': instance.uuid, 'dest': destination},
                      exc_info=True)
            # Reset the task state to None to indicate completion of
            # the operation as it is done in case of known exceptions.
            _set_vm_state(context, instance, ex, vm_states.ERROR,
                          task_state=None)
            migration.status = 'error'
            migration.save()
            raise exception.MigrationError(reason=six.text_type(ex))

    def _build_live_migrate_task(self, context, instance, destination,
                                 block_migration, disk_over_commit, migration,
                                 request_spec=None):
        return live_migrate.LiveMigrationTask(context, instance,
                                              destination, block_migration,
                                              disk_over_commit, migration,
                                              self.compute_rpcapi,
                                              self.servicegroup_api,
                                              self.scheduler_client,
                                              request_spec)

    def _build_cold_migrate_task(self, context, instance, flavor,
                                 request_spec, reservations,
                                 clean_shutdown):
        return migrate.MigrationTask(context, instance, flavor,
                                     request_spec,
                                     reservations, clean_shutdown,
                                     self.compute_rpcapi,
                                     self.scheduler_client)

    def _destroy_build_request(self, context, instance):
        # The BuildRequest needs to be stored until the instance is mapped to
        # an instance table. At that point it will never be used again and
        # should be deleted.
        build_request = objects.BuildRequest.get_by_instance_uuid(
            context, instance.uuid)
        # TODO(alaski): Sync API updates of the build_request to the
        # instance before it is destroyed. Right now only locked_by can
        # be updated before this is destroyed.
        build_request.destroy()

    def _populate_instance_mapping(self, context, instance, host):
        try:
            inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
                context, instance.uuid)
        except exception.InstanceMappingNotFound:
            # NOTE(alaski): If nova-api is up to date this exception should
            # never be hit. But during an upgrade it's possible that an old
            # nova-api didn't create an instance_mapping during this boot
            # request.
            LOG.debug('Instance was not mapped to a cell, likely due '
                      'to an older nova-api service running.',
                      instance=instance)
            return None
        else:
            try:
                host_mapping = objects.HostMapping.get_by_host(context,
                                                               host['host'])
            except exception.HostMappingNotFound:
                # NOTE(alaski): For now this exception means that a
                # deployment has not migrated to cellsv2 and we should
                # remove the instance_mapping that has been created.
                # Eventually this will indicate a failure to properly map a
                # host to a cell and we may want to reschedule.
                inst_mapping.destroy()
                return None
            else:
                inst_mapping.cell_mapping = host_mapping.cell_mapping
                inst_mapping.save()
                return inst_mapping

    # NOTE(danms): This is never cell-targeted because it is only used for
    # cellsv1 (which does not target cells directly) and n-cpu reschedules
    # (which go to the cell conductor and thus are always cell-specific).
    def build_instances(self, context, instances, image, filter_properties,
                        admin_password, injected_files, requested_networks,
                        security_groups, block_device_mapping=None,
                        legacy_bdm=True):
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in
        # version 2.0 of the RPC API.
        # TODO(danms): Remove this in version 2.0 of the RPC API
        if (requested_networks and
                not isinstance(requested_networks,
                               objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList.from_tuples(
                requested_networks)
        # TODO(melwitt): Remove this in version 2.0 of the RPC API
        flavor = filter_properties.get('instance_type')
        if flavor and not isinstance(flavor, objects.Flavor):
            # Code downstream may expect extra_specs to be populated since it
            # is receiving an object, so lookup the flavor to ensure this.
            flavor = objects.Flavor.get_by_id(context, flavor['id'])
            filter_properties = dict(filter_properties, instance_type=flavor)

        request_spec = {}
        try:
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            # TODO(sbauza): Provide directly the RequestSpec object
            # when _set_vm_state_and_notify() and populate_retry()
            # accept it
            request_spec = scheduler_utils.build_request_spec(
                context, image, instances)
            scheduler_utils.populate_retry(
                filter_properties, instances[0].uuid)
            instance_uuids = [instance.uuid for instance in instances]
            spec_obj = objects.RequestSpec.from_primitives(
                context, request_spec, filter_properties)
            hosts = self._schedule_instances(
                context, spec_obj, instance_uuids)
        except Exception as exc:
            updates = {'vm_state': vm_states.ERROR, 'task_state': None}
            for instance in instances:
                self._set_vm_state_and_notify(
                    context, instance.uuid, 'build_instances', updates,
                    exc, request_spec)
                try:
                    # If the BuildRequest stays around then instance
                    # show/lists will pull from it rather than the errored
                    # instance.
                    self._destroy_build_request(context, instance)
                except exception.BuildRequestNotFound:
                    pass
                self._cleanup_allocated_networks(
                    context, instance, requested_networks)
            return

        for (instance, host) in six.moves.zip(instances, hosts):
            instance.availability_zone = (
                availability_zones.get_host_availability_zone(
                    context, host['host']))
            try:
                # NOTE(danms): This saves the az change above, refreshes our
                # instance, and tells us if it has been deleted underneath us
                instance.save()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                                                       host)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
            # This is populated in scheduler_utils.populate_retry
            num_attempts = local_filter_props.get('retry',
                                                  {}).get('num_attempts', 1)
            if num_attempts <= 1:
                # If this is a reschedule the instance is already mapped to
                # this cell and the BuildRequest is already deleted so ignore
                # the logic below.
                inst_mapping = self._populate_instance_mapping(context,
                                                               instance,
                                                               host)
                try:
                    self._destroy_build_request(context, instance)
                except exception.BuildRequestNotFound:
                    # This indicates an instance delete has been requested
                    # in the API. Stop the build, cleanup the
                    # instance_mapping and potentially the
                    # block_device_mappings
                    # TODO(alaski): Handle block_device_mapping cleanup
                    if inst_mapping:
                        inst_mapping.destroy()
                    return

            self.compute_rpcapi.build_and_run_instance(context,
                    instance=instance, host=host['host'], image=image,
                    request_spec=request_spec,
                    filter_properties=local_filter_props,
                    admin_password=admin_password,
                    injected_files=injected_files,
                    requested_networks=requested_networks,
                    security_groups=security_groups,
                    block_device_mapping=bdms, node=host['nodename'],
                    limits=host['limits'])

    def _schedule_instances(self, context, request_spec,
                            instance_uuids=None):
        scheduler_utils.setup_instance_group(context, request_spec)
        hosts = self.scheduler_client.select_destinations(context,
            request_spec, instance_uuids)
        return hosts

    @targets_cell
    def unshelve_instance(self, context, instance, request_spec=None):
        sys_meta = instance.system_metadata

        def safe_image_show(ctx, image_id):
            if image_id:
                return self.image_api.get(ctx, image_id, show_deleted=False)
            else:
                raise exception.ImageNotFound(image_id='')

        if instance.vm_state == vm_states.SHELVED:
            instance.task_state = task_states.POWERING_ON
            instance.save(expected_task_state=task_states.UNSHELVING)
            self.compute_rpcapi.start_instance(context, instance)
        elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
            image = None
            image_id = sys_meta.get('shelved_image_id')
            # No need to check for image if image_id is None as
            # "shelved_image_id" key is not set for volume backed
            # instance during the shelve process
            if image_id:
                with compute_utils.EventReporter(
                        context, 'get_image_info', instance.uuid):
                    try:
                        image = safe_image_show(context, image_id)
                    except exception.ImageNotFound:
                        instance.vm_state = vm_states.ERROR
                        instance.save()

                        reason = _('Unshelve attempted but the image %s '
                                   'cannot be found.') % image_id
                        LOG.error(reason, instance=instance)
                        raise exception.UnshelveException(
                            instance_id=instance.uuid, reason=reason)

            try:
                with compute_utils.EventReporter(context,
                                                 'schedule_instances',
                                                 instance.uuid):
                    if not request_spec:
                        # NOTE(sbauza): We were unable to find an original
                        # RequestSpec object - probably because the instance
                        # is old. We need to mock that the old way
                        filter_properties = {}
                        request_spec = scheduler_utils.build_request_spec(
                            context, image, [instance])
                    else:
                        # NOTE(sbauza): Force_hosts/nodes needs to be reset
                        # if we want to make sure that the next destination
                        # is not forced to be the original host
                        request_spec.reset_forced_destinations()
                        # TODO(sbauza): Provide directly the RequestSpec
                        # object when populate_filter_properties and
                        # populate_retry() accept it
                        filter_properties = request_spec.\
                            to_legacy_filter_properties_dict()
                        request_spec = request_spec.\
                            to_legacy_request_spec_dict()
                    scheduler_utils.populate_retry(filter_properties,
                                                   instance.uuid)
                    request_spec = objects.RequestSpec.from_primitives(
                        context, request_spec, filter_properties)
                    # NOTE(cfriesen): Ensure that we restrict the scheduler to
                    # the cell specified by the instance mapping.
                    instance_mapping = \
                        objects.InstanceMapping.get_by_instance_uuid(
                            context, instance.uuid)
                    LOG.debug('Requesting cell %(cell)s while unshelving',
                              {'cell':
                                   instance_mapping.cell_mapping.identity},
                              instance=instance)
                    if ('requested_destination' in request_spec and
                            request_spec.requested_destination):
                        request_spec.requested_destination.cell = (
                            instance_mapping.cell_mapping)
                    else:
                        request_spec.requested_destination = (
                            objects.Destination(
                                cell=instance_mapping.cell_mapping))

                    hosts = self._schedule_instances(context, request_spec,
                                                     [instance.uuid])
                    host_state = hosts[0]
                    scheduler_utils.populate_filter_properties(
                        filter_properties, host_state)
                    (host, node) = (host_state['host'],
                                    host_state['nodename'])
                    instance.availability_zone = (
                        availability_zones.get_host_availability_zone(
                            context, host))
                    self.compute_rpcapi.unshelve_instance(
                        context, instance, host, image=image,
                        filter_properties=filter_properties, node=node)
            except (exception.NoValidHost,
                    exception.UnsupportedPolicyException):
                instance.task_state = None
                instance.save()
                LOG.warning("No valid host found for unshelve instance",
                            instance=instance)
                return
            except Exception:
                with excutils.save_and_reraise_exception():
                    instance.task_state = None
                    instance.save()
                    LOG.error("Unshelve attempted but an error "
                              "has occurred", instance=instance)
        else:
            LOG.error('Unshelve attempted but vm_state not SHELVED or '
                      'SHELVED_OFFLOADED', instance=instance)
            instance.vm_state = vm_states.ERROR
            instance.save()
            return

    def _allocate_for_evacuate_dest_host(self, context, instance, host,
                                         request_spec=None):
        # The user is forcing the destination host and bypassing the
        # scheduler. We need to copy the source compute node
        # allocations in Placement to the destination compute node.
        # Normally select_destinations() in the scheduler would do this
        # for us, but when forcing the target host we don't call the
        # scheduler.
        source_node = None  # This is used for error handling below.
        try:
            source_node = objects.ComputeNode.get_by_host_and_nodename(
                context, instance.host, instance.node)
            dest_node = (
                objects.ComputeNode.get_first_node_by_host_for_old_compat(
                    context, host, use_slave=True))
        except exception.ComputeHostNotFound as ex:
            with excutils.save_and_reraise_exception():
                # TODO(mriedem): This ugly RequestSpec handling should be
                # tucked away in _set_vm_state_and_notify.
                if request_spec:
                    request_spec = \
                        request_spec.to_legacy_request_spec_dict()
                else:
                    request_spec = {}
                self._set_vm_state_and_notify(
                    context, instance.uuid, 'rebuild_server',
                    {'vm_state': instance.vm_state,
                     'task_state': None}, ex, request_spec)
                if source_node:
                    LOG.warning('Specified host %s for evacuate was not '
                                'found.', host, instance=instance)
                else:
                    LOG.warning('Source host %s and node %s for evacuate was '
                                'not found.', instance.host, instance.node,
                                instance=instance)

        # TODO(mriedem): In Queens, call select_destinations() with a
        # skip_filters=True flag so the scheduler does the work of
        # claiming resources on the destination in Placement but still
        # bypass the scheduler filters, which honors the 'force' flag
        # in the API.
        try:
            scheduler_utils.claim_resources_on_destination(
                self.scheduler_client.reportclient, instance,
                source_node, dest_node)
        except exception.NoValidHost as ex:
            with excutils.save_and_reraise_exception():
                # TODO(mriedem): This ugly RequestSpec handling should be
                # tucked away in _set_vm_state_and_notify.
                if request_spec:
                    request_spec = \
                        request_spec.to_legacy_request_spec_dict()
                else:
                    request_spec = {}
                self._set_vm_state_and_notify(
                    context, instance.uuid, 'rebuild_server',
                    {'vm_state': instance.vm_state,
                     'task_state': None}, ex, request_spec)
                LOG.warning('Specified host %s for evacuate is '
                            'invalid.', host, instance=instance)

    @targets_cell
    def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate, on_shared_storage,
                         preserve_ephemeral=False, host=None,
                         request_spec=None):
        with compute_utils.EventReporter(context, 'rebuild_server',
                                         instance.uuid):
            node = limits = None

            try:
                migration = objects.Migration.get_by_instance_and_status(
                    context, instance.uuid, 'accepted')
            except exception.MigrationNotFoundByStatus:
                LOG.debug("No migration record for the rebuild/evacuate "
                          "request.", instance=instance)
                migration = None

            # The host variable is passed in two cases:
            # 1. rebuild - the instance.host is passed to rebuild on the
            #    same host and bypass the scheduler.
            # 2. evacuate with specified host and force=True - the specified
            #    host is passed and is meant to bypass the scheduler.
            # NOTE(mriedem): This could be a lot more straight-forward if we
            # had separate methods for rebuild and evacuate...
            if host:
                # We only create a new allocation on the specified host if
                # we're doing an evacuate since that is a move operation.
                if host != instance.host:
                    # If a destination host is forced for evacuate, create
                    # allocations against it in Placement.
                    self._allocate_for_evacuate_dest_host(
                        context, instance, host, request_spec)
            else:
                # At this point, either the user is doing a rebuild on the
                # same host (not evacuate), or they are evacuating and
                # specified a host but are not forcing it. The API passes
                # host=None in this case but sets up the
                # RequestSpec.requested_destination field for the specified
                # host.
                if not request_spec:
                    # NOTE(sbauza): We were unable to find an original
                    # RequestSpec object - probably because the instance is
                    # old. We need to mock that the old way
                    # TODO(sbauza): Provide directly the RequestSpec object
                    # when _set_vm_state_and_notify() accepts it
                    filter_properties = {'ignore_hosts': [instance.host]}
                    request_spec = scheduler_utils.build_request_spec(
                        context, image_ref, [instance])
                    request_spec = objects.RequestSpec.from_primitives(
                        context, request_spec, filter_properties)
                else:
                    # NOTE(sbauza): Augment the RequestSpec object by
                    # excluding the source host so the scheduler does not
                    # pick it
                    request_spec.ignore_hosts = (request_spec.ignore_hosts or
                                                 [])
                    request_spec.ignore_hosts.append(instance.host)
                    # NOTE(sbauza): Force_hosts/nodes needs to be reset
                    # if we want to make sure that the next destination
                    # is not forced to be the original host
                    request_spec.reset_forced_destinations()
                try:
                    hosts = self._schedule_instances(context, request_spec,
                                                     [instance.uuid])
                    host_dict = hosts.pop(0)
                    host, node, limits = (host_dict['host'],
                                          host_dict['nodename'],
                                          host_dict['limits'])
                except exception.NoValidHost as ex:
                    if migration:
                        migration.status = 'error'
                        migration.save()
                    request_spec = request_spec.to_legacy_request_spec_dict()
                    with excutils.save_and_reraise_exception():
                        self._set_vm_state_and_notify(context, instance.uuid,
                                'rebuild_server',
                                {'vm_state': instance.vm_state,
                                 'task_state': None}, ex, request_spec)
                        LOG.warning("No valid host found for rebuild",
                                    instance=instance)
                except exception.UnsupportedPolicyException as ex:
                    if migration:
                        migration.status = 'error'
                        migration.save()
                    request_spec = request_spec.to_legacy_request_spec_dict()
                    with excutils.save_and_reraise_exception():
                        self._set_vm_state_and_notify(context, instance.uuid,
                                'rebuild_server',
                                {'vm_state': instance.vm_state,
                                 'task_state': None}, ex, request_spec)
                        LOG.warning("Server with unsupported policy "
                                    "cannot be rebuilt", instance=instance)

            compute_utils.notify_about_instance_usage(
                self.notifier, context, instance, "rebuild.scheduled")

            instance.availability_zone = (
                availability_zones.get_host_availability_zone(
                    context, host))

            self.compute_rpcapi.rebuild_instance(context,
                    instance=instance,
                    new_pass=new_pass,
                    injected_files=injected_files,
                    image_ref=image_ref,
                    orig_image_ref=orig_image_ref,
                    orig_sys_metadata=orig_sys_metadata,
                    bdms=bdms,
                    recreate=recreate,
                    on_shared_storage=on_shared_storage,
                    preserve_ephemeral=preserve_ephemeral,
                    migration=migration,
                    host=host, node=node, limits=limits)

    # TODO(avolkov): move method to bdm
    @staticmethod
    def _volume_size(instance_type, bdm):
        size = bdm.get('volume_size')
        # NOTE (ndipanov): inherit flavor size only for swap and ephemeral
        if (size is None and bdm.get('source_type') == 'blank' and
                bdm.get('destination_type') == 'local'):
            if bdm.get('guest_format') == 'swap':
                size = instance_type.get('swap', 0)
            else:
                size = instance_type.get('ephemeral_gb', 0)
        return size

    def _create_block_device_mapping(self, cell, instance_type, instance_uuid,
                                     block_device_mapping):
        """Create the BlockDeviceMapping objects in the db.

        This method makes a copy of the list in order to avoid using the same
        id field in case this is called for multiple instances.
        """
        LOG.debug("block_device_mapping %s", list(block_device_mapping),
                  instance_uuid=instance_uuid)
        instance_block_device_mapping = copy.deepcopy(block_device_mapping)
        for bdm in instance_block_device_mapping:
            bdm.volume_size = self._volume_size(instance_type, bdm)
            bdm.instance_uuid = instance_uuid
            with obj_target_cell(bdm, cell):
                bdm.update_or_create()
        return instance_block_device_mapping

    def _create_tags(self, context, instance_uuid, tags):
        """Create the Tags objects in the db."""
        if tags:
            tag_list = [tag.tag for tag in tags]
            instance_tags = objects.TagList.create(
                context, instance_uuid, tag_list)
            return instance_tags
        else:
            return tags

    def _bury_in_cell0(self, context, request_spec, exc,
                       build_requests=None, instances=None):
        """Ensure all provided build_requests and instances end up in cell0.

        Cell0 is the fake cell we schedule dead instances to when we can't
        schedule them somewhere real. Requests that don't yet have instances
        will get a new instance, created in cell0. Instances that have not
        yet been created will be created in cell0. All build requests are
        destroyed after we're done. Failure to delete a build request will
        trigger the instance deletion, just like the happy path in
        schedule_and_build_instances() below.
        """
        try:
            cell0 = objects.CellMapping.get_by_uuid(
                context, objects.CellMapping.CELL0_UUID)
        except exception.CellMappingNotFound:
            # Not yet setup for cellsv2. Instances will need to be written
            # to the configured database. This will become a deployment
            # error in Ocata.
            LOG.error('No cell mapping found for cell0 while '
                      'trying to record scheduling failure. '
                      'Setup is incomplete.')
            return

        build_requests = build_requests or []
        instances = instances or []
        instances_by_uuid = {inst.uuid: inst for inst in instances}
        for build_request in build_requests:
            if build_request.instance_uuid not in instances_by_uuid:
                # This is an instance object with no matching db entry.
                instance = build_request.get_new_instance(context)
                instances_by_uuid[instance.uuid] = instance

        updates = {'vm_state': vm_states.ERROR, 'task_state': None}
        legacy_spec = request_spec.to_legacy_request_spec_dict()
        for instance in instances_by_uuid.values():
            with obj_target_cell(instance, cell0) as cctxt:
                instance.create()
                # Use the context targeted to cell0 here since the instance
                # is now in cell0.
                self._set_vm_state_and_notify(
                    cctxt, instance.uuid, 'build_instances', updates,
                    exc, legacy_spec)
            try:
                # We don't need the cell0-targeted context here because the
                # instance mapping is in the API DB.
                inst_mapping = \
                    objects.InstanceMapping.get_by_instance_uuid(
                        context, instance.uuid)
                inst_mapping.cell_mapping = cell0
                inst_mapping.save()
            except exception.InstanceMappingNotFound:
                pass

        for build_request in build_requests:
            try:
                build_request.destroy()
            except exception.BuildRequestNotFound:
                # Instance was deleted before we finished scheduling
                inst = instances_by_uuid[build_request.instance_uuid]
                with obj_target_cell(inst, cell0):
                    inst.destroy()

    def schedule_and_build_instances(self, context, build_requests,
                                     request_specs, image,
                                     admin_password, injected_files,
                                     requested_networks, block_device_mapping,
                                     tags=None):
        # Add all the UUIDs for the instances
        instance_uuids = [spec.instance_uuid for spec in request_specs]
        try:
            hosts = self._schedule_instances(context, request_specs[0],
                                             instance_uuids)
        except Exception as exc:
            LOG.exception('Failed to schedule instances')
            self._bury_in_cell0(context, request_specs[0], exc,
                                build_requests=build_requests)
            return

        host_mapping_cache = {}
        cell_mapping_cache = {}
        instances = []

        for (build_request, request_spec, host) in six.moves.zip(
                build_requests, request_specs, hosts):
            instance = build_request.get_new_instance(context)
            # Convert host from the scheduler into a cell record
            if host['host'] not in host_mapping_cache:
                try:
                    host_mapping = objects.HostMapping.get_by_host(
                        context, host['host'])
                    host_mapping_cache[host['host']] = host_mapping
                except exception.HostMappingNotFound as exc:
                    LOG.error('No host-to-cell mapping found for selected '
                              'host %(host)s. Setup is incomplete.',
                              {'host': host['host']})
                    self._bury_in_cell0(context, request_spec, exc,
                                        build_requests=[build_request],
                                        instances=[instance])
                    # This is a placeholder in case the quota recheck fails.
                    instances.append(None)
                    continue
            else:
                host_mapping = host_mapping_cache[host['host']]

            cell = host_mapping.cell_mapping

            # Before we create the instance, let's make one final check that
            # the build request is still around and wasn't deleted by the
            # user already.
            try:
                objects.BuildRequest.get_by_instance_uuid(
                    context, instance.uuid)
            except exception.BuildRequestNotFound:
                # the build request is gone so we're done for this instance
                LOG.debug('While scheduling instance, the build request '
                          'was already deleted.', instance=instance)
                # This is a placeholder in case the quota recheck fails.
                instances.append(None)
                continue
            else:
                instance.availability_zone = (
                    availability_zones.get_host_availability_zone(
                        context, host['host']))
                with obj_target_cell(instance, cell):
                    instance.create()
                    instances.append(instance)
                    cell_mapping_cache[instance.uuid] = cell

        # NOTE(melwitt): We recheck the quota after creating the
        # objects to prevent users from allocating more resources
        # than their allowed quota in the event of a race. This is
        # configurable because it can be expensive if strict quota
        # limits are not required in a deployment.
        if CONF.quota.recheck_quota:
            try:
                compute_utils.check_num_instances_quota(
                    context, instance.flavor, 0, 0,
                    orig_num_req=len(build_requests))
            except exception.TooManyInstances as exc:
                with excutils.save_and_reraise_exception():
                    self._cleanup_build_artifacts(context, exc, instances,
                                                  build_requests,
                                                  request_specs,
                                                  cell_mapping_cache)

        for (build_request, request_spec, host, instance) in six.moves.zip(
                build_requests, request_specs, hosts, instances):
            if instance is None:
                # Skip placeholders that were buried in cell0 or had their
                # build requests deleted by the user before instance create.
                continue
            cell = cell_mapping_cache[instance.uuid]
            filter_props = request_spec.to_legacy_filter_properties_dict()
            scheduler_utils.populate_retry(filter_props, instance.uuid)
            scheduler_utils.populate_filter_properties(filter_props,
                                                       host)

            # send a state update notification for the initial create to
            # show it going from non-existent to BUILDING
            notifications.send_update_with_states(context, instance, None,
                    vm_states.BUILDING, None, None, service="conductor")

            with obj_target_cell(instance, cell) as cctxt:
                objects.InstanceAction.action_start(
                    cctxt, instance.uuid, instance_actions.CREATE,
                    want_result=False)
                instance_bdms = self._create_block_device_mapping(
                    cell, instance.flavor, instance.uuid,
                    block_device_mapping)
                instance_tags = self._create_tags(cctxt, instance.uuid, tags)

            # TODO(Kevin Zheng): clean this up once instance.create() handles
            # tags; we do this so the instance.create notification in
            # build_and_run_instance in nova-compute doesn't lazy-load tags
            instance.tags = instance_tags if instance_tags \
                else objects.TagList()

            # Update mapping for instance. Normally this check is guarded by
            # a try/except but if we're here we know that a newer nova-api
            # handled the build process and would have created the mapping
            inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
                context, instance.uuid)
            inst_mapping.cell_mapping = cell
            inst_mapping.save()

            if not self._delete_build_request(
                    context, build_request, instance, cell, instance_bdms,
                    instance_tags):
                # The build request was deleted before/during scheduling so
                # the instance is gone and we don't have anything to build
                # for this one.
                continue

            # NOTE(danms): Compute RPC expects security group names or ids
            # not objects, so convert this to a list of names until we can
            # pass the objects.
            legacy_secgroups = [s.identifier
                                for s in request_spec.security_groups]
            with obj_target_cell(instance, cell) as cctxt:
                self.compute_rpcapi.build_and_run_instance(
                    cctxt, instance=instance, image=image,
                    request_spec=request_spec,
                    filter_properties=filter_props,
                    admin_password=admin_password,
                    injected_files=injected_files,
                    requested_networks=requested_networks,
                    security_groups=legacy_secgroups,
                    block_device_mapping=instance_bdms,
                    host=host['host'], node=host['nodename'],
                    limits=host['limits'])

    def _cleanup_build_artifacts(self, context, exc, instances,
                                 build_requests, request_specs,
                                 cell_mapping_cache):
        for (instance, build_request, request_spec) in six.moves.zip(
                instances, build_requests, request_specs):
            # Skip placeholders that were buried in cell0 or had their
            # build requests deleted by the user before instance create.
            if instance is None:
                continue

            updates = {'vm_state': vm_states.ERROR, 'task_state': None}
            legacy_spec = request_spec.to_legacy_request_spec_dict()
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'build_instances', updates, exc,
                                          legacy_spec)

            # TODO(mnaser): The cell mapping should already be populated by
            # this point to avoid setting it below here.
            inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
                context, instance.uuid)
            inst_mapping.cell_mapping = cell_mapping_cache[instance.uuid]
            inst_mapping.save()

            # Be paranoid about artifacts being deleted underneath us.
            try:
                build_request.destroy()
            except exception.BuildRequestNotFound:
                pass
            try:
                request_spec.destroy()
            except exception.RequestSpecNotFound:
                pass

    def _delete_build_request(self, context, build_request, instance, cell,
                              instance_bdms, instance_tags):
        """Delete a build request after creating the instance in the cell.

        This method handles cleaning up the instance in case the build
        request is already deleted by the time we try to delete it.

        :param context: the context of the request being handled
        :type context: nova.context.RequestContext
        :param build_request: the build request to delete
        :type build_request: nova.objects.BuildRequest
        :param instance: the instance created from the build_request
        :type instance: nova.objects.Instance
        :param cell: the cell in which the instance was created
        :type cell: nova.objects.CellMapping
        :param instance_bdms: list of block device mappings for the instance
        :type instance_bdms: nova.objects.BlockDeviceMappingList
        :param instance_tags: list of tags for the instance
        :type instance_tags: nova.objects.TagList
        :returns: True if the build request was successfully deleted, False
            if the build request was already deleted and the instance is now
            gone.
        """
        try:
            build_request.destroy()
        except exception.BuildRequestNotFound:
            # This indicates an instance deletion request has been
            # processed, and the build should halt here. Clean up the
            # bdm, tags and instance record.
            with obj_target_cell(instance, cell) as cctxt:
                with compute_utils.notify_about_instance_delete(
                        self.notifier, cctxt, instance):
                    try:
                        instance.destroy()
                    except exception.InstanceNotFound:
                        pass
                    except exception.ObjectActionError:
                        # NOTE(melwitt): Instance became scheduled during
                        # the destroy, "host changed". Refresh and re-destroy.
                        try:
                            instance.refresh()
                            instance.destroy()
                        except exception.InstanceNotFound:
                            pass
            for bdm in instance_bdms:
                with obj_target_cell(bdm, cell):
                    try:
                        bdm.destroy()
                    except exception.ObjectActionError:
                        pass
            if instance_tags:
                with try_target_cell(context, cell) as target_ctxt:
                    try:
                        objects.TagList.destroy(target_ctxt, instance.uuid)
                    except exception.InstanceNotFound:
                        pass
            return False
        return True