Browse Source

use already loaded BDM in instance.<action>

In I18e7483ec9a484a660e1d306fdc0986e1d5f952b BDM was added to the instance
notifications. In general, to add the BDM to the payload an extra DB query is
needed. However, in some places the BDM is already loaded separately before
notify_about_instance_action is called to send the notification. In these cases
loading the BDM again is unnecessary as the already loaded BDM can be reused.

This patch makes sure that notify_about_instance_action is called with the
already loaded BDM. There will be subsequent patches to do the same with
other notify calls.

Change-Id: I391554d3904a5a60b921ef4714a1cfd0a64a25c2
Related-Bug: #1718226
tags/17.0.0.0b1
Balazs Gibizer 1 year ago
parent
commit
c4fadfd4d2

+ 12
- 1
doc/notification_samples/instance-delete-end.json View File

@@ -4,7 +4,18 @@
4 4
         "nova_object.data":{
5 5
             "architecture":"x86_64",
6 6
             "availability_zone": "nova",
7
-            "block_devices":[],
7
+            "block_devices":[{
8
+                "nova_object.data": {
9
+                    "boot_index": null,
10
+                    "delete_on_termination": false,
11
+                    "device_name": "/dev/sdb",
12
+                    "tag": null,
13
+                    "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
14
+                },
15
+                "nova_object.name": "BlockDevicePayload",
16
+                "nova_object.namespace": "nova",
17
+                "nova_object.version": "1.0"
18
+            }],
8 19
             "created_at":"2012-10-29T13:42:11Z",
9 20
             "deleted_at":"2012-10-29T13:42:11Z",
10 21
             "display_name":"some-server",

+ 48
- 31
nova/compute/manager.py View File

@@ -731,7 +731,7 @@ class ComputeManager(manager.Manager):
731 731
                 system_metadata=system_meta)
732 732
         compute_utils.notify_about_instance_action(context, instance,
733 733
                 self.host, action=fields.NotificationAction.DELETE,
734
-                phase=fields.NotificationPhase.END)
734
+                phase=fields.NotificationPhase.END, bdms=bdms)
735 735
         self._delete_scheduler_instance_info(context, instance.uuid)
736 736
 
737 737
     def _init_instance(self, context, instance):
@@ -2254,7 +2254,7 @@ class ComputeManager(manager.Manager):
2254 2254
                                               "shutdown.start")
2255 2255
             compute_utils.notify_about_instance_action(context, instance,
2256 2256
                     self.host, action=fields.NotificationAction.SHUTDOWN,
2257
-                    phase=fields.NotificationPhase.START)
2257
+                    phase=fields.NotificationPhase.START, bdms=bdms)
2258 2258
 
2259 2259
         network_info = instance.get_network_info()
2260 2260
 
@@ -2341,7 +2341,7 @@ class ComputeManager(manager.Manager):
2341 2341
                                               "shutdown.end")
2342 2342
             compute_utils.notify_about_instance_action(context, instance,
2343 2343
                     self.host, action=fields.NotificationAction.SHUTDOWN,
2344
-                    phase=fields.NotificationPhase.END)
2344
+                    phase=fields.NotificationPhase.END, bdms=bdms)
2345 2345
 
2346 2346
     def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
2347 2347
         exc_info = None
@@ -2377,7 +2377,7 @@ class ComputeManager(manager.Manager):
2377 2377
                                           "delete.start")
2378 2378
         compute_utils.notify_about_instance_action(context, instance,
2379 2379
                 self.host, action=fields.NotificationAction.DELETE,
2380
-                phase=fields.NotificationPhase.START)
2380
+                phase=fields.NotificationPhase.START, bdms=bdms)
2381 2381
 
2382 2382
         self._shutdown_instance(context, instance, bdms)
2383 2383
         # NOTE(dims): instance.info_cache.delete() should be called after
@@ -2688,13 +2688,13 @@ class ComputeManager(manager.Manager):
2688 2688
                               admin_password, network_info=network_info,
2689 2689
                               block_device_info=new_block_device_info)
2690 2690
 
2691
-    def _notify_instance_rebuild_error(self, context, instance, error):
2691
+    def _notify_instance_rebuild_error(self, context, instance, error, bdms):
2692 2692
         self._notify_about_instance_usage(context, instance,
2693 2693
                                           'rebuild.error', fault=error)
2694 2694
         compute_utils.notify_about_instance_action(
2695 2695
             context, instance, self.host,
2696 2696
             action=fields.NotificationAction.REBUILD,
2697
-            phase=fields.NotificationPhase.ERROR, exception=error)
2697
+            phase=fields.NotificationPhase.ERROR, exception=error, bdms=bdms)
2698 2698
 
2699 2699
     @messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
2700 2700
     @wrap_exception()
@@ -2801,8 +2801,7 @@ class ComputeManager(manager.Manager):
2801 2801
                 # not raise ComputeResourcesUnavailable.
2802 2802
                 rt.delete_allocation_for_evacuated_instance(
2803 2803
                     instance, scheduled_node, node_type='destination')
2804
-                self._notify_instance_rebuild_error(context, instance, e)
2805
-
2804
+                self._notify_instance_rebuild_error(context, instance, e, bdms)
2806 2805
                 raise exception.BuildAbortException(
2807 2806
                     instance_uuid=instance.uuid, reason=e.format_message())
2808 2807
             except (exception.InstanceNotFound,
@@ -2810,13 +2809,13 @@ class ComputeManager(manager.Manager):
2810 2809
                 LOG.debug('Instance was deleted while rebuilding',
2811 2810
                           instance=instance)
2812 2811
                 self._set_migration_status(migration, 'failed')
2813
-                self._notify_instance_rebuild_error(context, instance, e)
2812
+                self._notify_instance_rebuild_error(context, instance, e, bdms)
2814 2813
             except Exception as e:
2815 2814
                 self._set_migration_status(migration, 'failed')
2816 2815
                 if recreate or scheduled_node is not None:
2817 2816
                     rt.delete_allocation_for_evacuated_instance(
2818 2817
                         instance, scheduled_node, node_type='destination')
2819
-                self._notify_instance_rebuild_error(context, instance, e)
2818
+                self._notify_instance_rebuild_error(context, instance, e, bdms)
2820 2819
                 raise
2821 2820
             else:
2822 2821
                 instance.apply_migration_context()
@@ -2907,7 +2906,8 @@ class ComputeManager(manager.Manager):
2907 2906
         compute_utils.notify_about_instance_action(
2908 2907
             context, instance, self.host,
2909 2908
             action=fields.NotificationAction.REBUILD,
2910
-            phase=fields.NotificationPhase.START)
2909
+            phase=fields.NotificationPhase.START,
2910
+            bdms=bdms)
2911 2911
 
2912 2912
         instance.power_state = self._get_power_state(context, instance)
2913 2913
         instance.task_state = task_states.REBUILDING
@@ -2979,7 +2979,8 @@ class ComputeManager(manager.Manager):
2979 2979
         compute_utils.notify_about_instance_action(
2980 2980
             context, instance, self.host,
2981 2981
             action=fields.NotificationAction.REBUILD,
2982
-            phase=fields.NotificationPhase.END)
2982
+            phase=fields.NotificationPhase.END,
2983
+            bdms=bdms)
2983 2984
 
2984 2985
     def _handle_bad_volumes_detached(self, context, instance, bad_devices,
2985 2986
                                      block_device_info):
@@ -4000,12 +4001,13 @@ class ComputeManager(manager.Manager):
4000 4001
             self._notify_about_instance_usage(
4001 4002
                 context, instance, "resize.start", network_info=network_info)
4002 4003
 
4004
+            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
4005
+                    context, instance.uuid)
4006
+
4003 4007
             compute_utils.notify_about_instance_action(context, instance,
4004 4008
                    self.host, action=fields.NotificationAction.RESIZE,
4005
-                   phase=fields.NotificationPhase.START)
4009
+                   phase=fields.NotificationPhase.START, bdms=bdms)
4006 4010
 
4007
-            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
4008
-                    context, instance.uuid)
4009 4011
             block_device_info = self._get_instance_block_device_info(
4010 4012
                                 context, instance, bdms=bdms)
4011 4013
 
@@ -4042,7 +4044,7 @@ class ComputeManager(manager.Manager):
4042 4044
 
4043 4045
         compute_utils.notify_about_instance_action(context, instance,
4044 4046
                self.host, action=fields.NotificationAction.RESIZE,
4045
-               phase=fields.NotificationPhase.END)
4047
+               phase=fields.NotificationPhase.END, bdms=bdms)
4046 4048
         self.instance_events.clear_events_for_instance(instance)
4047 4049
 
4048 4050
     def _terminate_volume_connections(self, context, instance, bdms):
@@ -4399,12 +4401,21 @@ class ComputeManager(manager.Manager):
4399 4401
     def _shelve_instance(self, context, instance, image_id,
4400 4402
                          clean_shutdown):
4401 4403
         LOG.info('Shelving', instance=instance)
4404
+        offload = CONF.shelved_offload_time == 0
4405
+        if offload:
4406
+            # Get the BDMs early so we can pass them into versioned
4407
+            # notifications since _shelve_offload_instance needs the
4408
+            # BDMs anyway.
4409
+            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
4410
+                context, instance.uuid)
4411
+        else:
4412
+            bdms = None
4402 4413
         compute_utils.notify_usage_exists(self.notifier, context, instance,
4403 4414
                                           current_period=True)
4404 4415
         self._notify_about_instance_usage(context, instance, 'shelve.start')
4405 4416
         compute_utils.notify_about_instance_action(context, instance,
4406 4417
                 self.host, action=fields.NotificationAction.SHELVE,
4407
-                phase=fields.NotificationPhase.START)
4418
+                phase=fields.NotificationPhase.START, bdms=bdms)
4408 4419
 
4409 4420
         def update_task_state(task_state, expected_state=task_states.SHELVING):
4410 4421
             shelving_state_map = {
@@ -4436,11 +4447,11 @@ class ComputeManager(manager.Manager):
4436 4447
         self._notify_about_instance_usage(context, instance, 'shelve.end')
4437 4448
         compute_utils.notify_about_instance_action(context, instance,
4438 4449
                 self.host, action=fields.NotificationAction.SHELVE,
4439
-                phase=fields.NotificationPhase.END)
4450
+                phase=fields.NotificationPhase.END, bdms=bdms)
4440 4451
 
4441
-        if CONF.shelved_offload_time == 0:
4452
+        if offload:
4442 4453
             self._shelve_offload_instance(context, instance,
4443
-                                          clean_shutdown=False)
4454
+                                          clean_shutdown=False, bdms=bdms)
4444 4455
 
4445 4456
     @wrap_exception()
4446 4457
     @reverts_task_state
@@ -4463,13 +4474,17 @@ class ComputeManager(manager.Manager):
4463 4474
             self._shelve_offload_instance(context, instance, clean_shutdown)
4464 4475
         do_shelve_offload_instance()
4465 4476
 
4466
-    def _shelve_offload_instance(self, context, instance, clean_shutdown):
4477
+    def _shelve_offload_instance(self, context, instance, clean_shutdown,
4478
+                                 bdms=None):
4467 4479
         LOG.info('Shelve offloading', instance=instance)
4480
+        if bdms is None:
4481
+            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
4482
+                context, instance.uuid)
4468 4483
         self._notify_about_instance_usage(context, instance,
4469 4484
                 'shelve_offload.start')
4470 4485
         compute_utils.notify_about_instance_action(context, instance,
4471 4486
                 self.host, action=fields.NotificationAction.SHELVE_OFFLOAD,
4472
-                phase=fields.NotificationPhase.START)
4487
+                phase=fields.NotificationPhase.START, bdms=bdms)
4473 4488
 
4474 4489
         self._power_off_instance(context, instance, clean_shutdown)
4475 4490
         current_power_state = self._get_power_state(context, instance)
@@ -4477,8 +4492,6 @@ class ComputeManager(manager.Manager):
4477 4492
         self.network_api.cleanup_instance_network_on_host(context, instance,
4478 4493
                                                           instance.host)
4479 4494
         network_info = self.network_api.get_instance_nw_info(context, instance)
4480
-        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
4481
-            context, instance.uuid)
4482 4495
 
4483 4496
         block_device_info = self._get_instance_block_device_info(context,
4484 4497
                                                                  instance,
@@ -4516,7 +4529,7 @@ class ComputeManager(manager.Manager):
4516 4529
                 'shelve_offload.end')
4517 4530
         compute_utils.notify_about_instance_action(context, instance,
4518 4531
                 self.host, action=fields.NotificationAction.SHELVE_OFFLOAD,
4519
-                phase=fields.NotificationPhase.END)
4532
+                phase=fields.NotificationPhase.END, bdms=bdms)
4520 4533
 
4521 4534
     @wrap_exception()
4522 4535
     @reverts_task_state
@@ -4558,16 +4571,17 @@ class ComputeManager(manager.Manager):
4558 4571
     def _unshelve_instance(self, context, instance, image, filter_properties,
4559 4572
                            node):
4560 4573
         LOG.info('Unshelving', instance=instance)
4574
+        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
4575
+                context, instance.uuid)
4576
+
4561 4577
         self._notify_about_instance_usage(context, instance, 'unshelve.start')
4562 4578
         compute_utils.notify_about_instance_action(context, instance,
4563 4579
                 self.host, action=fields.NotificationAction.UNSHELVE,
4564
-                phase=fields.NotificationPhase.START)
4580
+                phase=fields.NotificationPhase.START, bdms=bdms)
4565 4581
 
4566 4582
         instance.task_state = task_states.SPAWNING
4567 4583
         instance.save()
4568 4584
 
4569
-        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
4570
-                context, instance.uuid)
4571 4585
         block_device_info = self._prep_block_device(context, instance, bdms)
4572 4586
         scrubbed_keys = self._unshelve_instance_key_scrub(instance)
4573 4587
 
@@ -4624,7 +4638,7 @@ class ComputeManager(manager.Manager):
4624 4638
         self._notify_about_instance_usage(context, instance, 'unshelve.end')
4625 4639
         compute_utils.notify_about_instance_action(context, instance,
4626 4640
                 self.host, action=fields.NotificationAction.UNSHELVE,
4627
-                phase=fields.NotificationPhase.END)
4641
+                phase=fields.NotificationPhase.END, bdms=bdms)
4628 4642
 
4629 4643
     @messaging.expected_exceptions(NotImplementedError)
4630 4644
     @wrap_instance_fault
@@ -6011,7 +6025,8 @@ class ComputeManager(manager.Manager):
6011 6025
         compute_utils.notify_about_instance_action(context, instance,
6012 6026
                 self.host,
6013 6027
                 action=fields.NotificationAction.LIVE_MIGRATION_ROLLBACK,
6014
-                phase=fields.NotificationPhase.START)
6028
+                phase=fields.NotificationPhase.START,
6029
+                bdms=bdms)
6015 6030
 
6016 6031
         do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
6017 6032
                 migrate_data)
@@ -6024,9 +6039,11 @@ class ComputeManager(manager.Manager):
6024 6039
         self._notify_about_instance_usage(context, instance,
6025 6040
                                           "live_migration._rollback.end")
6026 6041
         compute_utils.notify_about_instance_action(context, instance,
6042
+
6027 6043
                 self.host,
6028 6044
                 action=fields.NotificationAction.LIVE_MIGRATION_ROLLBACK,
6029
-                phase=fields.NotificationPhase.END)
6045
+                phase=fields.NotificationPhase.END,
6046
+                bdms=bdms)
6030 6047
 
6031 6048
         self._set_migration_status(migration, migration_status)
6032 6049
 

+ 5
- 2
nova/compute/utils.py View File

@@ -352,7 +352,7 @@ def _get_fault_and_priority_from_exc(exception):
352 352
 @rpc.if_notifications_enabled
353 353
 def notify_about_instance_action(context, instance, host, action, phase=None,
354 354
                                  source=fields.NotificationSource.COMPUTE,
355
-                                 exception=None):
355
+                                 exception=None, bdms=None):
356 356
     """Send versioned notification about the action made on the instance
357 357
     :param instance: the instance which the action performed on
358 358
     :param host: the host emitting the notification
@@ -360,11 +360,14 @@ def notify_about_instance_action(context, instance, host, action, phase=None,
360 360
     :param phase: the phase of the action
361 361
     :param source: the source of the notification
362 362
     :param exception: the thrown exception (used in error notifications)
363
+    :param bdms: BlockDeviceMappingList object for the instance. If it is not
364
+                provided then we will load it from the db if so configured
363 365
     """
364 366
     fault, priority = _get_fault_and_priority_from_exc(exception)
365 367
     payload = instance_notification.InstanceActionPayload(
366 368
             instance=instance,
367
-            fault=fault)
369
+            fault=fault,
370
+            bdms=bdms)
368 371
     notification = instance_notification.InstanceActionNotification(
369 372
             context=context,
370 373
             priority=priority,

+ 22
- 11
nova/notifications/objects/instance.py View File

@@ -108,14 +108,15 @@ class InstancePayload(base.NotificationPayloadBase):
108 108
         'auto_disk_config': fields.DiskConfigField()
109 109
     }
110 110
 
111
-    def __init__(self, instance):
111
+    def __init__(self, instance, bdms=None):
112 112
         super(InstancePayload, self).__init__()
113 113
         network_info = instance.get_network_info()
114 114
         self.ip_addresses = IpPayload.from_network_info(network_info)
115 115
         self.flavor = flavor_payload.FlavorPayload(flavor=instance.flavor)
116
-        # TODO(gibi): investigate the possibility to use already in scope bdm
117
-        # when available like in instance.create
118
-        self.block_devices = BlockDevicePayload.from_instance(instance)
116
+        if bdms is not None:
117
+            self.block_devices = BlockDevicePayload.from_bdms(bdms)
118
+        else:
119
+            self.block_devices = BlockDevicePayload.from_instance(instance)
119 120
 
120 121
         self.populate_schema(instance=instance)
121 122
 
@@ -134,8 +135,9 @@ class InstanceActionPayload(InstancePayload):
134 135
         'fault': fields.ObjectField('ExceptionPayload', nullable=True),
135 136
     }
136 137
 
137
-    def __init__(self, instance, fault):
138
-        super(InstanceActionPayload, self).__init__(instance=instance)
138
+    def __init__(self, instance, fault, bdms=None):
139
+        super(InstanceActionPayload, self).__init__(instance=instance,
140
+                                                    bdms=bdms)
139 141
         self.fault = fault
140 142
 
141 143
 
@@ -352,12 +354,21 @@ class BlockDevicePayload(base.NotificationPayloadBase):
352 354
             return None
353 355
 
354 356
         instance_bdms = instance.get_bdms()
355
-        bdms = []
356 357
         if instance_bdms is not None:
357
-            for bdm in instance_bdms:
358
-                if bdm.volume_id is not None:
359
-                    bdms.append(cls(bdm))
360
-        return bdms
358
+            return cls.from_bdms(instance_bdms)
359
+        else:
360
+            return []
361
+
362
+    @classmethod
363
+    def from_bdms(cls, bdms):
364
+        """Returns a list of BlockDevicePayload objects based on the passed
365
+        BlockDeviceMappingList.
366
+        """
367
+        payloads = []
368
+        for bdm in bdms:
369
+            if bdm.volume_id is not None:
370
+                payloads.append(cls(bdm))
371
+        return payloads
361 372
 
362 373
 
363 374
 @nova_base.NovaObjectRegistry.register_notification

+ 18
- 12
nova/tests/unit/compute/test_compute.py View File

@@ -5235,9 +5235,9 @@ class ComputeTestCase(BaseTestCase,
5235 5235
                     clean_shutdown=clean_shutdown)
5236 5236
             mock_notify_action.assert_has_calls([
5237 5237
                 mock.call(self.context, instance, 'fake-mini',
5238
-                      action='resize', phase='start'),
5238
+                      action='resize', phase='start', bdms='fake_bdms'),
5239 5239
                 mock.call(self.context, instance, 'fake-mini',
5240
-                      action='resize', phase='end')])
5240
+                      action='resize', phase='end', bdms='fake_bdms')])
5241 5241
             mock_get_instance_vol_bdinfo.assert_called_once_with(
5242 5242
                     self.context, instance, bdms='fake_bdms')
5243 5243
             mock_terminate_vol_conn.assert_called_once_with(self.context,
@@ -6283,7 +6283,8 @@ class ComputeTestCase(BaseTestCase,
6283 6283
 
6284 6284
         dest_node = objects.ComputeNode(host='foo', uuid=uuids.dest_node)
6285 6285
         mock_get_node.return_value = dest_node
6286
-        mock_bdms.return_value = objects.BlockDeviceMappingList()
6286
+        bdms = objects.BlockDeviceMappingList()
6287
+        mock_bdms.return_value = bdms
6287 6288
 
6288 6289
         @mock.patch('nova.compute.utils.notify_about_instance_action')
6289 6290
         @mock.patch.object(self.compute, '_live_migration_cleanup_flags')
@@ -6297,9 +6298,11 @@ class ComputeTestCase(BaseTestCase,
6297 6298
                 instance.project_id, test.MatchType(dict))
6298 6299
             mock_notify.assert_has_calls([
6299 6300
                 mock.call(c, instance, self.compute.host,
6300
-                          action='live_migration_rollback', phase='start'),
6301
+                          action='live_migration_rollback', phase='start',
6302
+                          bdms=bdms),
6301 6303
                 mock.call(c, instance, self.compute.host,
6302
-                          action='live_migration_rollback', phase='end')])
6304
+                          action='live_migration_rollback', phase='end',
6305
+                          bdms=bdms)])
6303 6306
             mock_nw_api.setup_networks_on_host.assert_called_once_with(
6304 6307
                 c, instance, self.compute.host)
6305 6308
         _test()
@@ -6323,7 +6326,8 @@ class ComputeTestCase(BaseTestCase,
6323 6326
 
6324 6327
         dest_node = objects.ComputeNode(host='foo', uuid=uuids.dest_node)
6325 6328
         mock_get_node.return_value = dest_node
6326
-        mock_bdms.return_value = objects.BlockDeviceMappingList()
6329
+        bdms = objects.BlockDeviceMappingList()
6330
+        mock_bdms.return_value = bdms
6327 6331
 
6328 6332
         @mock.patch('nova.compute.utils.notify_about_instance_action')
6329 6333
         @mock.patch.object(self.compute, '_live_migration_cleanup_flags')
@@ -6338,9 +6342,11 @@ class ComputeTestCase(BaseTestCase,
6338 6342
                 instance.project_id, test.MatchType(dict))
6339 6343
             mock_notify.assert_has_calls([
6340 6344
                 mock.call(c, instance, self.compute.host,
6341
-                          action='live_migration_rollback', phase='start'),
6345
+                          action='live_migration_rollback', phase='start',
6346
+                          bdms=bdms),
6342 6347
                 mock.call(c, instance, self.compute.host,
6343
-                          action='live_migration_rollback', phase='end')])
6348
+                          action='live_migration_rollback', phase='end',
6349
+                          bdms=bdms)])
6344 6350
             mock_nw_api.setup_networks_on_host.assert_called_once_with(
6345 6351
                 c, instance, self.compute.host)
6346 6352
         _test()
@@ -12002,19 +12008,19 @@ class EvacuateHostTestCase(BaseTestCase):
12002 12008
             if vm_states_is_stopped:
12003 12009
                 mock_notify.assert_has_calls([
12004 12010
                     mock.call(ctxt, self.inst, self.inst.host,
12005
-                              action='rebuild', phase='start'),
12011
+                              action='rebuild', phase='start', bdms=bdms),
12006 12012
                     mock.call(ctxt, self.inst, self.inst.host,
12007 12013
                               action='power_off', phase='start'),
12008 12014
                     mock.call(ctxt, self.inst, self.inst.host,
12009 12015
                               action='power_off', phase='end'),
12010 12016
                     mock.call(ctxt, self.inst, self.inst.host,
12011
-                              action='rebuild', phase='end')])
12017
+                              action='rebuild', phase='end', bdms=bdms)])
12012 12018
             else:
12013 12019
                 mock_notify.assert_has_calls([
12014 12020
                     mock.call(ctxt, self.inst, self.inst.host,
12015
-                              action='rebuild', phase='start'),
12021
+                              action='rebuild', phase='start', bdms=bdms),
12016 12022
                     mock.call(ctxt, self.inst, self.inst.host,
12017
-                              action='rebuild', phase='end')])
12023
+                              action='rebuild', phase='end', bdms=bdms)])
12018 12024
 
12019 12025
             mock_setup_networks_on_host.assert_called_once_with(
12020 12026
                 ctxt, self.inst, self.inst.host)

+ 15
- 10
nova/tests/unit/compute/test_compute_mgr.py View File

@@ -155,10 +155,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
155 155
         specd_compute._shutdown_instance = _mark_shutdown
156 156
         mock_inst.info_cache = call_tracker
157 157
 
158
+        mock_bdms = mock.Mock()
158 159
         specd_compute._delete_instance(specd_compute,
159 160
                                        self.context,
160 161
                                        mock_inst,
161
-                                       mock.Mock())
162
+                                       mock_bdms)
162 163
 
163 164
         methods_called = [n for n, a, k in call_tracker.mock_calls]
164 165
         self.assertEqual(['clear_events_for_instance',
@@ -169,7 +170,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
169 170
                                             mock_inst,
170 171
                                             specd_compute.host,
171 172
                                             action='delete',
172
-                                            phase='start')
173
+                                            phase='start',
174
+                                            bdms=mock_bdms)
173 175
 
174 176
     def _make_compute_node(self, hyp_hostname, cn_id):
175 177
             cn = mock.Mock(spec_set=['hypervisor_hostname', 'id',
@@ -296,9 +298,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
296 298
 
297 299
         mock_notify.assert_has_calls([
298 300
             mock.call(self.context, instance, 'fake-mini',
299
-                      action='delete', phase='start'),
301
+                      action='delete', phase='start', bdms=[]),
300 302
             mock.call(self.context, instance, 'fake-mini',
301
-                      action='delete', phase='end')])
303
+                      action='delete', phase='end', bdms=[])])
302 304
 
303 305
     def test_check_device_tagging_no_tagging(self):
304 306
         bdms = objects.BlockDeviceMappingList(objects=[
@@ -1207,9 +1209,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
1207 1209
                         notify=True, try_deallocate_networks=False)
1208 1210
         mock_notify.assert_has_calls([
1209 1211
             mock.call(self.context, instance, 'fake-mini',
1210
-                      action='shutdown', phase='start'),
1212
+                      action='shutdown', phase='start', bdms=bdms),
1211 1213
             mock.call(self.context, instance, 'fake-mini',
1212
-                      action='shutdown', phase='end')])
1214
+                      action='shutdown', phase='end', bdms=bdms)])
1213 1215
 
1214 1216
     @mock.patch('nova.context.RequestContext.elevated')
1215 1217
     @mock.patch('nova.objects.Instance.get_network_info')
@@ -3595,7 +3597,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
3595 3597
         )
3596 3598
         mock_notify.assert_called_once_with(
3597 3599
             mock.ANY, instance, 'fake-mini', action='rebuild', phase='error',
3598
-            exception=exc)
3600
+            exception=exc, bdms=None)
3599 3601
 
3600 3602
     def test_rebuild_deleting(self):
3601 3603
         instance = fake_instance.fake_instance_obj(self.context)
@@ -6088,7 +6090,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
6088 6090
         @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
6089 6091
         def _test(mock_bdm, mock_lmcf, mock_notify, mock_nwapi,
6090 6092
                   mock_notify_about_instance_action):
6091
-            mock_bdm.return_value = objects.BlockDeviceMappingList()
6093
+            bdms = objects.BlockDeviceMappingList()
6094
+            mock_bdm.return_value = bdms
6092 6095
             mock_lmcf.return_value = False, False
6093 6096
             mock_instance = mock.MagicMock()
6094 6097
             compute._rollback_live_migration(self.context,
@@ -6099,9 +6102,11 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
6099 6102
                 mock_instance.project_id, test.MatchType(dict))
6100 6103
             mock_notify_about_instance_action.assert_has_calls([
6101 6104
                 mock.call(self.context, mock_instance, compute.host,
6102
-                          action='live_migration_rollback', phase='start'),
6105
+                          action='live_migration_rollback', phase='start',
6106
+                          bdms=bdms),
6103 6107
                 mock.call(self.context, mock_instance, compute.host,
6104
-                          action='live_migration_rollback', phase='end')])
6108
+                          action='live_migration_rollback', phase='end',
6109
+                          bdms=bdms)])
6105 6110
             self.assertIsInstance(mock_lmcf.call_args_list[0][0][0],
6106 6111
                                   migrate_data_obj.LiveMigrateData)
6107 6112
 

+ 20
- 1
nova/tests/unit/compute/test_compute_utils.py View File

@@ -491,13 +491,23 @@ class UsageInfoTestCase(test.TestCase):
491 491
 
492 492
     def test_notify_about_instance_action(self):
493 493
         instance = create_instance(self.context)
494
+        bdms = block_device_obj.block_device_make_list(
495
+            self.context,
496
+            [fake_block_device.FakeDbBlockDeviceDict(
497
+                {'source_type': 'volume',
498
+                 'device_name': '/dev/vda',
499
+                 'instance_uuid': 'f8000000-0000-0000-0000-000000000000',
500
+                 'destination_type': 'volume',
501
+                 'boot_index': 0,
502
+                 'volume_id': 'de8836ac-d75e-11e2-8271-5254009297d6'})])
494 503
 
495 504
         compute_utils.notify_about_instance_action(
496 505
             self.context,
497 506
             instance,
498 507
             host='fake-compute',
499 508
             action='delete',
500
-            phase='start')
509
+            phase='start',
510
+            bdms=bdms)
501 511
 
502 512
         self.assertEqual(len(fake_notifier.VERSIONED_NOTIFICATIONS), 1)
503 513
         notification = fake_notifier.VERSIONED_NOTIFICATIONS[0]
@@ -522,6 +532,15 @@ class UsageInfoTestCase(test.TestCase):
522 532
             self.assertIn(attr, payload, "Key %s not in payload" % attr)
523 533
 
524 534
         self.assertEqual(payload['image_uuid'], uuids.fake_image_ref)
535
+        self.assertEqual(1, len(payload['block_devices']))
536
+        payload_bdm = payload['block_devices'][0]['nova_object.data']
537
+        self.assertEqual(
538
+            {'boot_index': 0,
539
+             'delete_on_termination': False,
540
+             'device_name': '/dev/vda',
541
+             'tag': None,
542
+             'volume_id': 'de8836ac-d75e-11e2-8271-5254009297d6'},
543
+            payload_bdm)
525 544
 
526 545
     def test_notify_about_instance_create(self):
527 546
         keypair = objects.KeyPair(name='my-key', user_id='fake', type='ssh',

+ 31
- 12
nova/tests/unit/compute/test_shelve.py View File

@@ -47,6 +47,7 @@ def _fake_resources():
47 47
 
48 48
 
49 49
 class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
50
+    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
50 51
     @mock.patch.object(nova.compute.manager.ComputeManager,
51 52
                        '_terminate_volume_connections')
52 53
     @mock.patch.object(nova.virt.fake.SmallFakeDriver, 'power_off')
@@ -58,7 +59,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
58 59
     def _shelve_instance(self, shelved_offload_time, mock_notify,
59 60
                          mock_notify_instance_usage, mock_get_power_state,
60 61
                          mock_snapshot, mock_power_off, mock_terminate,
61
-                         clean_shutdown=True):
62
+                         mock_get_bdms, clean_shutdown=True):
62 63
         mock_get_power_state.return_value = 123
63 64
 
64 65
         CONF.set_override('shelved_offload_time', shelved_offload_time)
@@ -70,6 +71,11 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
70 71
         instance.task_state = task_states.SHELVING
71 72
         instance.save()
72 73
 
74
+        fake_bdms = None
75
+        if shelved_offload_time == 0:
76
+            fake_bdms = objects.BlockDeviceMappingList()
77
+            mock_get_bdms.return_value = fake_bdms
78
+
73 79
         tracking = {'last_state': instance.vm_state}
74 80
 
75 81
         def check_save(expected_task_state=None):
@@ -119,9 +125,9 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
119 125
                                          clean_shutdown=clean_shutdown)
120 126
             mock_notify.assert_has_calls([
121 127
                 mock.call(self.context, instance, 'fake-mini',
122
-                          action='shelve', phase='start'),
128
+                          action='shelve', phase='start', bdms=fake_bdms),
123 129
                 mock.call(self.context, instance, 'fake-mini',
124
-                          action='shelve', phase='end')])
130
+                          action='shelve', phase='end', bdms=fake_bdms)])
125 131
 
126 132
         # prepare expect call lists
127 133
         mock_notify_instance_usage_call_list = [
@@ -184,6 +190,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
184 190
         instance = self._shelve_offload(clean_shutdown=False)
185 191
         mock_power_off.assert_called_once_with(instance, 0, 0)
186 192
 
193
+    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
187 194
     @mock.patch.object(nova.compute.manager.ComputeManager,
188 195
                        '_terminate_volume_connections')
189 196
     @mock.patch('nova.compute.resource_tracker.ResourceTracker.'
@@ -197,22 +204,26 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
197 204
     @mock.patch('nova.compute.utils.notify_about_instance_action')
198 205
     def _shelve_offload(self, mock_notify, mock_notify_instance_usage,
199 206
                         mock_get_power_state, mock_update_resource_tracker,
200
-                        mock_delete_alloc, mock_terminate,
207
+                        mock_delete_alloc, mock_terminate, mock_get_bdms,
201 208
                         clean_shutdown=True):
202 209
         host = 'fake-mini'
203 210
         instance = self._create_fake_instance_obj(params={'host': host})
204 211
         instance.task_state = task_states.SHELVING
205 212
         instance.save()
206 213
         self.useFixture(utils_fixture.TimeFixture())
214
+        fake_bdms = objects.BlockDeviceMappingList()
215
+        mock_get_bdms.return_value = fake_bdms
207 216
 
208 217
         with mock.patch.object(instance, 'save'):
209 218
             self.compute.shelve_offload_instance(self.context, instance,
210 219
                                                  clean_shutdown=clean_shutdown)
211 220
             mock_notify.assert_has_calls([
212 221
                 mock.call(self.context, instance, 'fake-mini',
213
-                          action='shelve_offload', phase='start'),
222
+                          action='shelve_offload', phase='start',
223
+                          bdms=fake_bdms),
214 224
                 mock.call(self.context, instance, 'fake-mini',
215
-                          action='shelve_offload', phase='end')])
225
+                          action='shelve_offload', phase='end',
226
+                          bdms=fake_bdms)])
216 227
 
217 228
         self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
218 229
         self.assertIsNone(instance.task_state)
@@ -236,6 +247,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
236 247
 
237 248
         return instance
238 249
 
250
+    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
239 251
     @mock.patch('nova.compute.utils.notify_about_instance_action')
240 252
     @mock.patch.object(nova.compute.manager.ComputeManager,
241 253
                        '_notify_about_instance_usage')
@@ -248,7 +260,10 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
248 260
     def test_unshelve(self, mock_setup_network,
249 261
                       mock_get_power_state, mock_spawn,
250 262
                       mock_prep_block_device, mock_notify_instance_usage,
251
-                      mock_notify_instance_action):
263
+                      mock_notify_instance_action,
264
+                      mock_get_bdms):
265
+        mock_bdms = mock.Mock()
266
+        mock_get_bdms.return_value = mock_bdms
252 267
         instance = self._create_fake_instance_obj()
253 268
         instance.task_state = task_states.UNSHELVING
254 269
         instance.save()
@@ -312,9 +327,9 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
312 327
 
313 328
         mock_notify_instance_action.assert_has_calls([
314 329
             mock.call(self.context, instance, 'fake-mini',
315
-                      action='unshelve', phase='start'),
330
+                      action='unshelve', phase='start', bdms=mock_bdms),
316 331
             mock.call(self.context, instance, 'fake-mini',
317
-                      action='unshelve', phase='end')])
332
+                      action='unshelve', phase='end', bdms=mock_bdms)])
318 333
 
319 334
         # prepare expect call lists
320 335
         mock_notify_instance_usage_call_list = [
@@ -346,6 +361,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
346 361
         self.assertEqual(self.compute.host, instance.host)
347 362
         self.assertFalse(instance.auto_disk_config)
348 363
 
364
+    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
349 365
     @mock.patch('nova.compute.utils.notify_about_instance_action')
350 366
     @mock.patch.object(nova.compute.resource_tracker.ResourceTracker,
351 367
                        'instance_claim')
@@ -363,7 +379,10 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
363 379
                                     mock_prep_block_device, mock_spawn,
364 380
                                     mock_get_power_state,
365 381
                                     mock_setup_network, mock_instance_claim,
366
-                                    mock_notify_instance_action):
382
+                                    mock_notify_instance_action,
383
+                                    mock_get_bdms):
384
+        mock_bdms = mock.Mock()
385
+        mock_get_bdms.return_value = mock_bdms
367 386
         instance = self._create_fake_instance_obj()
368 387
         node = test_compute.NODENAME
369 388
         limits = {}
@@ -405,9 +424,9 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
405 424
 
406 425
         mock_notify_instance_action.assert_has_calls([
407 426
             mock.call(self.context, instance, 'fake-mini',
408
-                      action='unshelve', phase='start'),
427
+                      action='unshelve', phase='start', bdms=mock_bdms),
409 428
             mock.call(self.context, instance, 'fake-mini',
410
-                      action='unshelve', phase='end')])
429
+                      action='unshelve', phase='end', bdms=mock_bdms)])
411 430
 
412 431
         # prepare expect call lists
413 432
         mock_notify_instance_usage_call_list = [

Loading…
Cancel
Save