Merge "Add min service level check for migrate with bandwidth"

changes/72/671072/18
Zuul, 1 week ago (parent commit a0622ae883)
2 changed files with 641 additions and 3 deletions
  1. nova/conductor/tasks/migrate.py (+109, -3)
  2. nova/tests/unit/conductor/tasks/test_migrate.py (+532, -0)

nova/conductor/tasks/migrate.py (+109, -3)

@@ -185,6 +185,101 @@ class MigrationTask(base.TaskBase):
             self.request_spec.requested_destination = objects.Destination(
                 cell=instance_mapping.cell_mapping)
 
+    def _support_resource_request(self, selection):
+        """Returns true if the host is new enough to support resource request
+        during migration.
+        """
+        svc = objects.Service.get_by_host_and_binary(
+            self.context, selection.service_host, 'nova-compute')
+        return svc.version >= 39
+
+    # TODO(gibi): Remove this compat code when nova doesn't need to support
+    # Train computes any more.
+    def _get_host_supporting_request(self, selection_list):
+        """Return the first compute selection from the selection_list where
+        the service is new enough to support resource request during migration
+        and the resources claimed successfully.
+
+        :param selection_list: a list of Selection objects returned by the
+            scheduler
+        :return: A two tuple. The first item is a Selection object
+            representing the host that supports the request. The second item
+            is a list of Selection objects representing the remaining alternate
+            hosts.
+        :raises MaxRetriesExceeded: if none of the hosts in the selection_list
+            is new enough to support the request or we cannot claim resource
+            on any of the hosts that are new enough.
+        """
+
+        if not self.request_spec.requested_resources:
+            return selection_list[0], selection_list[1:]
+
+        # Scheduler allocated resources on the first host. So check if the
+        # first host is new enough
+        if self._support_resource_request(selection_list[0]):
+            return selection_list[0], selection_list[1:]
+
+        # First host is old, so we need to use an alternate. Therefore we have
+        # to remove the allocation from the first host.
+        self.reportclient.delete_allocation_for_instance(
+            self.context, self.instance.uuid)
+        LOG.debug(
+            'Scheduler returned host %(host)s as a possible migration target '
+            'but that host is not new enough to support the migration with '
+            'resource request %(request)s. Trying alternate hosts.',
+            {'host': selection_list[0].service_host,
+             'request': self.request_spec.requested_resources},
+            instance=self.instance)
+
+        alternates = selection_list[1:]
+
+        for i, selection in enumerate(alternates):
+            if self._support_resource_request(selection):
+                # this host is new enough so we need to try to claim resources
+                # on it
+                if selection.allocation_request:
+                    alloc_req = jsonutils.loads(
+                        selection.allocation_request)
+                    resource_claimed = scheduler_utils.claim_resources(
+                        self.context, self.reportclient, self.request_spec,
+                        self.instance.uuid, alloc_req,
+                        selection.allocation_request_version)
+
+                    if not resource_claimed:
+                        LOG.debug(
+                            'Scheduler returned alternate host %(host)s as a '
+                            'possible migration target but resource claim '
+                            'failed on that host. Trying another alternate.',
+                            {'host': selection.service_host},
+                            instance=self.instance)
+                    else:
+                        return selection, alternates[i + 1:]
+
+                else:
+                    # Some deployments use different schedulers that do not
+                    # use Placement, so they will not have an
+                    # allocation_request to claim with. For those cases,
+                    # there is no concept of claiming, so just assume that
+                    # the resources are available.
+                    return selection, alternates[i + 1:]
+
+            else:
+                LOG.debug(
+                    'Scheduler returned alternate host %(host)s as a possible '
+                    'migration target but that host is not new enough to '
+                    'support the migration with resource request %(request)s. '
+                    'Trying another alternate.',
+                    {'host': selection.service_host,
+                     'request': self.request_spec.requested_resources},
+                    instance=self.instance)
+
+        # if we reach this point then none of the hosts was new enough for the
+        # request or we failed to claim resources on every alternate
+        reason = ("Exhausted all hosts available during compute service level "
+                  "check for instance %(instance_uuid)s." %
+                  {"instance_uuid": self.instance.uuid})
+        raise exception.MaxRetriesExceeded(reason=reason)
+
     def _execute(self):
         # TODO(sbauza): Remove once all the scheduler.utils methods accept a
         # RequestSpec object in the signature.
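
The compat walk in _get_host_supporting_request is easiest to see in isolation. The following is a minimal standalone sketch of the same first-supporting-host selection, using plain data instead of nova objects; FakeSelection, MIN_VERSION and lookup_version are hypothetical stand-ins for objects.Selection, the service version 39 threshold and Service.get_by_host_and_binary, and the Placement claim step is deliberately omitted.

    from dataclasses import dataclass

    MIN_VERSION = 39  # stand-in for the nova-compute version gate above


    @dataclass
    class FakeSelection:
        service_host: str


    def first_supporting_host(selections, lookup_version):
        """Return (selected, remaining_alternates) the way
        _get_host_supporting_request does, minus resource claiming."""
        for i, sel in enumerate(selections):
            if lookup_version(sel.service_host) >= MIN_VERSION:
                return sel, selections[i + 1:]
        raise RuntimeError('Exhausted all hosts during service level check')


    versions = {'host1': 38, 'host2': 39, 'host3': 39}
    selected, alternates = first_supporting_host(
        [FakeSelection(h) for h in ('host1', 'host2', 'host3')],
        versions.get)
    assert selected.service_host == 'host2'
    assert [a.service_host for a in alternates] == ['host3']

The real method additionally deletes the scheduler's allocation on a rejected first host and claims resources on the chosen alternate; the sketch only covers the version gate and the list splitting.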
@@ -277,9 +372,9 @@ class MigrationTask(base.TaskBase):
         # Since there is only ever one instance to migrate per call, we
         # just need the first returned element.
         selection_list = selection_lists[0]
-        # The selected host is the first item in the list, with the
-        # alternates being the remainder of the list.
-        selection, self.host_list = selection_list[0], selection_list[1:]
+
+        selection, self.host_list = self._get_host_supporting_request(
+            selection_list)
 
         scheduler_utils.fill_provider_mapping(
             self.context, self.reportclient, self.request_spec, selection)
@@ -295,6 +390,17 @@ class MigrationTask(base.TaskBase):
         selection = None
         while self.host_list and not host_available:
             selection = self.host_list.pop(0)
+            if (self.request_spec.requested_resources and not
+                    self._support_resource_request(selection)):
+                LOG.debug(
+                    'Scheduler returned alternate host %(host)s as a possible '
+                    'migration target for re-schedule but that host is not '
+                    'new enough to support the migration with resource '
+                    'request %(request)s. Trying another alternate.',
+                    {'host': selection.service_host,
+                     'request': self.request_spec.requested_resources},
+                    instance=self.instance)
+                continue
             if selection.allocation_request:
                 alloc_req = jsonutils.loads(selection.allocation_request)
             else:
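
The hunk above adds a skip-and-continue guard to the existing re-schedule pop loop. Here is a minimal sketch of just that control flow, where needs_resources and supports are hypothetical stand-ins for a non-empty request_spec.requested_resources and _support_resource_request:

    def next_reschedule_candidate(host_list, needs_resources, supports):
        """Pop hosts the way _reschedule does, skipping computes that are
        too old when the migration carries a resource request."""
        while host_list:
            selection = host_list.pop(0)
            if needs_resources and not supports(selection):
                continue  # mirrors the LOG.debug + continue added above
            return selection
        return None


    hosts = ['old-host', 'new-host']
    assert next_reschedule_candidate(
        hosts, needs_resources=True,
        supports=lambda h: h == 'new-host') == 'new-host'
    assert hosts == []  # the too-old host was consumed, not retried

As in the real loop, a too-old alternate is popped and skipped rather than failing the whole re-schedule; running out of hosts is what ultimately raises MaxRetriesExceeded.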

nova/tests/unit/conductor/tasks/test_migrate.py (+532, -0)

@@ -292,6 +292,538 @@ class MigrationTaskTestCase(test.NoDBTestCase):
             self.instance.uuid, alloc_req, '1.19')
         mock_fill_provider_mapping.assert_not_called()
 
+    @mock.patch('nova.scheduler.utils.claim_resources')
+    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+                'delete_allocation_for_instance')
+    @mock.patch('nova.objects.Service.get_by_host_and_binary')
+    def test_get_host_supporting_request_no_resource_request(
+            self, mock_get_service, mock_delete_allocation,
+            mock_claim_resources):
+        # no resource request so we expect the first host is simply returned
+        self.request_spec.requested_resources = []
+        task = self._generate_task()
+        resources = {
+            "resources": {
+                "VCPU": 1,
+                "MEMORY_MB": 1024,
+                "DISK_GB": 100}}
+
+        first = objects.Selection(
+            service_host="host1",
+            nodename="node1",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host1: resources}}),
+            allocation_request_version='1.19')
+        alternate = objects.Selection(
+            service_host="host2",
+            nodename="node2",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host2: resources}}),
+            allocation_request_version='1.19')
+        selection_list = [first, alternate]
+
+        selected, alternates = task._get_host_supporting_request(
+            selection_list)
+
+        self.assertEqual(first, selected)
+        self.assertEqual([alternate], alternates)
+        mock_get_service.assert_not_called()
+        # The first host was good and the scheduler made allocation on that
+        # host. So we don't expect any resource claim manipulation
+        mock_delete_allocation.assert_not_called()
+        mock_claim_resources.assert_not_called()
+
+    @mock.patch('nova.scheduler.utils.claim_resources')
+    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+                'delete_allocation_for_instance')
+    @mock.patch('nova.objects.Service.get_by_host_and_binary')
+    def test_get_host_supporting_request_first_host_is_new(
+            self, mock_get_service, mock_delete_allocation,
+            mock_claim_resources):
+        self.request_spec.requested_resources = [
+            objects.RequestGroup()
+        ]
+        task = self._generate_task()
+        resources = {
+            "resources": {
+                "VCPU": 1,
+                "MEMORY_MB": 1024,
+                "DISK_GB": 100}}
+
+        first = objects.Selection(
+            service_host="host1",
+            nodename="node1",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host1: resources}}),
+            allocation_request_version='1.19')
+        alternate = objects.Selection(
+            service_host="host2",
+            nodename="node2",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host2: resources}}),
+            allocation_request_version='1.19')
+        selection_list = [first, alternate]
+
+        first_service = objects.Service(service_host='host1')
+        first_service.version = 39
+        mock_get_service.return_value = first_service
+
+        selected, alternates = task._get_host_supporting_request(
+            selection_list)
+
+        self.assertEqual(first, selected)
+        self.assertEqual([alternate], alternates)
+        mock_get_service.assert_called_once_with(
+            task.context, 'host1', 'nova-compute')
+        # The first host was good and the scheduler made allocation on that
+        # host. So we don't expect any resource claim manipulation
+        mock_delete_allocation.assert_not_called()
+        mock_claim_resources.assert_not_called()
+
+    @mock.patch('nova.scheduler.utils.claim_resources')
+    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+                'delete_allocation_for_instance')
+    @mock.patch('nova.objects.Service.get_by_host_and_binary')
+    def test_get_host_supporting_request_first_host_is_old_no_alternates(
+            self, mock_get_service, mock_delete_allocation,
+            mock_claim_resources):
+        self.request_spec.requested_resources = [
+            objects.RequestGroup()
+        ]
+        task = self._generate_task()
+        resources = {
+            "resources": {
+                "VCPU": 1,
+                "MEMORY_MB": 1024,
+                "DISK_GB": 100}}
+
+        first = objects.Selection(
+            service_host="host1",
+            nodename="node1",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host1: resources}}),
+            allocation_request_version='1.19')
+        selection_list = [first]
+
+        first_service = objects.Service(service_host='host1')
+        first_service.version = 38
+        mock_get_service.return_value = first_service
+
+        self.assertRaises(
+            exception.MaxRetriesExceeded, task._get_host_supporting_request,
+            selection_list)
+
+        mock_get_service.assert_called_once_with(
+            task.context, 'host1', 'nova-compute')
+        mock_delete_allocation.assert_called_once_with(
+            task.context, self.instance.uuid)
+        mock_claim_resources.assert_not_called()
+
+    @mock.patch.object(migrate.LOG, 'debug')
+    @mock.patch('nova.scheduler.utils.claim_resources')
+    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+                'delete_allocation_for_instance')
+    @mock.patch('nova.objects.Service.get_by_host_and_binary')
+    def test_get_host_supporting_request_first_host_is_old_second_good(
+            self, mock_get_service, mock_delete_allocation,
+            mock_claim_resources, mock_debug):
+
+        self.request_spec.requested_resources = [
+            objects.RequestGroup()
+        ]
+        task = self._generate_task()
+        resources = {
+            "resources": {
+                "VCPU": 1,
+                "MEMORY_MB": 1024,
+                "DISK_GB": 100}}
+
+        first = objects.Selection(
+            service_host="host1",
+            nodename="node1",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host1: resources}}),
+            allocation_request_version='1.19')
+        second = objects.Selection(
+            service_host="host2",
+            nodename="node2",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host2: resources}}),
+            allocation_request_version='1.19')
+        third = objects.Selection(
+            service_host="host3",
+            nodename="node3",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host3: resources}}),
+            allocation_request_version='1.19')
+        selection_list = [first, second, third]
+
+        first_service = objects.Service(service_host='host1')
+        first_service.version = 38
+        second_service = objects.Service(service_host='host2')
+        second_service.version = 39
+        mock_get_service.side_effect = [first_service, second_service]
+
+        selected, alternates = task._get_host_supporting_request(
+            selection_list)
+
+        self.assertEqual(second, selected)
+        self.assertEqual([third], alternates)
+        mock_get_service.assert_has_calls([
+            mock.call(task.context, 'host1', 'nova-compute'),
+            mock.call(task.context, 'host2', 'nova-compute'),
+        ])
+        mock_delete_allocation.assert_called_once_with(
+            task.context, self.instance.uuid)
+        mock_claim_resources.assert_called_once_with(
+            self.context, task.reportclient, task.request_spec,
+            self.instance.uuid, {"allocations": {uuids.host2: resources}},
+            '1.19')
+
+        mock_debug.assert_called_once_with(
+            'Scheduler returned host %(host)s as a possible migration target '
+            'but that host is not new enough to support the migration with '
+            'resource request %(request)s. Trying alternate hosts.',
+            {'host': 'host1',
+             'request': self.request_spec.requested_resources},
+            instance=self.instance)
+
+    @mock.patch.object(migrate.LOG, 'debug')
+    @mock.patch('nova.scheduler.utils.claim_resources')
+    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+                'delete_allocation_for_instance')
+    @mock.patch('nova.objects.Service.get_by_host_and_binary')
+    def test_get_host_supporting_request_first_host_is_old_second_claim_fails(
+            self, mock_get_service, mock_delete_allocation,
+            mock_claim_resources, mock_debug):
+        self.request_spec.requested_resources = [
+            objects.RequestGroup()
+        ]
+        task = self._generate_task()
+        resources = {
+            "resources": {
+                "VCPU": 1,
+                "MEMORY_MB": 1024,
+                "DISK_GB": 100}}
+
+        first = objects.Selection(
+            service_host="host1",
+            nodename="node1",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host1: resources}}),
+            allocation_request_version='1.19')
+        second = objects.Selection(
+            service_host="host2",
+            nodename="node2",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host2: resources}}),
+            allocation_request_version='1.19')
+        third = objects.Selection(
+            service_host="host3",
+            nodename="node3",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host3: resources}}),
+            allocation_request_version='1.19')
+        fourth = objects.Selection(
+            service_host="host4",
+            nodename="node4",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host4: resources}}),
+            allocation_request_version='1.19')
+        selection_list = [first, second, third, fourth]
+
+        first_service = objects.Service(service_host='host1')
+        first_service.version = 38
+        second_service = objects.Service(service_host='host2')
+        second_service.version = 39
+        third_service = objects.Service(service_host='host3')
+        third_service.version = 39
+        mock_get_service.side_effect = [
+            first_service, second_service, third_service]
+        # not called for the first host but called for the second and third
+        # make the second claim fail to force the selection of the third
+        mock_claim_resources.side_effect = [False, True]
+
+        selected, alternates = task._get_host_supporting_request(
+            selection_list)
+
+        self.assertEqual(third, selected)
+        self.assertEqual([fourth], alternates)
+        mock_get_service.assert_has_calls([
+            mock.call(task.context, 'host1', 'nova-compute'),
+            mock.call(task.context, 'host2', 'nova-compute'),
+            mock.call(task.context, 'host3', 'nova-compute'),
+        ])
+        mock_delete_allocation.assert_called_once_with(
+            task.context, self.instance.uuid)
+        mock_claim_resources.assert_has_calls([
+            mock.call(
+                self.context, task.reportclient, task.request_spec,
+                self.instance.uuid,
+                {"allocations": {uuids.host2: resources}}, '1.19'),
+            mock.call(
+                self.context, task.reportclient, task.request_spec,
+                self.instance.uuid,
+                {"allocations": {uuids.host3: resources}}, '1.19'),
+        ])
+        mock_debug.assert_has_calls([
+            mock.call(
+                'Scheduler returned host %(host)s as a possible migration '
+                'target but that host is not new enough to support the '
+                'migration with resource request %(request)s. Trying '
+                'alternate hosts.',
+                {'host': 'host1',
+                 'request': self.request_spec.requested_resources},
+                instance=self.instance),
+            mock.call(
+                'Scheduler returned alternate host %(host)s as a possible '
+                'migration target but resource claim '
+                'failed on that host. Trying another alternate.',
+                {'host': 'host2'},
+                instance=self.instance),
+        ])
+
+    @mock.patch.object(migrate.LOG, 'debug')
+    @mock.patch('nova.scheduler.utils.claim_resources')
+    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+                'delete_allocation_for_instance')
+    @mock.patch('nova.objects.Service.get_by_host_and_binary')
+    def test_get_host_supporting_request_both_first_and_second_too_old(
+            self, mock_get_service, mock_delete_allocation,
+            mock_claim_resources, mock_debug):
+        self.request_spec.requested_resources = [
+            objects.RequestGroup()
+        ]
+        task = self._generate_task()
+        resources = {
+            "resources": {
+                "VCPU": 1,
+                "MEMORY_MB": 1024,
+                "DISK_GB": 100}}
+
+        first = objects.Selection(
+            service_host="host1",
+            nodename="node1",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host1: resources}}),
+            allocation_request_version='1.19')
+        second = objects.Selection(
+            service_host="host2",
+            nodename="node2",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host2: resources}}),
+            allocation_request_version='1.19')
+        third = objects.Selection(
+            service_host="host3",
+            nodename="node3",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host3: resources}}),
+            allocation_request_version='1.19')
+        fourth = objects.Selection(
+            service_host="host4",
+            nodename="node4",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host4: resources}}),
+            allocation_request_version='1.19')
+        selection_list = [first, second, third, fourth]
+
+        first_service = objects.Service(service_host='host1')
+        first_service.version = 38
+        second_service = objects.Service(service_host='host2')
+        second_service.version = 38
+        third_service = objects.Service(service_host='host3')
+        third_service.version = 39
+        mock_get_service.side_effect = [
+            first_service, second_service, third_service]
+        # not called for the first and second hosts but called for the third
+        mock_claim_resources.side_effect = [True]
+
+        selected, alternates = task._get_host_supporting_request(
+            selection_list)
+
+        self.assertEqual(third, selected)
+        self.assertEqual([fourth], alternates)
+        mock_get_service.assert_has_calls([
+            mock.call(task.context, 'host1', 'nova-compute'),
+            mock.call(task.context, 'host2', 'nova-compute'),
+            mock.call(task.context, 'host3', 'nova-compute'),
+        ])
+        mock_delete_allocation.assert_called_once_with(
+            task.context, self.instance.uuid)
+        mock_claim_resources.assert_called_once_with(
+            self.context, task.reportclient, task.request_spec,
+            self.instance.uuid,
+            {"allocations": {uuids.host3: resources}}, '1.19')
+        mock_debug.assert_has_calls([
+            mock.call(
+                'Scheduler returned host %(host)s as a possible migration '
+                'target but that host is not new enough to support the '
+                'migration with resource request %(request)s. Trying '
+                'alternate hosts.',
+                {'host': 'host1',
+                 'request': self.request_spec.requested_resources},
+                instance=self.instance),
+            mock.call(
+                'Scheduler returned alternate host %(host)s as a possible '
+                'migration target but that host is not new enough to support '
+                'the migration with resource request %(request)s. Trying '
+                'another alternate.',
+                {'host': 'host2',
+                 'request': self.request_spec.requested_resources},
+                instance=self.instance),
+        ])
+
+    @mock.patch.object(migrate.LOG, 'debug')
+    @mock.patch('nova.scheduler.utils.fill_provider_mapping')
+    @mock.patch('nova.scheduler.utils.claim_resources')
+    @mock.patch('nova.objects.Service.get_by_host_and_binary')
+    def test_reschedule_old_compute_skipped(
+            self, mock_get_service, mock_claim_resources, mock_fill_mapping,
+            mock_debug):
+        self.request_spec.requested_resources = [
+            objects.RequestGroup()
+        ]
+        task = self._generate_task()
+        resources = {
+            "resources": {
+                "VCPU": 1,
+                "MEMORY_MB": 1024,
+                "DISK_GB": 100}}
+
+        first = objects.Selection(
+            service_host="host1",
+            nodename="node1",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host1: resources}}),
+            allocation_request_version='1.19')
+        second = objects.Selection(
+            service_host="host2",
+            nodename="node2",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host2: resources}}),
+            allocation_request_version='1.19')
+
+        first_service = objects.Service(service_host='host1')
+        first_service.version = 38
+        second_service = objects.Service(service_host='host2')
+        second_service.version = 39
+        mock_get_service.side_effect = [first_service, second_service]
+
+        # set up task for re-schedule
+        task.host_list = [first, second]
+
+        selected = task._reschedule()
+
+        self.assertEqual(second, selected)
+        self.assertEqual([], task.host_list)
+        mock_get_service.assert_has_calls([
+            mock.call(task.context, 'host1', 'nova-compute'),
+            mock.call(task.context, 'host2', 'nova-compute'),
+        ])
+        mock_claim_resources.assert_called_once_with(
+            self.context.elevated(), task.reportclient, task.request_spec,
+            self.instance.uuid,
+            {"allocations": {uuids.host2: resources}}, '1.19')
+        mock_fill_mapping.assert_called_once_with(
+            task.context, task.reportclient, task.request_spec, second)
+        mock_debug.assert_has_calls([
+            mock.call(
+                'Scheduler returned alternate host %(host)s as a possible '
+                'migration target for re-schedule but that host is not '
+                'new enough to support the migration with resource '
+                'request %(request)s. Trying another alternate.',
+                {'host': 'host1',
+                 'request': self.request_spec.requested_resources},
+                instance=self.instance),
+        ])
+
+    @mock.patch.object(migrate.LOG, 'debug')
+    @mock.patch('nova.scheduler.utils.fill_provider_mapping')
+    @mock.patch('nova.scheduler.utils.claim_resources')
+    @mock.patch('nova.objects.Service.get_by_host_and_binary')
+    def test_reschedule_old_computes_no_more_alternates(
+            self, mock_get_service, mock_claim_resources, mock_fill_mapping,
+            mock_debug):
+        self.request_spec.requested_resources = [
+            objects.RequestGroup()
+        ]
+        task = self._generate_task()
+        resources = {
+            "resources": {
+                "VCPU": 1,
+                "MEMORY_MB": 1024,
+                "DISK_GB": 100}}
+
+        first = objects.Selection(
+            service_host="host1",
+            nodename="node1",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host1: resources}}),
+            allocation_request_version='1.19')
+        second = objects.Selection(
+            service_host="host2",
+            nodename="node2",
+            cell_uuid=uuids.cell1,
+            allocation_request=jsonutils.dumps(
+                {"allocations": {uuids.host2: resources}}),
+            allocation_request_version='1.19')
+
+        first_service = objects.Service(service_host='host1')
+        first_service.version = 38
+        second_service = objects.Service(service_host='host2')
+        second_service.version = 38
+        mock_get_service.side_effect = [first_service, second_service]
+
+        # set up task for re-schedule
+        task.host_list = [first, second]
+
+        self.assertRaises(exception.MaxRetriesExceeded, task._reschedule)
+
+        self.assertEqual([], task.host_list)
+        mock_get_service.assert_has_calls([
+            mock.call(task.context, 'host1', 'nova-compute'),
+            mock.call(task.context, 'host2', 'nova-compute'),
+        ])
+        mock_claim_resources.assert_not_called()
+        mock_fill_mapping.assert_not_called()
+        mock_debug.assert_has_calls([
+            mock.call(
+                'Scheduler returned alternate host %(host)s as a possible '
+                'migration target for re-schedule but that host is not '
+                'new enough to support the migration with resource '
+                'request %(request)s. Trying another alternate.',
+                {'host': 'host1',
+                 'request': self.request_spec.requested_resources},
+                instance=self.instance),
+            mock.call(
+                'Scheduler returned alternate host %(host)s as a possible '
+                'migration target for re-schedule but that host is not '
+                'new enough to support the migration with resource '
+                'request %(request)s. Trying another alternate.',
+                {'host': 'host2',
+                 'request': self.request_spec.requested_resources},
+                instance=self.instance),
+        ])
+
 
 class MigrationTaskAllocationUtils(test.NoDBTestCase):
     @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
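
Each test above builds the same Selection fixtures by hand. A hypothetical helper like the one below (not part of this change, and relying on the test module's existing objects, jsonutils and uuids imports) could cut that boilerplate:

    def _make_selection(host_num, resources):
        """Build a Selection like the fixtures repeated in the tests above."""
        host = 'host%d' % host_num
        return objects.Selection(
            service_host=host,
            nodename='node%d' % host_num,
            cell_uuid=uuids.cell1,
            allocation_request=jsonutils.dumps(
                {'allocations': {getattr(uuids, host): resources}}),
            allocation_request_version='1.19')

    # e.g. selection_list = [_make_selection(n, resources) for n in (1, 2, 3)]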
