Merge "Func test for migrate server with ports having resource request"

changes/72/671072/18
Committed by Zuul 1 week ago · commit 8126ba54d1

nova/tests/fixtures.py (+0, -3)

@@ -1484,9 +1484,6 @@ class NeutronFixture(fixtures.Fixture):
             'nova.network.neutronv2.api.API.remove_fixed_ip_from_instance',
             lambda *args, **kwargs: network_model.NetworkInfo.hydrate(
                 NeutronFixture.nw_info))
-        self.test.stub_out(
-            'nova.network.neutronv2.api.API.migrate_instance_finish',
-            lambda *args, **kwargs: None)
         self.test.stub_out(
             'nova.network.security_group.neutron_driver.SecurityGroupAPI.'
             'get_instances_security_groups_bindings',

nova/tests/functional/integrated_helpers.py (+4, -1)

@@ -267,7 +267,7 @@ class InstanceHelperMixin(object):

     def _build_minimal_create_server_request(self, api, name, image_uuid=None,
                                              flavor_id=None, networks=None,
-                                             az=None):
+                                             az=None, host=None):
         server = {}

         # We now have a valid imageId
@@ -282,6 +282,9 @@ class InstanceHelperMixin(object):
             server['networks'] = networks
         if az is not None:
             server['availability_zone'] = az
+        # This requires at least microversion 2.74 to work
+        if host is not None:
+            server['host'] = host
         return server

     def _wait_until_deleted(self, server):
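
As context for the new host parameter, here is a minimal, hypothetical usage sketch (the server name, port, and flavor are illustrative; forcing a host through the create request body needs at least compute API microversion 2.74):

    # Hypothetical caller: build a create-server request pinned to a
    # specific compute host (compute API microversion >= 2.74 assumed).
    server_req = self._build_minimal_create_server_request(
        self.api, 'test-server', flavor_id=flavor['id'],
        networks=[{'port': port['id']}], host='host1')
    server = self.api.post_server({'server': server_req})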

nova/tests/functional/test_servers.py (+244, -2)

@@ -5561,11 +5561,12 @@ class PortResourceRequestBasedSchedulingTestBase(
             self._resources_from_flavor(flavor),
             compute_allocations)

-    def _create_server(self, flavor, networks):
+    def _create_server(self, flavor, networks, host=None):
         server_req = self._build_minimal_create_server_request(
             self.api, 'bandwidth-aware-server',
             image_uuid='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
-            flavor_id=flavor['id'], networks=networks)
+            flavor_id=flavor['id'], networks=networks,
+            host=host)
         return self.api.post_server({'server': server_req})

     def _set_provider_inventories(self, rp_uuid, inventories):
@@ -6357,6 +6358,247 @@ class PortResourceRequestBasedSchedulingTest(
             port_binding['pci_slot'])


+class HostNameWeigher(weights.BaseHostWeigher):
+    # Weigher to make the scheduler alternate host list deterministic
+    _weights = {'host1': 100, 'host2': 50, 'host3': 10}
+
+    def _weigh_object(self, host_state, weight_properties):
+        # Any undefined host gets no weight.
+        return self._weights.get(host_state.host, 0)
+
+
+class ServerMoveWithPortResourceRequestTest(
+        PortResourceRequestBasedSchedulingTestBase):
+
+    def setUp(self):
+        # Use our custom weigher defined above to make sure that we have
+        # a predictable host order in the alternate list returned by the
+        # scheduler for migration.
+        self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
+                   group='filter_scheduler')
+        super(ServerMoveWithPortResourceRequestTest, self).setUp()
+
+        # The API actively rejects move operations for servers that have
+        # ports with a resource request, so we have to turn off that check.
+        # TODO(gibi): Remove this when the move operations are supported and
+        # the API check is removed.
+        patcher = mock.patch(
+            'nova.api.openstack.common.'
+            'supports_port_resource_request_during_move',
+            return_value=True)
+        self.addCleanup(patcher.stop)
+        patcher.start()
+
+        self.compute2 = self._start_compute('host2')
+        self.compute2_rp_uuid = self._get_provider_uuid_by_host('host2')
+        self._create_networking_rp_tree(self.compute2_rp_uuid)
+        self.compute2_service_id = self.admin_api.get_services(
+            host='host2', binary='nova-compute')[0]['id']
+
+    def _check_allocation(
+            self, server, compute_rp_uuid, non_qos_port, qos_port,
+            migration_uuid=None, source_compute_rp_uuid=None):
+
+        updated_non_qos_port = self.neutron.show_port(
+            non_qos_port['id'])['port']
+        updated_qos_port = self.neutron.show_port(qos_port['id'])['port']
+
+        allocations = self.placement_api.get(
+            '/allocations/%s' % server['id']).body['allocations']
+
+        # We expect one set of allocations for the compute resources on the
+        # compute rp and one set for the networking resources on the ovs bridge
+        # rp due to the qos_port resource request
+        self.assertEqual(2, len(allocations))
+        self.assertComputeAllocationMatchesFlavor(
+            allocations, compute_rp_uuid, self.flavor)
+        network_allocations = allocations[
+            self.ovs_bridge_rp_per_host[compute_rp_uuid]]['resources']
+        self.assertPortMatchesAllocation(qos_port, network_allocations)
+
+        # We expect that only the RP uuid of the networking RP having the port
+        # allocation is sent in the port binding for the port having resource
+        # request
+        qos_binding_profile = updated_qos_port['binding:profile']
+        self.assertEqual(self.ovs_bridge_rp_per_host[compute_rp_uuid],
+                         qos_binding_profile['allocation'])
+
+        # And we expect not to have any allocation set in the port binding for
+        # the port that doesn't have resource request
+        self.assertNotIn('binding:profile', updated_non_qos_port)
+
+        if migration_uuid:
+            migration_allocations = self.placement_api.get(
+                '/allocations/%s' % migration_uuid).body['allocations']
+
+            # We expect one set of allocations for the compute resources on the
+            # compute rp and one set for the networking resources on the ovs
+            # bridge rp due to the qos_port resource request
+            self.assertEqual(2, len(migration_allocations))
+            self.assertComputeAllocationMatchesFlavor(
+                migration_allocations, source_compute_rp_uuid, self.flavor)
+            network_allocations = migration_allocations[
+                self.ovs_bridge_rp_per_host[
+                    source_compute_rp_uuid]]['resources']
+            self.assertPortMatchesAllocation(qos_port, network_allocations)
+
+    def _create_server_with_ports(self, non_qos_port, qos_port):
+        server = self._create_server(
+            flavor=self.flavor,
+            networks=[{'port': non_qos_port['id']},
+                      {'port': qos_port['id']}],
+            host='host1')
+        return self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
+
+    def _delete_server_and_check_allocations(self, qos_port, server):
+        self._delete_and_check_allocations(server)
+
+        # assert that unbind removes the allocation from the binding of the
+        # port that got allocation during the bind
+        updated_qos_port = self.neutron.show_port(qos_port['id'])['port']
+        binding_profile = updated_qos_port['binding:profile']
+        self.assertNotIn('allocation', binding_profile)
+
+    def test_migrate_server_with_qos_port_old_dest_compute_no_alternate(self):
+        """Create a situation where the only migration target host returned
+        by the scheduler is too old and therefore the migration fails.
+        """
+        non_qos_port = self.neutron.port_1
+        qos_port = self.neutron.port_with_resource_request
+
+        server = self._create_server_with_ports(non_qos_port, qos_port)
+
+        # check that the server allocates from the current host properly
+        self._check_allocation(
+            server, self.compute1_rp_uuid, non_qos_port, qos_port)
+
+        orig_get_service = nova.objects.Service.get_by_host_and_binary
+
+        def fake_get_service(context, host, binary):
+            if host == 'host1':
+                return orig_get_service(context, host, binary)
+            if host == 'host2':
+                service = orig_get_service(context, host, binary)
+                service.version = 38
+                return service
+
+        with mock.patch(
+                'nova.objects.Service.get_by_host_and_binary',
+                side_effect=fake_get_service):
+
+            ex = self.assertRaises(
+                client.OpenStackApiException,
+                self.api.post_server_action, server['id'], {'migrate': None})
+
+        self.assertEqual(400, ex.response.status_code)
+        self.assertIn('No valid host was found.', six.text_type(ex))
+
+        # check that the server still allocates from the original host
+        self._check_allocation(
+            server, self.compute1_rp_uuid, non_qos_port, qos_port)
+
+        # but the migration allocation is gone
+        migration_uuid = self.get_migration_uuid_for_instance(server['id'])
+        migration_allocations = self.placement_api.get(
+            '/allocations/%s' % migration_uuid).body['allocations']
+        self.assertEqual({}, migration_allocations)
+
+        self._delete_server_and_check_allocations(qos_port, server)
+
+    def test_migrate_server_with_qos_port_old_dest_compute_alternate(self):
+        """Create a situation where the first migration target host returned
+        by the scheduler is too old and therefore the second host is selected
+        by the MigrationTask.
+        """
+        self._start_compute('host3')
+        compute3_rp_uuid = self._get_provider_uuid_by_host('host3')
+        self._create_networking_rp_tree(compute3_rp_uuid)
+
+        non_qos_port = self.neutron.port_1
+        qos_port = self.neutron.port_with_resource_request
+
+        server = self._create_server_with_ports(non_qos_port, qos_port)
+
+        # check that the server allocates from the current host properly
+        self._check_allocation(
+            server, self.compute1_rp_uuid, non_qos_port, qos_port)
+
+        orig_get_service = nova.objects.Service.get_by_host_and_binary
+
+        def fake_get_service(context, host, binary):
+            if host == 'host1':
+                return orig_get_service(context, host, binary)
+            if host == 'host2':
+                service = orig_get_service(context, host, binary)
+                service.version = 38
+                return service
+            if host == 'host3':
+                service = orig_get_service(context, host, binary)
+                service.version = 39
+                return service
+
+        with mock.patch(
+                'nova.objects.Service.get_by_host_and_binary',
+                side_effect=fake_get_service):
+
+            self.api.post_server_action(server['id'], {'migrate': None})
+
+        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+
+        migration_uuid = self.get_migration_uuid_for_instance(server['id'])
+
+        # check that server allocates from host3
+        self._check_allocation(
+            server, compute3_rp_uuid, non_qos_port, qos_port,
+            migration_uuid, source_compute_rp_uuid=self.compute1_rp_uuid)
+
+        self.api.post_server_action(server['id'], {'confirmResize': None})
+        self._wait_for_migration_status(server, ['confirmed'])
+
+        # check that allocation is still OK
+        self._check_allocation(
+            server, compute3_rp_uuid, non_qos_port, qos_port)
+        # but the migration allocation is gone
+        migration_allocations = self.placement_api.get(
+            '/allocations/%s' % migration_uuid).body['allocations']
+        self.assertEqual({}, migration_allocations)
+
+        self._delete_server_and_check_allocations(qos_port, server)
+
+    def test_migrate_server_with_qos_port(self):
+        non_qos_port = self.neutron.port_1
+        qos_port = self.neutron.port_with_resource_request
+
+        server = self._create_server_with_ports(non_qos_port, qos_port)
+
+        # check that the server allocates from the current host properly
+        self._check_allocation(
+            server, self.compute1_rp_uuid, non_qos_port, qos_port)
+
+        self.api.post_server_action(server['id'], {'migrate': None})
+        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
+
+        migration_uuid = self.get_migration_uuid_for_instance(server['id'])
+
+        # check that server allocates from the new host properly
+        self._check_allocation(
+            server, self.compute2_rp_uuid, non_qos_port, qos_port,
+            migration_uuid, source_compute_rp_uuid=self.compute1_rp_uuid)
+
+        self.api.post_server_action(server['id'], {'confirmResize': None})
+        self._wait_for_migration_status(server, ['confirmed'])
+
+        # check that allocation is still OK
+        self._check_allocation(
+            server, self.compute2_rp_uuid, non_qos_port, qos_port)
+        # but the migration allocation is gone
+        migration_allocations = self.placement_api.get(
+            '/allocations/%s' % migration_uuid).body['allocations']
+        self.assertEqual({}, migration_allocations)
+
+        self._delete_server_and_check_allocations(qos_port, server)
+
+
 class PortResourceRequestReSchedulingTest(
         PortResourceRequestBasedSchedulingTestBase):
     """Similar to PortResourceRequestBasedSchedulingTest
