From 34944ece1669bff6323239acbb2402007707a851 Mon Sep 17 00:00:00 2001 From: Hiroaki Kobayashi Date: Thu, 18 May 2017 17:13:26 +0900 Subject: [PATCH] Gather all operations for the reservations table into the manager Records in the reservations table in the Blazar DB are created and updated by both the manager and the resource plugin [1]. This results in a lack of separation of concerns. This patch migrates such operations from the resource plugin to the manager. It makes the manager responsible for the reservations table, and the resource plugin responsible only for resource specific tables (e.g. the computehost_reservations table). It also changes the resource_id field in the reservations table to hold an ID of the record of resource-specific reservations tables, e.g. computehost_reservations table for the physical-host plugin and instance_reservations table for the new instance plugin [2]. An ID of the resource itself which was held in the resource_id field is migrated to the resource specific tables, so a new field 'aggregate_id' is added into the computehost_reservations table. The instance_reservations table of the new instance plugin is expected to have resource IDs like a flavor ID, an aggregate ID, and instance IDs. The ID of the record of the instance_reservations table is expected to be held in the resource_id field of the reservations table. 
[1] https://etherpad.openstack.org/p/blazar-resource-plugin [2] I03315216c3a6203e088e914ffb9fedf1e672d732 Change-Id: Ia042020088af9e34e3430c44814cd69f4ed303fb --- ...bb2cd2_add_aggregate_id_field_into_the_.py | 44 +++++ blazar/db/sqlalchemy/models.py | 1 + blazar/manager/service.py | 28 ++- blazar/plugins/base.py | 13 +- blazar/plugins/dummy_vm_plugin.py | 3 + blazar/plugins/instances/vm_plugin.py | 3 + blazar/plugins/oshosts/host_plugin.py | 113 +++++------ blazar/tests/manager/test_service.py | 8 + .../plugins/test_physical_host_plugin.py | 182 +++++++----------- 9 files changed, 203 insertions(+), 192 deletions(-) create mode 100644 blazar/db/migration/alembic_migrations/versions/7f1a7bbb2cd2_add_aggregate_id_field_into_the_.py diff --git a/blazar/db/migration/alembic_migrations/versions/7f1a7bbb2cd2_add_aggregate_id_field_into_the_.py b/blazar/db/migration/alembic_migrations/versions/7f1a7bbb2cd2_add_aggregate_id_field_into_the_.py new file mode 100644 index 00000000..dbb27520 --- /dev/null +++ b/blazar/db/migration/alembic_migrations/versions/7f1a7bbb2cd2_add_aggregate_id_field_into_the_.py @@ -0,0 +1,44 @@ +# Copyright 2017 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Add aggregate_id field into the computehost_reservations table + +Revision ID: 7f1a7bbb2cd2 +Revises: 1fd6c2eded89 +Create Date: 2017-05-18 09:23:29.730233 + +""" + +# revision identifiers, used by Alembic. 
+revision = '7f1a7bbb2cd2' +down_revision = '1fd6c2eded89' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('computehost_reservations', + sa.Column('aggregate_id', + sa.Integer, + nullable=True)) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column('computehost_reservations', 'aggregate_id') + # ### end Alembic commands ### diff --git a/blazar/db/sqlalchemy/models.py b/blazar/db/sqlalchemy/models.py index 3f2487ad..45f5e932 100644 --- a/blazar/db/sqlalchemy/models.py +++ b/blazar/db/sqlalchemy/models.py @@ -146,6 +146,7 @@ class ComputeHostReservation(mb.BlazarBase): id = _id_column() reservation_id = sa.Column(sa.String(36), sa.ForeignKey('reservations.id')) + aggregate_id = sa.Column(sa.Integer) resource_properties = sa.Column(MediumText()) count_range = sa.Column(sa.String(36)) hypervisor_properties = sa.Column(MediumText()) diff --git a/blazar/manager/service.py b/blazar/manager/service.py index 8380163d..297825e6 100644 --- a/blazar/manager/service.py +++ b/blazar/manager/service.py @@ -256,13 +256,7 @@ class ManagerService(service_utils.RPCServer): reservation['lease_id'] = lease['id'] reservation['start_date'] = lease['start_date'] reservation['end_date'] = lease['end_date'] - resource_type = reservation['resource_type'] - if resource_type in self.plugins: - self.plugins[resource_type].create_reservation( - reservation) - else: - raise exceptions.UnsupportedResourceType( - resource_type) + self._create_reservation(reservation) except (exceptions.UnsupportedResourceType, common_ex.BlazarException): LOG.exception("Failed to create reservation for a lease. 
" @@ -411,6 +405,9 @@ class ManagerService(service_utils.RPCServer): def end_lease(self, lease_id, event_id): lease = self.get_lease(lease_id) + for reservation in lease['reservations']: + db_api.reservation_update(reservation['id'], + {'status': 'completed'}) with trusts.create_ctx_from_trust(lease['trust_id']): self._basic_action(lease_id, event_id, 'on_end', 'deleted') @@ -446,6 +443,23 @@ class ManagerService(service_utils.RPCServer): db_api.event_update(event_id, {'status': event_status}) + def _create_reservation(self, values): + resource_type = values['resource_type'] + if resource_type not in self.plugins: + raise exceptions.UnsupportedResourceType(resource_type) + reservation_values = { + 'lease_id': values['lease_id'], + 'resource_type': resource_type, + 'status': 'pending' + } + reservation = db_api.reservation_create(reservation_values) + resource_id = self.plugins[resource_type].reserve_resource( + reservation['id'], + values + ) + db_api.reservation_update(reservation['id'], + {'resource_id': resource_id}) + def _send_notification(self, lease, ctx, events=[]): payload = notification_api.format_lease_payload(lease) diff --git a/blazar/plugins/base.py b/blazar/plugins/base.py index 2151eb41..d73f8caa 100644 --- a/blazar/plugins/base.py +++ b/blazar/plugins/base.py @@ -59,15 +59,10 @@ class BasePlugin(object): 'description': self.description, } - def create_reservation(self, values): - """Create reservation.""" - reservation_values = { - 'lease_id': values['lease_id'], - 'resource_id': values['resource_id'], - 'resource_type': values['resource_type'], - 'status': 'pending' - } - db_api.reservation_create(reservation_values) + @abc.abstractmethod + def reserve_resource(self, reservation_id, values): + """Reserve resource.""" + pass def update_reservation(self, reservation_id, values): """Update reservation.""" diff --git a/blazar/plugins/dummy_vm_plugin.py b/blazar/plugins/dummy_vm_plugin.py index 575de8f3..972594ba 100644 --- 
a/blazar/plugins/dummy_vm_plugin.py +++ b/blazar/plugins/dummy_vm_plugin.py @@ -22,6 +22,9 @@ class DummyVMPlugin(base.BasePlugin): title = 'Dummy VM Plugin' description = 'This plugin does nothing.' + def reserve_resource(self, reservation_id, values): + return None + def on_start(self, resource_id): """Dummy VM plugin does nothing.""" return 'VM %s should be waked up this moment.' % resource_id diff --git a/blazar/plugins/instances/vm_plugin.py b/blazar/plugins/instances/vm_plugin.py index bd63c009..36e355a7 100644 --- a/blazar/plugins/instances/vm_plugin.py +++ b/blazar/plugins/instances/vm_plugin.py @@ -45,6 +45,9 @@ class VMPlugin(base.BasePlugin, nova.NovaClientWrapper): description = ("This is basic plugin for VM management. " "It can start, snapshot and suspend VMs") + def reserve_resource(self, reservation_id, values): + return None + def on_start(self, resource_id): try: self.nova.servers.unshelve(resource_id) diff --git a/blazar/plugins/oshosts/host_plugin.py b/blazar/plugins/oshosts/host_plugin.py index ab22cd0c..37ca4f50 100644 --- a/blazar/plugins/oshosts/host_plugin.py +++ b/blazar/plugins/oshosts/host_plugin.py @@ -16,7 +16,6 @@ import datetime import json -import uuid from oslo_config import cfg import six @@ -65,27 +64,19 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper): project_name=CONF.os_admin_project_name, project_domain_name=CONF.os_admin_user_domain_name) - def create_reservation(self, values): + def reserve_resource(self, reservation_id, values): """Create reservation.""" pool = rp.ReservationPool() - pool_name = str(uuid.uuid4()) - pool_instance = pool.create(name=pool_name) - reservation_values = { - 'id': pool_name, - 'lease_id': values['lease_id'], - 'resource_id': pool_instance.id, - 'resource_type': values['resource_type'], - 'status': 'pending', - } + pool_instance = pool.create(name=reservation_id) min_hosts = values.get('min') max_hosts = values.get('max') if 0 <= min_hosts and min_hosts <= max_hosts: 
count_range = str(min_hosts) + '-' + str(max_hosts) else: raise manager_ex.InvalidRange() - reservation = db_api.reservation_create(reservation_values) - host_values = { - 'reservation_id': reservation['id'], + host_rsrv_values = { + 'reservation_id': reservation_id, + 'aggregate_id': pool_instance.id, 'resource_properties': values['resource_properties'], 'hypervisor_properties': values['hypervisor_properties'], 'count_range': count_range, @@ -100,24 +91,22 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper): ) if not host_ids: pool.delete(pool_instance.id) - db_api.reservation_destroy(reservation['id']) raise manager_ex.NotEnoughHostsAvailable() - - db_api.host_reservation_create(host_values) + host_reservation = db_api.host_reservation_create(host_rsrv_values) for host_id in host_ids: db_api.host_allocation_create({'compute_host_id': host_id, - 'reservation_id': reservation['id']}) + 'reservation_id': reservation_id}) + return host_reservation['id'] def update_reservation(self, reservation_id, values): """Update reservation.""" reservation = db_api.reservation_get(reservation_id) lease = db_api.lease_get(reservation['lease_id']) - pool = rp.ReservationPool() - hosts_in_pool = pool.get_computehosts( - reservation['resource_id']) + host_reservation = None if (values['start_date'] < lease['start_date'] or values['end_date'] > lease['end_date']): allocations = [] + hosts_in_pool = [] for allocation in db_api.host_allocation_get_all_by_values( reservation_id=reservation_id): full_periods = db_utils.get_full_periods( @@ -138,16 +127,12 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper): full_periods[0][0] == max_start and full_periods[0][1] == min_end)): allocations.append(allocation) - if (hosts_in_pool and - self.nova.hypervisors.get( - self._get_hypervisor_from_name_or_id( - allocation['compute_host_id']) - ).__dict__['running_vms'] > 0): - raise manager_ex.NotEnoughHostsAvailable() if allocations: - host_reservation = ( - 
db_api.host_reservation_get_by_reservation_id( - reservation_id)) + host_reservation = db_api.host_reservation_get( + reservation['resource_id']) + pool = rp.ReservationPool() + hosts_in_pool.extend(pool.get_computehosts( + host_reservation['aggregate_id'])) host_ids = self._matching_hosts( host_reservation['hypervisor_properties'], host_reservation['resource_properties'], @@ -159,7 +144,17 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper): if hosts_in_pool: old_hosts = [allocation['compute_host_id'] for allocation in allocations] - pool.remove_computehost(reservation['resource_id'], + # TODO(hiro-kobayashi): This condition check is not enough + # to prevent race conditions. It should not be allowed to + # reallocate a reservation to another hosts if the lease + # has been already started. + # Report: https://bugs.launchpad.net/blazar/+bug/1692805 + for host in old_hosts: + if self.nova.hypervisors.get( + self._get_hypervisor_from_name_or_id(host) + ).__dict__['running_vms'] > 0: + raise manager_ex.NotEnoughHostsAvailable() + pool.remove_computehost(host_reservation['aggregate_id'], old_hosts) for allocation in allocations: db_api.host_allocation_destroy(allocation['id']) @@ -169,45 +164,37 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper): 'reservation_id': reservation_id}) if hosts_in_pool: host = db_api.host_get(host_id) - pool.add_computehost(reservation['resource_id'], + pool.add_computehost(host_reservation['aggregate_id'], host['service_name']) def on_start(self, resource_id): """Add the hosts in the pool.""" - reservations = db_api.reservation_get_all_by_values( - resource_id=resource_id) - for reservation in reservations: - pool = rp.ReservationPool() - for allocation in db_api.host_allocation_get_all_by_values( - reservation_id=reservation['id']): - host = db_api.host_get(allocation['compute_host_id']) - pool.add_computehost(reservation['resource_id'], - host['service_name']) + host_reservation = 
db_api.host_reservation_get(resource_id) + pool = rp.ReservationPool() + for allocation in db_api.host_allocation_get_all_by_values( + reservation_id=host_reservation['reservation_id']): + host = db_api.host_get(allocation['compute_host_id']) + pool.add_computehost(host_reservation['aggregate_id'], + host['service_name']) def on_end(self, resource_id): """Remove the hosts from the pool.""" - reservations = db_api.reservation_get_all_by_values( - resource_id=resource_id) - for reservation in reservations: - db_api.reservation_update(reservation['id'], - {'status': 'completed'}) - host_reservation = db_api.host_reservation_get_by_reservation_id( - reservation['id']) - db_api.host_reservation_update(host_reservation['id'], - {'status': 'completed'}) - allocations = db_api.host_allocation_get_all_by_values( - reservation_id=reservation['id']) - for allocation in allocations: - db_api.host_allocation_destroy(allocation['id']) - pool = rp.ReservationPool() - for host in pool.get_computehosts(reservation['resource_id']): - for server in self.nova.servers.list( - search_opts={"host": host}): - self.nova.servers.delete(server=server) - try: - pool.delete(reservation['resource_id']) - except manager_ex.AggregateNotFound: - pass + host_reservation = db_api.host_reservation_get(resource_id) + db_api.host_reservation_update(host_reservation['id'], + {'status': 'completed'}) + allocations = db_api.host_allocation_get_all_by_values( + reservation_id=host_reservation['reservation_id']) + for allocation in allocations: + db_api.host_allocation_destroy(allocation['id']) + pool = rp.ReservationPool() + for host in pool.get_computehosts(host_reservation['aggregate_id']): + for server in self.nova.servers.list( + search_opts={"host": host}): + self.nova.servers.delete(server=server) + try: + pool.delete(host_reservation['aggregate_id']) + except manager_ex.AggregateNotFound: + pass def _get_extra_capabilities(self, host_id): extra_capabilities = {} diff --git 
a/blazar/tests/manager/test_service.py b/blazar/tests/manager/test_service.py index 05b6d9ca..635b0b84 100644 --- a/blazar/tests/manager/test_service.py +++ b/blazar/tests/manager/test_service.py @@ -47,6 +47,9 @@ class FakePlugin(base.BasePlugin): title = 'Fake Plugin' description = 'This plugin is fake.' + def reserve_resource(self, reservation_id, values): + return None + def on_start(self, resource_id): return 'Resorce %s should be started this moment.' % resource_id @@ -123,6 +126,7 @@ class ServiceTestCase(tests.TestCase): self.lease_create = self.patch(self.db_api, 'lease_create') self.lease_update = self.patch(self.db_api, 'lease_update') self.lease_destroy = self.patch(self.db_api, 'lease_destroy') + self.reservation_create = self.patch(self.db_api, 'reservation_create') self.reservation_update = self.patch(self.db_api, 'reservation_update') self.event_update = self.patch(self.db_api, 'event_update') self.manager.plugins = {'virtual:instance': self.fake_plugin} @@ -1087,6 +1091,10 @@ class ServiceTestCase(tests.TestCase): self.manager.end_lease(self.lease_id, '1') + self.reservation_update.assert_called_with( + self.lease['reservations'][0]['id'], + {'status': 'completed'} + ) self.trust_ctx.assert_called_once_with(self.lease['trust_id']) basic_action.assert_called_once_with(self.lease_id, '1', 'on_end', 'deleted') diff --git a/blazar/tests/plugins/test_physical_host_plugin.py b/blazar/tests/plugins/test_physical_host_plugin.py index e863403d..c9abb9f1 100644 --- a/blazar/tests/plugins/test_physical_host_plugin.py +++ b/blazar/tests/plugins/test_physical_host_plugin.py @@ -14,7 +14,6 @@ # limitations under the License. 
import datetime -import uuid import mock from novaclient import client as nova_client @@ -312,32 +311,18 @@ class PhysicalHostPluginTestCase(tests.TestCase): 'end_date': now + datetime.timedelta(hours=1), 'resource_type': plugin.RESOURCE_TYPE, } - reservation_values = { - 'id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509', - 'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c', - 'resource_id': '1', - 'resource_type': plugin.RESOURCE_TYPE, - 'status': 'pending', - } - uuid4 = self.patch(uuid, 'uuid4') - uuid4.return_value = uuid.UUID('441c1476-9f8f-4700-9f30-cd9b6fef3509') - self.rp_create.return_value = mock.MagicMock(id='1') - reservation_create = self.patch(self.db_api, 'reservation_create') - reservation_create.return_value = { - 'id': u'f9894fcf-e2ed-41e9-8a4c-92fac332608e', - } + self.rp_create.return_value = mock.MagicMock(id=1) + host_reservation_create = self.patch(self.db_api, + 'host_reservation_create') matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts') matching_hosts.return_value = [] pool_delete = self.patch(self.rp.ReservationPool, 'delete') - reservation_delete = self.patch(self.db_api, 'reservation_destroy') - self.assertRaises(manager_exceptions.NotEnoughHostsAvailable, - self.fake_phys_plugin.create_reservation, values) - - reservation_create.assert_called_once_with(reservation_values) - pool_delete.assert_called_once_with('1') - reservation_delete.assert_called_once_with( - u'f9894fcf-e2ed-41e9-8a4c-92fac332608e') + self.fake_phys_plugin.reserve_resource, + u'f9894fcf-e2ed-41e9-8a4c-92fac332608e', + values) + host_reservation_create.assert_not_called() + pool_delete.assert_called_once_with(1) def test_create_reservation_hosts_available(self): values = { @@ -350,20 +335,7 @@ class PhysicalHostPluginTestCase(tests.TestCase): 'end_date': datetime.datetime(2013, 12, 19, 21, 00), 'resource_type': plugin.RESOURCE_TYPE, } - reservation_values = { - 'id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509', - 'lease_id': 
u'018c1b43-e69e-4aef-a543-09681539cf4c', - 'resource_id': '1', - 'resource_type': plugin.RESOURCE_TYPE, - 'status': 'pending', - } - uuid4 = self.patch(uuid, 'uuid4') - uuid4.return_value = uuid.UUID('441c1476-9f8f-4700-9f30-cd9b6fef3509') - self.rp_create.return_value = mock.MagicMock(id='1') - reservation_create = self.patch(self.db_api, 'reservation_create') - reservation_create.return_value = { - 'id': u'f9894fcf-e2ed-41e9-8a4c-92fac332608e', - } + self.rp_create.return_value = mock.MagicMock(id=1) host_reservation_create = self.patch(self.db_api, 'host_reservation_create') matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts') @@ -371,24 +343,26 @@ class PhysicalHostPluginTestCase(tests.TestCase): host_allocation_create = self.patch( self.db_api, 'host_allocation_create') - self.fake_phys_plugin.create_reservation(values) - reservation_create.assert_called_once_with(reservation_values) + self.fake_phys_plugin.reserve_resource( + u'441c1476-9f8f-4700-9f30-cd9b6fef3509', + values) host_values = { - 'reservation_id': u'f9894fcf-e2ed-41e9-8a4c-92fac332608e', + 'reservation_id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509', + 'aggregate_id': 1, 'resource_properties': '', 'hypervisor_properties': '["=", "$memory_mb", "256"]', 'count_range': '1-1', - 'status': 'pending', + 'status': 'pending' } host_reservation_create.assert_called_once_with(host_values) calls = [ mock.call( {'compute_host_id': 'host1', - 'reservation_id': u'f9894fcf-e2ed-41e9-8a4c-92fac332608e', + 'reservation_id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509', }), mock.call( {'compute_host_id': 'host2', - 'reservation_id': u'f9894fcf-e2ed-41e9-8a4c-92fac332608e', + 'reservation_id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509', }), ] host_allocation_create.assert_has_calls(calls) @@ -406,7 +380,8 @@ class PhysicalHostPluginTestCase(tests.TestCase): } self.assertRaises( manager_exceptions.InvalidRange, - self.fake_phys_plugin.create_reservation, + self.fake_phys_plugin.reserve_resource, + 
u'441c1476-9f8f-4700-9f30-cd9b6fef3509', values) def test_update_reservation_shorten(self): @@ -424,12 +399,16 @@ class PhysicalHostPluginTestCase(tests.TestCase): 'start_date': datetime.datetime(2013, 12, 19, 20, 00), 'end_date': datetime.datetime(2013, 12, 19, 21, 00) } - host_allocation_get_all = self.patch( - self.db_api, - 'host_allocation_get_all_by_values') + host_reservation_get = self.patch(self.db_api, 'host_reservation_get') + host_reservation_get.return_value = { + 'aggregate_id': 1 + } get_computehosts = self.patch(self.rp.ReservationPool, 'get_computehosts') get_computehosts.return_value = ['host1'] + host_allocation_get_all = self.patch( + self.db_api, + 'host_allocation_get_all_by_values') self.fake_phys_plugin.update_reservation( '706eb3bc-07ed-4383-be93-b32845ece672', values) @@ -450,9 +429,7 @@ class PhysicalHostPluginTestCase(tests.TestCase): 'start_date': datetime.datetime(2013, 12, 19, 20, 00), 'end_date': datetime.datetime(2013, 12, 19, 21, 00) } - host_reservation_get_by_reservation_id = self.patch( - self.db_api, - 'host_reservation_get_by_reservation_id') + host_reservation_get = self.patch(self.db_api, 'host_reservation_get') host_allocation_get_all = self.patch( self.db_api, 'host_allocation_get_all_by_values') @@ -467,13 +444,10 @@ class PhysicalHostPluginTestCase(tests.TestCase): (datetime.datetime(2013, 12, 19, 20, 00), datetime.datetime(2013, 12, 19, 21, 00)) ] - get_computehosts = self.patch(self.rp.ReservationPool, - 'get_computehosts') - get_computehosts.return_value = ['host1'] self.fake_phys_plugin.update_reservation( '706eb3bc-07ed-4383-be93-b32845ece672', values) - host_reservation_get_by_reservation_id.assert_not_called() + host_reservation_get.assert_not_called() def test_update_reservation_move_failure(self): values = { @@ -490,9 +464,14 @@ class PhysicalHostPluginTestCase(tests.TestCase): 'start_date': datetime.datetime(2013, 12, 19, 20, 00), 'end_date': datetime.datetime(2013, 12, 19, 21, 00) } - 
host_reservation_get_by_reservation_id = self.patch( + host_reservation_get = self.patch( self.db_api, - 'host_reservation_get_by_reservation_id') + 'host_reservation_get') + host_reservation_get.return_value = { + 'aggregate_id': 1, + 'hypervisor_properties': '["=", "$memory_mb", "256"]', + 'resource_properties': '' + } host_allocation_get_all = self.patch( self.db_api, 'host_allocation_get_all_by_values') @@ -510,6 +489,8 @@ class PhysicalHostPluginTestCase(tests.TestCase): get_computehosts = self.patch(self.rp.ReservationPool, 'get_computehosts') get_computehosts.return_value = ['host1'] + matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts') + matching_hosts.return_value = ['host2'] self.patch(self.fake_phys_plugin, '_get_hypervisor_from_name_or_id') get_hypervisors = self.patch(self.nova.hypervisors, 'get') get_hypervisors.return_value = mock.MagicMock(running_vms=1) @@ -518,7 +499,7 @@ class PhysicalHostPluginTestCase(tests.TestCase): self.fake_phys_plugin.update_reservation, '706eb3bc-07ed-4383-be93-b32845ece672', values) - host_reservation_get_by_reservation_id.assert_not_called() + host_reservation_get.assert_called() def test_update_reservation_move_overlap(self): values = { @@ -577,9 +558,14 @@ class PhysicalHostPluginTestCase(tests.TestCase): } host_get = self.patch(self.db_api, 'host_get') host_get.return_value = {'service_name': 'host2'} - host_reservation_get_by_reservation_id = self.patch( + host_reservation_get = self.patch( self.db_api, - 'host_reservation_get_by_reservation_id') + 'host_reservation_get') + host_reservation_get.return_value = { + 'aggregate_id': 1, + 'hypervisor_properties': '["=", "$memory_mb", "256"]', + 'resource_properties': '' + } host_allocation_get_all = self.patch( self.db_api, 'host_allocation_get_all_by_values') @@ -611,8 +597,8 @@ class PhysicalHostPluginTestCase(tests.TestCase): self.fake_phys_plugin.update_reservation( '706eb3bc-07ed-4383-be93-b32845ece672', values) - 
host_reservation_get_by_reservation_id.assert_called_with( - '706eb3bc-07ed-4383-be93-b32845ece672') + host_reservation_get.assert_called_with( + u'91253650-cc34-4c4f-bbe8-c943aa7d0c9b') host_allocation_destroy.assert_called_with( 'dd305477-4df8-4547-87f6-69069ee546a6') host_allocation_create.assert_called_with( @@ -622,27 +608,22 @@ class PhysicalHostPluginTestCase(tests.TestCase): } ) self.remove_compute_host.assert_called_with( - '91253650-cc34-4c4f-bbe8-c943aa7d0c9b', + 1, ['host1'] ) self.add_compute_host.assert_called_with( - '91253650-cc34-4c4f-bbe8-c943aa7d0c9b', + 1, 'host2' ) def test_on_start(self): - reservation_get_all_by_values = self.patch( - self.db_api, 'reservation_get_all_by_values') - - reservation_get_all_by_values.return_value = [ - { - 'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c', - 'resource_id': u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', - } - ] + host_reservation_get = self.patch(self.db_api, 'host_reservation_get') + host_reservation_get.return_value = { + 'reservation_id': u'593e7028-c0d1-4d76-8642-2ffd890b324c', + 'aggregate_id': 1, + } host_allocation_get_all_by_values = self.patch( self.db_api, 'host_allocation_get_all_by_values') - host_allocation_get_all_by_values.return_value = [ {'compute_host_id': 'host1'}, ] @@ -654,25 +635,14 @@ class PhysicalHostPluginTestCase(tests.TestCase): self.fake_phys_plugin.on_start(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8') add_computehost.assert_called_with( - u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', 'host1_hostname') + 1, 'host1_hostname') def test_on_end_with_instances(self): - reservation_get_all_by_values = self.patch( - self.db_api, - 'reservation_get_all_by_values') - - reservation_get_all_by_values.return_value = [ - { - 'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c', - 'resource_id': u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', - } - ] - reservation_update = self.patch(self.db_api, 'reservation_update') - host_reservation_get_by_reservation_id = self.patch( - self.db_api, - 
'host_reservation_get_by_reservation_id') - host_reservation_get_by_reservation_id.return_value = { - 'id': u'35fc4e6a-ba57-4a36-be30-6012377a0387', + host_reservation_get = self.patch(self.db_api, 'host_reservation_get') + host_reservation_get.return_value = { + 'id': u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', + 'reservation_id': u'593e7028-c0d1-4d76-8642-2ffd890b324c', + 'aggregate_id': 1 } host_reservation_update = self.patch( self.db_api, @@ -696,32 +666,20 @@ class PhysicalHostPluginTestCase(tests.TestCase): delete_server = self.patch(self.ServerManager, 'delete') delete_pool = self.patch(self.rp.ReservationPool, 'delete') self.fake_phys_plugin.on_end(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8') - reservation_update.assert_called_with( - u'593e7028-c0d1-4d76-8642-2ffd890b324c', {'status': 'completed'}) host_reservation_update.assert_called_with( - u'35fc4e6a-ba57-4a36-be30-6012377a0387', {'status': 'completed'}) + u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', {'status': 'completed'}) host_allocation_destroy.assert_called_with( u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f') delete_server.assert_any_call(server='server1') delete_server.assert_any_call(server='server2') - delete_pool.assert_called_with(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8') + delete_pool.assert_called_with(1) def test_on_end_without_instances(self): - reservation_get_all_by_values = self.patch( - self.db_api, - 'reservation_get_all_by_values') - reservation_get_all_by_values.return_value = [ - { - 'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c', - 'resource_id': u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', - }, - ] - reservation_update = self.patch(self.db_api, 'reservation_update') - host_reservation_get_by_reservation_id = self.patch( - self.db_api, - 'host_reservation_get_by_reservation_id') - host_reservation_get_by_reservation_id.return_value = { - 'id': u'35fc4e6a-ba57-4a36-be30-6012377a0387', + host_reservation_get = self.patch(self.db_api, 'host_reservation_get') + host_reservation_get.return_value = { + 
'id': u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', + 'reservation_id': u'593e7028-c0d1-4d76-8642-2ffd890b324c', + 'aggregate_id': 1 } host_reservation_update = self.patch( self.db_api, @@ -745,14 +703,12 @@ class PhysicalHostPluginTestCase(tests.TestCase): delete_server = self.patch(self.ServerManager, 'delete') delete_pool = self.patch(self.rp.ReservationPool, 'delete') self.fake_phys_plugin.on_end(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8') - reservation_update.assert_called_with( - u'593e7028-c0d1-4d76-8642-2ffd890b324c', {'status': 'completed'}) host_reservation_update.assert_called_with( - u'35fc4e6a-ba57-4a36-be30-6012377a0387', {'status': 'completed'}) + u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', {'status': 'completed'}) host_allocation_destroy.assert_called_with( u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f') delete_server.assert_not_called() - delete_pool.assert_called_with(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8') + delete_pool.assert_called_with(1) def test_matching_hosts_not_allocated_hosts(self): def host_allocation_get_all_by_values(**kwargs):