Merge "Delete instances at the end of leases"

This commit is contained in:
Jenkins 2017-04-17 01:15:04 +00:00 committed by Gerrit Code Review
commit ebf507933f
4 changed files with 46 additions and 20 deletions

View File

@ -35,10 +35,14 @@ from blazar.utils import trusts
plugin_opts = [ plugin_opts = [
cfg.StrOpt('on_end', cfg.StrOpt('on_end',
default='on_end', default='on_end',
deprecated_for_removal=True,
deprecated_since='0.3.0',
help='Actions which we will use in the end of the lease'), help='Actions which we will use in the end of the lease'),
cfg.StrOpt('on_start', cfg.StrOpt('on_start',
default='on_start', default='on_start',
help='Actions which we will use at the start of the lease'), deprecated_for_removal=True,
deprecated_since='0.3.0',
help='Actions which we will use at the start of the lease')
] ]
CONF = cfg.CONF CONF = cfg.CONF
@ -185,15 +189,17 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
{'status': 'completed'}) {'status': 'completed'})
allocations = db_api.host_allocation_get_all_by_values( allocations = db_api.host_allocation_get_all_by_values(
reservation_id=reservation['id']) reservation_id=reservation['id'])
pool = rp.ReservationPool()
for allocation in allocations: for allocation in allocations:
db_api.host_allocation_destroy(allocation['id']) db_api.host_allocation_destroy(allocation['id'])
if self.nova.hypervisors.get( pool = rp.ReservationPool()
self._get_hypervisor_from_name_or_id( for host in pool.get_computehosts(reservation['resource_id']):
allocation['compute_host_id']) for server in self.nova.servers.list(
).__dict__['running_vms'] == 0: search_opts={"host": host}):
self.nova.servers.delete(server=server)
try:
pool.delete(reservation['resource_id']) pool.delete(reservation['resource_id'])
# TODO(frossigneux) Kill, migrate, or increase fees... except manager_ex.AggregateNotFound:
pass
def _get_extra_capabilities(self, host_id): def _get_extra_capabilities(self, host_id):
extra_capabilities = {} extra_capabilities = {}

View File

@ -32,6 +32,7 @@ from blazar.plugins.oshosts import nova_inventory
from blazar.plugins.oshosts import reservation_pool as rp from blazar.plugins.oshosts import reservation_pool as rp
from blazar import tests from blazar import tests
from blazar.utils.openstack import base from blazar.utils.openstack import base
from blazar.utils.openstack.nova import ServerManager
from blazar.utils import trusts from blazar.utils import trusts
@ -161,6 +162,8 @@ class PhysicalHostPluginTestCase(tests.TestCase):
self.trust_ctx = self.patch(self.trusts, 'create_ctx_from_trust') self.trust_ctx = self.patch(self.trusts, 'create_ctx_from_trust')
self.trust_create = self.patch(self.trusts, 'create_trust') self.trust_create = self.patch(self.trusts, 'create_trust')
self.ServerManager = ServerManager
def test_get_host(self): def test_get_host(self):
host = self.fake_phys_plugin.get_computehost(self.fake_host_id) host = self.fake_phys_plugin.get_computehost(self.fake_host_id)
self.db_host_get.assert_called_once_with('1') self.db_host_get.assert_called_once_with('1')
@ -687,10 +690,13 @@ class PhysicalHostPluginTestCase(tests.TestCase):
host_allocation_destroy = self.patch( host_allocation_destroy = self.patch(
self.db_api, self.db_api,
'host_allocation_destroy') 'host_allocation_destroy')
delete = self.patch(self.rp.ReservationPool, 'delete') get_computehosts = self.patch(self.rp.ReservationPool,
self.patch(self.fake_phys_plugin, '_get_hypervisor_from_name_or_id') 'get_computehosts')
get_hypervisors = self.patch(self.nova.hypervisors, 'get') get_computehosts.return_value = ['host']
get_hypervisors.return_value = mock.MagicMock(running_vms=1) list_servers = self.patch(self.ServerManager, 'list')
list_servers.return_value = ['server1', 'server2']
delete_server = self.patch(self.ServerManager, 'delete')
delete_pool = self.patch(self.rp.ReservationPool, 'delete')
self.fake_phys_plugin.on_end(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8') self.fake_phys_plugin.on_end(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
reservation_update.assert_called_with( reservation_update.assert_called_with(
u'593e7028-c0d1-4d76-8642-2ffd890b324c', {'status': 'completed'}) u'593e7028-c0d1-4d76-8642-2ffd890b324c', {'status': 'completed'})
@ -698,7 +704,9 @@ class PhysicalHostPluginTestCase(tests.TestCase):
u'35fc4e6a-ba57-4a36-be30-6012377a0387', {'status': 'completed'}) u'35fc4e6a-ba57-4a36-be30-6012377a0387', {'status': 'completed'})
host_allocation_destroy.assert_called_with( host_allocation_destroy.assert_called_with(
u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f') u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f')
assert not delete.called delete_server.assert_any_call(server='server1')
delete_server.assert_any_call(server='server2')
delete_pool.assert_called_with(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
def test_on_end_without_instances(self): def test_on_end_without_instances(self):
reservation_get_all_by_values = self.patch( reservation_get_all_by_values = self.patch(
@ -731,10 +739,13 @@ class PhysicalHostPluginTestCase(tests.TestCase):
host_allocation_destroy = self.patch( host_allocation_destroy = self.patch(
self.db_api, self.db_api,
'host_allocation_destroy') 'host_allocation_destroy')
delete = self.patch(self.rp.ReservationPool, 'delete') get_computehosts = self.patch(self.rp.ReservationPool,
self.patch(self.fake_phys_plugin, '_get_hypervisor_from_name_or_id') 'get_computehosts')
get_hypervisors = self.patch(self.nova.hypervisors, 'get') get_computehosts.return_value = ['host']
get_hypervisors.return_value = mock.MagicMock(running_vms=0) list_servers = self.patch(self.ServerManager, 'list')
list_servers.return_value = []
delete_server = self.patch(self.ServerManager, 'delete')
delete_pool = self.patch(self.rp.ReservationPool, 'delete')
self.fake_phys_plugin.on_end(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8') self.fake_phys_plugin.on_end(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
reservation_update.assert_called_with( reservation_update.assert_called_with(
u'593e7028-c0d1-4d76-8642-2ffd890b324c', {'status': 'completed'}) u'593e7028-c0d1-4d76-8642-2ffd890b324c', {'status': 'completed'})
@ -742,7 +753,8 @@ class PhysicalHostPluginTestCase(tests.TestCase):
u'35fc4e6a-ba57-4a36-be30-6012377a0387', {'status': 'completed'}) u'35fc4e6a-ba57-4a36-be30-6012377a0387', {'status': 'completed'})
host_allocation_destroy.assert_called_with( host_allocation_destroy.assert_called_with(
u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f') u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f')
delete.assert_called_with(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8') delete_server.assert_not_called()
delete_pool.assert_called_with(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
def test_matching_hosts_not_allocated_hosts(self): def test_matching_hosts_not_allocated_hosts(self):
def host_allocation_get_all_by_values(**kwargs): def host_allocation_get_all_by_values(**kwargs):

View File

@ -126,8 +126,6 @@ Then edit */etc/blazar/blazar.conf* using the following example:
auth_uri=<auth_uri> auth_uri=<auth_uri>
[physical:host] [physical:host]
on_start=on_start
on_end=on_end
aggregate_freepool_name=freepool aggregate_freepool_name=freepool
project_id_key=blazar:project project_id_key=blazar:project
blazar_owner=blazar:owner blazar_owner=blazar:owner

View File

@ -0,0 +1,10 @@
---
features:
- |
The physical-host plugin now force-deletes all instances remaining on
leased hosts at the end of a lease, to prevent failures in subsequent
leases scheduled on the same hosts.
deprecations:
- |
The *on_start* and *on_end* config options for the physical-host plugin
have been deprecated and will be removed in a future release.