Physical host reservation plugin

Implements bp:host-manager

Change-Id: I782513c579f3825448bf1de5261e943e1653d521
François Rossigneux 2014-01-02 19:03:41 +01:00 committed by Swann Croiset
parent 4f597be33d
commit 50322e2f9f
6 changed files with 776 additions and 236 deletions
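For context, PhysicalHostPlugin.create_reservation() in the new host_plugin.py below consumes a values dictionary shaped like the one exercised by the unit tests further down. A minimal, illustrative sketch (keys taken from the diff; IDs and dates are placeholders):

import datetime

# Illustrative request handed to PhysicalHostPlugin.create_reservation();
# keys mirror the plugin and its tests, values are made up.
reservation_request = {
    'lease_id': '018c1b43-e69e-4aef-a543-09681539cf4c',
    'min': '1',                                   # minimum number of hosts
    'max': '2',                                   # maximum number of hosts
    'hypervisor_properties': '["=", "$memory_mb", "256"]',
    'resource_properties': '',
    'start_date': datetime.datetime(2013, 12, 19, 20, 0),
    'end_date': datetime.datetime(2013, 12, 19, 21, 0),
    'resource_type': 'physical:host',
}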


@@ -68,6 +68,8 @@ class HostHavingServers(exceptions.ClimateException):
msg_fmt = _("Servers [%(servers)s] found for host %(host)s")
# oshost plugin related exceptions
class CantAddExtraCapability(exceptions.ClimateException):
code = 409
msg_fmt = _("Can't add extracapabilities %(keys)s to Host %(host)s")
@@ -91,3 +93,7 @@ class WrongClientVersion(exceptions.ClimateException):
class NoManagementUrl(exceptions.NotFound):
code = 404
msg_fmt = _("You haven't management url for service")
class HypervisorNotFound(exceptions.ClimateException):
msg_fmt = _("Aggregate '%(pool)s' not found!")


@@ -0,0 +1,366 @@
# -*- coding: utf-8 -*-
#
# Author: François Rossigneux <francois.rossigneux@inria.fr>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import six
from novaclient import client
from oslo.config import cfg
from climate import context
from climate.db import api as db_api
from climate.db import utils as db_utils
from climate.manager import exceptions as manager_ex
from climate.openstack.common import uuidutils
from climate.plugins import base
from climate.plugins.oshosts import nova_inventory
from climate.plugins.oshosts import reservation_pool as rp
from climate.utils import service as service_utils
class PhysicalHostPlugin(base.BasePlugin):
"""Plugin for physical host resource."""
resource_type = 'physical:host'
title = 'Physical Host Plugin'
description = 'This plugin starts and shuts down the hosts.'
freepool_name = cfg.CONF[resource_type].aggregate_freepool_name
pool = None
inventory = None
def __init__(self):
#TODO(sbauza): use catalog to find the url
auth_url = "%s://%s:%s/v2.0" % (cfg.CONF.os_auth_protocol,
cfg.CONF.os_auth_host,
cfg.CONF.os_auth_port)
#TODO(scroiset): use client wrapped by climate and use trust
self.nova = client.Client('2',
username=cfg.CONF.climate_username,
api_key=cfg.CONF.climate_password,
auth_url=auth_url,
project_id=cfg.CONF.climate_tenant_name)
def create_reservation(self, values):
"""Create reservation."""
pool = rp.ReservationPool()
pool_name = uuidutils.generate_uuid()
pool_instance = pool.create(name=pool_name)
reservation_values = {
'id': pool_name,
'lease_id': values['lease_id'],
'resource_id': pool_instance.id,
'resource_type': values['resource_type'],
'status': 'pending',
}
reservation = db_api.reservation_create(reservation_values)
count_range = str(values['min']) + '-' + str(values['max'])
host_values = {
'reservation_id': reservation['id'],
'resource_properties': values['resource_properties'],
'hypervisor_properties': values['hypervisor_properties'],
'count_range': count_range,
'status': 'pending',
}
db_api.host_reservation_create(host_values)
host_ids = self._matching_hosts(
values['hypervisor_properties'],
values['resource_properties'],
count_range,
values['start_date'],
values['end_date'],
)
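# _matching_hosts() returns an empty list when no combination of hosts
# can satisfy the requested count and properties for the whole period.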
if not host_ids:
raise RuntimeError('Not enough hosts available')
for host_id in host_ids:
db_api.host_allocation_create({'compute_host_id': host_id,
'reservation_id': reservation['id']})
def on_start(self, resource_id):
"""Add the hosts in the pool."""
reservations = db_api.reservation_get_all_by_values(
resource_id=resource_id)
for reservation in reservations:
pool = rp.ReservationPool()
for allocation in db_api.host_allocation_get_all_by_values(
reservation_id=reservation['id']):
host = db_api.host_get(allocation['compute_host_id'])
host_name = host['hypervisor_hostname']
pool.add_computehost(reservation['resource_id'], host_name)
def on_end(self, resource_id):
"""Remove the hosts from the pool."""
reservations = db_api.reservation_get_all_by_values(
resource_id=resource_id)
for reservation in reservations:
db_api.reservation_update(reservation['id'],
{'status': 'completed'})
host_reservation = db_api.host_reservation_get_by_reservation_id(
reservation['id'])
db_api.host_reservation_update(host_reservation['id'],
{'status': 'completed'})
allocations = db_api.host_allocation_get_all_by_values(
reservation_id=reservation['id'])
pool = rp.ReservationPool()
for allocation in allocations:
db_api.host_allocation_destroy(allocation['id'])
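# Delete the reservation aggregate only once the host no longer
# runs any instance; otherwise keep it (see TODO below).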
if self.nova.hypervisors.get(
self._get_hypervisor_from_name(
allocation['compute_host_id'])
).__dict__['running_vms'] == 0:
pool.delete(reservation['resource_id'])
#TODO(frossigneux) Kill, migrate, or increase fees...
def setup(self, conf):
# Create freepool if not exists
with context.ClimateContext() as ctx:
ctx = ctx.elevated()
if self.pool is None:
self.pool = rp.ReservationPool()
if self.inventory is None:
self.inventory = nova_inventory.NovaInventory()
if not self._freepool_exists():
self.pool.create(name=self.freepool_name, az=None)
def _freepool_exists(self):
try:
self.pool.get_aggregate_from_name_or_id(self.freepool_name)
return True
except manager_ex.AggregateNotFound:
return False
def _get_extra_capabilities(self, host_id):
extra_capabilities = {}
raw_extra_capabilities = \
db_api.host_extra_capability_get_all_per_host(host_id)
for capability in raw_extra_capabilities:
key = capability['capability_name']
extra_capabilities[key] = capability['capability_value']
return extra_capabilities
@service_utils.export_context
def get_computehost(self, host_id):
host = db_api.host_get(host_id)
extra_capabilities = self._get_extra_capabilities(host_id)
if host is not None and extra_capabilities:
res = host.copy()
res.update(extra_capabilities)
return res
else:
return host
@service_utils.export_context
def list_computehosts(self):
raw_host_list = db_api.host_list()
host_list = []
for host in raw_host_list:
host_list.append(self.get_computehost(host['id']))
return host_list
@service_utils.export_context
def create_computehost(self, host_values):
# TODO(sbauza):
# - Exception handling for HostNotFound
host_id = host_values.pop('id', None)
host_name = host_values.pop('name', None)
host_ref = host_id or host_name
if host_ref is None:
raise manager_ex.InvalidHost(host=host_values)
servers = self.inventory.get_servers_per_host(host_ref)
if servers:
raise manager_ex.HostHavingServers(host=host_ref,
servers=servers)
host_details = self.inventory.get_host_details(host_ref)
# NOTE(sbauza): Only the last duplicate name for the same extra capability
# will be stored
to_store = set(host_values.keys()) - set(host_details.keys())
extra_capabilities_keys = to_store
extra_capabilities = dict(
(key, host_values[key]) for key in extra_capabilities_keys
)
self.pool.add_computehost(self.freepool_name, host_ref)
host = None
cantaddextracapability = []
try:
host = db_api.host_create(host_details)
except RuntimeError:
# We need to roll back
# TODO(sbauza): Investigate use of Taskflow for atomic transactions
self.pool.remove_computehost(self.freepool_name, host_ref)
if host:
for key in extra_capabilities:
values = {'computehost_id': host['id'],
'capability_name': key,
'capability_value': extra_capabilities[key],
}
try:
db_api.host_extra_capability_create(values)
except RuntimeError:
cantaddextracapability.append(key)
if cantaddextracapability:
raise manager_ex.CantAddExtraCapability(
keys=cantaddextracapability,
host=host['id'])
if host:
return self.get_computehost(host['id'])
else:
return None
@service_utils.export_context
def update_computehost(self, host_id, values):
# NOTE(sbauza): Only update existing extra capabilities, don't create
# new ones
if values:
cant_update_extra_capability = []
for value in values:
capabilities = db_api.host_extra_capability_get_all_per_name(
host_id,
value,
)
for raw_capability in capabilities:
capability = {
'capability_name': value,
'capability_value': values[value],
}
try:
db_api.host_extra_capability_update(
raw_capability['id'], capability)
except RuntimeError:
cant_update_extra_capability.append(
raw_capability['capability_name'])
if cant_update_extra_capability:
raise manager_ex.CantAddExtraCapability(
host=host_id,
keys=cant_update_extra_capability)
return self.get_computehost(host_id)
@service_utils.export_context
def delete_computehost(self, host_id):
# TODO(sbauza):
# - Check if no leases having this host scheduled
servers = self.inventory.get_servers_per_host(host_id)
if servers:
raise manager_ex.HostHavingServers(host=host_id,
servers=servers)
host = db_api.host_get(host_id)
if not host:
raise manager_ex.HostNotFound(host=host_id)
try:
self.pool.remove_computehost(self.freepool_name,
host['hypervisor_hostname'])
# NOTE(sbauza): Extracapabilities will be destroyed thanks to
# the DB FK.
db_api.host_destroy(host_id)
except RuntimeError:
# Not fatal, but we need to alert the admin that the operation has to be rerun
raise manager_ex.CantRemoveHost(host=host_id,
pool=self.freepool_name)
def _matching_hosts(self, hypervisor_properties, resource_properties,
count_range, start_date, end_date):
"""Return the matching hosts (preferably not allocated)
"""
count_range = count_range.split('-')
min_host = count_range[0]
max_host = count_range[1]
allocated_host_ids = []
not_allocated_host_ids = []
filter_array = []
# TODO(frossigneux) support "or" operator
if hypervisor_properties:
filter_array = self._convert_requirements(
hypervisor_properties)
if resource_properties:
filter_array += self._convert_requirements(
resource_properties)
for host in db_api.host_get_all_by_queries(filter_array):
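# Prefer hosts with no allocations at all; otherwise accept hosts whose
# existing allocations leave the whole requested period free.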
if not db_api.host_allocation_get_all_by_values(
compute_host_id=host['id']):
not_allocated_host_ids.append(host['id'])
elif db_utils.get_free_periods(
host['id'],
start_date,
end_date,
end_date - start_date,
) == [
(start_date, end_date),
]:
allocated_host_ids.append(host['id'])
if len(not_allocated_host_ids) >= int(min_host):
return not_allocated_host_ids[:int(max_host)]
all_host_ids = allocated_host_ids + not_allocated_host_ids
if len(all_host_ids) >= int(min_host):
return all_host_ids[:int(max_host)]
else:
return []
def _convert_requirements(self, requirements):
"""Convert the requirements to an array of strings.
["key op value", "key op value", ...]
"""
# TODO(frossigneux) Support the "or" operator
# Convert text to json
if isinstance(requirements, six.string_types):
requirements = json.loads(requirements)
# Requirement list looks like ['<', '$ram', '1024']
if self._requirements_with_three_elements(requirements):
result = []
if requirements[0] == '=':
requirements[0] = '=='
string = (requirements[1][1:] + " " + requirements[0] + " " +
requirements[2])
result.append(string)
return result
# Remove the 'and' element at the head of the requirement list
elif self._requirements_with_and_keyword(requirements):
return [self._convert_requirements(x)[0]
for x in requirements[1:]]
# Empty requirement list
elif isinstance(requirements, list) and not requirements:
return requirements
else:
raise RuntimeError('Malformed requirements')
def _requirements_with_three_elements(self, requirements):
"""Return true if requirement list looks like ['<', '$ram', '1024']."""
return (isinstance(requirements, list) and
len(requirements) == 3 and
isinstance(requirements[0], six.string_types) and
isinstance(requirements[1], six.string_types) and
isinstance(requirements[2], six.string_types) and
requirements[0] in ['==', '=', '!=', '>=', '<=', '>', '<'] and
len(requirements[1]) > 1 and requirements[1][0] == '$' and
len(requirements[2]) > 1)
def _requirements_with_and_keyword(self, requirements):
return (len(requirements) > 1 and
isinstance(requirements[0], six.string_types) and
requirements[0] == 'and' and
all(self._convert_requirements(x) for x in requirements[1:]))
def _get_hypervisor_from_name(self, hypervisor_name):
"""Return an hypervisor by name or an id."""
hypervisor = None
all_hypervisors = self.nova.hypervisors.list()
for hyp in all_hypervisors:
if hypervisor_name == hyp.hypervisor_hostname:
hypervisor = hyp
if hypervisor:
return hypervisor
else:
raise manager_ex.HypervisorNotFound(pool=hypervisor_name)
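
As an illustration of the requirement syntax handled by _convert_requirements above, here is a standalone sketch (not part of the commit) that reproduces the conversion for the simple and the "and" cases; the expected outputs match the unit tests later in this change:

import json

def convert_requirements(requirements):
    """Illustrative re-implementation of the conversion above.

    '["=", "$memory", "4096"]'  ->  ['memory == 4096']
    '["and", [">", "$memory", "4096"], [">", "$disk", "40"]]'
        ->  ['memory > 4096', 'disk > 40']
    """
    if isinstance(requirements, str):
        requirements = json.loads(requirements)
    if isinstance(requirements, list) and not requirements:
        return []
    if requirements[0] == 'and':
        # Flatten each sub-requirement of the "and" expression.
        return [convert_requirements(r)[0] for r in requirements[1:]]
    op, key, value = requirements
    if op == '=':
        op = '=='
    # Strip the leading '$' from the key name.
    return ['%s %s %s' % (key[1:], op, value)]

print(convert_requirements('["=", "$memory", "4096"]'))
# ['memory == 4096']
print(convert_requirements('["and", [">", "$memory", "4096"], [">", "$disk", "40"]]'))
# ['memory > 4096', 'disk > 40']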


@@ -1,183 +0,0 @@
# -*- coding: utf-8 -*-
#
# Author: François Rossigneux <francois.rossigneux@inria.fr>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from climate import context
from climate.db import api as db_api
from climate.manager import exceptions as manager_exceptions
from climate.plugins import base
from climate.plugins.oshosts import nova_inventory
from climate.plugins.oshosts import reservation_pool as rp
from climate.utils import service as service_utils
class PhysicalHostPlugin(base.BasePlugin):
"""Plugin for physical host resource."""
resource_type = 'physical:host'
title = 'Physical Host Plugin'
description = 'This plugin starts and shutdowns the hosts.'
freepool_name = cfg.CONF[resource_type].aggregate_freepool_name
pool = None
inventory = None
def on_start(self, resource_id):
"""Add the hosts in the pool."""
pass
def on_end(self, resource_id):
"""Remove the hosts from the pool."""
pass
def setup(self, conf):
# Create freepool if not exists
with context.ClimateContext() as ctx:
ctx = ctx.elevated()
if self.pool is None:
self.pool = rp.ReservationPool()
if self.inventory is None:
self.inventory = nova_inventory.NovaInventory()
if not self._freepool_exists():
self.pool.create(name=self.freepool_name, az=None)
def _freepool_exists(self):
try:
self.pool.get_aggregate_from_name_or_id(self.freepool_name)
return True
except manager_exceptions.AggregateNotFound:
return False
def _get_extra_capabilities(self, host_id):
extra_capabilities = {}
raw_extra_capabilities = \
db_api.host_extra_capability_get_all_per_host(host_id)
for capability in raw_extra_capabilities:
extra_capabilities[capability['capability_name']] = \
capability['capability_value']
return extra_capabilities
@service_utils.export_context
def get_computehost(self, host_id):
host = db_api.host_get(host_id)
extra_capabilities = self._get_extra_capabilities(host_id)
if host is not None and extra_capabilities:
res = host.copy()
res.update(extra_capabilities)
return res
else:
return host
@service_utils.export_context
def list_computehosts(self):
raw_host_list = db_api.host_list()
host_list = []
for host in raw_host_list:
host_list.append(self.get_computehost(host['id']))
return host_list
@service_utils.export_context
def create_computehost(self, host_values):
# TODO(sbauza):
# - Exception handling for HostNotFound
host_id = host_values.pop('id', None)
host_name = host_values.pop('name', None)
host_ref = host_id or host_name
if host_ref is None:
raise manager_exceptions.InvalidHost(host=host_values)
servers = self.inventory.get_servers_per_host(host_ref)
if servers:
raise manager_exceptions.HostHavingServers(host=host_ref,
servers=servers)
host_details = self.inventory.get_host_details(host_ref)
# NOTE(sbauza): Only last duplicate name for same extra capability will
# be stored
extra_capabilities_keys = \
set(host_values.keys()) - set(host_details.keys())
extra_capabilities = \
dict((key, host_values[key]) for key in extra_capabilities_keys)
self.pool.add_computehost(self.freepool_name, host_ref)
host = None
cantaddextracapability = []
try:
host = db_api.host_create(host_details)
except RuntimeError:
#We need to rollback
# TODO(sbauza): Investigate use of Taskflow for atomic transactions
self.pool.remove_computehost(self.freepool_name, host_ref)
if host:
for key in extra_capabilities:
values = {'computehost_id': host['id'],
'capability_name': key,
'capability_value': extra_capabilities[key]}
try:
db_api.host_extra_capability_create(values)
except RuntimeError:
cantaddextracapability.append(key)
if cantaddextracapability:
raise manager_exceptions.CantAddExtraCapability(
keys=cantaddextracapability, host=host['id'])
if host:
return self.get_computehost(host['id'])
else:
return None
@service_utils.export_context
def update_computehost(self, host_id, values):
# NOTE (sbauza): Only update existing extra capabilites, don't create
# other ones
if values:
cantupdateextracapability = []
for value in values:
capabilities = \
db_api.host_extra_capability_get_all_per_name(host_id,
value)
for raw_capability in capabilities:
capability = {'capability_name': value,
'capability_value': values[value]}
try:
db_api.host_extra_capability_update(
raw_capability['id'], capability)
except RuntimeError:
cantupdateextracapability.append(
raw_capability['capability_name'])
if cantupdateextracapability:
raise manager_exceptions.CantAddExtraCapability(
host=host_id, keys=cantupdateextracapability)
return self.get_computehost(host_id)
@service_utils.export_context
def delete_computehost(self, host_id):
# TODO(sbauza):
# - Check if no leases having this host scheduled
servers = self.inventory.get_servers_per_host(host_id)
if servers:
raise manager_exceptions.HostHavingServers(host=host_id,
servers=servers)
host = db_api.host_get(host_id)
if not host:
raise manager_exceptions.HostNotFound(host=host_id)
try:
self.pool.remove_computehost(self.freepool_name,
host['hypervisor_hostname'])
# NOTE(sbauza): Extracapabilities will be destroyed thanks to
# the DB FK.
db_api.host_destroy(host_id)
except RuntimeError:
# Nothing so bad, but we need to advert the admin he has to rerun
raise manager_exceptions.CantRemoveHost(host=host_id,
pool=self.freepool_name)


@@ -25,7 +25,7 @@ from climate.db import api as db_api
from climate import exceptions
from climate.manager import service
from climate.plugins import dummy_vm_plugin
from climate.plugins import physical_host_plugin
from climate.plugins.oshosts import host_plugin
from climate import tests
from climate.utils import trusts
@@ -47,8 +47,8 @@ class ServiceTestCase(tests.TestCase):
self.fake_plugin = self.patch(self.dummy_plugin, 'DummyVMPlugin')
self.physical_host_plugin = physical_host_plugin
self.fake_phys_plugin = self.patch(self.physical_host_plugin,
self.host_plugin = host_plugin
self.fake_phys_plugin = self.patch(self.host_plugin,
'PhysicalHostPlugin')
self.manager = self.service.ManagerService('127.0.0.1')


@@ -13,32 +13,45 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
import testtools
from climate import context
from climate.db import api as db_api
from climate.db import utils as db_utils
from climate.manager import exceptions as manager_exceptions
from climate.manager import service
from climate.openstack.common import uuidutils
from climate.plugins.oshosts import host_plugin
from climate.plugins.oshosts import nova_inventory
from climate.plugins.oshosts import reservation_pool as rp
from climate.plugins import physical_host_plugin
from climate import tests
from novaclient import client as nova_client
class AggregateFake(object):
def __init__(self, i, name, hosts):
self.id = i
self.name = name
self.hosts = hosts
class PhysicalHostPlugingSetupOnlyTestCase(tests.TestCase):
def setUp(self):
super(PhysicalHostPlugingSetupOnlyTestCase, self).setUp()
self.context = context
self.patch(self.context, 'ClimateContext')
self.physical_host_plugin = physical_host_plugin
self.fake_phys_plugin = \
self.physical_host_plugin.PhysicalHostPlugin()
self.host_plugin = host_plugin
self.fake_phys_plugin = self.host_plugin.PhysicalHostPlugin()
self.rp = rp
self.nova_inventory = nova_inventory
self.rp_create = self.patch(self.rp.ReservationPool, 'create')
self.db_api = db_api
self.db_host_extra_capability_get_all_per_host = \
self.patch(self.db_api, 'host_extra_capability_get_all_per_host')
self.db_host_extra_capability_get_all_per_host = (
self.patch(self.db_api, 'host_extra_capability_get_all_per_host'))
def test_setup(self):
def fake_setup():
@@ -62,23 +75,25 @@ class PhysicalHostPlugingSetupOnlyTestCase(tests.TestCase):
def test__freepool_exists_with_freepool_missing(self):
def fake_get_aggregate_from_name_or_id(*args, **kwargs):
raise manager_exceptions.AggregateNotFound
mock = self.patch(self.rp.ReservationPool,
'get_aggregate_from_name_or_id')
mock.side_effect = fake_get_aggregate_from_name_or_id
rp_mock = self.patch(self.rp.ReservationPool,
'get_aggregate_from_name_or_id')
rp_mock.side_effect = fake_get_aggregate_from_name_or_id
self.fake_phys_plugin.setup(None)
self.assertEqual(self.fake_phys_plugin._freepool_exists(), False)
def test__get_extra_capabilities_with_values(self):
self.db_host_extra_capability_get_all_per_host.return_value = \
[{'id': 1,
'capability_name': 'foo',
'capability_value': 'bar',
'other': 'value',
'computehost_id': 1},
{'id': 2,
'capability_name': 'buzz',
'capability_value': 'word',
'computehost_id': 1}]
self.db_host_extra_capability_get_all_per_host.return_value = [
{'id': 1,
'capability_name': 'foo',
'capability_value': 'bar',
'other': 'value',
'computehost_id': 1
},
{'id': 2,
'capability_name': 'buzz',
'capability_value': 'word',
'computehost_id': 1
}]
res = self.fake_phys_plugin._get_extra_capabilities(1)
self.assertEqual({'foo': 'bar', 'buzz': 'word'}, res)
@@ -89,29 +104,35 @@ class PhysicalHostPlugingSetupOnlyTestCase(tests.TestCase):
class PhysicalHostPluginTestCase(tests.TestCase):
def setUp(self):
super(PhysicalHostPluginTestCase, self).setUp()
self.context = context
self.patch(self.context, 'ClimateContext')
self.nova_client = nova_client
self.nova = self.patch(self.nova_client, 'Client').return_value
self.service = service
self.manager = self.service.ManagerService('127.0.0.1')
self.fake_host_id = '1'
self.fake_host = {'id': self.fake_host_id,
'hypervisor_hostname': 'foo',
'vcpus': 4,
'cpu_info': 'foo',
'hypervisor_type': 'xen',
'hypervisor_version': 1,
'memory_mb': 8192,
'local_gb': 10}
self.fake_host = {
'id': self.fake_host_id,
'hypervisor_hostname': 'foo',
'vcpus': 4,
'cpu_info': 'foo',
'hypervisor_type': 'xen',
'hypervisor_version': 1,
'memory_mb': 8192,
'local_gb': 10,
}
self.physical_host_plugin = physical_host_plugin
self.fake_phys_plugin = \
self.physical_host_plugin.PhysicalHostPlugin()
self.host_plugin = host_plugin
self.fake_phys_plugin = self.host_plugin.PhysicalHostPlugin()
self.db_api = db_api
self.db_utils = db_utils
self.db_host_get = self.patch(self.db_api, 'host_get')
self.db_host_get.return_value = self.fake_host
@@ -120,14 +141,17 @@ class PhysicalHostPluginTestCase(tests.TestCase):
self.db_host_update = self.patch(self.db_api, 'host_update')
self.db_host_destroy = self.patch(self.db_api, 'host_destroy')
self.db_host_extra_capability_get_all_per_host = \
self.patch(self.db_api, 'host_extra_capability_get_all_per_host')
self.db_host_extra_capability_get_all_per_name = \
self.patch(self.db_api, 'host_extra_capability_get_all_per_name')
self.db_host_extra_capability_create = \
self.patch(self.db_api, 'host_extra_capability_create')
self.db_host_extra_capability_update = \
self.patch(self.db_api, 'host_extra_capability_update')
self.db_host_extra_capability_get_all_per_host = self.patch(
self.db_api, 'host_extra_capability_get_all_per_host')
self.db_host_extra_capability_get_all_per_name = self.patch(
self.db_api, 'host_extra_capability_get_all_per_name')
self.db_host_extra_capability_create = self.patch(
self.db_api, 'host_extra_capability_create')
self.db_host_extra_capability_update = self.patch(
self.db_api, 'host_extra_capability_update')
self.rp = rp
self.nova_inventory = nova_inventory
@@ -141,10 +165,13 @@ class PhysicalHostPluginTestCase(tests.TestCase):
self.get_servers_per_host = self.patch(
self.nova_inventory.NovaInventory, 'get_servers_per_host')
self.get_servers_per_host.return_value = None
self.get_extra_capabilities = self.patch(self.fake_phys_plugin,
'_get_extra_capabilities')
self.get_extra_capabilities.return_value = {'foo': 'bar',
'buzz': 'word'}
self.get_extra_capabilities = self.patch(
self.fake_phys_plugin, '_get_extra_capabilities')
self.get_extra_capabilities.return_value = {
'foo': 'bar',
'buzz': 'word',
}
self.fake_phys_plugin.setup(None)
def test_get_host(self):
@@ -179,7 +206,8 @@ class PhysicalHostPluginTestCase(tests.TestCase):
fake_request = fake_host.copy()
fake_capa = {'computehost_id': '1',
'capability_name': 'foo',
'capability_value': 'bar'}
'capability_value': 'bar',
}
self.get_extra_capabilities.return_value = {'foo': 'bar'}
self.db_host_create.return_value = self.fake_host
host = self.fake_phys_plugin.create_computehost(fake_request)
@@ -221,10 +249,12 @@ class PhysicalHostPluginTestCase(tests.TestCase):
def test_update_host(self):
host_values = {'foo': 'baz'}
self.db_host_extra_capability_get_all_per_name.return_value = \
[{'id': '1',
self.db_host_extra_capability_get_all_per_name.return_value = [
{'id': '1',
'capability_name': 'foo',
'capability_value': 'bar'}]
'capability_value': 'bar'
},
]
self.fake_phys_plugin.update_computehost(self.fake_host_id,
host_values)
self.db_host_extra_capability_update.assert_called_once_with(
@@ -234,10 +264,12 @@ class PhysicalHostPluginTestCase(tests.TestCase):
def fake_db_host_extra_capability_update(*args, **kwargs):
raise RuntimeError
host_values = {'foo': 'baz'}
self.db_host_extra_capability_get_all_per_name.return_value = \
[{'id': '1',
self.db_host_extra_capability_get_all_per_name.return_value = [
{'id': '1',
'capability_name': 'foo',
'capability_value': 'bar'}]
'capability_value': 'bar'
},
]
self.db_host_extra_capability_update.side_effect = \
fake_db_host_extra_capability_update
self.assertRaises(manager_exceptions.CantAddExtraCapability,
@@ -268,3 +300,322 @@ class PhysicalHostPluginTestCase(tests.TestCase):
self.assertRaises(manager_exceptions.CantRemoveHost,
self.fake_phys_plugin.delete_computehost,
self.fake_host_id)
def test_create_reservation_no_hosts_available(self):
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'min': u'1',
'max': u'1',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': '',
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00),
'resource_type': u'physical:host',
}
reservation_values = {
'id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'resource_id': '1',
'resource_type': u'physical:host',
'status': 'pending',
}
generate_uuid = self.patch(uuidutils, 'generate_uuid')
generate_uuid.return_value = u'441c1476-9f8f-4700-9f30-cd9b6fef3509'
self.rp_create.return_value = mock.MagicMock(id='1')
reservation_create = self.patch(self.db_api, 'reservation_create')
reservation_create.return_value = {
'id': u'f9894fcf-e2ed-41e9-8a4c-92fac332608e',
}
host_reservation_create = self.patch(self.db_api,
'host_reservation_create')
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = []
self.assertRaises(RuntimeError,
self.fake_phys_plugin.create_reservation, values)
reservation_create.assert_called_once_with(reservation_values)
host_values = {
'reservation_id': u'f9894fcf-e2ed-41e9-8a4c-92fac332608e',
'resource_properties': '',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'count_range': '1-1',
'status': 'pending'
}
host_reservation_create.assert_called_once_with(host_values)
def test_create_reservation_hosts_available(self):
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'min': u'1',
'max': u'1',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': '',
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00),
'resource_type': u'physical:host',
}
reservation_values = {
'id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'resource_id': '1',
'resource_type': u'physical:host',
'status': 'pending',
}
generate_uuid = self.patch(uuidutils, 'generate_uuid')
generate_uuid.return_value = u'441c1476-9f8f-4700-9f30-cd9b6fef3509'
self.rp_create.return_value = mock.MagicMock(id='1')
reservation_create = self.patch(self.db_api, 'reservation_create')
reservation_create.return_value = {
'id': u'f9894fcf-e2ed-41e9-8a4c-92fac332608e',
}
host_reservation_create = self.patch(self.db_api,
'host_reservation_create')
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = ['host1', 'host2']
host_allocation_create = self.patch(
self.db_api,
'host_allocation_create')
self.fake_phys_plugin.create_reservation(values)
reservation_create.assert_called_once_with(reservation_values)
host_values = {
'reservation_id': u'f9894fcf-e2ed-41e9-8a4c-92fac332608e',
'resource_properties': '',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'count_range': '1-1',
'status': 'pending',
}
host_reservation_create.assert_called_once_with(host_values)
calls = [
mock.call(
{'compute_host_id': 'host1',
'reservation_id': u'f9894fcf-e2ed-41e9-8a4c-92fac332608e',
}),
mock.call(
{'compute_host_id': 'host2',
'reservation_id': u'f9894fcf-e2ed-41e9-8a4c-92fac332608e',
}),
]
host_allocation_create.assert_has_calls(calls)
def test_on_start(self):
reservation_get_all_by_values = self.patch(
self.db_api, 'reservation_get_all_by_values')
reservation_get_all_by_values.return_value = [
{
'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
'resource_id': u'04de74e8-193a-49d2-9ab8-cba7b49e45e8',
}
]
host_allocation_get_all_by_values = self.patch(
self.db_api, 'host_allocation_get_all_by_values')
host_allocation_get_all_by_values.return_value = [
{'compute_host_id': 'host1'},
]
host_get = self.patch(self.db_api, 'host_get')
host_get.return_value = {'hypervisor_hostname': 'host1_hostname'}
add_computehost = self.patch(
self.rp.ReservationPool, 'add_computehost')
self.fake_phys_plugin.on_start(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
add_computehost.assert_called_with(
u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', 'host1_hostname')
def test_on_end_with_instances(self):
reservation_get_all_by_values = self.patch(
self.db_api,
'reservation_get_all_by_values')
reservation_get_all_by_values.return_value = [
{
'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
'resource_id': u'04de74e8-193a-49d2-9ab8-cba7b49e45e8',
}
]
reservation_update = self.patch(self.db_api, 'reservation_update')
host_reservation_get_by_reservation_id = self.patch(
self.db_api,
'host_reservation_get_by_reservation_id')
host_reservation_get_by_reservation_id.return_value = {
'id': u'35fc4e6a-ba57-4a36-be30-6012377a0387',
}
host_reservation_update = self.patch(
self.db_api,
'host_reservation_update')
host_allocation_get_all_by_values = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all_by_values.return_value = [
{'id': u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f',
'compute_host_id': u'cdae2a65-236f-475a-977d-f6ad82f828b7',
},
]
host_allocation_destroy = self.patch(
self.db_api,
'host_allocation_destroy')
delete = self.patch(self.rp.ReservationPool, 'delete')
self.patch(self.fake_phys_plugin, '_get_hypervisor_from_name')
get_hypervisors = self.patch(self.nova.hypervisors, 'get')
get_hypervisors.return_value = mock.MagicMock(running_vms=1)
self.fake_phys_plugin.on_end(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
reservation_update.assert_called_with(
u'593e7028-c0d1-4d76-8642-2ffd890b324c', {'status': 'completed'})
host_reservation_update.assert_called_with(
u'35fc4e6a-ba57-4a36-be30-6012377a0387', {'status': 'completed'})
host_allocation_destroy.assert_called_with(
u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f')
assert not delete.called
def test_on_end_without_instances(self):
reservation_get_all_by_values = self.patch(
self.db_api,
'reservation_get_all_by_values')
reservation_get_all_by_values.return_value = [
{
'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
'resource_id': u'04de74e8-193a-49d2-9ab8-cba7b49e45e8',
},
]
reservation_update = self.patch(self.db_api, 'reservation_update')
host_reservation_get_by_reservation_id = self.patch(
self.db_api,
'host_reservation_get_by_reservation_id')
host_reservation_get_by_reservation_id.return_value = {
'id': u'35fc4e6a-ba57-4a36-be30-6012377a0387',
}
host_reservation_update = self.patch(
self.db_api,
'host_reservation_update')
host_allocation_get_all_by_values = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all_by_values.return_value = [
{'id': u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f',
'compute_host_id': u'cdae2a65-236f-475a-977d-f6ad82f828b7',
},
]
host_allocation_destroy = self.patch(
self.db_api,
'host_allocation_destroy')
delete = self.patch(self.rp.ReservationPool, 'delete')
self.patch(self.fake_phys_plugin, '_get_hypervisor_from_name')
get_hypervisors = self.patch(self.nova.hypervisors, 'get')
get_hypervisors.return_value = mock.MagicMock(running_vms=0)
self.fake_phys_plugin.on_end(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
reservation_update.assert_called_with(
u'593e7028-c0d1-4d76-8642-2ffd890b324c', {'status': 'completed'})
host_reservation_update.assert_called_with(
u'35fc4e6a-ba57-4a36-be30-6012377a0387', {'status': 'completed'})
host_allocation_destroy.assert_called_with(
u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f')
delete.assert_called_with(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
def test_matching_hosts_not_allocated_hosts(self):
def host_allocation_get_all_by_values(**kwargs):
if kwargs['compute_host_id'] == 'host1':
return True
host_get = self.patch(
self.db_api,
'host_get_all_by_queries')
host_get.return_value = [
{'id': 'host1'},
{'id': 'host2'},
{'id': 'host3'},
]
host_get = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_get.side_effect = host_allocation_get_all_by_values
host_get = self.patch(
self.db_utils,
'get_free_periods')
host_get.return_value = [
(datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00)),
]
result = self.fake_phys_plugin._matching_hosts(
'[]', '[]', '1-3',
datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00))
self.assertEqual(['host2', 'host3'], result)
def test_matching_hosts_allocated_hosts(self):
def host_allocation_get_all_by_values(**kwargs):
if kwargs['compute_host_id'] == 'host1':
return True
host_get = self.patch(
self.db_api,
'host_get_all_by_queries')
host_get.return_value = [
{'id': 'host1'},
{'id': 'host2'},
{'id': 'host3'},
]
host_get = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_get.side_effect = host_allocation_get_all_by_values
host_get = self.patch(
self.db_utils,
'get_free_periods')
host_get.return_value = [
(datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00)),
]
result = self.fake_phys_plugin._matching_hosts(
'[]', '[]', '3-3',
datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00))
self.assertEqual(['host1', 'host2', 'host3'], result)
def test_matching_hosts_not_matching(self):
host_get = self.patch(
self.db_api,
'host_get_all_by_queries')
host_get.return_value = []
result = self.fake_phys_plugin._matching_hosts(
'["=", "$memory_mb", "2048"]', '[]', '1-1',
datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00))
self.assertEqual([], result)
def test_convert_requirements_empty(self):
request = '[]'
result = self.fake_phys_plugin._convert_requirements(request)
self.assertEqual([], result)
def test_convert_requirements_small(self):
request = '["=", "$memory", "4096"]'
result = self.fake_phys_plugin._convert_requirements(request)
self.assertEqual(['memory == 4096'], result)
def test_convert_requirements_with_incorrect_syntax_1(self):
self.assertRaises(
RuntimeError, self.fake_phys_plugin._convert_requirements,
'["a", "$memory", "4096"]')
def test_convert_requirements_with_incorrect_syntax_2(self):
self.assertRaises(
RuntimeError, self.fake_phys_plugin._convert_requirements,
'["=", "memory", "4096"]')
def test_convert_requirements_with_incorrect_syntax_3(self):
self.assertRaises(
RuntimeError, self.fake_phys_plugin._convert_requirements,
'["=", "$memory", 4096]')
def test_convert_requirements_complex(self):
request = '["and", [">", "$memory", "4096"], [">", "$disk", "40"]]'
result = self.fake_phys_plugin._convert_requirements(request)
self.assertEqual(['memory > 4096', 'disk > 40'], result)
def test_convert_requirements_complex_with_incorrect_syntax_1(self):
self.assertRaises(
RuntimeError, self.fake_phys_plugin._convert_requirements,
'["and", [">", "memory", "4096"], [">", "$disk", "40"]]')
def test_convert_requirements_complex_with_incorrect_syntax_2(self):
self.assertRaises(
RuntimeError, self.fake_phys_plugin._convert_requirements,
'["fail", [">", "$memory", "4096"], [">", "$disk", "40"]]')


@@ -35,7 +35,7 @@ console_scripts =
climate.resource.plugins =
dummy.vm.plugin=climate.plugins.dummy_vm_plugin:DummyVMPlugin
physical.host.plugin=climate.plugins.physical_host_plugin:PhysicalHostPlugin
physical.host.plugin=climate.plugins.oshosts.host_plugin:PhysicalHostPlugin
[build_sphinx]
all_files = 1
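
For reference, the renamed entry point can be resolved like any other setuptools entry point once the package is installed. A minimal sketch (illustrative, not part of the commit):

import pkg_resources

# List every resource plugin declared under 'climate.resource.plugins' in
# setup.cfg and load its class, e.g. physical.host.plugin ->
# climate.plugins.oshosts.host_plugin:PhysicalHostPlugin.
for entry_point in pkg_resources.iter_entry_points('climate.resource.plugins'):
    plugin_cls = entry_point.load()
    print(entry_point.name, plugin_cls.resource_type)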