Added use of trusts to Physical Host plugin

Now the physical host plugin uses trusts to communicate with Nova.

Added a decorator in order to create a trust in certain API calls.
Added the trust_id to the ComputeHost model. Added a DB migration for
this change and a unit test for it.
Added the MissingTrustId exception, which is raised when no trust_id is
provided to some methods of the RPC API.
Removed all the configuration keys related to the admin Climate user.
Modified NovaClientWrapper in order to use the information stored in
context.

Change-Id: I0b83a933c0d72871654f3c6252be5d5e2c4cfd54
Closes-Bug: #1285585
changes/58/84158/12
Pablo Andres Fuente 9 years ago
parent b5fd4387e0
commit c9b7307cf3

@ -15,6 +15,7 @@
from climate.manager.oshosts import rpcapi as manager_rpcapi
from climate import policy
from climate.utils import trusts
class API(object):
@ -27,6 +28,7 @@ class API(object):
return self.manager_rpcapi.list_computehosts()
@policy.authorize('oshosts', 'create')
@trusts.use_trust_auth()
def create_computehost(self, data):
"""Create new computehost.

@ -41,17 +41,13 @@ class API(object):
return self.manager_rpcapi.list_leases(project_id=project_id)
@policy.authorize('leases', 'create')
@trusts.use_trust_auth()
def create_lease(self, data):
"""Create new lease.
:param data: New lease characteristics.
:type data: dict
"""
# here API should go to Keystone API v3 and create trust
trust = trusts.create_trust()
trust_id = trust.id
data.update({'trust_id': trust_id})
return self.manager_rpcapi.create_lease(data)
@policy.authorize('leases', 'get')

@ -23,6 +23,7 @@ from climate.api.v2.controllers import types
from climate import exceptions
from climate.openstack.common.gettextutils import _ # noqa
from climate import policy
from climate.utils import trusts
from climate.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@ -58,6 +59,9 @@ class Host(base._Base):
cpu_info = types.CPUInfo()
"The CPU info JSON data given by the hypervisor"
trust_id = types.UuidType()
"The ID of the trust created for delegating the rights of the user"
extra_capas = wtypes.DictType(wtypes.text, types.TextOrInteger())
"Extra capabilities for the host"
@ -125,6 +129,7 @@ class HostsController(extensions.BaseController):
@policy.authorize('oshosts', 'create')
@wsme_pecan.wsexpose(Host, body=Host, status_code=202)
@trusts.use_trust_auth()
def post(self, host):
"""Creates a new host.

@ -114,19 +114,15 @@ class LeasesController(extensions.BaseController):
@policy.authorize('leases', 'create')
@wsme_pecan.wsexpose(Lease, body=Lease, status_code=202)
@trusts.use_trust_auth()
def post(self, lease):
"""Creates a new lease.
:param lease: a lease within the request body.
"""
# here API should go to Keystone API v3 and create trust
trust = trusts.create_trust()
trust_id = trust.id
lease_dct = lease.as_dict()
lease_dct.update({'trust_id': trust_id})
# FIXME(sbauza): DB exceptions are currently catched and return a lease
# equal to None instead of being sent to the API
lease_dct = lease.as_dict()
lease = pecan.request.rpcapi.create_lease(lease_dct)
if lease is not None:
return Lease.convert(lease)

@ -0,0 +1,61 @@
# Copyright 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add trust_id to ComputeHost
Revision ID: 1fd6c2eded89
Revises: 23d6240b51b2
Create Date: 2014-03-28 01:12:02.735519
"""
# revision identifiers, used by Alembic.
revision = '1fd6c2eded89'
down_revision = '23d6240b51b2'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the ``trust_id`` column to the computehosts table.

    The column is first added as nullable, existing rows are backfilled
    with an empty string, and the column is then tightened to NOT NULL.
    The tightening step is skipped on SQLite, which cannot alter a
    column's nullability.
    """
    op.add_column(
        'computehosts',
        sa.Column('trust_id', sa.String(length=36), nullable=True))
    bind = op.get_bind()
    if bind.engine.name != 'sqlite':
        # NOTE: the backfill is done with raw SQL because PostgreSQL
        # fails when it is expressed through SQLAlchemy.
        bind.execute("UPDATE computehosts SET trust_id = ''")
        op.alter_column('computehosts', 'trust_id',
                        existing_type=sa.String(length=36),
                        nullable=False)
def downgrade():
    """Remove the ``trust_id`` column from the computehosts table.

    On SQLite (used only in tests) the column cannot simply be dropped,
    so the table is rebuilt by copying every column except ``trust_id``
    into a temporary table and renaming it back.
    """
    engine = op.get_bind().engine
    if engine.name == 'sqlite':
        # Only for testing purposes with sqlite: rebuild the table
        # instead of DROP COLUMN, which sqlite does not support here.
        op.execute('CREATE TABLE tmp_computehosts as SELECT id, '
                   'vcpus, cpu_info, hypervisor_type, '
                   'hypervisor_version, hypervisor_hostname, service_name, '
                   'memory_mb, local_gb, status '
                   'FROM computehosts')
        op.execute('DROP TABLE computehosts')
        op.execute('ALTER TABLE tmp_computehosts RENAME TO computehosts')
        return
    op.drop_column('computehosts', 'trust_id')

@ -185,6 +185,7 @@ class ComputeHost(mb.ClimateBase):
memory_mb = sa.Column(sa.Integer, nullable=False)
local_gb = sa.Column(sa.Integer, nullable=False)
status = sa.Column(sa.String(13))
trust_id = sa.Column(sa.String(36), nullable=False)
computehost_extra_capabilities = relationship('ComputeHostExtraCapability',
cascade="all,delete",
backref='computehost',

@ -90,6 +90,10 @@ class LeaseNameAlreadyExists(exceptions.ClimateException):
msg_fmt = _("The lease with name: %(name)s already exists")
class MissingTrustId(exceptions.ClimateException):
    """Raised when a call that requires a trust_id is given none."""
    msg_fmt = _("A trust id is required")
# oshost plugin related exceptions
class CantAddExtraCapability(exceptions.ClimateException):
@ -137,3 +141,7 @@ class InvalidState(exceptions.ClimateException):
class InvalidStateUpdate(InvalidState):
msg_fmt = _("Unable to update ID %(id)s state with %(action)s:%(status)s")
class ProjectIdNotFound(exceptions.ClimateException):
    """Raised when the current context carries no project_id."""
    msg_fmt = _("No project_id found in current context")

@ -20,7 +20,6 @@ import eventlet
from oslo.config import cfg
from stevedore import enabled
from climate import context
from climate.db import api as db_api
from climate.db import exceptions as db_ex
from climate import exceptions as common_ex
@ -173,6 +172,11 @@ class ManagerService(service_utils.RPCServer):
Return either the model of created lease or None if any error.
"""
try:
trust_id = lease_values.pop('trust_id')
except KeyError:
raise exceptions.MissingTrustId()
# Remove and keep reservation values
reservations = lease_values.pop("reservations", [])
@ -196,78 +200,80 @@ class ManagerService(service_utils.RPCServer):
raise common_ex.NotAuthorized(
'Start date must later than current date')
ctx = context.current()
lease_values['user_id'] = ctx.user_id
lease_values['project_id'] = ctx.project_id
lease_values['start_date'] = start_date
lease_values['end_date'] = end_date
if not lease_values.get('events'):
lease_values['events'] = []
lease_values['events'].append({'event_type': 'start_lease',
'time': start_date,
'status': 'UNDONE'})
lease_values['events'].append({'event_type': 'end_lease',
'time': end_date,
'status': 'UNDONE'})
before_end_date = lease_values.get('before_end_notification', None)
if before_end_date:
# incoming param. Validation check
try:
before_end_date = self._date_from_string(
before_end_date)
self._check_date_within_lease_limits(before_end_date,
lease_values)
except common_ex.ClimateException as e:
LOG.error("Invalid before_end_date param. %s" % e.message)
raise e
elif CONF.manager.notify_hours_before_lease_end > 0:
delta = datetime.timedelta(
hours=CONF.manager.notify_hours_before_lease_end)
before_end_date = lease_values['end_date'] - delta
if before_end_date:
event = {'event_type': 'before_end_lease',
'status': 'UNDONE'}
lease_values['events'].append(event)
self._update_before_end_event_date(event, before_end_date,
lease_values)
with trusts.create_ctx_from_trust(trust_id) as ctx:
lease_values['user_id'] = ctx.user_id
lease_values['project_id'] = ctx.project_id
lease_values['start_date'] = start_date
lease_values['end_date'] = end_date
if not lease_values.get('events'):
lease_values['events'] = []
lease_values['events'].append({'event_type': 'start_lease',
'time': start_date,
'status': 'UNDONE'})
lease_values['events'].append({'event_type': 'end_lease',
'time': end_date,
'status': 'UNDONE'})
before_end_date = lease_values.get('before_end_notification', None)
if before_end_date:
# incoming param. Validation check
try:
before_end_date = self._date_from_string(
before_end_date)
self._check_date_within_lease_limits(before_end_date,
lease_values)
except common_ex.ClimateException as e:
LOG.error("Invalid before_end_date param. %s" % e.message)
raise e
elif CONF.manager.notify_hours_before_lease_end > 0:
delta = datetime.timedelta(
hours=CONF.manager.notify_hours_before_lease_end)
before_end_date = lease_values['end_date'] - delta
if before_end_date:
event = {'event_type': 'before_end_lease',
'status': 'UNDONE'}
lease_values['events'].append(event)
self._update_before_end_event_date(event, before_end_date,
lease_values)
try:
lease = db_api.lease_create(lease_values)
lease_id = lease['id']
except db_ex.ClimateDBDuplicateEntry:
LOG.exception('Cannot create a lease - duplicated lease name')
raise exceptions.LeaseNameAlreadyExists(name=lease_values['name'])
except db_ex.ClimateDBException:
LOG.exception('Cannot create a lease')
raise
else:
try:
for reservation in reservations:
reservation['lease_id'] = lease['id']
reservation['start_date'] = lease['start_date']
reservation['end_date'] = lease['end_date']
resource_type = reservation['resource_type']
if resource_type in self.plugins:
self.plugins[resource_type].create_reservation(
reservation)
else:
raise exceptions.UnsupportedResourceType(resource_type)
except (exceptions.UnsupportedResourceType,
common_ex.ClimateException):
LOG.exception("Failed to create reservation for a lease. "
"Rollback the lease and associated reservations")
db_api.lease_destroy(lease_id)
lease = db_api.lease_create(lease_values)
lease_id = lease['id']
except db_ex.ClimateDBDuplicateEntry:
LOG.exception('Cannot create a lease - duplicated lease name')
raise exceptions.LeaseNameAlreadyExists(
name=lease_values['name'])
except db_ex.ClimateDBException:
LOG.exception('Cannot create a lease')
raise
else:
lease = db_api.lease_get(lease['id'])
with trusts.create_ctx_from_trust(lease['trust_id']) as ctx:
try:
for reservation in reservations:
reservation['lease_id'] = lease['id']
reservation['start_date'] = lease['start_date']
reservation['end_date'] = lease['end_date']
resource_type = reservation['resource_type']
if resource_type in self.plugins:
self.plugins[resource_type].create_reservation(
reservation)
else:
raise exceptions.UnsupportedResourceType(
resource_type)
except (exceptions.UnsupportedResourceType,
common_ex.ClimateException):
LOG.exception("Failed to create reservation for a lease. "
"Rollback the lease and associated "
"reservations")
db_api.lease_destroy(lease_id)
raise
else:
lease = db_api.lease_get(lease['id'])
self._send_notification(lease, ctx, events=['create'])
return lease
return lease
def update_lease(self, lease_id, values):
if not values:
@ -320,22 +326,25 @@ class ManagerService(service_utils.RPCServer):
raise common_ex.NotAuthorized(
'End date must be later than current and start date')
if before_end_date:
try:
before_end_date = self._date_from_string(before_end_date)
self._check_date_within_lease_limits(before_end_date, values)
except common_ex.ClimateException as e:
LOG.error("Invalid before_end_date param. %s" % e.message)
raise e
#TODO(frossigneux) rollback if an exception is raised
for reservation in \
db_api.reservation_get_all_by_lease_id(lease_id):
reservation['start_date'] = values['start_date']
reservation['end_date'] = values['end_date']
resource_type = reservation['resource_type']
self.plugins[resource_type].update_reservation(
reservation['id'], reservation)
with trusts.create_ctx_from_trust(lease['trust_id']):
if before_end_date:
try:
before_end_date = self._date_from_string(before_end_date)
self._check_date_within_lease_limits(before_end_date,
values)
except common_ex.ClimateException as e:
LOG.error("Invalid before_end_date param. %s" % e.message)
raise e
#TODO(frossigneux) rollback if an exception is raised
for reservation in \
db_api.reservation_get_all_by_lease_id(lease_id):
reservation['start_date'] = values['start_date']
reservation['end_date'] = values['end_date']
resource_type = reservation['resource_type']
self.plugins[resource_type].update_reservation(
reservation['id'],
reservation)
event = db_api.event_get_first_sorted_by_filters(
'lease_id',

@ -13,20 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
RESOURCE_TYPE = u'physical:host'
admin_opts = [
cfg.StrOpt('climate_username',
default='climate_admin',
help='Name of the user for write operations'),
cfg.StrOpt('climate_password',
default='climate_password',
help='Password of the user for write operations'),
cfg.StrOpt('climate_project_name',
default='admin',
help='Project of the user for write operations'),
]
cfg.CONF.register_opts(admin_opts, group=RESOURCE_TYPE)

@ -21,7 +21,6 @@ import uuid
from oslo.config import cfg
from climate import context
from climate.db import api as db_api
from climate.db import exceptions as db_ex
from climate.db import utils as db_utils
@ -31,6 +30,7 @@ from climate.plugins import oshosts as plugin
from climate.plugins.oshosts import nova_inventory
from climate.plugins.oshosts import reservation_pool as rp
from climate.utils.openstack import nova
from climate.utils import trusts
plugin_opts = [
cfg.StrOpt('on_end',
@ -53,17 +53,10 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
description = 'This plugin starts and shutdowns the hosts.'
freepool_name = CONF[resource_type].aggregate_freepool_name
pool = None
inventory = None
def __init__(self):
super(PhysicalHostPlugin, self).__init__()
# Used by nova cli
config = cfg.CONF[self.resource_type]
self.username = config.climate_username
self.api_key = config.climate_password
self.project_id = config.climate_project_name
def create_reservation(self, values):
"""Create reservation."""
pool = rp.ReservationPool()
@ -103,7 +96,8 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
"""Update reservation."""
reservation = db_api.reservation_get(reservation_id)
lease = db_api.lease_get(reservation['lease_id'])
hosts_in_pool = self.pool.get_computehosts(
pool = rp.ReservationPool()
hosts_in_pool = pool.get_computehosts(
reservation['resource_id'])
if (values['start_date'] < lease['start_date'] or
values['end_date'] > lease['end_date']):
@ -149,8 +143,8 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
if hosts_in_pool:
old_hosts = [allocation['compute_host_id']
for allocation in allocations]
self.pool.remove_computehost(reservation['resource_id'],
old_hosts)
pool.remove_computehost(reservation['resource_id'],
old_hosts)
for allocation in allocations:
db_api.host_allocation_destroy(allocation['id'])
for host_id in host_ids:
@ -159,8 +153,8 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
'reservation_id': reservation_id})
if hosts_in_pool:
host = db_api.host_get(host_id)
self.pool.add_computehost(reservation['resource_id'],
host['service_name'])
pool.add_computehost(reservation['resource_id'],
host['service_name'])
def on_start(self, resource_id):
"""Add the hosts in the pool."""
@ -197,15 +191,6 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
pool.delete(reservation['resource_id'])
#TODO(frossigneux) Kill, migrate, or increase fees...
def setup(self, conf):
# Create freepool if not exists
with context.ClimateContext() as ctx:
ctx = ctx.elevated()
if self.pool is None:
self.pool = rp.ReservationPool()
if self.inventory is None:
self.inventory = nova_inventory.NovaInventory()
def _get_extra_capabilities(self, host_id):
extra_capabilities = {}
raw_extra_capabilities = \
@ -237,52 +222,63 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
# - Exception handling for HostNotFound
host_id = host_values.pop('id', None)
host_name = host_values.pop('name', None)
try:
trust_id = host_values.pop('trust_id')
except KeyError:
raise manager_ex.MissingTrustId()
host_ref = host_id or host_name
if host_ref is None:
raise manager_ex.InvalidHost(host=host_values)
servers = self.inventory.get_servers_per_host(host_ref)
if servers:
raise manager_ex.HostHavingServers(host=host_ref,
servers=servers)
host_details = self.inventory.get_host_details(host_ref)
# NOTE(sbauza): Only last duplicate name for same extra capability will
# be stored
to_store = set(host_values.keys()) - set(host_details.keys())
extra_capabilities_keys = to_store
extra_capabilities = dict(
(key, host_values[key]) for key in extra_capabilities_keys
)
self.pool.add_computehost(self.freepool_name,
host_details['service_name'])
host = None
cantaddextracapability = []
try:
host = db_api.host_create(host_details)
except db_ex.ClimateDBException:
#We need to rollback
# TODO(sbauza): Investigate use of Taskflow for atomic transactions
self.pool.remove_computehost(self.freepool_name,
host_details['service_name'])
if host:
for key in extra_capabilities:
values = {'computehost_id': host['id'],
'capability_name': key,
'capability_value': extra_capabilities[key],
}
try:
db_api.host_extra_capability_create(values)
except db_ex.ClimateDBException:
cantaddextracapability.append(key)
if cantaddextracapability:
raise manager_ex.CantAddExtraCapability(
keys=cantaddextracapability,
host=host['id'])
if host:
return self.get_computehost(host['id'])
else:
return None
with trusts.create_ctx_from_trust(trust_id):
inventory = nova_inventory.NovaInventory()
servers = inventory.get_servers_per_host(host_ref)
if servers:
raise manager_ex.HostHavingServers(host=host_ref,
servers=servers)
host_details = inventory.get_host_details(host_ref)
# NOTE(sbauza): Only last duplicate name for same extra capability
# will be stored
to_store = set(host_values.keys()) - set(host_details.keys())
extra_capabilities_keys = to_store
extra_capabilities = dict(
(key, host_values[key]) for key in extra_capabilities_keys
)
pool = rp.ReservationPool()
pool.add_computehost(self.freepool_name,
host_details['service_name'])
host = None
cantaddextracapability = []
try:
if trust_id:
host_details.update({'trust_id': trust_id})
host = db_api.host_create(host_details)
except db_ex.ClimateDBException:
#We need to rollback
# TODO(sbauza): Investigate use of Taskflow for atomic
# transactions
pool.remove_computehost(self.freepool_name,
host_details['service_name'])
if host:
for key in extra_capabilities:
values = {'computehost_id': host['id'],
'capability_name': key,
'capability_value': extra_capabilities[key],
}
try:
db_api.host_extra_capability_create(values)
except db_ex.ClimateDBException:
cantaddextracapability.append(key)
if cantaddextracapability:
raise manager_ex.CantAddExtraCapability(
keys=cantaddextracapability,
host=host['id'])
if host:
return self.get_computehost(host['id'])
else:
return None
def update_computehost(self, host_id, values):
# NOTE (sbauza): Only update existing extra capabilites, don't create
@ -316,23 +312,28 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
if not host:
raise manager_ex.HostNotFound(host=host_id)
# TODO(sbauza):
# - Check if no leases having this host scheduled
servers = self.inventory.get_servers_per_host(
host['hypervisor_hostname'])
if servers:
raise manager_ex.HostHavingServers(
host=host['hypervisor_hostname'], servers=servers)
try:
self.pool.remove_computehost(self.freepool_name,
host['service_name'])
# NOTE(sbauza): Extracapabilities will be destroyed thanks to
# the DB FK.
db_api.host_destroy(host_id)
except db_ex.ClimateDBException:
# Nothing so bad, but we need to advert the admin he has to rerun
raise manager_ex.CantRemoveHost(host=host_id,
pool=self.freepool_name)
with trusts.create_ctx_from_trust(host['trust_id']):
# TODO(sbauza):
# - Check if no leases having this host scheduled
inventory = nova_inventory.NovaInventory()
servers = inventory.get_servers_per_host(
host['hypervisor_hostname'])
if servers:
raise manager_ex.HostHavingServers(
host=host['hypervisor_hostname'], servers=servers)
try:
pool = rp.ReservationPool()
pool.remove_computehost(self.freepool_name,
host['service_name'])
# NOTE(sbauza): Extracapabilities will be destroyed thanks to
# the DB FK.
db_api.host_destroy(host_id)
except db_ex.ClimateDBException:
# Nothing so bad, but we need to advert the admin
# he has to rerun
raise manager_ex.CantRemoveHost(host=host_id,
pool=self.freepool_name)
def _matching_hosts(self, hypervisor_properties, resource_properties,
count_range, start_date, end_date):

@ -14,24 +14,14 @@
# limitations under the License.
from novaclient import exceptions as nova_exceptions
from oslo.config import cfg
from climate import context
from climate.manager import exceptions as manager_exceptions
from climate.plugins import oshosts as plugin
from climate.utils.openstack import nova
class NovaInventory(nova.NovaClientWrapper):
def __init__(self):
super(NovaInventory, self).__init__()
self.ctx = context.current()
# Used by nova cli
config = cfg.CONF[plugin.RESOURCE_TYPE]
self.username = config.climate_username
self.api_key = config.climate_password
self.project_id = config.climate_project_name
def get_host_details(self, host):
"""Get Nova capabilities of a single host

@ -51,16 +51,9 @@ CONF.register_opts(OPTS, group=plugin.RESOURCE_TYPE)
class ReservationPool(nova.NovaClientWrapper):
def __init__(self):
super(ReservationPool, self).__init__()
self.ctx = context.current()
self.config = CONF[plugin.RESOURCE_TYPE]
self.freepool_name = self.config.aggregate_freepool_name
# Used by nova cli
config = cfg.CONF[plugin.RESOURCE_TYPE]
self.username = config.climate_username
self.api_key = config.climate_password
self.project_id = config.climate_project_name
def get_aggregate_from_name_or_id(self, aggregate_obj):
"""Return an aggregate by name or an id."""
@ -115,7 +108,16 @@ class ReservationPool(nova.NovaClientWrapper):
'without Availability Zone' % name)
agg = self.nova.aggregates.create(name, None)
meta = {self.config.climate_owner: self.ctx.project_id}
project_id = None
try:
ctx = context.current()
project_id = ctx.project_id
except RuntimeError:
e = manager_exceptions.ProjectIdNotFound()
LOG.error(e.message)
raise e
meta = {self.config.climate_owner: project_id}
self.nova.aggregates.set_metadata(agg, meta)
return agg

@ -20,6 +20,7 @@ import uuid
from climate.tests import api
from climate.utils import trusts
def fake_computehost(**kw):
@ -29,6 +30,8 @@ def fake_computehost(**kw):
u'hypervisor_type': kw.get('hypervisor_type', u'QEMU'),
u'vcpus': kw.get('vcpus', 1),
u'hypervisor_version': kw.get('hypervisor_version', 1000000),
u'trust_id': kw.get('trust_id',
u'35b17138-b364-4e6a-a131-8f3099c5be68'),
u'memory_mb': kw.get('memory_mb', 8192),
u'local_gb': kw.get('local_gb', 50),
u'cpu_info': kw.get('cpu_info',
@ -61,6 +64,12 @@ def fake_computehost_from_rpc(**kw):
return computehost
def fake_trust(id=fake_computehost()['trust_id']):
    """Return a stub Keystone trust exposing only an ``id`` attribute.

    NOTE(review): the default value is computed once, at import time,
    via fake_computehost(); ``id`` also shadows the builtin of the same
    name. Both are acceptable in this test helper but worth knowing.
    """
    return type('Trust', (), {
        'id': id,
    })
class TestIncorrectHostFromRPC(api.APITest):
def setUp(self):
@ -206,6 +215,9 @@ class TestCreateHost(api.APITest):
self.headers = {'X-Roles': 'admin'}
self.trusts = trusts
self.patch(self.trusts, 'create_trust').return_value = fake_trust()
def test_create_one(self):
response = self.post_json(self.path, self.fake_computehost_body,
headers=self.headers)
@ -293,7 +305,8 @@ class TestUpdateHost(api.APITest):
self.headers = {'X-Roles': 'admin'}
def test_update_one(self):
response = self.put_json(self.path, self.fake_computehost_body,
response = self.put_json(self.path, fake_computehost_request_body(
exclude=['trust_id']),
headers=self.headers)
self.assertEqual(202, response.status_int)
self.assertEqual('application/json', response.content_type)

@ -119,3 +119,64 @@ class TestMigrations(migration.BaseWalkMigrationTestCase,
def _check_2bcfe76b0474(self, engine, data):
self.assertColumnExists(engine, 'leases', 'project_id')
self.assertColumnNotExists(engine, 'leases', 'tenant_id')
def _pre_upgrade_1fd6c2eded89(self, engine):
    """Seed two computehosts rows (without trust_id) before migrating.

    Returns the inserted row data so _check_1fd6c2eded89 can verify the
    backfill performed by the migration.
    """
    def _host_row(host_id, service_name):
        # Both rows share everything except their id and service name.
        return {
            'id': host_id,
            'hypervisor_hostname': 'host01',
            'hypervisor_type': 'QEMU',
            'hypervisor_version': 1000000,
            'service_name': service_name,
            'vcpus': 1,
            'memory_mb': 8192,
            'local_gb': 50,
            'cpu_info': "{\"vendor\": \"Intel\", \"model\": \"qemu32\", "
                        "\"arch\": \"x86_64\", \"features\": [],"
                        " \"topology\": {\"cores\": 1}}",
            'extra_capas': {'vgpus': 2},
        }

    data = [_host_row('1', 'host01'), _host_row('2', 'host02')]
    computehosts_table = self.get_table(engine, 'computehosts')
    engine.execute(computehosts_table.insert(), data)
    return data
def _check_1fd6c2eded89(self, engine, data):
    """Verify the computehosts table after upgrading to 1fd6c2eded89.

    Checks that the trust_id column exists, that pre-existing rows were
    backfilled with '' (or left NULL on SQLite, where the migration
    skips the NOT NULL tightening), and that inserting a NULL trust_id
    is rejected on engines that enforce the constraint.
    """
    self.assertColumnExists(engine, 'computehosts', 'trust_id')

    # The unused sqlalchemy.MetaData()/bind pair was removed: the table
    # is obtained through the test base's get_table helper instead.
    computehosts_table = self.get_table(engine, 'computehosts')
    all_computehosts = computehosts_table.select().execute()
    for computehost in all_computehosts:
        self.assertIn(computehost.trust_id, ['', None])

    data = {'id': '3',
            'hypervisor_hostname': 'host01',
            'hypervisor_type': 'QEMU',
            'hypervisor_version': 1000000,
            'service_name': 'host02',
            'vcpus': 1,
            'memory_mb': 8192,
            'local_gb': 50,
            'cpu_info': "{\"vendor\": \"Intel\", \"model\": \"qemu32\", "
                        "\"arch\": \"x86_64\", \"features\": [],"
                        " \"topology\": {\"cores\": 1}}",
            'extra_capas': {'vgpus': 2},
            'trust_id': None}
    if engine.name != 'sqlite':
        # SQLite kept the column nullable, so the insert would succeed
        # there; only stricter engines must raise.
        self.assertRaises(sqlalchemy.exc.DBAPIError,
                          engine.execute,
                          computehosts_table.insert(),
                          data)

@ -45,7 +45,8 @@ def _get_fake_phys_reservation_values(id=_get_fake_random_uuid(),
'resource_type': host_plugin.RESOURCE_TYPE,
'hypervisor_properties': '[\"=\", \"$hypervisor_type\", \"QEMU\"]',
'resource_properties': '',
'min': 1, 'max': 1}
'min': 1, 'max': 1,
'trust_id': 'exxee111qwwwwe'}
def _get_fake_virt_reservation_values(lease_id=_get_fake_lease_uuid(),
@ -161,7 +162,8 @@ def _get_fake_host_reservation_values(id=_get_fake_random_uuid(),
'reservation_id': reservation_id,
'resource_properties': "fake",
'hypervisor_properties': "fake",
'min': 1, 'max': 1}
'min': 1, 'max': 1,
'trust_id': 'exxee111qwwwwe'}
def _get_fake_cpu_info():
@ -181,7 +183,8 @@ def _get_fake_host_values(id=_get_fake_random_uuid(), mem=8192, disk=10):
'hypervisor_version': 1000,
'memory_mb': mem,
'local_gb': disk,
'status': 'free'
'status': 'free',
'trust_id': 'exxee111qwwwwe',
}

@ -31,6 +31,7 @@ from climate.plugins import base
from climate.plugins import dummy_vm_plugin
from climate.plugins.oshosts import host_plugin
from climate import tests
from climate.utils.openstack import base as base_utils
from climate.utils import trusts
@ -82,6 +83,7 @@ class ServiceTestCase(tests.TestCase):
self.dummy_plugin = dummy_vm_plugin
self.trusts = trusts
self.notifier_api = notifier_api
self.base_utils = base_utils
self.fake_plugin = self.patch(self.dummy_plugin, 'DummyVMPlugin')
@ -115,6 +117,7 @@ class ServiceTestCase(tests.TestCase):
self.ctx = self.patch(self.context, 'ClimateContext')
self.trust_ctx = self.patch(self.trusts, 'create_ctx_from_trust')
self.trust_create = self.patch(self.trusts, 'create_trust')
self.lease_get = self.patch(self.db_api, 'lease_get')
self.lease_get.return_value = self.lease
self.lease_list = self.patch(self.db_api, 'lease_list')
@ -128,6 +131,8 @@ class ServiceTestCase(tests.TestCase):
{'virtual:instance':
{'on_start': self.fake_plugin.on_start,
'on_end': self.fake_plugin.on_end}}
self.patch(
self.base_utils, 'url_for').return_value = 'http://www.foo.fake'
self.addCleanup(self.cfg.CONF.clear_override,
'notify_hours_before_lease_end',
@ -239,6 +244,7 @@ class ServiceTestCase(tests.TestCase):
self.lease_list.assert_called_once_with()
def test_create_lease_now(self):
trust_id = 'exxee111qwwwwe'
lease_values = {
'id': self.lease_id,
'reservations': [{'id': '111',
@ -246,10 +252,12 @@ class ServiceTestCase(tests.TestCase):
'resource_type': 'virtual:instance',
'status': 'FAKE PROGRESS'}],
'start_date': 'now',
'end_date': '2026-12-13 13:13'}
'end_date': '2026-12-13 13:13',
'trust_id': trust_id}
lease = self.manager.create_lease(lease_values)
self.trust_ctx.assert_called_once_with(trust_id)
self.lease_create.assert_called_once_with(lease_values)
self.assertEqual(lease, self.lease)
expected_context = self.trust_ctx.return_value
@ -259,6 +267,7 @@ class ServiceTestCase(tests.TestCase):
'lease.create')
def test_create_lease_some_time(self):
trust_id = 'exxee111qwwwwe'
lease_values = {
'id': self.lease_id,
'reservations': [{'id': '111',
@ -266,11 +275,14 @@ class ServiceTestCase(tests.TestCase):
'resource_type': 'virtual:instance',
'status': 'FAKE PROGRESS'}],
'start_date': '2026-11-13 13:13',
'end_date': '2026-12-13 13:13'}
'end_date': '2026-12-13 13:13',
'trust_id': trust_id}
self.lease['start_date'] = '2026-11-13 13:13'
lease = self.manager.create_lease(lease_values)
self.trust_ctx.assert_called_once_with(trust_id)
self.lease_create.assert_called_once_with(lease_values)
self.assertEqual(lease, self.lease)
@ -282,7 +294,8 @@ class ServiceTestCase(tests.TestCase):
'resource_type': 'virtual:instance',
'status': 'FAKE PROGRESS'}],
'start_date': '2026-11-13 13:13',
'end_date': '2026-12-13 13:13'}
'end_date': '2026-12-13 13:13',
'trust_id': 'exxee111qwwwwe'}
self.lease['start_date'] = '2026-11-13 13:13'
lease = self.manager.create_lease(lease_values)
@ -319,7 +332,8 @@ class ServiceTestCase(tests.TestCase):
'resource_type': 'virtual:instance',
'status': 'FAKE PROGRESS'}],
'start_date': '2026-11-13 13:13',
'end_date': '2026-11-14 13:13'}
'end_date': '2026-11-14 13:13',
'trust_id': 'exxee111qwwwwe'}
self.lease['start_date'] = '2026-11-13 13:13'
self.cfg.CONF.set_override('notify_hours_before_lease_end', 36,
@ -360,6 +374,7 @@ class ServiceTestCase(tests.TestCase):
'status': 'FAKE PROGRESS'}],
'start_date': start_date,
'end_date': '2026-11-14 13:13',
'trust_id': 'exxee111qwwwwe',
'before_end_notification': before_end_notification}
self.lease['start_date'] = '2026-11-13 13:13'
@ -376,6 +391,7 @@ class ServiceTestCase(tests.TestCase):
'status': 'FAKE PROGRESS'}],
'start_date': '2026-11-13 13:13',
'end_date': '2026-11-14 13:13',
'trust_id': 'exxee111qwwwwe',
'before_end_notification': before_end_notification}
self.lease['start_date'] = '2026-11-13 13:13'
@ -390,7 +406,8 @@ class ServiceTestCase(tests.TestCase):
'resource_type': 'virtual:instance',
'status': 'FAKE PROGRESS'}],
'start_date': '2026-11-13 13:13',
'end_date': '2026-11-14 13:13'}
'end_date': '2026-11-14 13:13',
'trust_id': 'exxee111qwwwwe'}
self.lease['start_date'] = '2026-11-13 13:13'
self.cfg.CONF.set_override('notify_hours_before_lease_end', 0,
@ -424,6 +441,7 @@ class ServiceTestCase(tests.TestCase):
'status': 'FAKE PROGRESS'}],
'start_date': '2026-11-13 13:13',
'end_date': '2026-11-14 13:13',
'trust_id': 'exxee111qwwwwe',
'before_end_notification': before_end_notification}
self.lease['start_date'] = '2026-11-13 13:13'
@ -455,7 +473,8 @@ class ServiceTestCase(tests.TestCase):
def test_create_lease_wrong_date(self):
lease_values = {'start_date': '2025-13-35 13:13',
'end_date': '2025-12-31 13:13'}
'end_date': '2025-12-31 13:13',
'trust_id': 'exxee111qwwwwe'}
self.assertRaises(
manager_ex.InvalidDate, self.manager.create_lease, lease_values)
@ -464,7 +483,8 @@ class ServiceTestCase(tests.TestCase):
before_end_notification = '2026-14 10:13'
lease_values = {'start_date': '2026-11-13 13:13',
'end_date': '2026-11-14 13:13',
'before_end_notification': before_end_notification}
'before_end_notification': before_end_notification,
'trust_id': 'exxee111qwwwwe'}
self.assertRaises(
manager_ex.InvalidDate, self.manager.create_lease, lease_values)
@ -475,7 +495,8 @@ class ServiceTestCase(tests.TestCase):
datetime.datetime.strftime(
datetime.datetime.utcnow() - datetime.timedelta(days=1),
service.LEASE_DATE_FORMAT),
'end_date': '2025-12-31 13:13'}
'end_date': '2025-12-31 13:13',
'trust_id': 'exxee111qwwwwe'}
self.assertRaises(
exceptions.NotAuthorized, self.manager.create_lease, lease_values)
@ -488,7 +509,8 @@ class ServiceTestCase(tests.TestCase):
'resource_type': 'unsupported:type',
'status': 'FAKE PROGRESS'}],
'start_date': '2026-11-13 13:13',
'end_date': '2026-12-13 13:13'}
'end_date': '2026-12-13 13:13',
'trust_id': 'exxee111qwwwwe'}
self.assertRaises(manager_ex.UnsupportedResourceType,
self.manager.create_lease, lease_values)
@ -497,13 +519,23 @@ class ServiceTestCase(tests.TestCase):
lease_values = {
'name': 'duplicated_name',
'start_date': '2026-11-13 13:13',
'end_date': '2026-12-13 13:13'}
'end_date': '2026-12-13 13:13',
'trust_id': 'exxee111qwwwwe'}
self.patch(self.db_api,
'lease_create').side_effect = db_ex.ClimateDBDuplicateEntry
self.assertRaises(manager_ex.LeaseNameAlreadyExists,
self.manager.create_lease, lease_values)
def test_create_lease_without_trust_id(self):
    """Omitting trust_id from the lease values raises MissingTrustId."""
    values = {
        'name': 'name',
        'start_date': '2026-11-13 13:13',
        'end_date': '2026-12-13 13:13',
    }
    self.assertRaises(manager_ex.MissingTrustId,
                      self.manager.create_lease,
                      values)
def test_update_lease_completed_lease_rename(self):
lease_values = {'name': 'renamed'}
target = datetime.datetime(2015, 1, 1)

@ -14,6 +14,8 @@
# limitations under the License.
import sys
import eventlet
import testtools
from climate import exceptions as climate_exceptions
@ -28,25 +30,31 @@ from novaclient import exceptions as nova_exceptions
class VMPluginTestCase(tests.TestCase):
def setUp(self):
    """Set up the mocks shared by all VM plugin tests.

    NovaClientWrapper.nova is patched *before* the plugin is
    instantiated so that every Nova call made by the plugin goes
    through ``self.nova_wrapper``.
    """
    super(VMPluginTestCase, self).setUp()
    self.nova = nova
    self.exc = climate_exceptions
    self.logging = logging
    self.sys = sys

    # To speed up the test run
    self.eventlet = eventlet
    self.eventlet_sleep = self.patch(self.eventlet, 'sleep')

    self.fake_id = '1'
    self.nova_wrapper = self.patch(self.nova.NovaClientWrapper, 'nova')
    # Build the plugin only after Nova is mocked out; the stale
    # pre-patch construction (before the mocks) and the dead
    # ClimateNovaClient patch were removed.
    self.plugin = vm_plugin.VMPlugin()
def test_on_start_ok(self):
    """on_start unshelves the reserved server via the Nova wrapper."""
    self.plugin.on_start(self.fake_id)
    # Only the wrapper-based assertion is kept; the pre-patch
    # self.client assertion referred to a mock that no longer exists.
    self.nova_wrapper.servers.unshelve.assert_called_once_with('1')
@testtools.skip('Will be released later')
def test_on_start_fail(self):
    """on_start should survive a Conflict raised while unshelving."""
    def raise_exception(resource_id):
        raise climate_exceptions.Conflict(409)

    # The stale pre-patch ClimateNovaClient side_effect lines were
    # dropped; the wrapper mock is the only Nova entry point now.
    self.nova_wrapper.servers.unshelve.side_effect = raise_exception
    self.plugin.on_start(self.fake_id)
def test_on_end_create_image_ok(self):
@ -57,36 +65,39 @@ class VMPluginTestCase(tests.TestCase):
self.plugin.on_end(self.fake_id)
self.client.return_value.servers.create_image.assert_called_once_with(
'1')
self.nova_wrapper.servers.create_image.assert_called_once_with('1')
def test_on_end_suspend_ok(self):
    """on_end suspends the server when the 'suspend' action is set."""
    self.patch(self.plugin, '_split_actions').return_value = ['suspend']
    self.plugin.on_end(self.fake_id)
    # Stale self.client assertion removed; assert on the wrapper mock.
    self.nova_wrapper.servers.suspend.assert_called_once_with('1')
def test_on_end_delete_ok(self):
    """on_end deletes the server when the 'delete' action is set."""
    self.patch(self.plugin, '_split_actions').return_value = ['delete']
    self.plugin.on_end(self.fake_id)
    # Stale self.client assertion removed; assert on the wrapper mock.
    self.nova_wrapper.servers.delete.assert_called_once_with('1')
def test_on_end_create_image_instance_or_not_found(self):
    """If create_image hits NotFound, on_end still deletes the server."""
    def raise_exception(resource_id):
        raise nova_exceptions.NotFound(404)

    # Pre-patch self.client side_effect/assert lines removed; the
    # wrapper mock is the single Nova entry point after the trusts change.
    self.nova_wrapper.servers.create_image.side_effect = raise_exception
    self.plugin.on_end(self.fake_id)
    self.nova_wrapper.servers.delete.assert_called_once_with('1')
def test_on_end_create_image_ko_invalid_vm_state(self):
    """A Conflict (invalid VM state) from create_image still ends in delete."""
    def raise_exception(resource_id):
        raise nova_exceptions.Conflict(409)

    # Pre-patch self.client side_effect/assert lines removed; only the
    # wrapper-mock interactions remain.
    self.nova_wrapper.servers.create_image.side_effect = raise_exception
    self.plugin.on_end(self.fake_id)
    self.nova_wrapper.servers.delete.assert_called_once_with('1')
@testtools.skip('Will be released later')
def test_on_end_timeout(self):

@ -13,8 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from novaclient import client
from novaclient import exceptions as nova_exceptions
from novaclient.v1_1 import hypervisors
from climate import context
from climate.manager import exceptions as manager_exceptions
@ -77,15 +77,12 @@ class NovaInventoryTestCase(tests.TestCase):
self.context = context
self.patch(self.context, 'ClimateContext')
self.nova_inventory = nova_inventory
self.client = client
self.client = self.patch(self.client, 'Client').return_value
self.patch(base, 'url_for').return_value = 'http://foo.bar'
self.inventory = self.nova_inventory.NovaInventory()
self.hypervisors_get = self.patch(self.inventory.nova.hypervisors,
'get')
self.hypervisors_get = self.patch(hypervisors.HypervisorManager, 'get')
self.hypervisors_get.side_effect = FakeNovaHypervisors.get
self.hypervisors_search = self.patch(self.inventory.nova.hypervisors,
self.hypervisors_search = self.patch(hypervisors.HypervisorManager,
'search')
self.hypervisors_search.side_effect = FakeNovaHypervisors.search

@ -23,6 +23,7 @@ from climate.plugins import oshosts as host_plugin
from climate.plugins.oshosts import reservation_pool as rp
from climate import tests
from climate.utils.openstack import base
from climate.utils.openstack import nova
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
@ -128,6 +129,20 @@ class ReservationPoolTestCase(tests.TestCase):
self.nova.aggregates.create.assert_called_once_with(self.pool_name,
None)
def test_create_no_project_id(self):
    """pool.create raises ProjectIdNotFound when the context has none."""
    self.patch(self.nova.aggregates, 'create').return_value = (
        self.fake_aggregate)
    self.nova_wrapper = self.patch(nova.NovaClientWrapper, 'nova')

    def raiseRuntimeError():
        raise RuntimeError()

    self.context_mock.side_effect = raiseRuntimeError
    self.assertRaises(manager_exceptions.ProjectIdNotFound,
                      self.pool.create)
def test_delete_with_host(self):
self._patch_get_aggregate_from_name_or_id()
agg = self.pool.get('foo')

@ -31,6 +31,7 @@ from climate.plugins.oshosts import nova_inventory
from climate.plugins.oshosts import reservation_pool as rp
from climate import tests
from climate.utils.openstack import base
from climate.utils import trusts
from novaclient import client as nova_client
@ -58,15 +59,6 @@ class PhysicalHostPlugingSetupOnlyTestCase(tests.TestCase):
self