Fix style errors ahead of new hacking update
This patch fixes a number of style errors that will appear once we
start using the new hacking version.

Change-Id: I49721f1ba9f3f017da00212a9fdc5b87d5afaedf
Closes-Bug: #1330536
parent 7f78792d57
commit ffe65876e8
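Most of the hunks below apply a handful of mechanical rules enforced by the new hacking release. The most common one replaces backslash line continuations with implicit continuation inside parentheses. A standalone before/after sketch (illustrative names, not taken from the diff itself):

    plugins = ['physical.host.plugin']

    # Old style, flagged by the checker: backslash continuation.
    if plugins \
            and 'physical.host.plugin' in plugins:
        print('plugin enabled')

    # New style: the expression is wrapped in parentheses, so the line
    # continues implicitly and no backslash is needed.
    if (plugins
            and 'physical.host.plugin' in plugins):
        print('plugin enabled')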
@@ -77,8 +77,8 @@ def make_app():
     LOG.debug("List of plugins: %s", cfg.CONF.manager.plugins)
     # TODO(sbauza) : Change this whole crap by removing hardcoded values and
     # maybe using stevedore for achieving this
-    if cfg.CONF.manager.plugins \
-            and 'physical.host.plugin' in cfg.CONF.manager.plugins:
+    if (cfg.CONF.manager.plugins
+            and 'physical.host.plugin' in cfg.CONF.manager.plugins):
         app.register_blueprint(host_api_v1_0.rest, url_prefix='/v1/os-hosts')

     for code in werkzeug_exceptions.default_exceptions.iterkeys():
@@ -23,7 +23,7 @@ rest = api_utils.Rest('host_v1_0', __name__)
 _api = utils.LazyProxy(service.API)


-## Computehosts operations
+# Computehosts operations

 @rest.get('')
 def computehosts_list():
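The comment fixes throughout this patch follow pep8's block-comment rules (E265/E266): a block comment starts with exactly one '#' followed by a space. A minimal sketch, valid Python on its own:

    ## Computehosts operations   # rejected: doubled hash (E266)
    #Reservation                 # rejected: no space after the hash (E265)

    # Computehosts operations    # accepted
    # Reservation                # accepted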
@@ -47,8 +47,7 @@ def computehosts_get(host_id):
 @rest.put('/<host_id>')
 @validation.check_exists(_api.get_computehost, host_id='host_id')
 def computehosts_update(host_id, data):
-    """Update computehost. Only name changing may be proceeded.
-    """
+    """Update computehost. Only name changing may be proceeded."""
     if len(data) == 0:
         return api_utils.internal_error(status_code=400,
                                         descr="No data to update")
@@ -28,7 +28,7 @@ class API(object):
     def __init__(self):
         self.manager_rpcapi = manager_rpcapi.ManagerRPCAPI()

-    ## Leases operations
+    # Leases operations

     @policy.authorize('leases', 'get')
     def get_leases(self):
@@ -94,7 +94,7 @@ class API(object):
         """
         self.manager_rpcapi.delete_lease(lease_id)

-    ## Plugins operations
+    # Plugins operations

     @policy.authorize('plugins', 'get')
     def get_plugins(self):
@@ -25,7 +25,7 @@ rest = api_utils.Rest('v1_0', __name__)
 _api = utils.LazyProxy(service.API)


-## Leases operations
+# Leases operations

 @rest.get('/leases')
 def leases_list():
@@ -61,7 +61,7 @@ def leases_delete(lease_id):
     return api_utils.render()


-## Plugins operations
+# Plugins operations

 @rest.get('/plugins')
 def plugins_list():
@@ -17,6 +17,7 @@ import abc

 from oslo.config import cfg
 from pecan import rest
+import six

 from climate.openstack.common import log as logging

@@ -24,8 +25,8 @@ LOG = logging.getLogger(__name__)
 CONF = cfg.CONF


+@six.add_metaclass(abc.ABCMeta)
 class BaseController(rest.RestController):
-    __metaclass__ = abc.ABCMeta

     """Mandatory API method name."""
     name = None
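Declaring `__metaclass__` inside the class body is silently ignored on Python 3, so the patch switches to the `six.add_metaclass` decorator, which works on both interpreter lines. A minimal sketch (the class name mirrors the hunk above; the abstract method is illustrative):

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class BaseController(object):
        """Base class whose subclasses must implement get()."""

        @abc.abstractmethod
        def get(self):
            pass

    # Instantiating BaseController() now raises TypeError on both
    # Python 2 and 3 until get() is overridden.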
@@ -22,10 +22,10 @@ from climate.api.v2.controllers import extensions
 from climate.api.v2.controllers import types
 from climate import exceptions
 from climate.openstack.common.gettextutils import _  # noqa
+from climate.openstack.common import log as logging
 from climate import policy
 from climate.utils import trusts

-from climate.openstack.common import log as logging
 LOG = logging.getLogger(__name__)


@@ -100,8 +100,7 @@ class Host(base._Base):


 class HostsController(extensions.BaseController):
-    """Manages operations on hosts.
-    """
+    """Manages operations on hosts."""

     name = 'oshosts'
     extra_routes = {'os-hosts': 'oshosts',
@@ -88,8 +88,7 @@ class Lease(base._Base):


 class LeasesController(extensions.BaseController):
-    """Manages operations on leases.
-    """
+    """Manages operations on leases."""

     name = 'leases'

@@ -98,8 +98,8 @@ class TextOrInteger(wtypes.UserType):
     @staticmethod
     def validate(value):
         # NOTE(sbauza): We need to accept non-unicoded Python2 strings
-        if isinstance(value, six.text_type) or isinstance(value, str) \
-                or isinstance(value, int):
+        if (isinstance(value, six.text_type) or isinstance(value, str)
+                or isinstance(value, int)):
             return value
         else:
             raise exceptions.InvalidInput(cls=TextOrInteger.name, value=value)
@@ -111,7 +111,7 @@ class Datetime(wtypes.UserType):
     basetype = wtypes.text
     name = 'datetime'

-    #Format must be ISO8601 as default
+    # Format must be ISO8601 as default
     format = '%Y-%m-%dT%H:%M:%S.%f'

     def __init__(self, format=None):
@@ -20,14 +20,15 @@ from climate.api import context
 from climate.db import api as dbapi
 from climate.manager.oshosts import rpcapi as hosts_rpcapi
 from climate.manager import rpcapi

 from climate.openstack.common import log as logging

 LOG = logging.getLogger(__name__)


 class ConfigHook(hooks.PecanHook):
-    """Attach the configuration object to the request
+    """ConfigHook
+
+    Attach the configuration object to the request
     so controllers can get to it.
     """

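The docstring rewrites in this patch follow two rules from the new checks: a docstring that fits on one line keeps its closing quotes on that line, and a multi-line docstring opens with a short summary line followed by a blank line before the body. A sketch of both accepted forms (names mirror the hunks above, the bodies are illustrative):

    def computehosts_update(host_id, data):
        """Update computehost. Only name changing may be proceeded."""


    class ConfigHook(object):
        """ConfigHook

        Attach the configuration object to the request
        so controllers can get to it.
        """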
@@ -27,9 +27,10 @@ LOG = logging.getLogger(__name__)


 class ParsableErrorMiddleware(object):
-    """Middleware to replace the plain text message body of an error
-    response with one formatted so the client can parse it.
+    """Middleware which prepared body to the client
+
+    Middleware to replace the plain text message body of an error
+    response with one formatted so the client can parse it.
     Based on pecan.middleware.errordocument
     """

@@ -43,8 +44,7 @@ class ParsableErrorMiddleware(object):
         faultstring = None

         def replacement_start_response(status, headers, exc_info=None):
-            """Overrides the default response to make errors parsable.
-            """
+            """Overrides the default response to make errors parsable."""
             try:
                 status_code = int(status.split(' ')[0])
             except (ValueError, TypeError):  # pragma: nocover
@@ -66,7 +66,7 @@ def drop_db():
     return IMPL.drop_db()


-## Helpers for building constraints / equality checks
+# Helpers for building constraints / equality checks


 def constraint(**conditions):
@@ -107,7 +107,7 @@ def to_dict(func):
     return decorator


-#Reservation
+# Reservation

 def reservation_create(reservation_values):
     """Create a reservation from the values."""
@@ -142,7 +142,7 @@ def reservation_update(reservation_id, reservation_values):
     IMPL.reservation_update(reservation_id, reservation_values)


-#Lease
+# Lease

 def lease_create(lease_values):
     """Create a lease from values."""
@@ -189,7 +189,7 @@ def lease_update(lease_id, lease_values):
     IMPL.lease_update(lease_id, lease_values)


-#Events
+# Events

 @to_dict
 def event_create(event_values):
@@ -270,7 +270,7 @@ def host_reservation_update(host_reservation_id,
                             host_reservation_values)


-#Allocation
+# Allocation

 def host_allocation_create(allocation_values):
     """Create an allocation from the values."""
@@ -25,10 +25,11 @@ Create Date: 2014-02-19 17:23:47.705197
 revision = '0_1'
 down_revision = None

+import uuid
+
 from alembic import op
 import sqlalchemy as sa
 from sqlalchemy.dialects.mysql import MEDIUMTEXT
-import uuid


 def _generate_unicode_uuid():
@@ -36,8 +36,8 @@ def upgrade():
                               nullable=True))

     if op.get_bind().engine.name != 'sqlite':
-        #I need to do it in this way because Postgress fails
-        #if I use SQLAlchemy
+        # I need to do it in this way because Postgress fails
+        # if I use SQLAlchemy
         connection = op.get_bind()
         connection.execute("UPDATE computehosts SET trust_id = ''")

@@ -22,13 +22,10 @@ import os
 from alembic import command as alembic_command
 from alembic import config as alembic_config
 from alembic import util as alembic_util
-
 from oslo.config import cfg
-
 from climate.openstack.common.db import options as db_options

 gettext.install('climate', unicode=1)
-
 from climate.openstack.common.gettextutils import _

@@ -22,7 +22,6 @@ from sqlalchemy.sql.expression import asc
 from sqlalchemy.sql.expression import desc

 from climate.db import exceptions as db_exc
-
 from climate.db.sqlalchemy import facade_wrapper
 from climate.db.sqlalchemy import models
 from climate.openstack.common.db import exception as common_db_exc
@@ -77,7 +76,7 @@ def drop_db():
     return True


-## Helpers for building constraints / equality checks
+# Helpers for building constraints / equality checks


 def constraint(**conditions):
@@ -119,7 +118,7 @@ class InequalityCondition(object):
     return [field != value for value in self.values]


-#Reservation
+# Reservation
 def _reservation_get(session, reservation_id):
     query = model_query(models.Reservation, session)
     return query.filter_by(id=reservation_id).first()
@@ -135,8 +134,8 @@ def reservation_get_all():


 def reservation_get_all_by_lease_id(lease_id):
-    reservations = model_query(models.Reservation, get_session()).\
-        filter_by(lease_id=lease_id)
+    reservations = (model_query(models.Reservation,
+                                get_session()).filter_by(lease_id=lease_id))
     return reservations.all()

@@ -192,7 +191,7 @@ def reservation_destroy(reservation_id):
         session.delete(reservation)


-#Lease
+# Lease
 def _lease_get(session, lease_id):
     query = model_query(models.Lease, session)
     return query.filter_by(id=lease_id).first()
@@ -286,7 +285,7 @@ def lease_destroy(lease_id):
         session.delete(lease)


-#Event
+# Event
 def _event_get(session, event_id):
     query = model_query(models.Event, session)
     return query.filter_by(id=event_id).first()
@@ -313,11 +312,11 @@ def _event_get_sorted_by_filters(sort_key, sort_dir, filters):
     events_query = _event_get_all(get_session())

     if 'status' in filters:
-        events_query = \
-            events_query.filter(models.Event.status == filters['status'])
+        events_query = (
+            events_query.filter(models.Event.status == filters['status']))
     if 'lease_id' in filters:
-        events_query = \
-            events_query.filter(models.Event.lease_id == filters['lease_id'])
+        events_query = (
+            events_query.filter(models.Event.lease_id == filters['lease_id']))
     if 'event_type' in filters:
         events_query = events_query.filter(models.Event.event_type ==
                                            filters['event_type'])
@@ -330,7 +329,9 @@ def _event_get_sorted_by_filters(sort_key, sort_dir, filters):


 def event_get_first_sorted_by_filters(sort_key, sort_dir, filters):
-    """Return the first result for all events matching the filters
+    """Return first result for events
+
+    Return the first result for all events matching the filters
     and sorted by name of the field.
     """

@@ -383,7 +384,7 @@ def event_destroy(event_id):
         session.delete(event)


-#ComputeHostReservation
+# ComputeHostReservation
 def _host_reservation_get(session, host_reservation_id):
     query = model_query(models.ComputeHostReservation, session)
     return query.filter_by(id=host_reservation_id).first()
@@ -452,7 +453,7 @@ def host_reservation_destroy(host_reservation_id):
         session.delete(host_reservation)


-#ComputeHostAllocation
+# ComputeHostAllocation
 def _host_allocation_get(session, host_allocation_id):
     query = model_query(models.ComputeHostAllocation, session)
     return query.filter_by(id=host_allocation_id).first()
@@ -521,7 +522,7 @@ def host_allocation_destroy(host_allocation_id):
         session.delete(host_allocation)


-#ComputeHost
+# ComputeHost
 def _host_get(session, host_id):
     query = model_query(models.ComputeHost, session)
     return query.filter_by(id=host_id).first()
@@ -546,8 +547,8 @@ def host_get_all_by_filters(filters):
     hosts_query = _host_get_all(get_session())

     if 'status' in filters:
-        hosts_query = hosts_query.\
-            filter(models.ComputeHost.status == filters['status'])
+        hosts_query = hosts_query.filter(
+            models.ComputeHost.status == filters['status'])

     return hosts_query.all()

@@ -599,7 +600,7 @@ def host_get_all_by_queries(queries):

             hosts_query = hosts_query.filter(filt)
         else:
-            #looking for extra capabilities matches
+            # looking for extra capabilities matches
             extra_filter = model_query(
                 models.ComputeHostExtraCapability, get_session()
             ).filter(models.ComputeHostExtraCapability.capability_name == key
@@ -658,7 +659,7 @@ def host_destroy(host_id):
         session.delete(host)


-#ComputeHostExtraCapability
+# ComputeHostExtraCapability
 def _host_extra_capability_get(session, host_extra_capability_id):
     query = model_query(models.ComputeHostExtraCapability, session)
     return query.filter_by(id=host_extra_capability_id).first()
@@ -701,9 +702,9 @@ def host_extra_capability_update(host_extra_capability_id, values):
     session = get_session()

     with session.begin():
-        host_extra_capability = \
+        host_extra_capability = (
             _host_extra_capability_get(session,
-                                       host_extra_capability_id)
+                                       host_extra_capability_id))
         host_extra_capability.update(values)
         host_extra_capability.save(session=session)

@@ -713,9 +714,9 @@ def host_extra_capability_update(host_extra_capability_id, values):
 def host_extra_capability_destroy(host_extra_capability_id):
     session = get_session()
     with session.begin():
-        host_extra_capability = \
+        host_extra_capability = (
             _host_extra_capability_get(session,
-                                       host_extra_capability_id)
+                                       host_extra_capability_id))

         if not host_extra_capability:
             # raise not found error
@@ -13,10 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from climate.openstack.common.db.sqlalchemy import models as oslo_models
 from sqlalchemy.ext import declarative
 from sqlalchemy.orm import attributes

+from climate.openstack.common.db.sqlalchemy import models as oslo_models
+

 class _ClimateBase(oslo_models.ModelBase, oslo_models.TimestampMixin):
     """Base class for all Climate SQLAlchemy DB Models."""
@@ -13,19 +13,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import sqlalchemy as sa
 import uuid

-from climate.db.sqlalchemy import model_base as mb
-#FIXME: https://bugs.launchpad.net/climate/+bug/1300132
-#from climate.openstack.common import log as logging
+import sqlalchemy as sa
 from sqlalchemy.dialects.mysql import MEDIUMTEXT
 from sqlalchemy.orm import relationship

-#FIXME: https://bugs.launchpad.net/climate/+bug/1300132
-#LOG = logging.getLogger(__name__)
+from climate.db.sqlalchemy import model_base as mb

-## Helpers
+# FIXME: https://bugs.launchpad.net/climate/+bug/1300132
+# from climate.openstack.common import log as logging
+
+# FIXME: https://bugs.launchpad.net/climate/+bug/1300132
+# LOG = logging.getLogger(__name__)
+
+# Helpers


 def _generate_unicode_uuid():
@@ -42,7 +43,7 @@ def _id_column():
                      default=_generate_unicode_uuid)


-## Main objects: Lease, Reservation, Event
+# Main objects: Lease, Reservation, Event

 class Lease(mb.ClimateBase):
     """Contains all info about lease."""
@@ -112,8 +113,9 @@ class Reservation(mb.ClimateBase):
                 minMax = res['count_range'].split('-', 1)
                 (d['min'], d['max']) = map(int, minMax)
             except ValueError:
-                #FIXME: https://bugs.launchpad.net/climate/+bug/1300132
-                #LOG.error("Invalid Range: {0}".format(res['count_range']))
+                # FIXME: https://bugs.launchpad.net/climate/+bug/1300132
+                # LOG.error(
+                #     "Invalid Range: {0}".format(res['count_range']))
                 e = "Invalid count range: {0}".format(res['count_range'])
                 raise RuntimeError(e)
         return d
@@ -135,7 +137,9 @@ class Event(mb.ClimateBase):


 class ComputeHostReservation(mb.ClimateBase):
-    """Specifies resources asked by reservation from
+    """Description
+
+    Specifies resources asked by reservation from
     Compute Host Reservation API.
     """

@@ -153,8 +157,7 @@ class ComputeHostReservation(mb.ClimateBase):


 class ComputeHostAllocation(mb.ClimateBase):
-    """Mapping between ComputeHost, ComputeHostReservation and Reservation.
-    """
+    """Mapping between ComputeHost, ComputeHostReservation and Reservation."""

     __tablename__ = 'computehost_allocations'

@@ -169,7 +172,9 @@ class ComputeHostAllocation(mb.ClimateBase):


 class ComputeHost(mb.ClimateBase):
-    """Specifies resources asked by reservation from
+    """Description
+
+    Specifies resources asked by reservation from
     Compute Host Reservation API.
     """

@@ -196,7 +201,9 @@ class ComputeHost(mb.ClimateBase):


 class ComputeHostExtraCapability(mb.ClimateBase):
-    """Allows to define extra capabilities per administrator request for each
+    """Description
+
+    Allows to define extra capabilities per administrator request for each
     Compute Host added.
     """

@@ -32,30 +32,28 @@ def get_backend():

 def _get_leases_from_resource_id(resource_id, start_date, end_date):
     session = get_session()
-    for lease in session.query(models.Lease).\
-        join(models.Reservation,
-             models.Lease.id == models.Reservation.lease_id).\
-        filter(models.Reservation.resource_id == resource_id).\
-        filter(~sa.or_(sa.and_(models.Lease.start_date < start_date,
-                               models.Lease.end_date < start_date),
-                       sa.and_(models.Lease.start_date > end_date,
-                               models.Lease.end_date > end_date))):
+    border0 = sa.and_(models.Lease.start_date < start_date,
+                      models.Lease.end_date < start_date)
+    border1 = sa.and_(models.Lease.start_date > end_date,
+                      models.Lease.end_date > end_date)
+    query = (session.query(models.Lease).join(models.Reservation)
+             .filter(models.Reservation.resource_id == resource_id)
+             .filter(~sa.or_(border0, border1)))
+    for lease in query:
         yield lease


 def _get_leases_from_host_id(host_id, start_date, end_date):
     session = get_session()
-    for lease in session.query(models.Lease).\
-        join(models.Reservation,
-             models.Lease.id == models.Reservation.lease_id).\
-        join(models.ComputeHostAllocation,
-             models.Reservation.id ==
-             models.ComputeHostAllocation.reservation_id).\
-        filter(models.ComputeHostAllocation.compute_host_id == host_id).\
-        filter(~sa.or_(sa.and_(models.Lease.start_date < start_date,
-                               models.Lease.end_date < start_date),
-                       sa.and_(models.Lease.start_date > end_date,
-                               models.Lease.end_date > end_date))):
+    border0 = sa.and_(models.Lease.start_date < start_date,
+                      models.Lease.end_date < start_date)
+    border1 = sa.and_(models.Lease.start_date > end_date,
+                      models.Lease.end_date > end_date)
+    query = (session.query(models.Lease).join(models.Reservation)
+             .join(models.ComputeHostAllocation)
+             .filter(models.ComputeHostAllocation.compute_host_id == host_id)
+             .filter(~sa.or_(border0, border1)))
+    for lease in query:
        yield lease


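Beyond dropping the backslashes, the rewrite above names the two "lease lies entirely outside the window" conditions before composing them, which makes the negated sa.or_ much easier to read. A self-contained sketch of the same pattern against a minimal, hypothetical model (not the project's):

    import sqlalchemy as sa
    from sqlalchemy.ext import declarative

    Base = declarative.declarative_base()


    class Lease(Base):
        __tablename__ = 'leases'
        id = sa.Column(sa.Integer, primary_key=True)
        start_date = sa.Column(sa.DateTime)
        end_date = sa.Column(sa.DateTime)


    def leases_overlapping(session, start_date, end_date):
        # A lease misses the window if it ends before the window starts
        # (border0) or starts after the window ends (border1); everything
        # else overlaps it.
        border0 = sa.and_(Lease.start_date < start_date,
                          Lease.end_date < start_date)
        border1 = sa.and_(Lease.start_date > end_date,
                          Lease.end_date > end_date)
        query = (session.query(Lease)
                 .filter(~sa.or_(border0, border1)))
        return query.all()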
@@ -186,15 +184,12 @@ def longest_lease(host_id, start_date, end_date):
     max_duration = datetime.timedelta(0)
     longest_lease = None
     session = get_session()
-    for lease in session.query(models.Lease).\
-        join(models.Reservation,
-             models.Lease.id == models.Reservation.lease_id).\
-        join(models.ComputeHostAllocation,
-             models.Reservation.id ==
-             models.ComputeHostAllocation.reservation_id).\
-        filter(models.ComputeHostAllocation.compute_host_id == host_id).\
-        filter(models.Lease.start_date >= start_date).\
-        filter(models.Lease.end_date <= end_date):
+    query = (session.query(models.Lease).join(models.Reservation)
+             .join(models.ComputeHostAllocation)
+             .filter(models.ComputeHostAllocation.compute_host_id == host_id)
+             .filter(models.Lease.start_date >= start_date)
+             .filter(models.Lease.end_date <= end_date))
+    for lease in query:
         duration = lease.end_date - lease.start_date
         if max_duration < duration:
             max_duration = duration
@@ -207,15 +202,12 @@ def shortest_lease(host_id, start_date, end_date):
     min_duration = datetime.timedelta(365 * 1000)
     longest_lease = None
     session = get_session()
-    for lease in session.query(models.Lease).\
-        join(models.Reservation,
-             models.Lease.id == models.Reservation.lease_id).\
-        join(models.ComputeHostAllocation,
-             models.Reservation.id ==
-             models.ComputeHostAllocation.reservation_id).\
-        filter(models.ComputeHostAllocation.compute_host_id == host_id).\
-        filter(models.Lease.start_date >= start_date).\
-        filter(models.Lease.end_date <= end_date):
+    query = (session.query(models.Lease).join(models.Reservation)
+             .join(models.ComputeHostAllocation)
+             .filter(models.ComputeHostAllocation.compute_host_id == host_id)
+             .filter(models.Lease.start_date >= start_date)
+             .filter(models.Lease.end_date <= end_date))
+    for lease in query:
         duration = lease.end_date - lease.start_date
         if min_duration > duration:
             min_duration = duration
@@ -61,7 +61,7 @@ def drop_db():
     return IMPL.drop_db()


-## Helpers for building constraints / equality checks
+# Helpers for building constraints / equality checks


 def constraint(**conditions):
@@ -14,10 +14,10 @@
 # limitations under the License.

 import datetime
-import six

 import eventlet
 from oslo.config import cfg
+import six
 from stevedore import enabled

 from climate.db import api as db_api
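The import moves follow the grouping convention the checker now enforces: standard library imports first, then third-party packages, then the project's own modules, each group separated by a blank line and sorted alphabetically. That is why six leaves the stdlib group and lands between oslo and stevedore. The resulting file header, annotated (this simply restates the hunk above, so it assumes the same installed dependencies):

    import datetime                       # stdlib

    import eventlet                       # third-party, alphabetical
    from oslo.config import cfg
    import six
    from stevedore import enabled

    from climate.db import api as db_api  # project modules last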
@@ -88,9 +88,9 @@ class ManagerService(service_utils.RPCServer):
                         ext.name, ext.plugin.resource_type, e))
                 else:
                     if plugin_obj.resource_type in plugins:
-                        msg = "You have provided several plugins for " \
-                              "one resource type in configuration file. " \
-                              "Please set one plugin per resource type."
+                        msg = ("You have provided several plugins for "
+                               "one resource type in configuration file. "
+                               "Please set one plugin per resource type.")
                         raise exceptions.PluginConfigurationError(error=msg)

                     plugins[plugin_obj.resource_type] = plugin_obj
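Long messages lose their backslashes the same way: adjacent string literals inside parentheses are concatenated at compile time, so the message reads as one string with no continuation characters. A runnable sketch using the message from the hunk above:

    msg = ("You have provided several plugins for "
           "one resource type in configuration file. "
           "Please set one plugin per resource type.")
    assert "Please set one plugin" in msg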
@@ -336,9 +336,9 @@ class ManagerService(service_utils.RPCServer):
                 LOG.error("Invalid before_end_date param. %s" % e.message)
                 raise e

-        #TODO(frossigneux) rollback if an exception is raised
-        for reservation in \
-                db_api.reservation_get_all_by_lease_id(lease_id):
+        # TODO(frossigneux) rollback if an exception is raised
+        for reservation in (
+                db_api.reservation_get_all_by_lease_id(lease_id)):
             reservation['start_date'] = values['start_date']
             reservation['end_date'] = values['end_date']
             resource_type = reservation['resource_type']
@@ -390,9 +390,9 @@ class ManagerService(service_utils.RPCServer):
                 datetime.datetime.utcnow() > lease['end_date']):
             with trusts.create_ctx_from_trust(lease['trust_id']) as ctx:
                 for reservation in lease['reservations']:
+                    plugin = self.plugins[reservation['resource_type']]
                     try:
-                        self.plugins[reservation['resource_type']]\
-                            .on_end(reservation['resource_id'])
+                        plugin.on_end(reservation['resource_id'])
                     except (db_ex.ClimateDBException, RuntimeError):
                         LOG.exception("Failed to delete a reservation "
                                       "for a lease.")
@@ -59,8 +59,7 @@ class Notifier(object):
     """

     def send_lease_notification(self, context, lease, notification):
-        """Sends lease notification
-        """
+        """Sends lease notification."""
         self._notify(context, 'info', notification, lease)

     def _notify(self, context, level, event_type, payload):
@@ -16,6 +16,7 @@
 import abc

 from oslo.config import cfg
+import six

 from climate.db import api as db_api
 from climate.openstack.common import log as logging
@@ -24,8 +25,8 @@ LOG = logging.getLogger(__name__)
 CONF = cfg.CONF


+@six.add_metaclass(abc.ABCMeta)
 class BasePlugin(object):
-    __metaclass__ = abc.ABCMeta

     resource_type = 'none'
     title = None
@@ -16,10 +16,10 @@

 import datetime
 import json
-import six
 import uuid

 from oslo.config import cfg
+import six

 from climate.db import api as db_api
 from climate.db import exceptions as db_ex
@@ -129,9 +129,9 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
                 ).__dict__['running_vms'] > 0):
                     raise manager_ex.NotEnoughHostsAvailable()
             if allocations:
-                host_reservation = \
+                host_reservation = (
                     db_api.host_reservation_get_by_reservation_id(
-                        reservation_id)
+                        reservation_id))
                 host_ids = self._matching_hosts(
                     host_reservation['hypervisor_properties'],
                     host_reservation['resource_properties'],
@@ -189,12 +189,12 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
                         allocation['compute_host_id'])
                 ).__dict__['running_vms'] == 0:
                     pool.delete(reservation['resource_id'])
-                    #TODO(frossigneux) Kill, migrate, or increase fees...
+                    # TODO(frossigneux) Kill, migrate, or increase fees...

     def _get_extra_capabilities(self, host_id):
         extra_capabilities = {}
-        raw_extra_capabilities = \
-            db_api.host_extra_capability_get_all_per_host(host_id)
+        raw_extra_capabilities = (
+            db_api.host_extra_capability_get_all_per_host(host_id))
         for capability in raw_extra_capabilities:
             key = capability['capability_name']
             extra_capabilities[key] = capability['capability_value']
@@ -256,7 +256,7 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
             host_details.update({'trust_id': trust_id})
             host = db_api.host_create(host_details)
         except db_ex.ClimateDBException:
-            #We need to rollback
+            # We need to rollback
             # TODO(sbauza): Investigate use of Taskflow for atomic
             # transactions
             pool.remove_computehost(self.freepool_name,
@@ -375,9 +375,10 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
         return []

     def _convert_requirements(self, requirements):
-        """Convert the requirements to an array of strings.
-        ["key op value", "key op value", ...]
+        """Convert the requirements to an array of strings
+
+        Convert the requirements to an array of strings.
+        ["key op value", "key op value", ...]
         """
         # TODO(frossigneux) Support the "or" operator
         # Convert text to json
@@ -72,7 +72,7 @@ class ReservationPool(nova.NovaClientWrapper):
         except nova_exceptions.NotFound:
             aggregate = None
         else:
-            #FIXME(scroiset): can't get an aggregate by name
+            # FIXME(scroiset): can't get an aggregate by name
             # so iter over all aggregate and check for the good one
             all_aggregates = self.nova.aggregates.list()
             for agg in all_aggregates:
@@ -16,6 +16,7 @@
 """Actions and states for Climate objects."""

 import abc

+import six

 from climate.db import api as db_api
@@ -14,11 +14,10 @@
 # limitations under the License.

 """Base classes for API tests."""
-import six
-
 from oslo.config import cfg
 import pecan
 import pecan.testing
+import six

 from climate.api import context as api_context
 from climate.api.v2 import app
@@ -31,8 +30,7 @@ PATH_PREFIX = '/v2'


 class APITest(tests.TestCase):
-    """Used for unittests tests of Pecan controllers.
-    """
+    """Used for unittests tests of Pecan controllers."""

     # SOURCE_DATA = {'test_source': {'somekey': '666'}}

@@ -25,11 +25,11 @@ class RPCApiTestCase(tests.TestCase):
         self.fake_list = []
         self.fake_computehost = {}

-        self.patch(self.s_api.API, "get_computehosts").\
-            return_value = self.fake_list
+        fake_get_computehosts = self.patch(self.s_api.API, "get_computehosts")
+        fake_get_computehosts.return_value = self.fake_list
         self.patch(self.s_api.API, "create_computehost").return_value = True
-        self.patch(self.s_api.API, "get_computehost").\
-            return_value = self.fake_computehost
+        fake_get_computehost = self.patch(self.s_api.API, "get_computehost")
+        fake_get_computehost.return_value = self.fake_computehost
         self.patch(self.s_api.API, "update_computehost").return_value = True
         self.patch(self.s_api.API, "delete_computehost").return_value = True

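The test fix stores the mock in a local name and sets return_value in a second statement, instead of a backslash-continued attribute assignment. The same pattern with the mock library directly (self.patch() in these tests is a project helper around it; the API class here is illustrative):

    import mock


    class API(object):
        def get_computehosts(self):
            return ['real-host']


    # Patch the method on the class, keep the mock in a named variable,
    # then configure it in a separate, readable statement.
    fake_get_computehosts = mock.patch.object(API, 'get_computehosts').start()
    fake_get_computehosts.return_value = []
    assert API().get_computehosts() == []
    mock.patch.stopall()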
@@ -15,9 +15,10 @@
 """
 Tests for API /os-hosts/ methods
 """
-import six
 import uuid

+import six
+
 from climate.tests import api
 from climate.utils import trusts
@@ -15,9 +15,10 @@
 """
 Tests for API /leases/ methods
 """
-import six
 import uuid

+import six
+
 from climate.tests import api
 from climate.utils import trusts
@@ -36,11 +36,9 @@ import sqlalchemy
 import sqlalchemy.exc

 import climate.db.migration
-from climate import tests
-
 from climate.openstack.common import lockutils
 from climate.openstack.common import log as logging
-
+from climate import tests

 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
@@ -49,7 +47,9 @@ synchronized = lockutils.synchronized_with_prefix('climate-')


 def _get_connect_string(backend, user, passwd, database):
-    """Try to get a connection with a very specific set of values, if we get
+    """Establish connection
+
+    Try to get a connection with a very specific set of values, if we get
     these then we'll run the tests, otherwise they are skipped
     """
     if backend == "postgres":
@@ -120,7 +120,9 @@ def get_pgsql_connection_info(conn_pieces):


 class CommonTestsMixIn(object):
-    """BaseMigrationTestCase is effectively an abstract class,
+    """Reasons to create Mixin
+
+    BaseMigrationTestCase is effectively an abstract class,
     meant to be derived from and not directly tested against; that's why these
     `test_` methods need to be on a Mixin, so that they won't be picked up
     as valid tests for BaseMigrationTestCase.
@@ -135,7 +137,9 @@ class CommonTestsMixIn(object):
         self._test_mysql_opportunistically()

     def test_mysql_connect_fail(self):
-        """Test that we can trigger a mysql connection failure and we fail
+        """MySQL graceful fail check
+
+        Test that we can trigger a mysql connection failure and we fail
         gracefully to ensure we don't break people without mysql
         """
         if _is_backend_avail('mysql', "openstack_cifail", self.PASSWD,
@@ -146,7 +150,9 @@ class CommonTestsMixIn(object):
         self._test_postgresql_opportunistically()

     def test_postgresql_connect_fail(self):
-        """Test that we can trigger a postgres connection failure and we fail
+        """PostgreSQL graceful fail check
+
+        Test that we can trigger a postgres connection failure and we fail
         gracefully to ensure we don't break people without postgres
         """
         if _is_backend_avail('postgres', "openstack_cifail", self.PASSWD,
@@ -155,7 +161,9 @@ class CommonTestsMixIn(object):


 class BaseMigrationTestCase(tests.TestCase):
-    """Base class for testing migrations and migration utils. This sets up
+    """Base class for migrations
+
+    Base class for testing migrations and migration utils. This sets up
     and configures the databases to run tests against.
     """

@@ -241,8 +249,8 @@ class BaseMigrationTestCase(tests.TestCase):

     @synchronized('pgadmin', external=True, lock_path='/tmp')
     def _reset_pg(self, conn_pieces):
-        (user, password, database, host) = \
-            get_pgsql_connection_info(conn_pieces)
+        (user, password, database, host) = (
+            get_pgsql_connection_info(conn_pieces))
         os.environ['PGPASSWORD'] = password
         os.environ['PGUSER'] = user
         # note(boris-42): We must create and drop database, we can't
@@ -268,8 +276,8 @@ class BaseMigrationTestCase(tests.TestCase):
         # We can execute the MySQL client to destroy and re-create
         # the MYSQL database, which is easier and less error-prone
         # than using SQLAlchemy to do this via MetaData...trust me.
-        (user, password, database, host) = \
-            get_mysql_connection_info(conn_pieces)
+        (user, password, database, host) = (
+            get_mysql_connection_info(conn_pieces))
         sql = ("drop database if exists %(database)s; "
                "create database %(database)s;" % {'database': database})
         cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s -e \"%(sql)s\""
@@ -298,24 +306,24 @@ class BaseMigrationTestCase(tests.TestCase):
         conn_pieces = urlparse.urlparse(conn_string)

         if conn_string.startswith('mysql'):
-            (user, password, database, host) = \
-                get_mysql_connection_info(conn_pieces)
+            (user, password, database, host) = (
+                get_mysql_connection_info(conn_pieces))
             sql = "create database if not exists %s;" % database
             cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
                    "-e \"%(sql)s\"" % {'user': user, 'password': password,
                                        'host': host, 'sql': sql})
             self.execute_cmd(cmd)
         elif conn_string.startswith('postgresql'):
-            (user, password, database, host) = \
-                get_pgsql_connection_info(conn_pieces)
+            (user, password, database, host) = (
+                get_pgsql_connection_info(conn_pieces))
             os.environ['PGPASSWORD'] = password
             os.environ['PGUSER'] = user

             sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
                       " '%(sql)s' -d template1 -A -t")

-            sql = ("select count(*) from pg_database WHERE datname = '%s'") \
-                % database
+            sql = (("select count(*) from pg_database WHERE datname = '%s'")
+                   % database)

             check_database = sqlcmd % {'user': user, 'host': host, 'sql': sql}
             process = subprocess.Popen(check_database, shell=True,
@@ -351,7 +359,9 @@ class BaseMigrationTestCase(tests.TestCase):


 class BaseWalkMigrationTestCase(BaseMigrationTestCase):
-    """BaseWalkMigrationTestCase loads in an alternative set of databases for
+    """BaseWalkMigrationTestCase description
+
+    BaseWalkMigrationTestCase loads in an alternative set of databases for
     testing against. This is necessary as the default databases can run tests
     concurrently without interfering with itself. It is expected that
     databases listed under [migration_dbs] in the configuration are only being
@@ -387,7 +397,9 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
         self._create_databases()

     def _configure(self, engine):
-        """For each type of repository we should do some of configure steps.
+        """Configure description
+
+        For each type of repository we should do some of configure steps.
         For migrate_repo we should set under version control our database.
         For alembic we should configure database settings. For this goal we
         should use oslo.config and openstack.commom.db.sqlalchemy.session with
@@ -403,8 +415,8 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
         # automatically in tearDown so no need to clean it up here.
         connect_string = _get_connect_string(
             "mysql", self.USER, self.PASSWD, self.DATABASE)
-        (user, password, database, host) = \
-            get_mysql_connection_info(urlparse.urlparse(connect_string))
+        (user, password, database, host) = (
+            get_mysql_connection_info(urlparse.urlparse(connect_string)))
         engine = sqlalchemy.create_engine(connect_string)
         self.engines[database] = engine
         self.test_databases[database] = connect_string
@@ -435,8 +447,8 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
         connect_string = _get_connect_string(
             "postgres", self.USER, self.PASSWD, self.DATABASE)
         engine = sqlalchemy.create_engine(connect_string)
-        (user, password, database, host) = \
-            get_mysql_connection_info(urlparse.urlparse(connect_string))
+        (user, password, database, host) = (
+            get_mysql_connection_info(urlparse.urlparse(connect_string)))
         self.engines[database] = engine
         self.test_databases[database] = connect_string

@@ -447,7 +459,9 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
         del(self.test_databases[database])

     def _alembic_command(self, alembic_command, engine, *args, **kwargs):
-        """Most of alembic command return data into output.
+        """Alembic command redefine reasons
+
+        Most of alembic command return data into output.
         We should redefine this setting for getting info.
         """
         self.ALEMBIC_CONFIG.stdout = buf = io.StringIO()
@@ -458,7 +472,9 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
         return res

     def _get_alembic_versions(self, engine):
-        """For support of full testing of migrations
+        """Return list of versions in historical order
+
+        For support of full testing of migrations
         we should have an opportunity to run command step by step for each
         version in repo. This method returns list of alembic_versions by
         historical order.
@@ -473,7 +489,9 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
         return alembic_history

     def _up_and_down_versions(self, engine):
-        """Since alembic version has a random algorithm of generation
+        """Store versions
+
+        Since alembic version has a random algorithm of generation
         (SA-migrate has an ordered autoincrement naming) we should store
         a tuple of versions (version for upgrade and version for downgrade)
         for successful testing of migrations in up>down>up mode.
@@ -515,7 +533,9 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
             self._migrate_down(engine, ver_down, next_version=ver_up)

     def _get_version_from_db(self, engine):
-        """For each type of migrate repo latest version from db
+        """Return latest version
+
+        For each type of migrate repo latest version from db
         will be returned.
         """
         conn = engine.connect()
@@ -527,7 +547,9 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
         return version

     def _migrate(self, engine, version, cmd):
-        """Base method for manipulation with migrate repo.
+        """Upgrad/downgrade actual database
+
+        Base method for manipulation with migrate repo.
         It will upgrade or downgrade the actual database.
         """

@@ -37,9 +37,8 @@ postgres=# create database openstack_citest with owner openstack_citest;

 """

-import sqlalchemy
-
 from oslo.config import cfg
+import sqlalchemy

 from climate.tests.db import migration

|
@ -18,13 +18,11 @@ import operator
|
||||
import uuid
|
||||
|
||||
from climate.db import exceptions as db_exceptions
|
||||
|
||||
from climate.db.sqlalchemy import api as db_api
|
||||
from climate.db.sqlalchemy import models
|
||||
from climate import tests
|
||||
|
||||
from climate.plugins import instances as vm_plugin
|
||||
from climate.plugins import oshosts as host_plugin
|
||||
from climate import tests
|
||||
|
||||
|
||||
def _get_fake_random_uuid():
|
||||
@@ -171,7 +169,7 @@ def _get_fake_cpu_info():
         'model': 'Westmere',
         'arch': 'x86_64',
         'features': ['rdtscp', 'pdpe1gb', 'hypervisor', 'vmx', 'ss',
-            'vme'],
+                     'vme'],
         'topology': {'cores': 1, 'threads': 1, 'sockets': 2}})


@@ -215,7 +213,9 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
         self.assertEqual([lease.to_dict()], [l.to_dict() for l in query.all()])

     def test_create_virt_lease(self):
-        """Create a virtual lease and verify that all tables have been
+        """Check virtual lease create
+
+        Create a virtual lease and verify that all tables have been
         populated.
         """

@@ -226,7 +226,9 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
         self.assertEqual(1, len(db_api.reservation_get_all()))

     def test_create_phys_lease(self):
-        """Create a physical lease and verify that all tables have been
+        """Check physical lease create
+
+        Create a physical lease and verify that all tables have been
         populated.
         """

@@ -237,8 +239,7 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
         self.assertEqual(1, len(db_api.reservation_get_all()))

     def test_create_duplicate_leases(self):
-        """Create two leases with same names, and checks it raises an error.
-        """
+        """Create two leases with same names, and checks it raises an error."""

         db_api.lease_create(_get_fake_phys_lease_values())
         self.assertRaises(db_exceptions.ClimateDBDuplicateEntry,
@@ -246,7 +247,9 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
                           _get_fake_phys_lease_values())

     def test_create_leases_with_duplicated_reservation(self):
-        """Create two leases with a duplicated reservation,
+        """Check duplicated reservation create
+
+        Create two leases with a duplicated reservation,
         and checks it raises an error.
         """
         lease_values = _get_fake_phys_lease_values()
@@ -261,7 +264,9 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
                           lease_values)

     def test_create_leases_with_duplicated_event(self):
-        """Create two leases with a duplicated event,
+        """Check duplicated event create
+
+        Create two leases with a duplicated event,
         and checks it raises an error.
         """
         lease_values = _get_fake_phys_lease_values()
@@ -351,7 +356,9 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
     # Reservations

     def test_create_reservation(self):
-        """Create a reservation and verify that all tables have been
+        """Create and verify reservation
+
+        Create a reservation and verify that all tables have been
         populated.
         """

@@ -361,7 +368,9 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
                          ['lease_id'])

     def test_reservation_get_all_by_values(self):
-        """Create two reservations and verify that we can find reservation per
+        """Create 2 reservations and check find abilities
+
+        Create two reservations and verify that we can find reservation per
         resource_id or resource_type.
         """
         db_api.reservation_create(_get_fake_phys_reservation_values())
@@ -386,7 +395,9 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
                           db_api.reservation_destroy, '1')

     def test_create_duplicate_reservation(self):
-        """Create a reservation and verify that an exception is raised if a
+        """Create duplicated reservation
+
+        Create a reservation and verify that an exception is raised if a
         duplicated reservation is created.
         """
         uuid = _get_fake_random_uuid()
@@ -398,7 +409,9 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
     # Host reservations

     def test_create_host_reservation(self):
-        """Create a host reservation and verify that all tables
+        """Create host reservation
+
+        Create a host reservation and verify that all tables
         have been populated.
         """

@@ -409,7 +422,9 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
                          ['id'])

     def test_create_duplicate_host_reservation(self):
-        """Create a duplicated host reservation and verify that an exception is
+        """Create duplicated host reservation
+
+        Create a duplicated host reservation and verify that an exception is
         raised.
         """

@@ -420,7 +435,9 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
                           _get_fake_host_reservation_values(id='1'))

     def test_delete_host_reservation(self):
-        """Check all deletion cases for host reservation,
+        """Check deletion for host reservation
+
+        Check all deletion cases for host reservation,
         including cascade deletion from reservations table.
         """

@@ -462,15 +479,12 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
         self.assertEqual('updated', res['resource_properties'])

     def test_create_host(self):
-        """Create a host and verify that all tables
-        have been populated.
-        """
+        """Create a host and verify that all tables have been populated."""
         result = db_api.host_create(_get_fake_host_values(id='1'))
         self.assertEqual(result['id'], _get_fake_host_values(id='1')['id'])

     def test_create_duplicated_host(self):
-        """Create a duplicated host and verify that an exception is raised.
-        """
+        """Create a duplicated host and verify that an exception is raised."""
         db_api.host_create(_get_fake_host_values(id='1'))
         # Making sure we still raise a DuplicateDBEntry
         self.assertRaises(db_exceptions.ClimateDBDuplicateEntry,
@@ -478,7 +492,10 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
                           _get_fake_host_values(id='1'))

     def test_search_for_hosts_by_ram(self):
-        """Create two hosts and check that we can find a host per its RAM info.
+        """Check RAM info search
+
+        Create two hosts and check that we can find a host per its RAM
+        info.
         """
         db_api.host_create(_get_fake_host_values(id=1, mem=2048))
         db_api.host_create(_get_fake_host_values(id=2, mem=4096))
@@ -808,7 +825,7 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
         self.assertEqual(2, len(filtered_events))
         self.assertEqual(fake_event_type, filtered_events[0].event_type)

-        #testing sort
+        # testing sort
         self.assertTrue(is_result_sorted_correctly(filtered_events,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir))
@@ -834,7 +851,7 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
         self.assertEqual(1, len(filtered_events))
         self.assertEqual(fake_status, filtered_events[0].status)

-        #testing sort
+        # testing sort
         self.assertTrue(is_result_sorted_correctly(filtered_events,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir))
@@ -861,7 +878,7 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
         self.assertEqual(1, len(filtered_events))
         self.assertEqual(fake_lease_id, filtered_events[0].lease_id)

-        #testing sort
+        # testing sort
         self.assertTrue(is_result_sorted_correctly(filtered_events,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir))
@@ -888,7 +905,7 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
         self.assertEqual(2, len(filtered_events))
         self.assertEqual(fake_event_type, filtered_events[0].event_type)

-        #testing sort
+        # testing sort
         self.assertTrue(is_result_sorted_correctly(filtered_events,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir))
@@ -914,7 +931,7 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
         self.assertEqual(1, len(filtered_events))
         self.assertEqual(fake_status, filtered_events[0].status)

-        #testing sort
+        # testing sort
         self.assertTrue(is_result_sorted_correctly(filtered_events,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir))
@@ -941,7 +958,7 @@ class SQLAlchemyDBApiTestCase(tests.DBTestCase):
         self.assertEqual(1, len(filtered_events))
         self.assertEqual(fake_lease_id, filtered_events[0].lease_id)

-        #testing sort
+        # testing sort
         self.assertTrue(is_result_sorted_correctly(filtered_events,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir))
@@ -14,6 +14,7 @@
 # limitations under the License.

+import datetime

 import eventlet
 import mock
 from oslo.config import cfg
@@ -110,8 +111,6 @@ class ServiceTestCase(tests.TestCase):
             'start_date': datetime.datetime(2013, 12, 20, 13, 00),
             'end_date': datetime.datetime(2013, 12, 20, 15, 00),
             'trust_id': 'exxee111qwwwwe'}
-        #'start_date': 'now',
-        #'end_date': '2026-12-13 13:13'}
         self.good_date = datetime.datetime.strptime('2012-12-13 13:13',
                                                     '%Y-%m-%d %H:%M')

@@ -127,10 +126,10 @@ class ServiceTestCase(tests.TestCase):
         self.reservation_update = self.patch(self.db_api, 'reservation_update')
         self.event_update = self.patch(self.db_api, 'event_update')
         self.manager.plugins = {'virtual:instance': self.fake_plugin}
-        self.manager.resource_actions = \
+        self.manager.resource_actions = (
             {'virtual:instance':
              {'on_start': self.fake_plugin.on_start,
-              'on_end': self.fake_plugin.on_end}}
+              'on_end': self.fake_plugin.on_end}})
         self.patch(
             self.base_utils, 'url_for').return_value = 'http://www.foo.fake'

@@ -142,8 +141,8 @@ class ServiceTestCase(tests.TestCase):
         super(ServiceTestCase, self).tearDown()

     def test_start(self):
-        #NOTE(starodubcevna): it's useless to test start() now, but may be in
-        #future it become useful
+        # NOTE(starodubcevna): it's useless to test start() now, but may be in
+        # future it become useful
         pass

     def test_multiple_plugins_same_resource_type(self):
@@ -237,8 +236,8 @@ class ServiceTestCase(tests.TestCase):

     @testtools.skip('incorrect decorator')
     def test_list_leases(self):
-        #NOTE(starodubcevna): This func works incorrect, and we need to skip
-        #it. It'll be fix in coming soon patches
+        # NOTE(starodubcevna): This func works incorrect, and we need to skip
+        # it. It'll be fix in coming soon patches
         self.manager.list_leases()

         self.lease_list.assert_called_once_with()
@@ -564,8 +563,8 @@ class ServiceTestCase(tests.TestCase):
             'start_date': '2015-12-01 20:00',
             'end_date': '2015-12-01 22:00'
         }
-        reservation_get_all = \
-            self.patch(self.db_api, 'reservation_get_all_by_lease_id')
+        reservation_get_all = (
+            self.patch(self.db_api, 'reservation_get_all_by_lease_id'))
         reservation_get_all.return_value = [
             {
                 'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
@@ -614,8 +613,8 @@ class ServiceTestCase(tests.TestCase):
             'name': 'renamed',
             'end_date': '2013-12-20 16:00'
         }
-        reservation_get_all = \
-            self.patch(self.db_api, 'reservation_get_all_by_lease_id')
+        reservation_get_all = (
+            self.patch(self.db_api, 'reservation_get_all_by_lease_id'))
         reservation_get_all.return_value = [
             {
                 'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
@@ -665,8 +664,8 @@ class ServiceTestCase(tests.TestCase):
             'name': 'renamed',
             'end_date': '2013-12-20 16:00'
         }
-        reservation_get_all = \
-            self.patch(self.db_api, 'reservation_get_all_by_lease_id')
+        reservation_get_all = (
+            self.patch(self.db_api, 'reservation_get_all_by_lease_id'))
         reservation_get_all.return_value = [
             {
                 'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
@@ -731,8 +730,8 @@ class ServiceTestCase(tests.TestCase):
             'end_date': '2013-12-20 16:00',
             'before_end_notification': before_end_notification
         }
-        reservation_get_all = \
-            self.patch(self.db_api, 'reservation_get_all_by_lease_id')
+        reservation_get_all = (
+            self.patch(self.db_api, 'reservation_get_all_by_lease_id'))
         reservation_get_all.return_value = [
             {
                 'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
@@ -802,8 +801,8 @@ class ServiceTestCase(tests.TestCase):
             'end_date': '2013-12-20 16:00',
             'before_end_notification': before_end_notification
         }
-        reservation_get_all =\
-            self.patch(self.db_api, 'reservation_get_all_by_lease_id')
+        reservation_get_all = (
+            self.patch(self.db_api, 'reservation_get_all_by_lease_id'))
         reservation_get_all.return_value = [
             {
                 'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
@@ -842,8 +841,8 @@ class ServiceTestCase(tests.TestCase):
             'end_date': '2013-12-20 16:00',
             'before_end_notification': before_end_notification
         }
-        reservation_get_all = \
-            self.patch(self.db_api, 'reservation_get_all_by_lease_id')
+        reservation_get_all = (
+            self.patch(self.db_api, 'reservation_get_all_by_lease_id'))
         reservation_get_all.return_value = [
             {
                 'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
@@ -881,8 +880,8 @@ class ServiceTestCase(tests.TestCase):
             'end_date': '2013-12-20 16:00',
             'before_end_notification': wrong_before_end_notification
         }
-        reservation_get_all = \
-            self.patch(self.db_api, 'reservation_get_all_by_lease_id')
+        reservation_get_all = (
+            self.patch(self.db_api, 'reservation_get_all_by_lease_id'))
         reservation_get_all.return_value = [
             {
                 'id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
@@ -986,8 +985,8 @@ class ServiceTestCase(tests.TestCase):
         events = self.patch(self.db_api, 'event_get_first_sorted_by_filters')
         events.side_effect = fake_event_get
         event_update = self.patch(self.db_api, 'event_update')
-        reservation_get_all = \
-            self.patch(self.db_api, 'reservation_get_all_by_lease_id')
+        reservation_get_all = (
+            self.patch(self.db_api, 'reservation_get_all_by_lease_id'))
         reservation_get_all.return_value = []

         lease_values = {
@ -1008,8 +1007,8 @@ class ServiceTestCase(tests.TestCase):
|
||||
{'time': datetime.datetime(2013, 12, 20, 13, 0)})
|
||||
|
||||
def test_delete_lease_before_starting_date(self):
|
||||
self.patch(self.manager, 'get_lease').\
|
||||
return_value = self.lease
|
||||
fake_get_lease = self.patch(self.manager, 'get_lease')
|
||||
fake_get_lease.return_value = self.lease
|
||||
|
||||
target = datetime.datetime(2013, 12, 20, 12, 00)
|
||||
with mock.patch.object(datetime,
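Review note: the old form chained the attribute assignment onto a backslash continuation; the new form binds the patched mock to a name first, then configures it. A minimal runnable sketch of the pattern, with a plain mock.Mock standing in for the test helper returned by self.patch:

import mock

manager = mock.Mock()
lease = {'id': '7'}

# Bind the mock once, then configure it; no continuation needed.
fake_get_lease = manager.get_lease
fake_get_lease.return_value = lease

assert manager.get_lease() == lease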
@ -1022,8 +1021,8 @@ class ServiceTestCase(tests.TestCase):
        self.lease_destroy.assert_called_once_with(self.lease_id)

    def test_delete_lease_after_ending_date(self):
        self.patch(self.manager, 'get_lease').\
            return_value = self.lease
        fake_get_lease = self.patch(self.manager, 'get_lease')
        fake_get_lease.return_value = self.lease

        target = datetime.datetime(2013, 12, 20, 16, 00)
        with mock.patch.object(datetime,
@ -1040,8 +1039,8 @@ class ServiceTestCase(tests.TestCase):
                                      'lease.delete')

    def test_delete_lease_after_starting_date(self):
        self.patch(self.manager, 'get_lease').\
            return_value = self.lease
        fake_get_lease = self.patch(self.manager, 'get_lease')
        fake_get_lease.return_value = self.lease

        target = datetime.datetime(2013, 12, 20, 13, 30)
        with mock.patch.object(datetime,
@ -1096,10 +1095,10 @@ class ServiceTestCase(tests.TestCase):
        def raiseClimateException(resource_id):
            raise exceptions.ClimateException(resource_id)

        self.manager.resource_actions = \
        self.manager.resource_actions = (
            {'virtual:instance':
                {'on_start': self.fake_plugin.on_start,
                 'on_end': raiseClimateException}}
                 'on_end': raiseClimateException}})

        self.patch(self.manager, 'get_lease').return_value = self.lease

@ -1108,8 +1107,8 @@ class ServiceTestCase(tests.TestCase):
        self.event_update.assert_called_once_with('1', {'status': 'DONE'})

    def test_getattr_with_correct_plugin_and_method(self):
        self.fake_list_computehosts = \
            self.patch(self.fake_phys_plugin, 'list_computehosts')
        self.fake_list_computehosts = (
            self.patch(self.fake_phys_plugin, 'list_computehosts'))
        self.fake_list_computehosts.return_value = 'foo'

        self.manager.plugins = {'physical:host': self.fake_phys_plugin}
@ -1117,8 +1116,8 @@ class ServiceTestCase(tests.TestCase):
                             'physical:host:list_computehosts')())

    def test_getattr_with_incorrect_method_name(self):
        self.fake_list_computehosts = \
            self.patch(self.fake_phys_plugin, 'list_computehosts')
        self.fake_list_computehosts = (
            self.patch(self.fake_phys_plugin, 'list_computehosts'))
        self.fake_list_computehosts.return_value = 'foo'

        self.manager.plugins = {'physical:host': self.fake_phys_plugin}
@ -1126,8 +1125,8 @@ class ServiceTestCase(tests.TestCase):
                          'simplefakecallwithValueError')

    def test_getattr_with_missing_plugin(self):
        self.fake_list_computehosts = \
            self.patch(self.fake_phys_plugin, 'list_computehosts')
        self.fake_list_computehosts = (
            self.patch(self.fake_phys_plugin, 'list_computehosts'))
        self.fake_list_computehosts.return_value = 'foo'

        self.manager.plugins = {'physical:host': self.fake_phys_plugin}
@ -1135,8 +1134,8 @@ class ServiceTestCase(tests.TestCase):
                          self.manager, 'plugin:not_present:list_computehosts')

    def test_getattr_with_missing_method_in_plugin(self):
        self.fake_list_computehosts = \
            self.patch(self.fake_phys_plugin, 'list_computehosts')
        self.fake_list_computehosts = (
            self.patch(self.fake_phys_plugin, 'list_computehosts'))
        self.fake_list_computehosts.return_value = 'foo'

        self.manager.plugins = {'physical:host': None}
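Review note: nearly every hunk above makes the same mechanical change, swapping a trailing backslash for a parenthesized expression. PEP 8 prefers parentheses for line continuation, and the hacking checks flag the backslash form, partly because one stray space after the backslash is a silent syntax error. The shape of the change in a runnable sketch:

# Before: explicit backslash continuation (now discouraged).
catalog = \
    {'type': 'fake_service'}

# After: implicit continuation inside parentheses.
catalog = (
    {'type': 'fake_service'})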
@ -16,6 +16,7 @@
import sys

import eventlet
from novaclient import exceptions as nova_exceptions
import testtools

from climate import exceptions as climate_exceptions
@ -24,8 +25,6 @@ from climate.plugins.instances import vm_plugin
from climate import tests
from climate.utils.openstack import nova

from novaclient import exceptions as nova_exceptions


class VMPluginTestCase(tests.TestCase):
    def setUp(self):
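Review note: the import hunks in this file and the next one pull the novaclient imports out of the project block. OpenStack's hacking rules want imports grouped as standard library, then third party, then the project itself, each group alphabetized and separated by a blank line (H306 and neighbours; the exact rule numbers depend on the hacking release). The resulting layout:

import sys                                             # standard library

import eventlet                                        # third party
from novaclient import exceptions as nova_exceptions
import testtools

from climate import exceptions as climate_exceptions   # this project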
@ -58,25 +57,22 @@ class VMPluginTestCase(tests.TestCase):
        self.plugin.on_start(self.fake_id)

    def test_on_end_create_image_ok(self):
        self.patch(self.plugin, '_split_actions').return_value =\
            ['create_image']
        self.patch(self.plugin, '_check_active').return_value =\
            True
        self.patch(self.plugin, '_split_actions').return_value = (
            ['create_image'])
        self.patch(self.plugin, '_check_active').return_value = True

        self.plugin.on_end(self.fake_id)

        self.nova_wrapper.servers.create_image.assert_called_once_with('1')

    def test_on_end_suspend_ok(self):
        self.patch(self.plugin, '_split_actions').return_value =\
            ['suspend']
        self.patch(self.plugin, '_split_actions').return_value = ['suspend']

        self.plugin.on_end(self.fake_id)
        self.nova_wrapper.servers.suspend.assert_called_once_with('1')

    def test_on_end_delete_ok(self):
        self.patch(self.plugin, '_split_actions').return_value =\
            ['delete']
        self.patch(self.plugin, '_split_actions').return_value = ['delete']

        self.plugin.on_end(self.fake_id)
        self.nova_wrapper.servers.delete.assert_called_once_with('1')
@ -101,8 +97,8 @@ class VMPluginTestCase(tests.TestCase):

    @testtools.skip('Will be released later')
    def test_on_end_timeout(self):
        self.patch(self.plugin, '_split_actions').return_value =\
            ['create_image']
        self.patch(self.plugin, '_split_actions').return_value = (
            ['create_image'])
        self.assertRaises(self.exc.Timeout,
                          self.plugin.on_end,
                          self.fake_id)
@ -15,6 +15,8 @@

import uuid as uuidgen

from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from oslo.config import cfg

from climate import context
@ -24,8 +26,6 @@ from climate.plugins.oshosts import reservation_pool as rp
from climate import tests
from climate.utils.openstack import base
from climate.utils.openstack import nova
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions


class AggregateFake(object):
@ -76,8 +76,8 @@ class ReservationPoolTestCase(tests.TestCase):
            else:
                return self.fake_aggregate

        self.patch(self.pool, 'get_aggregate_from_name_or_id')\
            .side_effect = get_fake_aggregate
        patched_pool = self.patch(self.pool, 'get_aggregate_from_name_or_id')
        patched_pool.side_effect = get_fake_aggregate

    def test_get_aggregate_from_name_or_id(self):
        def fake_aggregate_get(id):
@ -105,24 +105,24 @@ class ReservationPoolTestCase(tests.TestCase):
        self.assertEqual('foo', rp.ReservationPool._generate_aggregate_name())

    def test_create(self):
        self.patch(self.nova.aggregates, 'create').return_value = \
            self.fake_aggregate
        self.patch(self.nova.aggregates, 'create').return_value = (
            self.fake_aggregate)

        agg = self.pool.create()

        self.assertEqual(agg, self.fake_aggregate)

        az_name = self.climate_az_prefix + self.pool_name
        self.nova.aggregates.create\
            .assert_called_once_with(self.pool_name, az_name)
        check0 = self.nova.aggregates.create
        check0.assert_called_once_with(self.pool_name, az_name)

        meta = {self.climate_owner: self.project_id}
        self.nova.aggregates.set_metadata\
            .assert_called_once_with(self.fake_aggregate, meta)
        check1 = self.nova.aggregates.set_metadata
        check1.assert_called_once_with(self.fake_aggregate, meta)

    def test_create_no_az(self):
        self.patch(self.nova.aggregates, 'create').return_value = \
            self.fake_aggregate
        self.patch(self.nova.aggregates, 'create').return_value = (
            self.fake_aggregate)

        self.pool.create(az=False)

@ -130,8 +130,8 @@ class ReservationPoolTestCase(tests.TestCase):
                                                         None)

    def test_create_no_project_id(self):
        self.patch(self.nova.aggregates, 'create').return_value = \
            self.fake_aggregate
        self.patch(self.nova.aggregates, 'create').return_value = (
            self.fake_aggregate)

        self.nova_wrapper = self.patch(nova.NovaClientWrapper, 'nova')

@ -173,8 +173,8 @@ class ReservationPoolTestCase(tests.TestCase):
                raise manager_exceptions.AggregateNotFound
            else:
                return self.fake_aggregate
        self.patch(self.pool, 'get_aggregate_from_name_or_id')\
            .side_effect = get_fake_aggregate_but_no_freepool
        fake_pool = self.patch(self.pool, 'get_aggregate_from_name_or_id')
        fake_pool.side_effect = get_fake_aggregate_but_no_freepool
        agg = self.pool.get('foo')
        agg.hosts = []
        self.assertRaises(manager_exceptions.NoFreePool,
@ -194,10 +194,10 @@ class ReservationPoolTestCase(tests.TestCase):
        self._patch_get_aggregate_from_name_or_id()
        self.pool.add_computehost('pool', 'host3')

        self.nova.aggregates.add_host\
            .assert_any_call(self.fake_aggregate.id, 'host3')
        self.nova.aggregates.remove_host\
            .assert_any_call(self.fake_aggregate.id, 'host3')
        check0 = self.nova.aggregates.add_host
        check0.assert_any_call(self.fake_aggregate.id, 'host3')
        check1 = self.nova.aggregates.remove_host
        check1.assert_any_call(self.fake_aggregate.id, 'host3')

    def test_add_computehost_with_host_id(self):
        # NOTE(sbauza): Freepool.hosts only contains names of hosts, not UUIDs
@ -219,8 +219,8 @@ class ReservationPoolTestCase(tests.TestCase):
            else:
                return self.fake_aggregate

        self.patch(self.pool, 'get_aggregate_from_name_or_id')\
            .side_effect = get_fake_aggregate_but_no_freepool
        fake_pool = self.patch(self.pool, 'get_aggregate_from_name_or_id')
        fake_pool.side_effect = get_fake_aggregate_but_no_freepool

        self.assertRaises(manager_exceptions.NoFreePool,
                          self.pool.add_computehost,
@ -233,8 +233,8 @@ class ReservationPoolTestCase(tests.TestCase):
                return self.freepool_name
            else:
                raise manager_exceptions.AggregateNotFound
        self.patch(self.pool, 'get_aggregate_from_name_or_id')\
            .side_effect = get_no_aggregate_but_freepool
        fake_pool = self.patch(self.pool, 'get_aggregate_from_name_or_id')
        fake_pool.side_effect = get_no_aggregate_but_freepool
        self.assertRaises(manager_exceptions.AggregateNotFound,
                          self.pool.add_computehost,
                          'wrong_pool',
@ -243,15 +243,15 @@ class ReservationPoolTestCase(tests.TestCase):
    def test_add_computehost_to_freepool(self):
        self._patch_get_aggregate_from_name_or_id()
        self.pool.add_computehost(self.freepool_name, 'host2')
        self.nova.aggregates.add_host\
            .assert_called_once_with(self.fake_freepool.id, 'host2')
        check = self.nova.aggregates.add_host
        check.assert_called_once_with(self.fake_freepool.id, 'host2')

    def test_remove_computehost_from_freepool(self):
        self._patch_get_aggregate_from_name_or_id()
        self.pool.remove_computehost(self.freepool_name, 'host3')

        self.nova.aggregates.remove_host\
            .assert_called_once_with(self.fake_freepool.id, 'host3')
        check = self.nova.aggregates.remove_host
        check.assert_called_once_with(self.fake_freepool.id, 'host3')

    def test_remove_computehost_not_existing_from_freepool(self):
        self._patch_get_aggregate_from_name_or_id()
@ -264,8 +264,8 @@ class ReservationPoolTestCase(tests.TestCase):
        self._patch_get_aggregate_from_name_or_id()
        self.pool.remove_all_computehosts('pool')
        for host in self.fake_aggregate.hosts:
            self.nova.aggregates.remove_host\
                .assert_any_call(self.fake_aggregate.id, host)
            check = self.nova.aggregates.remove_host
            check.assert_any_call(self.fake_aggregate.id, host)

    def test_remove_computehost_with_no_freepool(self):
        def get_fake_aggregate_but_no_freepool(*args):
@ -274,8 +274,8 @@ class ReservationPoolTestCase(tests.TestCase):
            else:
                return self.fake_aggregate

        self.patch(self.pool, 'get_aggregate_from_name_or_id')\
            .side_effect = get_fake_aggregate_but_no_freepool
        fake_pool = self.patch(self.pool, 'get_aggregate_from_name_or_id')
        fake_pool.side_effect = get_fake_aggregate_but_no_freepool

        self.assertRaises(manager_exceptions.NoFreePool,
                          self.pool.remove_computehost,
@ -288,8 +288,8 @@ class ReservationPoolTestCase(tests.TestCase):
                return self.freepool_name
            else:
                raise manager_exceptions.AggregateNotFound
        self.patch(self.pool, 'get_aggregate_from_name_or_id')\
            .side_effect = get_no_aggregate_but_freepool
        fake_pool = self.patch(self.pool, 'get_aggregate_from_name_or_id')
        fake_pool.side_effect = get_no_aggregate_but_freepool
        self.assertRaises(manager_exceptions.AggregateNotFound,
                          self.pool.remove_computehost,
                          'wrong_pool',
@ -297,8 +297,8 @@ class ReservationPoolTestCase(tests.TestCase):

    def test_remove_computehost_with_wrong_hosts(self):
        self._patch_get_aggregate_from_name_or_id()
        self.nova.aggregates.remove_host.side_effect = \
            nova_exceptions.NotFound(404)
        self.nova.aggregates.remove_host.side_effect = (
            nova_exceptions.NotFound(404))
        self.assertRaises(manager_exceptions.CantRemoveHost,
                          self.pool.remove_computehost,
                          'pool',
@ -306,8 +306,8 @@ class ReservationPoolTestCase(tests.TestCase):

    def test_remove_computehosts_with_duplicate_host(self):
        self._patch_get_aggregate_from_name_or_id()
        self.nova.aggregates.add_host.side_effect = \
            nova_exceptions.Conflict(409)
        self.nova.aggregates.add_host.side_effect = (
            nova_exceptions.Conflict(409))
        self.assertRaises(manager_exceptions.CantAddHost,
                          self.pool.remove_computehost,
                          'pool',
@ -324,13 +324,13 @@ class ReservationPoolTestCase(tests.TestCase):
    def test_add_project(self):
        self._patch_get_aggregate_from_name_or_id()
        self.pool.add_project('pool', 'projectX')
        self.nova.aggregates.set_metadata\
            .assert_called_once_with(self.fake_aggregate.id,
                                     {'projectX': self.project_id_key})
        check = self.nova.aggregates.set_metadata
        check.assert_called_once_with(self.fake_aggregate.id,
                                      {'projectX': self.project_id_key})

    def test_remove_project(self):
        self._patch_get_aggregate_from_name_or_id()
        self.pool.remove_project('pool', 'projectY')
        self.nova.aggregates.set_metadata\
            .assert_called_once_with(self.fake_aggregate.id,
                                     {'projectY': None})
        check = self.nova.aggregates.set_metadata
        check.assert_called_once_with(self.fake_aggregate.id,
                                      {'projectY': None})
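Review note: the assertion rewrites above all use one idiom: bind the deep mock attribute to a short local name, then assert on that name. This removes the backslash-continued attribute access and keeps each assert on a single line. A self-contained sketch with the mock library:

import mock

nova = mock.Mock()
nova.aggregates.set_metadata('agg-1', {'projectY': None})

# Bind the attribute chain once, then assert on the short name.
check = nova.aggregates.set_metadata
check.assert_called_once_with('agg-1', {'projectY': None})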
@ -14,12 +14,13 @@
# limitations under the License.

import datetime
import mock
import testtools
import uuid

from climate import context
import mock
from novaclient import client as nova_client
import testtools

from climate import context
from climate.db import api as db_api
from climate.db import exceptions as db_exceptions
from climate.db import utils as db_utils
@ -32,7 +33,6 @@ from climate.plugins.oshosts import reservation_pool as rp
from climate import tests
from climate.utils.openstack import base
from climate.utils import trusts
from novaclient import client as nova_client


class AggregateFake(object):
@ -232,8 +232,8 @@ class PhysicalHostPluginTestCase(tests.TestCase):
        fake_request = fake_host.copy()
        self.get_extra_capabilities.return_value = {'foo': 'bar'}
        self.db_host_create.return_value = self.fake_host
        self.db_host_extra_capability_create.side_effect = \
            fake_db_host_extra_capability_create
        fake = self.db_host_extra_capability_create
        fake.side_effect = fake_db_host_extra_capability_create
        self.assertRaises(manager_exceptions.CantAddExtraCapability,
                          self.fake_phys_plugin.create_computehost,
                          fake_request)
@ -262,8 +262,8 @@ class PhysicalHostPluginTestCase(tests.TestCase):
                'capability_value': 'bar'
            },
        ]
        self.db_host_extra_capability_update.side_effect = \
            fake_db_host_extra_capability_update
        fake = self.db_host_extra_capability_update
        fake.side_effect = fake_db_host_extra_capability_update
        self.assertRaises(manager_exceptions.CantAddExtraCapability,
                          self.fake_phys_plugin.update_computehost,
                          self.fake_host_id, host_values)
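Review note: the same local-name binding works when installing a side_effect. Assigning a callable to side_effect makes the mock invoke it on every call and propagate whatever it raises. A small runnable sketch; the fake_* names here are illustrative, not from the patch:

import mock

def fake_db_create(values):
    # Stand-in for a DB layer that rejects the write.
    raise RuntimeError('duplicate extra capability')

db_create = mock.Mock()
fake = db_create
fake.side_effect = fake_db_create

try:
    db_create({'foo': 'bar'})
except RuntimeError:
    pass  # raised by the side_effect, as expected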
@ -29,50 +29,50 @@ class TestBaseStackUtils(tests.TestCase):
        self.url = 'http://%s-net.com'

    def test_url_for_good_v3(self):
        #TODO(n.s.):Can't find v3 endpoint example. Fix it later.
        # TODO(n.s.):Can't find v3 endpoint example. Fix it later.
        pass

    def test_url_for_good_v2_public(self):
        service_catalog = \
        service_catalog = (
            [{"endpoints": [{"adminURL": self.url % 'admin',
                             "region": "RegionOne",
                             "internalURL": self.url % 'internal',
                             "publicURL": self.url % 'public'}],
              "type": "fake_service",
              "name": "foo"}]
              "name": "foo"}])

        url = self.base.url_for(service_catalog, self.service_type)
        self.assertEqual(url, self.url % 'public')

    def test_url_for_good_v2_admin(self):
        service_catalog = \
        service_catalog = (
            [{"endpoints": [{"adminURL": self.url % 'admin',
                             "region": "RegionOne",
                             "internalURL": self.url % 'internal',
                             "publicURL": self.url % 'public'}],
              "type": "fake_service",
              "name": "foo"}]
              "name": "foo"}])

        url = self.base.url_for(service_catalog, self.service_type,
                                endpoint_interface='admin')
        self.assertEqual(url, self.url % 'admin')

    def test_url_for_no_service(self):
        service_catalog = \
        service_catalog = (
            [{"endpoints": [{"adminURL": self.url % 'admin',
                             "region": "RegionOne",
                             "internalURL": self.url % 'internal',
                             "publicURL": self.url % 'public'}],
              "type": "foo_service",
              "name": "foo"}]
              "name": "foo"}])

        self.assertRaises(exceptions.ServiceNotFound, self.base.url_for,
                          service_catalog, self.service_type)

    def test_url_for_no_endpoints(self):
        service_catalog = \
        service_catalog = (
            [{"type": "fake_service",
              "name": "foo"}]
              "name": "foo"}])

        self.assertRaises(exceptions.EndpointsNotFound, self.base.url_for,
                          service_catalog, self.service_type)
@ -88,5 +88,5 @@ class TestCKClient(tests.TestCase):
                                             endpoint='http://fake.com/')

    def test_getattr(self):
        #TODO(n.s.): Will be done as soon as pypi package will be updated
        # TODO(n.s.): Will be done as soon as pypi package will be updated
        pass
@ -72,5 +72,5 @@ class TestCNClient(tests.TestCase):
                                             auth_url='http://fake.com/')

    def test_getattr(self):
        #TODO(n.s.): Will be done as soon as pypi package will be updated
        # TODO(n.s.): Will be done as soon as pypi package will be updated
        pass
@ -18,7 +18,9 @@ from climate.manager import exceptions

def url_for(service_catalog, service_type, admin=False,
            endpoint_interface=None):
    """Gets url of the service to communicate through.
    """Description

    Gets url of the service to communicate through.
    service_catalog - dict contains info about specific OpenStack service
    service_type - OpenStack service type specification
    """
@ -41,7 +41,9 @@ CONF.register_opts(keystone_opts)

class ClimateKeystoneClient(object):
    def __init__(self, **kwargs):
        """Return Keystone client for defined in 'identity_service' conf.
        """Description

        Return Keystone client for defined in 'identity_service' conf.
        NOTE: We will use tenant_name until we start using keystone V3
        client for all our needs.

@ -107,8 +109,8 @@ class ClimateKeystoneClient(object):
            kwargs.pop('tenant_name')

        try:
            #NOTE(n.s.): we shall remove this try: except: clause when
            #https://review.openstack.org/#/c/66494/ will be merged
            # NOTE(n.s.): we shall remove this try: except: clause when
            # https://review.openstack.org/#/c/66494/ will be merged
            self.keystone = keystone_client.Client(**kwargs)
            self.keystone.session.auth = self.keystone
            self.keystone.authenticate(auth_url=kwargs.get('auth_url', None))
@ -41,7 +41,9 @@ CONF.import_opt('identity_service', 'climate.utils.openstack.keystone')

class ClimateNovaClient(object):
    def __init__(self, **kwargs):
        """We suppose that in future we may want to use CNC in some places
        """Description

        We suppose that in future we may want to use CNC in some places
        where context will be available, so we create 2 different ways of
        creating client from context(future) and kwargs(we use it now).

@ -88,7 +90,7 @@ class ClimateNovaClient(object):
        mgmt_url = mgmt_url or base.url_for(ctx.service_catalog,
                                            CONF.compute_service)
        if not kwargs.get('auth_url', None):
            #NOTE(scroiset): novaclient v2.17.0 support only Identity API v2.0
            # NOTE(scroiset): novaclient v2.17.0 support only Identity API v2.0
            auth_url = "%s://%s:%s/v2.0" % (CONF.os_auth_protocol,
                                            CONF.os_auth_host,
                                            CONF.os_auth_port)
@ -106,7 +108,7 @@ class ClimateNovaClient(object):
        return getattr(self.nova, name)


#todo(dbelova): remove these lines after novaclient 2.16.0 will be released
# TODO(dbelova): remove these lines after novaclient 2.16.0 will be released
class ClimateServer(servers.Server):
    def unshelve(self):
        """Unshelve -- Unshelve the server."""
@ -14,6 +14,7 @@
# limitations under the License.

import functools

from oslo.config import cfg

from climate import context
@ -73,7 +74,9 @@ def create_ctx_from_trust(trust_id):


def use_trust_auth():
    """This decorator creates a keystone trust, and adds the trust_id to the
    """Decorator creates a keystone trust

    This decorator creates a keystone trust, and adds the trust_id to the
    parameter of the decorated method.
    """
    def decorator(func):
@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import climateclient.client

from tempest import clients as manager
from tempest import config_resource_reservation as config
from tempest.openstack.common import log as logging
@ -31,8 +30,8 @@ class ResourceReservationManager(manager.OfficialClientManager):
        self.interface = None
        # super cares for credentials validation
        super(ResourceReservationManager, self).__init__(credentials)
        self.resource_reservation_client = \
            self._get_resource_reservation_client()
        self.resource_reservation_client = (
            self._get_resource_reservation_client())

    def _get_resource_reservation_client(self):
        climate_url = self.identity_client.service_catalog.url_for(
@ -14,9 +14,9 @@
# under the License.

import datetime
import dateutil.parser
import json

import dateutil.parser
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# same as the one at climate/manager/service
LEASE_DATE_FORMAT = "%Y-%m-%d %H:%M"
LEASE_MIN_DURATION = 2
#TODO(cmart): LEASE_IMAGE_PREFIX should be extracted from CONF
# TODO(cmart): LEASE_IMAGE_PREFIX should be extracted from CONF
LEASE_IMAGE_PREFIX = 'reserved_'
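Review note: two comment rules drive the smaller edits: pep8's E265 (a block comment starts with a hash and exactly one space) and the hacking convention that TODO tags are upper case with the author named in parentheses. The compliant form, taken straight from this hunk:

# TODO(cmart): LEASE_IMAGE_PREFIX should be extracted from CONF
LEASE_IMAGE_PREFIX = 'reserved_'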
@ -94,7 +94,6 @@ class TestResourceReservationScenario(rrs.ResourceReservationScenarioTest):

        # compare lease_data with data passed as parameter
        lease = self.get_lease_by_name(expected_lease_params['name'])
        self.assertNotEmpty(lease)

        # check lease dates!! (Beware of date format)
        lease_start_date = dateutil.parser.parse(lease['start_date'])
@ -105,7 +104,7 @@ class TestResourceReservationScenario(rrs.ResourceReservationScenarioTest):
        self.assertEqual(expected_lease_params['start'], lease_start_date)
        self.assertEqual(expected_lease_params['end'], lease_end_date)

        #check lease events!
        # check lease events!
        events = lease['events']
        self.assertTrue(len(events) >= 3)

@ -130,8 +129,8 @@ class TestResourceReservationScenario(rrs.ResourceReservationScenarioTest):
            self.assertNotEmpty(
                filter(lambda image: image.name == image_name, images_list))
        except Exception as e:
            message = "Unable to find image with name '%s'. " \
                      "Exception: %s" % (image_name, e.message)
            message = ("Unable to find image with name '%s'. "
                       "Exception: %s" % (image_name, e.message))
            raise exceptions.NotFound(message)

    def check_server_is_removed(self):
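Review note: for strings, the parenthesized form relies on implicit literal concatenation: adjacent string literals inside the parentheses are joined at compile time, and the % formatting then applies to the joined result, so the backslash disappears. Runnable sketch with illustrative values:

image_name = 'reserved_server1'
error = 'not found'
message = ("Unable to find image with name '%s'. "
           "Exception: %s" % (image_name, error))
print(message)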
@ -143,7 +142,7 @@ class TestResourceReservationScenario(rrs.ResourceReservationScenarioTest):
        server = self.compute_client.servers.get(server_id)
        self.assertEqual(expected_status, server.status)

        #update server resource reference
        # update server resource reference
        self.set_resource('server', server)

    def wait_for_server_status(self, status):
@ -152,7 +151,7 @@ class TestResourceReservationScenario(rrs.ResourceReservationScenarioTest):
            self.get_resource('server').id, status)
        self.check_server_status(status)

    #TODO(cmart): add climate to services after pushing this code into tempest
    # TODO(cmart): add climate to services after pushing this code into tempest
    @test.attr(type='slow')
    @test.services('compute', 'network')
    def test_server_basic_resource_reservation_operation(self):
@ -171,7 +170,7 @@ class TestResourceReservationScenario(rrs.ResourceReservationScenarioTest):
        self.boot_server_with_lease_data(lease_data, wait=False)
        self.check_server_status('SHELVED_OFFLOADED')

        #now, wait until the server is active
        # now, wait until the server is active
        self.wait_for_server_status('ACTIVE')
        self.check_lease_creation(lease_data)

@ -183,9 +182,9 @@ class TestResourceReservationScenario(rrs.ResourceReservationScenarioTest):
        self.check_server_is_snapshoted()
        self.check_server_is_removed()

        #remove created snapshot
        # remove created snapshot
        image_name = LEASE_IMAGE_PREFIX + self.get_resource('server').name
        self.remove_image_snapshot(image_name)

        #remove created lease
        # remove created lease
        self.delete_lease(created_lease['id'])