Change to use db resources for scheduling
This removes the nodes cache and switches to using the resources saved in the DB. A follow-up patch will split the scheduler out of the engine service.

Change-Id: I89dea92b85ce7055accd3658aabe3a168ebe2df5
parent 7fd154f871
commit dc6772c6f5
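
In rough terms, the patch swaps the engine's in-memory node_cache for compute node records kept in the database: the scheduler reads unused nodes through ComputeNodeList.get_all_available(), wraps them in NodeState objects, and marks the winner as consumed instead of popping it from a cache. A minimal sketch of that flow, assuming a Mogan development environment; the names come from the hunks below, but this is not a verbatim excerpt of the patch:

    # Sketch only: the data flow this change introduces, not the literal code.
    from mogan import objects


    def pick_and_claim_node(context, node_manager):
        # Scheduling input now comes from DB rows rather than a node_cache dict.
        nodes = objects.ComputeNodeList.get_all_available(context)
        node_states = [node_manager.node_state_cls(node) for node in nodes]
        if not node_states:
            return None

        # ... filtering and weighing happen here exactly as in FilterScheduler ...
        top_state = node_states[0]

        # "Consuming" the winner marks its row used=True, so later requests no
        # longer see it; the old node_cache.pop() in the task flow goes away.
        top_state.consume_from_request(context)
        return top_state.node
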
@@ -95,6 +95,10 @@ class Connection(object):
     def compute_node_get_all(self, context):
         """Get all compute nodes."""

+    @abc.abstractmethod
+    def compute_node_get_all_available(self, context):
+        """Get all available compute nodes."""
+
     @abc.abstractmethod
     def compute_node_destroy(self, context, node_uuid):
         """Delete a compute node."""
@@ -106,6 +106,7 @@ def upgrade():
         sa.Column('availability_zone', sa.String(length=255), nullable=True),
         sa.Column('node_uuid', sa.String(length=36), nullable=False),
         sa.Column('extra_specs', sa.Text(), nullable=True),
+        sa.Column('used', sa.Boolean(), nullable=True),
         sa.PrimaryKeyConstraint('id'),
         sa.UniqueConstraint('node_uuid', name='uniq_compute_nodes0node_uuid'),
         mysql_ENGINE='InnoDB',
@@ -263,14 +263,23 @@ class Connection(api.Connection):
     def compute_node_get(self, context, node_uuid):
         query = model_query(
             context,
-            models.ComputeNode).filter_by(node_uuid=node_uuid)
+            models.ComputeNode).filter_by(node_uuid=node_uuid). \
+            options(joinedload('ports'))
         try:
             return query.one()
         except NoResultFound:
             raise exception.ComputeNodeNotFound(node=node_uuid)

     def compute_node_get_all(self, context):
-        return model_query(context, models.ComputeNode)
+        return model_query(
+            context,
+            models.ComputeNode).options(joinedload('ports'))
+
+    def compute_node_get_all_available(self, context):
+        return model_query(
+            context,
+            models.ComputeNode).filter_by(used=False). \
+            options(joinedload('ports'))

     def compute_node_destroy(self, context, node_uuid):
         with _session_for_write():
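
The rewritten queries attach options(joinedload('ports')) so a compute node and its ports come back in one SELECT, which matters now that every scheduling request hits the database. A generic SQLAlchemy 1.x illustration of the same eager-loading pattern; Parent and Child are placeholders, not Mogan's models:

    # Standalone illustration of eager loading with joinedload(); not Mogan code.
    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import joinedload, relationship, sessionmaker

    Base = declarative_base()


    class Parent(Base):
        __tablename__ = 'parents'
        id = Column(Integer, primary_key=True)
        name = Column(String(36))
        children = relationship('Child', backref='parent')


    class Child(Base):
        __tablename__ = 'children'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parents.id'))


    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    # The string form of joinedload matches the SQLAlchemy 1.x style used above;
    # the children collection is loaded via a JOIN in the same SELECT, so
    # iterating the parents issues no extra per-row queries.
    parents = session.query(Parent).options(joinedload('children')).all()
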
@@ -109,6 +109,7 @@ class ComputeNode(Base):
     availability_zone = Column(String(255), nullable=True)
     node_uuid = Column(String(36), nullable=False)
     extra_specs = Column(db_types.JsonEncodedDict)
+    used = Column(Boolean, default=False)


 class ComputePort(Base):
@@ -126,8 +127,8 @@ class ComputePort(Base):
     node_uuid = Column(String(36), nullable=False)
     extra_specs = Column(db_types.JsonEncodedDict)
     _node = orm.relationship(
-        ComputeNode,
-        backref=orm.backref('compute_ports', uselist=False),
+        "ComputeNode",
+        backref='ports',
         foreign_keys=node_uuid,
         primaryjoin='ComputeNode.node_uuid == ComputePort.node_uuid')

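
Since compute_ports references compute_nodes by node_uuid rather than through a declared ForeignKey, the relationship has to spell out foreign_keys and primaryjoin, and the backref is renamed to 'ports' so nodes expose the collection that joinedload('ports') loads in the DB API. A stripped-down sketch of that pattern with placeholder models, not the actual Mogan schema:

    # Placeholder models (not the Mogan schema) showing a relationship over a
    # plain column with no ForeignKey, the same pattern as ComputePort._node.
    from sqlalchemy import Column, Integer, String
    from sqlalchemy import orm
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()


    class Node(Base):
        __tablename__ = 'nodes'
        id = Column(Integer, primary_key=True)
        node_uuid = Column(String(36), nullable=False, unique=True)


    class Port(Base):
        __tablename__ = 'ports'
        id = Column(Integer, primary_key=True)
        node_uuid = Column(String(36), nullable=False)
        # Without a ForeignKey, SQLAlchemy cannot infer the join, so foreign_keys
        # and primaryjoin are spelled out; backref='ports' exposes the collection
        # that joinedload('ports') pulls in the DB API above.
        _node = orm.relationship(
            'Node',
            backref='ports',
            foreign_keys=node_uuid,
            primaryjoin='Node.node_uuid == Port.node_uuid')
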
@@ -35,7 +35,6 @@ class BaseEngineManager(periodic_task.PeriodicTasks):
             host = CONF.host
         self.host = host
         self.topic = topic
-        self.node_cache = {}
         self.network_api = network.API()
         scheduler_driver = CONF.scheduler.scheduler_driver
         self.scheduler = importutils.import_object(scheduler_driver)
@@ -51,9 +51,7 @@ class ScheduleCreateInstanceTask(flow_utils.MoganTask):
         top_node = self.manager.scheduler.schedule(
             context,
             request_spec,
-            self.manager.node_cache,
             filter_properties)
-        self.manager.node_cache.pop(top_node, None)
         instance.node_uuid = top_node
         instance.save()

@@ -45,12 +45,9 @@ class EngineManager(base_manager.BaseEngineManager):
     RPC_API_VERSION = '1.0'

     target = messaging.Target(version=RPC_API_VERSION)
+    # TODO(zhenguo): Move lock to scheduler
     _lock = threading.Lock()

-    def _refresh_cache(self, nodes):
-        with self._lock:
-            self.node_cache = nodes
-
     def _get_compute_port(self, context, port_uuid):
         """Gets compute port by the uuid."""
         try:
@@ -127,12 +124,7 @@ class EngineManager(base_manager.BaseEngineManager):
         :param context: security context
         """
         nodes = self.driver.get_available_resources()
-
-        # TODO(zhenguo): Keep using cache until we finished the refactor to
-        # save resources to db.
-        self._refresh_cache(nodes)
-
-        compute_nodes_in_db = objects.ComputeNode.list(context)
+        compute_nodes_in_db = objects.ComputeNodeList.get_all(context)

         # Record compute nodes to db
         for uuid, node in nodes.items():
@@ -476,12 +468,11 @@ class EngineManager(base_manager.BaseEngineManager):

     def list_availability_zones(self, context):
         """Get availability zone list."""
-        with self._lock:
-            node_cache = self.node_cache.values()
+        compute_nodes = objects.ComputeNodeList.get_all_available(context)

         azs = set()
-        for node in node_cache:
-            az = node.properties.get('availability_zone') \
+        for node in compute_nodes:
+            az = node.availability_zone \
                 or CONF.engine.default_availability_zone
             if az is not None:
                 azs.add(az)
@@ -120,7 +120,7 @@ class FilterScheduler(driver.Scheduler):
                       {'max_attempts': max_attempts,
                        'instance_id': instance_id})

-    def _get_weighted_candidates(self, context, request_spec, node_cache,
+    def _get_weighted_candidates(self, context, request_spec,
                                  filter_properties=None):
         """Return a list of nodes that meet required specs.

@@ -153,7 +153,7 @@ class FilterScheduler(driver.Scheduler):

         # Note: remember, we are using an iterator here. So only
         # traverse this list once.
-        nodes = self.node_manager.get_all_node_states(node_cache)
+        nodes = self.node_manager.get_all_node_states(context)

         # Filter local nodes based on requirements ...
         nodes = self.node_manager.get_filtered_nodes(nodes,
@@ -161,17 +161,16 @@ class FilterScheduler(driver.Scheduler):
         if not nodes:
             return []

-        LOG.debug("Filtered %s", nodes)
+        LOG.debug("Filtered %(nodes)s", {'nodes': nodes})
         # weighted_node = WeightedNode() ... the best
         # node for the job.
         weighed_nodes = self.node_manager.get_weighed_nodes(nodes,
                                                             filter_properties)
+        LOG.debug("Weighed %(nodes)s", {'nodes': weighed_nodes})
         return weighed_nodes

-    def schedule(self, context, request_spec, node_cache,
-                 filter_properties=None):
+    def schedule(self, context, request_spec, filter_properties=None):
         weighed_nodes = self._get_weighted_candidates(context, request_spec,
-                                                      node_cache,
                                                       filter_properties)
         if not weighed_nodes:
             LOG.warning(_LW('No weighed nodes found for instance '
@@ -180,11 +179,9 @@ class FilterScheduler(driver.Scheduler):
             raise exception.NoValidNode(_("No weighed nodes available"))

         top_node = self._choose_top_node(weighed_nodes, request_spec)
-        self._add_retry_node(filter_properties, top_node)
-        return top_node
+        top_node.obj.consume_from_request(context)
+        self._add_retry_node(filter_properties, top_node.obj.node)
+        return top_node.obj.node

     def _choose_top_node(self, weighed_nodes, request_spec):
-        top_node = weighed_nodes[0]
-        node_state = top_node.obj
-        LOG.debug("Choosing %s", node_state.node)
-        return node_state.node
+        return weighed_nodes[0]
@@ -27,7 +27,7 @@ class PortsFilter(filters.BaseNodeFilter):
         """Check if ports has the specified port type."""

         for port in ports:
-            if port_type == port['port_type']:
+            if port_type == port.port_type:
                 return True

         return False
@@ -23,6 +23,7 @@ from oslo_utils import importutils

 from mogan.common import exception
 from mogan.engine.scheduler import filters
+from mogan import objects


 CONF = cfg.CONF
@@ -33,12 +34,16 @@ class NodeState(object):
     """Mutable and immutable information tracked for a Ironic node."""

     def __init__(self, node):
-        self.node = node['node_uuid']
-        self.capabilities = node['extra_specs']
-        self.availability_zone = node['availability_zone'] \
+        self.node = node.node_uuid
+        self.capabilities = node.extra_specs
+        self.availability_zone = node.availability_zone \
             or CONF.engine.default_availability_zone
-        self.instance_type = node['node_type']
-        self.ports = node['ports']
+        self.instance_type = node.node_type
+        self.ports = node.ports
+
+    def consume_from_request(self, context):
+        """Consume the compute node."""
+        objects.ComputeNode.consume_node(context, self.node)


 class NodeManager(object):
@@ -126,11 +131,12 @@ class NodeManager(object):
             nodes,
             weight_properties)

-    def get_all_node_states(self, node_cache):
+    def get_all_node_states(self, context):
         """Returns a list of all the nodes the NodeManager knows about."""

+        nodes = objects.ComputeNodeList.get_all_available(context)
         node_states = []
-        for node_uuid, node in node_cache.items():
+        for node in nodes:
             node_state = self.node_state_cls(node)
             node_states.append(node_state)

@@ -97,9 +97,10 @@ class MoganObject(object_base.VersionedObject):
                 self[field] = loaded_object[field]

     @staticmethod
-    def _from_db_object(obj, db_object):
+    def _from_db_object(context, obj, db_object):
         """Converts a database entity to a formal object.

+        :param context: security context
         :param obj: An object of the class.
         :param db_object: A DB model of the object
         :return: The object of the class with the database entity added
@@ -122,7 +123,7 @@ class MoganObject(object_base.VersionedObject):
         :param db_objects: A list of DB models of the object
         :returns: A list of objects corresponding to the database entities
         """
-        return [cls._from_db_object(cls(context), db_obj)
+        return [cls._from_db_object(context, cls(context), db_obj)
                 for db_obj in db_objects]


@@ -18,6 +18,7 @@ from oslo_versionedobjects import base as object_base

 from mogan.db import api as dbapi
 from mogan.objects import base
+from mogan.objects import compute_port
 from mogan.objects import fields as object_fields


@@ -36,27 +37,37 @@ class ComputeNode(base.MoganObject, object_base.VersionedObjectDictCompat):
         'node_type': object_fields.StringField(),
         'availability_zone': object_fields.StringField(nullable=True),
         'node_uuid': object_fields.UUIDField(read_only=True),
+        'ports': object_fields.ObjectField('ComputePortList', nullable=True),
         'extra_specs': object_fields.FlexibleDictField(nullable=True),
+        'used': object_fields.BooleanField(default=False),
     }

-    @classmethod
-    def list(cls, context):
-        """Return a list of ComputeNode objects."""
-        db_compute_nodes = cls.dbapi.compute_node_get_all(context)
-        return cls._from_db_object_list(context, db_compute_nodes)
+    @staticmethod
+    def _from_db_object(context, node, db_node):
+        """Converts a database entity to a formal object."""
+        for field in node.fields:
+            if field == 'ports':
+                node.ports = object_base.obj_make_list(
+                    context, compute_port.ComputePortList(context),
+                    compute_port.ComputePort, db_node['ports']
+                )
+            else:
+                node[field] = db_node[field]
+        node.obj_reset_changes()
+        return node

     @classmethod
     def get(cls, context, node_uuid):
         """Find a compute node and return a ComputeNode object."""
         db_compute_node = cls.dbapi.compute_node_get(context, node_uuid)
-        compute_node = cls._from_db_object(cls(context), db_compute_node)
+        compute_node = cls._from_db_object(
+            context, cls(context), db_compute_node)
         return compute_node

     def create(self, context=None):
         """Create a ComputeNode record in the DB."""
         values = self.obj_get_changes()
-        db_compute_node = self.dbapi.compute_node_create(context, values)
-        self._from_db_object(self, db_compute_node)
+        self.dbapi.compute_node_create(context, values)

     def destroy(self, context=None):
         """Delete the ComputeNode from the DB."""
@@ -69,14 +80,40 @@ class ComputeNode(base.MoganObject, object_base.VersionedObjectDictCompat):
         self.dbapi.compute_node_update(context, self.node_uuid, updates)
         self.obj_reset_changes()

     def refresh(self, context=None):
         """Refresh the object by re-fetching from the DB."""
         current = self.__class__.get(context, self.node_uuid)
         self.obj_refresh(current)

     def update_from_driver(self, node):
         keys = ["cpus", "memory_mb", "hypervisor_type", "node_type",
                 "availability_zone", "node_uuid", "extra_specs"]
         for key in keys:
             if key in node:
                 setattr(self, key, node[key])
+
+    @classmethod
+    def consume_node(cls, context, node_uuid):
+        updates = {'used': True}
+        cls.dbapi.compute_node_update(context, node_uuid, updates)
+
+
+@base.MoganObjectRegistry.register
+class ComputeNodeList(object_base.ObjectListBase, base.MoganObject,
+                      object_base.VersionedObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    dbapi = dbapi.get_instance()
+
+    fields = {
+        'objects': object_fields.ListOfObjectsField('ComputeNode')
+    }
+
+    @classmethod
+    def get_all(cls, context):
+        db_compute_nodes = cls.dbapi.compute_node_get_all(context)
+        return object_base.obj_make_list(context, cls(context),
+                                         ComputeNode, db_compute_nodes)
+
+    @classmethod
+    def get_all_available(cls, context):
+        db_compute_nodes = cls.dbapi.compute_node_get_all_available(context)
+        return object_base.obj_make_list(context, cls(context),
+                                         ComputeNode, db_compute_nodes)
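
For orientation, a hedged usage sketch of the object API added in this hunk; it assumes a configured Mogan database and an admin context, and is not part of the patch:

    # Usage sketch for ComputeNodeList / consume_node (assumption: a working
    # Mogan DB backend and RequestContext are available).
    from mogan import objects


    def claim_first_available(context):
        # get_all_available() maps to dbapi.compute_node_get_all_available(),
        # i.e. rows with used=False, with their ports eager-loaded.
        nodes = objects.ComputeNodeList.get_all_available(context)
        if not nodes:
            return None
        node = nodes[0]
        # consume_node() sets used=True so the node is skipped next time.
        objects.ComputeNode.consume_node(context, node.node_uuid)
        return node.node_uuid
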
@@ -46,14 +46,15 @@ class ComputePort(base.MoganObject, object_base.VersionedObjectDictCompat):
     def get(cls, context, port_uuid):
         """Find a compute port and return a ComputePort object."""
         db_compute_port = cls.dbapi.compute_port_get(context, port_uuid)
-        compute_port = cls._from_db_object(cls(context), db_compute_port)
+        compute_port = cls._from_db_object(context, cls(context),
+                                           db_compute_port)
         return compute_port

     def create(self, context=None):
         """Create a ComputePort record in the DB."""
         values = self.obj_get_changes()
         db_compute_port = self.dbapi.compute_port_create(context, values)
-        self._from_db_object(self, db_compute_port)
+        self._from_db_object(context, self, db_compute_port)

     def destroy(self, context=None):
         """Delete the ComputePort from the DB."""
@@ -58,7 +58,7 @@ class InstanceType(base.MoganObject, object_base.VersionedObjectDictCompat):
     @staticmethod
     def _from_db_object_list(db_objects, cls, context):
         """Converts a list of database entities to a list of formal objects."""
-        return [InstanceType._from_db_object(cls(context), obj)
+        return [InstanceType._from_db_object(context, cls(context), obj)
                 for obj in db_objects]

     @classmethod
@@ -73,7 +73,7 @@ class InstanceType(base.MoganObject, object_base.VersionedObjectDictCompat):
         """Find a Instance Type and return a Instance Type object."""
         db_instance_type = cls.dbapi.instance_type_get(context,
                                                        instance_type_uuid)
-        instance_type = InstanceType._from_db_object(cls(context),
+        instance_type = InstanceType._from_db_object(context, cls(context),
                                                      db_instance_type)
         return instance_type

@@ -81,7 +81,7 @@ class InstanceType(base.MoganObject, object_base.VersionedObjectDictCompat):
         """Create a Instance Type record in the DB."""
         values = self.obj_get_changes()
         db_instance_type = self.dbapi.instance_type_create(context, values)
-        self._from_db_object(self, db_instance_type)
+        self._from_db_object(context, self, db_instance_type)

     def destroy(self, context=None):
         """Delete the Instance Type from the DB."""
@@ -98,6 +98,8 @@ def get_test_compute_node(**kw):
         'node_uuid': kw.get('node_uuid',
                             'f978ef48-d4af-4dad-beec-e6174309bc71'),
         'extra_specs': kw.get('extra_specs', {}),
+        'ports': kw.get('ports', []),
+        'used': kw.get('used', False),
         'updated_at': kw.get('updated_at'),
         'created_at': kw.get('created_at'),
     }
@@ -117,6 +119,10 @@ def create_test_compute_node(context={}, **kw):
     # Let DB generate ID if it isn't specified explicitly
     if 'id' not in kw:
         del node['id']
+    # Create node with tags will raise an exception. If tags are not
+    # specified explicitly just delete it.
+    if 'ports' not in kw:
+        del node['ports']
     dbapi = db_api.get_instance()

     return dbapi.compute_node_create(context, node)
@@ -53,7 +53,6 @@ class CreateInstanceFlowTestCase(base.TestCase):
                                              fake_filter_props)
         mock_schedule.assert_called_once_with(self.ctxt,
                                               fake_request_spec,
-                                              fake_engine_manager.node_cache,
                                               fake_filter_props)
         self.assertEqual(fake_uuid, instance_obj.node_uuid)

@@ -17,13 +17,8 @@
 Fakes For Scheduler tests.
 """

-from oslo_versionedobjects import base as object_base
-from oslo_versionedobjects import fields
-
 from mogan.engine.scheduler import filter_scheduler
 from mogan.engine.scheduler import node_manager
-from mogan.objects import base
-from mogan.objects import fields as object_fields


 class FakeFilterScheduler(filter_scheduler.FilterScheduler):
@@ -32,38 +27,6 @@ class FakeFilterScheduler(filter_scheduler.FilterScheduler):
         self.node_manager = node_manager.NodeManager()


-@base.MoganObjectRegistry.register
-class FakeNode(base.MoganObject, object_base.VersionedObjectDictCompat):
-    fields = {
-        'id': object_fields.IntegerField(),
-        'node_uuid': object_fields.UUIDField(),
-        'node_type': object_fields.StringField(nullable=True),
-        'availability_zone': object_fields.StringField(nullable=True),
-        'extra_specs': object_fields.FlexibleDictField(nullable=True),
-        'ports': fields.ListOfDictOfNullableStringsField(nullable=True),
-    }
-
-
-fakenode1 = FakeNode(id=1,
-                     node_uuid='1a617131-cdbc-45dc-afff-f21f17ae054e',
-                     extra_specs={},
-                     availability_zone='az1',
-                     node_type='type1',
-                     ports=[])
-fakenode2 = FakeNode(id=2,
-                     node_uuid='2a617131-cdbc-45dc-afff-f21f17ae054e',
-                     extra_specs={},
-                     availability_zone='az1',
-                     node_type='type1',
-                     ports=[])
-fakenode3 = FakeNode(id=3,
-                     node_uuid='3a617131-cdbc-45dc-afff-f21f17ae054e',
-                     extra_specs={},
-                     availability_zone='az1',
-                     node_type='type1',
-                     ports=[])
-
-
 class FakeNodeState(node_manager.NodeState):
     def __init__(self, node, attribute_dict):
         super(FakeNodeState, self).__init__(node)
@@ -17,13 +17,16 @@ Tests For NodeManager
 """

 import mock
+from oslo_context import context
+from oslo_versionedobjects import base as object_base

 from mogan.common import exception
 from mogan.engine.scheduler import filters
 from mogan.engine.scheduler import node_manager
 from mogan.engine.scheduler.node_manager import NodeState
+from mogan.objects import compute_port
 from mogan.tests import base as test
 from mogan.tests.unit.engine.scheduler import fakes
+from mogan.tests.unit.objects import utils as obj_utils


 class FakeFilterClass1(filters.BaseNodeFilter):
@@ -41,11 +44,15 @@ class NodeManagerTestCase(test.TestCase):

     def setUp(self):
         super(NodeManagerTestCase, self).setUp()
+        self.ctxt = context.get_admin_context()
         self.node_manager = node_manager.NodeManager()

-        self.fake_nodes = [NodeState(fakes.fakenode1),
-                           NodeState(fakes.fakenode2),
-                           NodeState(fakes.fakenode3)]
+        fake_node = obj_utils.get_test_compute_node(self.ctxt)
+        fake_ports = object_base.obj_make_list(
+            self.ctxt, compute_port.ComputePortList(self.ctxt),
+            compute_port.ComputePort, [])
+        fake_node.ports = fake_ports
+        self.fake_nodes = [NodeState(fake_node)]

     def test_choose_node_filters_not_found(self):
         self.override_config('scheduler_default_filters', 'FakeFilterClass3',
@@ -17,6 +17,7 @@

 import mock
 from oslo_config import cfg
+from oslo_utils import uuidutils

 from mogan.common import states
 from mogan.engine.baremetal.ironic.driver import ironic_states
@@ -30,20 +31,17 @@ from mogan.tests.unit.objects import utils as obj_utils
 CONF = cfg.CONF


-@mock.patch.object(manager.EngineManager, '_refresh_cache')
 class ManageInstanceTestCase(mgr_utils.ServiceSetUpMixin,
                              tests_db_base.DbTestCase):

     @mock.patch.object(network_api.API, 'delete_port')
-    def test_destroy_networks(self, delete_port_mock,
-                              refresh_cache_mock):
+    def test_destroy_networks(self, delete_port_mock):
         instance = obj_utils.create_test_instance(self.context)
         inst_port_id = instance.nics[0].port_id
         delete_port_mock.side_effect = None
         port = mock.MagicMock()
         port.extra = {'vif_port_id': 'fake-vif'}
         port.uuid = 'fake-uuid'
-        refresh_cache_mock.side_effect = None
         self._start_service()

         self.service.destroy_networks(self.context, instance)
@@ -53,13 +51,11 @@ class ManageInstanceTestCase(mgr_utils.ServiceSetUpMixin,
             self.context, inst_port_id, instance.uuid)

     @mock.patch.object(IronicDriver, 'destroy')
-    def _test__delete_instance(self, destroy_node_mock,
-                               refresh_cache_mock, state=None):
+    def _test__delete_instance(self, destroy_node_mock, state=None):
         fake_node = mock.MagicMock()
         fake_node.provision_state = state
         instance = obj_utils.create_test_instance(self.context)
         destroy_node_mock.side_effect = None
-        refresh_cache_mock.side_effect = None
         self._start_service()

         self.service._delete_instance(self.context, instance)
@@ -67,25 +63,21 @@ class ManageInstanceTestCase(mgr_utils.ServiceSetUpMixin,

         destroy_node_mock.assert_called_once_with(self.context, instance)

-    def test__delete_instance_cleaning(self, refresh_cache_mock):
-        self._test__delete_instance(state=ironic_states.CLEANING,
-                                    refresh_cache_mock=refresh_cache_mock)
+    def test__delete_instance_cleaning(self):
+        self._test__delete_instance(state=ironic_states.CLEANING)

-    def test__delete_instance_cleanwait(self, refresh_cache_mock):
-        self._test__delete_instance(state=ironic_states.CLEANWAIT,
-                                    refresh_cache_mock=refresh_cache_mock)
+    def test__delete_instance_cleanwait(self):
+        self._test__delete_instance(state=ironic_states.CLEANWAIT)

     @mock.patch.object(manager.EngineManager, '_delete_instance')
     @mock.patch.object(manager.EngineManager, '_unplug_vifs')
-    def test_delete_instance(self, unplug_mock,
-                             delete_inst_mock, refresh_cache_mock):
+    def test_delete_instance(self, unplug_mock, delete_inst_mock):
         fake_node = mock.MagicMock()
         fake_node.provision_state = ironic_states.ACTIVE
         instance = obj_utils.create_test_instance(
             self.context, status=states.DELETING)
         unplug_mock.side_effect = None
         delete_inst_mock.side_effect = None
-        refresh_cache_mock.side_effect = None
         self._start_service()

         self.service.delete_instance(self.context, instance)
@@ -97,14 +89,12 @@ class ManageInstanceTestCase(mgr_utils.ServiceSetUpMixin,
     @mock.patch.object(IronicDriver, 'get_power_state')
     @mock.patch.object(IronicDriver, 'set_power_state')
     def test_change_instance_power_state(
-            self, set_power_mock, get_power_mock,
-            refresh_cache_mock):
+            self, set_power_mock, get_power_mock):
         instance = obj_utils.create_test_instance(
             self.context, status=states.POWERING_ON)
         fake_node = mock.MagicMock()
         fake_node.target_power_state = ironic_states.NOSTATE
         get_power_mock.return_value = states.POWER_ON
-        refresh_cache_mock.side_effect = None
         self._start_service()

         self.service.set_power_state(self.context, instance,
@@ -116,19 +106,17 @@ class ManageInstanceTestCase(mgr_utils.ServiceSetUpMixin,
                                      ironic_states.POWER_ON)
         get_power_mock.assert_called_once_with(self.context, instance.uuid)

-    def test_list_availability_zone(self, refresh_cache_mock):
-        refresh_cache_mock.side_effect = None
-        node1 = mock.MagicMock()
-        node2 = mock.MagicMock()
-        node3 = mock.MagicMock()
-        node1.properties = {'availability_zone': 'az1'}
-        node2.properties = {'availability_zone': 'az2'}
-        node3.properties = {'availability_zone': 'az1'}
+    def test_list_availability_zone(self):
+        uuid1 = uuidutils.generate_uuid()
+        uuid2 = uuidutils.generate_uuid()
+        obj_utils.create_test_compute_node(
+            self.context, availability_zone='az1')
+        obj_utils.create_test_compute_node(
+            self.context, node_uuid=uuid1, availability_zone='az2')
+        obj_utils.create_test_compute_node(
+            self.context, node_uuid=uuid2, availability_zone='az1')

         self._start_service()
-        self.service.node_cache = {'node1_id': node1,
-                                   'node2_id': node2,
-                                   'node3_id': node3}
         azs = self.service.list_availability_zones(self.context)
         self._stop_service()

@@ -48,7 +48,7 @@ class RPCAPITestCase(base.DbTestCase):
         self.fake_type = dbutils.get_test_instance_type()
         self.fake_type['extra_specs'] = {}
         self.fake_type_obj = objects.InstanceType._from_db_object(
-            objects.InstanceType(self.context), self.fake_type)
+            self.context, objects.InstanceType(self.context), self.fake_type)

     def test_serialized_instance_has_uuid(self):
         self.assertIn('uuid', self.fake_instance)
@@ -29,7 +29,8 @@ class TestComputeNodeObject(base.DbTestCase):
     def setUp(self):
         super(TestComputeNodeObject, self).setUp()
         self.ctxt = context.get_admin_context()
-        self.fake_node = utils.get_test_compute_node(context=self.ctxt)
+        self.fake_node = utils.get_test_compute_node(
+            context=self.ctxt, ports=[utils.get_test_compute_port()])
         self.node = obj_utils.get_test_compute_node(
             self.ctxt, **self.fake_node)

@@ -44,23 +45,12 @@ class TestComputeNodeObject(base.DbTestCase):
             mock_node_get.assert_called_once_with(self.context, node_uuid)
             self.assertEqual(self.context, node._context)

-    def test_list(self):
-        with mock.patch.object(self.dbapi, 'compute_node_get_all',
-                               autospec=True) as mock_node_get_all:
-            mock_node_get_all.return_value = [self.fake_node]
-
-            nodes = objects.ComputeNode.list(self.context)
-
-            mock_node_get_all.assert_called_once_with(self.context)
-            self.assertIsInstance(nodes[0], objects.ComputeNode)
-            self.assertEqual(self.context, nodes[0]._context)
-
     def test_create(self):
         with mock.patch.object(self.dbapi, 'compute_node_create',
                                autospec=True) as mock_node_create:
+            self.fake_node.pop('ports')
             mock_node_create.return_value = self.fake_node
             node = objects.ComputeNode(self.context, **self.fake_node)
             node.obj_get_changes()
             node.create(self.context)
             expected_called = copy.deepcopy(self.fake_node)
             mock_node_create.assert_called_once_with(self.context,
@@ -71,6 +61,7 @@ class TestComputeNodeObject(base.DbTestCase):
         uuid = self.fake_node['node_uuid']
         with mock.patch.object(self.dbapi, 'compute_node_destroy',
                                autospec=True) as mock_node_destroy:
+            self.fake_node.pop('ports')
             node = objects.ComputeNode(self.context, **self.fake_node)
             node.destroy(self.context)
             mock_node_destroy.assert_called_once_with(self.context, uuid)
@@ -79,16 +70,10 @@ class TestComputeNodeObject(base.DbTestCase):
         uuid = self.fake_node['node_uuid']
         with mock.patch.object(self.dbapi, 'compute_node_update',
                                autospec=True) as mock_node_update:
+            self.fake_node.pop('ports')
             mock_node_update.return_value = self.fake_node
             node = objects.ComputeNode(self.context, **self.fake_node)
             updates = node.obj_get_changes()
             node.save(self.context)
             mock_node_update.assert_called_once_with(
                 self.context, uuid, updates)
-
-    def test_save_after_refresh(self):
-        db_node = utils.create_test_compute_node(context=self.ctxt)
-        node = objects.ComputeNode.get(self.context, db_node.node_uuid)
-        node.refresh(self.context)
-        node.hypervisor_type = 'refresh'
-        node.save(self.context)
@@ -383,14 +383,14 @@ class _TestObject(object):
 # The fingerprint values should only be changed if there is a version bump.
 expected_object_fingerprints = {
     'Instance': '1.0-18d0ffc894a0f6b52df73a29919c035b',
-    'ComputeNode': '1.0-9dd029c83e37adc7e01ff759e76cdda1',
+    'ComputeNode': '1.0-36221253681d9acb88efe2a9113071c7',
+    'ComputeNodeList': '1.0-33a2e1bb91ad4082f9f63429b77c1244',
     'ComputePort': '1.0-bdba0f3ece31260c4deea37d39618c1a',
     'ComputePortList': '1.0-33a2e1bb91ad4082f9f63429b77c1244',
     'InstanceFault': '1.0-6b5b01b2cc7b6b547837acb168ec6eb9',
     'InstanceFaultList': '1.0-43e8aad0258652921f929934e9e048fd',
     'InstanceType': '1.0-589b096651fcdb30898ff50f748dd948',
     'MyObj': '1.1-aad62eedc5a5cc8bcaf2982c285e753f',
-    'FakeNode': '1.0-f367d3a6d123084a60ef73696cd2964b',
     'InstanceNic': '1.0-78744332fe105f9c1796dc5295713d9f',
     'InstanceNics': '1.0-33a2e1bb91ad4082f9f63429b77c1244',
     'Quota': '1.0-c8caa082f4d726cb63fdc5943f7cd186',
@@ -83,7 +83,10 @@ def get_test_compute_node(ctxt, **kw):
     # Let DB generate ID if it isn't specified explicitly
     if 'id' not in kw:
         del db_node['id']
-    node = objects.ComputeNode(ctxt, **db_node)
+    node = objects.ComputeNode(ctxt)
+    for key in db_node:
+        if key != 'ports':
+            setattr(node, key, db_node[key])
     return node
