Clean up compute multi-node assignment of node

Move assignment of the node field on the instance to the compute host
to be more consistent with how the host field is assigned and handled
by the resource tracker.

Change-Id: Id3086585a99350abbab387932e689825b33ab6b5
This commit is contained in:
Brian Elliott
2012-11-20 20:34:11 +00:00
parent f3e892ed5e
commit 7b39ef953a
9 changed files with 46 additions and 32 deletions

View File

@@ -306,7 +306,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
RPC_API_VERSION = '2.18'
RPC_API_VERSION = '2.19'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -576,13 +576,19 @@ class ComputeManager(manager.SchedulerDependentManager):
def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, instance):
admin_password, is_first_time, node, instance):
"""Launch a new instance with specified options."""
context = context.elevated()
try:
self._check_instance_not_already_created(context, instance)
image_meta = self._check_image_size(context, instance)
if node is None:
node = self.driver.get_available_nodes()[0]
LOG.debug(_("No node specified, defaulting to %(node)s") %
locals())
extra_usage_info = {"image_name": image_meta['name']}
self._start_building(context, instance)
self._notify_about_instance_usage(
@@ -591,7 +597,7 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = None
bdms = self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])
rt = self._get_resource_tracker(instance.get('node'))
rt = self._get_resource_tracker(node)
try:
limits = filter_properties.get('limits', {})
with rt.instance_claim(context, instance, limits):
@@ -941,7 +947,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def run_instance(self, context, instance, request_spec=None,
filter_properties=None, requested_networks=None,
injected_files=None, admin_password=None,
is_first_time=False):
is_first_time=False, node=None):
if filter_properties is None:
filter_properties = {}
@@ -952,7 +958,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def do_run_instance():
self._run_instance(context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, instance)
admin_password, is_first_time, node, instance)
do_run_instance()
def _shutdown_instance(self, context, instance, bdms):

View File

@@ -82,21 +82,26 @@ class ResourceTracker(object):
"""
if self.disabled:
# compute_driver doesn't support resource tracking, just
# set the 'host' field and continue the build:
self._set_instance_host(context, instance_ref)
# set the 'host' and node fields and continue the build:
self._set_instance_host_and_node(context, instance_ref)
return claims.NopClaim()
# sanity check:
# sanity checks:
if instance_ref['host']:
LOG.warning(_("Host field should not be set on the instance until "
"resources have been claimed."),
instance=instance_ref)
if instance_ref['node']:
LOG.warning(_("Node field should not be set on the instance "
"until resources have been claimed."),
instance=instance_ref)
claim = claims.Claim(instance_ref, self)
if claim.test(self.compute_node, limits):
self._set_instance_host(context, instance_ref)
self._set_instance_host_and_node(context, instance_ref)
# Mark resources in-use and update stats
self._update_usage_from_instance(self.compute_node, instance_ref)
@@ -168,12 +173,13 @@ class ResourceTracker(object):
'new_instance_type_id': instance_type['id'],
'status': 'pre-migrating'})
def _set_instance_host(self, context, instance_ref):
def _set_instance_host_and_node(self, context, instance_ref):
"""Tag the instance as belonging to this host. This should be done
while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
values = {'host': self.host, 'launched_on': self.host}
values = {'host': self.host, 'node': self.nodename,
'launched_on': self.host}
(old_ref, new_ref) = db.instance_update_and_get_original(context,
instance_ref['uuid'], values)
notifications.send_update(context, old_ref, new_ref)

View File

@@ -146,6 +146,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.16 - Add instance_type to resize_instance
2.17 - Add get_backdoor_port()
2.18 - Add bdms to rebuild_instance
2.19 - Add node to run_instance
'''
#
@@ -474,14 +475,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def run_instance(self, ctxt, instance, host, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
is_first_time):
is_first_time, node=None):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('run_instance', instance=instance_p,
request_spec=request_spec, filter_properties=filter_properties,
requested_networks=requested_networks,
injected_files=injected_files, admin_password=admin_password,
is_first_time=is_first_time),
topic=_compute_topic(self.topic, ctxt, host, None))
is_first_time=is_first_time, node=node),
topic=_compute_topic(self.topic, ctxt, host, None),
version='2.19')
def set_admin_password(self, ctxt, instance, new_pass):
instance_p = jsonutils.to_primitive(instance)

View File

@@ -87,21 +87,12 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec):
def instance_update_db(context, instance_uuid):
'''Clear the host and set the scheduled_at field of an Instance.
'''Clear the host and node - set the scheduled_at field of an Instance.
:returns: An Instance with the updated fields set properly.
'''
now = timeutils.utcnow()
values = {'host': None, 'scheduled_at': now}
return db.instance_update(context, instance_uuid, values)
def db_instance_node_set(context, instance_uuid, node):
'''Set the node field of an Instance.
:returns: An Instance with the updated fields set properly.
'''
values = {'node': node}
values = {'host': None, 'node': None, 'scheduled_at': now}
return db.instance_update(context, instance_uuid, values)

View File

@@ -130,9 +130,6 @@ class FilterScheduler(driver.Scheduler):
'scheduler.run_instance.scheduled', notifier.INFO,
payload)
# TODO(NTTdocomo): Combine the next two updates into one
driver.db_instance_node_set(context,
instance_uuid, weighed_host.obj.nodename)
updated_instance = driver.instance_update_db(context,
instance_uuid)
@@ -144,7 +141,8 @@ class FilterScheduler(driver.Scheduler):
request_spec=request_spec, filter_properties=filter_properties,
requested_networks=requested_networks,
injected_files=injected_files,
admin_password=admin_password, is_first_time=is_first_time)
admin_password=admin_password, is_first_time=is_first_time,
node=weighed_host.obj.nodename)
def _post_select_populate_filter_properties(self, filter_properties,
host_state):

View File

@@ -511,6 +511,15 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance, self.context, instance=instance,
filter_properties=filter_properties)
def test_create_instance_without_node_param(self):
instance = self._create_fake_instance({'node': None})
self.compute.run_instance(self.context, instance=instance)
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertEqual(NODENAME, instance['node'])
def test_default_access_ip(self):
self.flags(default_access_ip_network_name='test1')
fake_network.unset_stub_network_methods(self.stubs)
@@ -5838,7 +5847,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.mox.ReplayAll()
self.compute._run_instance(self.context, None, {}, None, None, None,
False, self.instance)
False, None, self.instance)
def test_deallocate_network_fail(self):
"""Test de-allocation of network failing before re-scheduling logic

View File

@@ -154,6 +154,7 @@ class BaseTestCase(test.TestCase):
'project_id': '123456',
'vcpus': 1,
'host': None,
'node': None,
'instance_type_id': 1,
'launched_on': None,
}

View File

@@ -310,7 +310,8 @@ class ComputeRpcAPITestCase(test.TestCase):
instance=self.fake_instance, host='fake_host',
request_spec='fake_spec', filter_properties={},
requested_networks='networks', injected_files='files',
admin_password='pw', is_first_time=True)
admin_password='pw', is_first_time=True, node='node',
version='2.19')
def test_set_admin_password(self):
self._test_compute_api('set_admin_password', 'call',

View File

@@ -653,7 +653,7 @@ class SchedulerDriverModuleTestCase(test.TestCase):
timeutils.utcnow().AndReturn('fake-now')
db.instance_update(self.context, 'fake_uuid',
{'host': None, 'scheduled_at': 'fake-now'})
{'host': None, 'node': None, 'scheduled_at': 'fake-now'})
rpc.queue_get_for(self.context, 'compute', host).AndReturn(queue)
rpc.cast(self.context, queue,
{'method': method,