From 99b1b106fbfc9175df8ef4a154ba57b37181a8f0 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 5 May 2011 23:14:46 -0400 Subject: [PATCH 01/65] Publish errors via nova.notifier --- nova/log.py | 9 +++++++++ nova/tests/test_notifier.py | 21 +++++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/nova/log.py b/nova/log.py index 096279f7..3e587891 100644 --- a/nova/log.py +++ b/nova/log.py @@ -35,6 +35,7 @@ import os import sys import traceback +import nova from nova import flags from nova import version @@ -63,6 +64,7 @@ flags.DEFINE_list('default_log_levels', 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') flags.DEFINE_bool('use_syslog', False, 'output to syslog') +flags.DEFINE_bool('publish_errors', True, 'publish error events') flags.DEFINE_string('logfile', None, 'output to named file') @@ -258,12 +260,19 @@ class NovaRootLogger(NovaLogger): else: self.removeHandler(self.filelog) self.addHandler(self.streamlog) + if FLAGS.publish_errors: + self.addHandler(PublishErrorsHandler(ERROR)) if FLAGS.verbose: self.setLevel(DEBUG) else: self.setLevel(INFO) +class PublishErrorsHandler(logging.Handler): + def emit(self, record): + nova.notifier.notify('error', record) + + def handle_exception(type, value, tb): extra = {} if FLAGS.verbose: diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 4d6289e6..d18d3bc0 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -13,14 +13,18 @@ # License for the specific language governing permissions and limitations # under the License. -import nova +import json +import stubout + +import nova +from nova import log as logging from nova import flags from nova import notifier from nova.notifier import no_op_notifier from nova import test -import stubout +LOG = logging.getLogger('nova.compute.api') class NotifierTestCase(test.TestCase): """Test case for notifications""" @@ -58,3 +62,16 @@ class NotifierTestCase(test.TestCase): notifier.notify('derp', Mock()) self.assertEqual(self.mock_cast, True) + + def test_error_notification(self): + self.stubs.Set(nova.flags.FLAGS, 'notification_driver', + 'nova.notifier.rabbit_notifier.RabbitNotifier') + msgs = [] + def mock_cast(context, topic, msg): + data = json.loads(msg) + msgs.append(data) + self.stubs.Set(nova.rpc, 'cast', mock_cast) + LOG.error('foo'); + msg = msgs[0] + self.assertEqual(msg['event_name'], 'error') + self.assertEqual(msg['model']['msg'], 'foo') From 053e30376f5a0fe78a11fffafae3058cf53d9371 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Fri, 6 May 2011 20:15:06 -0400 Subject: [PATCH 02/65] Set publish_errors default to False. 
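
The handler added in the previous patch hooks straight into the standard
library's logging machinery: once installed on the root logger, every record
at ERROR or above is republished through the notifier. A minimal standalone
sketch of that pattern -- with a print stub standing in for
nova.notifier.notify(), which is not the real call -- looks like:

    import logging

    def publish(event_name, payload):
        """Stand-in for nova.notifier.notify(); prints instead of casting."""
        print('%s: %s' % (event_name, payload))

    class PublishErrorsHandler(logging.Handler):
        def emit(self, record):
            # callHandlers() only routes records at/above this handler's
            # level here, so ERROR acts as the publishing threshold.
            publish('error', record.getMessage())

    log = logging.getLogger('demo')
    log.addHandler(PublishErrorsHandler(logging.ERROR))
    log.error('boom')     # republished through publish()
    log.warning('retry')  # below ERROR, filtered out by the handler

Making the flag default to False means operators now opt in with
--publish_errors rather than opting out.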
--- nova/log.py | 2 +- nova/tests/test_notifier.py | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/nova/log.py b/nova/log.py index 3e587891..d2ed82c6 100644 --- a/nova/log.py +++ b/nova/log.py @@ -64,7 +64,7 @@ flags.DEFINE_list('default_log_levels', 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') flags.DEFINE_bool('use_syslog', False, 'output to syslog') -flags.DEFINE_bool('publish_errors', True, 'publish error events') +flags.DEFINE_bool('publish_errors', False, 'publish error events') flags.DEFINE_string('logfile', None, 'output to named file') diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index d18d3bc0..c9c4ddde 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -18,14 +18,12 @@ import json import stubout import nova -from nova import log as logging +from nova import log from nova import flags from nova import notifier from nova.notifier import no_op_notifier from nova import test -LOG = logging.getLogger('nova.compute.api') - class NotifierTestCase(test.TestCase): """Test case for notifications""" def setUp(self): @@ -66,12 +64,17 @@ class NotifierTestCase(test.TestCase): def test_error_notification(self): self.stubs.Set(nova.flags.FLAGS, 'notification_driver', 'nova.notifier.rabbit_notifier.RabbitNotifier') + self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True) + LOG = log.getLogger('nova') + LOG.setup_from_flags() + msgs = [] def mock_cast(context, topic, msg): data = json.loads(msg) msgs.append(data) self.stubs.Set(nova.rpc, 'cast', mock_cast) LOG.error('foo'); + self.assertEqual(1, len(msgs)) msg = msgs[0] self.assertEqual(msg['event_name'], 'error') self.assertEqual(msg['model']['msg'], 'foo') From b054a2234e241ce63229c5445a73cc07311fdab4 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 9 May 2011 17:53:25 +0900 Subject: [PATCH 03/65] volume/driver: implement basic snapshot/clone added basic support for snapshot/clone to VolumeDriver. The implementation is not effective, but works. The effective implementation should be done by drived driver class. --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index 60e1d2da..9eae53e9 100644 --- a/Authors +++ b/Authors @@ -28,6 +28,7 @@ Gabe Westmaas Hisaharu Ishii Hisaki Ohara Ilya Alekseyev +Isaku Yamahata Jason Koelker Jay Pipes Jesse Andrews From 854795bb9f18846bb8d89be5492bec3dd87706b9 Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Wed, 11 May 2011 03:24:02 +0400 Subject: [PATCH 04/65] Bugfix #780784. KeyError when creating custom image. 
--- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index 60e1d2da..72eb0b6a 100644 --- a/Authors +++ b/Authors @@ -54,6 +54,7 @@ Mark Washenberger Masanori Itoh Matt Dietz Michael Gundlach +Mike Scherbakov Monsyne Dragon Monty Taylor MORITA Kazutaka From f7bf59f654fdb4babbc9c6b8296ae2fc69a6c80e Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 13 May 2011 10:43:43 -0700 Subject: [PATCH 05/65] started on integrating HostFilter --- nova/scheduler/host_filter.py | 18 ++++++++++++++++++ nova/scheduler/zone_aware_scheduler.py | 16 ++++++++++++---- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 483f3225..17f63d4a 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -286,3 +286,21 @@ def choose_driver(driver_name=None): if "%s.%s" % (driver.__module__, driver.__name__) == driver_name: return driver() raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) + + +class HostFilterScheduler(ZoneAwareScheduler): + """The HostFilterScheduler uses the HostFilter drivers to filter + hosts for weighing. The particular driver used may be passed in + as an argument or the default will be used.""" + + def filter_hosts(self, num, specs): + """Filter the full host list (from the ZoneManager)""" + driver_name = specs.get("filter_driver", None) + driver = host_filter.choose_driver(driver_name) + + # TODO(sandy): We're only using InstanceType-based specs + # currently. Later we'll need to snoop for more detailed + # host filter requests. + instance_type = specs['instance_type'] + query = driver.instance_type_to_filter(query) + return driver.filter_hosts(self.zone_manager, query) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index b3d230bd..fde8b679 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -25,6 +25,7 @@ import operator from nova import log as logging from nova.scheduler import api from nova.scheduler import driver +from nova.scheduler import host_filter LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler') @@ -36,7 +37,7 @@ class ZoneAwareScheduler(driver.Scheduler): """Call novaclient zone method. Broken out for testing.""" return api.call_zone_method(context, method, specs=specs) - def schedule_run_instance(self, context, topic='compute', specs={}, + def schedule_run_instance(self, context, instance_id, instance_type, *args, **kwargs): """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being @@ -46,6 +47,9 @@ class ZoneAwareScheduler(driver.Scheduler): to simply create the instance (either in this zone or a child zone).""" + # TODO(sandy): We'll have to look for richer specs at some point. + specs = instance_type + if 'blob' in specs: return self.provision_instance(context, topic, specs) @@ -58,7 +62,7 @@ class ZoneAwareScheduler(driver.Scheduler): """Create the requested instance in this Zone or a child zone.""" pass - def select(self, context, *args, **kwargs): + def select(self, context, specs, *args, **kwargs): """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal @@ -80,9 +84,13 @@ class ZoneAwareScheduler(driver.Scheduler): ordered by their fitness. """ - #TODO(sandy): extract these from args. 
+ if topic != "compute": + raise NotImplemented(_("Zone Aware Scheduler only understands " + "Compute nodes (for now)")) + + specs = args['instance_type'] + #TODO(sandy): how to infer this from OS API params? num_instances = 1 - specs = {} # Filter local hosts based on requirements ... host_list = self.filter_hosts(num_instances, specs) From 9e97b681d4063e53d050af9e306ec09c99d0c52b Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 13 May 2011 15:36:42 -0500 Subject: [PATCH 06/65] first cut at weighted-sum tests --- nova/scheduler/zone_aware_scheduler.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index b3d230bd..38b395d5 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -89,6 +89,8 @@ class ZoneAwareScheduler(driver.Scheduler): # then weigh the selected hosts. # weighted = [{weight=weight, name=hostname}, ...] + # TODO(sirp): weigh_hosts should also be a function of 'topic' or + # resources, so that we can apply different objective functions to it weighted = self.weigh_hosts(num_instances, specs, host_list) # Next, tack on the best weights from the child zones ... From 9a458b57f4ecc25fa1b418764cd26a7653507a1f Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Sun, 15 May 2011 14:15:37 +0400 Subject: [PATCH 07/65] Unit test for snapshotting (creating custom image). --- nova/tests/test_virt.py | 57 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 1311ba36..eb238e87 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -159,6 +159,7 @@ class LibvirtConnTestCase(test.TestCase): 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', + 'image_id': '123456', 'instance_type_id': '5'} # m1.small def lazy_load_library_exists(self): @@ -279,6 +280,62 @@ class LibvirtConnTestCase(test.TestCase): instance_data = dict(self.test_instance) self._check_xml_and_container(instance_data) + def test_snapshot(self): + FLAGS.image_service = 'nova.image.fake.FakeImageService' + + # Only file-based instance storages are supported at the moment + test_xml = """ + + + + + + + + """ + + class FakeVirtDomain(object): + + def __init__(self): + pass + + def snapshotCreateXML(self, *args): + return None + + def XMLDesc(self, *args): + return test_xml + + def fake_lookup(instance_name): + if instance_name == instance_ref.name: + return FakeVirtDomain() + + def fake_execute(*args): + # Touch filename to pass 'with open(out_path)' + open(args[-1], "a").close() + + # Start test + image_service = utils.import_object(FLAGS.image_service) + + # Assuming that base image already exists in image_service + instance_ref = db.instance_create(self.context, self.test_instance) + properties = {'instance_id': instance_ref['id'], + 'user_id': str(self.context.user_id)} + sent_meta = {'name': 'test-snap', 'is_public': False, + 'properties': properties} + # Create new image. 
It will be updated in snapshot method + # To work with it from snapshot, the single image_service is needed + recv_meta = image_service.create(context, sent_meta) + + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') + libvirt_conn.LibvirtConnection._conn.lookupByName = fake_lookup + self.mox.StubOutWithMock(libvirt_conn.utils, 'execute') + libvirt_conn.utils.execute = fake_execute + + self.mox.ReplayAll() + + conn = libvirt_conn.LibvirtConnection(False) + conn.snapshot(instance_ref, recv_meta['id']) + def test_multi_nic(self): instance_data = dict(self.test_instance) network_info = _create_network_info(2) From 27edb43d56fa7cf648ca8c0cb81d87ad9a7a0141 Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Sun, 15 May 2011 15:11:54 +0400 Subject: [PATCH 08/65] Define image state during snapshotting. Name snapshot to the name provided, not generate. --- nova/tests/test_virt.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index eb238e87..c4fcc21c 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -320,8 +320,9 @@ class LibvirtConnTestCase(test.TestCase): instance_ref = db.instance_create(self.context, self.test_instance) properties = {'instance_id': instance_ref['id'], 'user_id': str(self.context.user_id)} - sent_meta = {'name': 'test-snap', 'is_public': False, - 'properties': properties} + snapshot_name = 'test-snap' + sent_meta = {'name': snapshot_name, 'is_public': False, + 'status': 'creating', 'properties': properties} # Create new image. It will be updated in snapshot method # To work with it from snapshot, the single image_service is needed recv_meta = image_service.create(context, sent_meta) @@ -336,6 +337,11 @@ class LibvirtConnTestCase(test.TestCase): conn = libvirt_conn.LibvirtConnection(False) conn.snapshot(instance_ref, recv_meta['id']) + snapshot = image_service.show(context, recv_meta['id']) + self.assertEquals(snapshot['properties']['image_state'], 'available') + self.assertEquals(snapshot['status'], 'active') + self.assertEquals(snapshot['name'], snapshot_name) + def test_multi_nic(self): instance_data = dict(self.test_instance) network_info = _create_network_info(2) From e7c9b7876e8f2f19670e1d42bc7afc5a66290a64 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 16 May 2011 15:37:25 -0700 Subject: [PATCH 09/65] basic call going through --- nova/scheduler/host_filter.py | 19 +++++++-------- nova/scheduler/zone_aware_scheduler.py | 32 ++++++++++++-------------- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 17f63d4a..a47e41da 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -42,6 +42,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import utils +from nova.scheduler import zone_aware_scheduler LOG = logging.getLogger('nova.scheduler.host_filter') @@ -83,8 +84,8 @@ class AllHostsFilter(HostFilter): for host, services in zone_manager.service_states.iteritems()] -class FlavorFilter(HostFilter): - """HostFilter driver hard-coded to work with flavors.""" +class InstanceTypeFilter(HostFilter): + """HostFilter driver hard-coded to work with InstanceType records.""" def instance_type_to_filter(self, instance_type): """Use instance_type to filter hosts.""" @@ -271,7 +272,7 @@ class JsonFilter(HostFilter): return hosts -DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter] +DRIVERS = [AllHostsFilter, 
InstanceTypeFilter, JsonFilter] def choose_driver(driver_name=None): @@ -288,19 +289,19 @@ def choose_driver(driver_name=None): raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) -class HostFilterScheduler(ZoneAwareScheduler): +class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): """The HostFilterScheduler uses the HostFilter drivers to filter hosts for weighing. The particular driver used may be passed in as an argument or the default will be used.""" - def filter_hosts(self, num, specs): + def filter_hosts(self, num, request_spec): """Filter the full host list (from the ZoneManager)""" - driver_name = specs.get("filter_driver", None) - driver = host_filter.choose_driver(driver_name) + driver_name = request_spec.get("filter_driver", None) + driver = choose_driver(driver_name) # TODO(sandy): We're only using InstanceType-based specs # currently. Later we'll need to snoop for more detailed # host filter requests. - instance_type = specs['instance_type'] - query = driver.instance_type_to_filter(query) + instance_type = request_spec['instance_type'] + query = driver.instance_type_to_filter(instance_type) return driver.filter_hosts(self.zone_manager, query) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index fde8b679..f9c5f65f 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -25,7 +25,6 @@ import operator from nova import log as logging from nova.scheduler import api from nova.scheduler import driver -from nova.scheduler import host_filter LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler') @@ -37,7 +36,7 @@ class ZoneAwareScheduler(driver.Scheduler): """Call novaclient zone method. Broken out for testing.""" return api.call_zone_method(context, method, specs=specs) - def schedule_run_instance(self, context, instance_id, instance_type, + def schedule_run_instance(self, context, instance_id, request_spec, *args, **kwargs): """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being @@ -48,13 +47,12 @@ class ZoneAwareScheduler(driver.Scheduler): a child zone).""" # TODO(sandy): We'll have to look for richer specs at some point. - specs = instance_type - if 'blob' in specs: - return self.provision_instance(context, topic, specs) + if 'blob' in request_spec: + return self.provision_instance(context, topic, request_spec) # Create build plan and provision ... - build_plan = self.select(context, specs) + build_plan = self.select(context, request_spec) for item in build_plan: self.provision_instance(context, topic, item) @@ -62,24 +60,24 @@ class ZoneAwareScheduler(driver.Scheduler): """Create the requested instance in this Zone or a child zone.""" pass - def select(self, context, specs, *args, **kwargs): + def select(self, context, request_spec, *args, **kwargs): """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal anything about the children.""" - return self._schedule(context, "compute", *args, **kwargs) + return self._schedule(context, "compute", request_spec, *args, **kwargs) - def schedule(self, context, topic, *args, **kwargs): + def schedule(self, context, topic, request_spec, *args, **kwargs): """The schedule() contract requires we return the one best-suited host for this request. 
""" - res = self._schedule(context, topic, *args, **kwargs) + res = self._schedule(context, topic, request_spec, *args, **kwargs) # TODO(sirp): should this be a host object rather than a weight-dict? if not res: raise driver.NoValidHost(_('No hosts were available')) return res[0] - def _schedule(self, context, topic, *args, **kwargs): + def _schedule(self, context, topic, request_spec, *args, **kwargs): """Returns a list of hosts that meet the required specs, ordered by their fitness. """ @@ -88,20 +86,20 @@ class ZoneAwareScheduler(driver.Scheduler): raise NotImplemented(_("Zone Aware Scheduler only understands " "Compute nodes (for now)")) - specs = args['instance_type'] + LOG.debug("specs = %s, ARGS = %s" % (request_spec, args, )) #TODO(sandy): how to infer this from OS API params? num_instances = 1 # Filter local hosts based on requirements ... - host_list = self.filter_hosts(num_instances, specs) + host_list = self.filter_hosts(num_instances, request_spec) # then weigh the selected hosts. # weighted = [{weight=weight, name=hostname}, ...] - weighted = self.weigh_hosts(num_instances, specs, host_list) + weighted = self.weigh_hosts(num_instances, request_spec, host_list) # Next, tack on the best weights from the child zones ... child_results = self._call_zone_method(context, "select", - specs=specs) + specs=request_spec) for child_zone, result in child_results: for weighting in result: # Remember the child_zone so we can get back to @@ -116,12 +114,12 @@ class ZoneAwareScheduler(driver.Scheduler): weighted.sort(key=operator.itemgetter('weight')) return weighted - def filter_hosts(self, num, specs): + def filter_hosts(self, num, request_spec): """Derived classes must override this method and return a list of hosts in [(hostname, capability_dict)] format.""" raise NotImplemented() - def weigh_hosts(self, num, specs, hosts): + def weigh_hosts(self, num, request_spec, hosts): """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format.""" raise NotImplemented() From b4b2ee96bff839e8c27b070cea0d1f4dd8b9fe91 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 17 May 2011 05:27:50 -0700 Subject: [PATCH 10/65] tests fixed and pep8'ed --- nova/scheduler/host_filter.py | 10 +++++++--- nova/scheduler/zone_aware_scheduler.py | 11 ++++++++--- nova/tests/test_host_filter.py | 11 ++++++----- 3 files changed, 21 insertions(+), 11 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 8519b8b5..2b0d9af7 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -292,11 +292,15 @@ def choose_driver(driver_name=None): class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): """The HostFilterScheduler uses the HostFilter drivers to filter hosts for weighing. The particular driver used may be passed in - as an argument or the default will be used.""" + as an argument or the default will be used. 
+ + request_spec = {'filter_driver': , + 'instance_type': } + """ def filter_hosts(self, num, request_spec): """Filter the full host list (from the ZoneManager)""" - driver_name = request_spec.get("filter_driver", None) + driver_name = request_spec.get('filter_driver', None) driver = choose_driver(driver_name) # TODO(sandy): We're only using InstanceType-based specs @@ -309,4 +313,4 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): def weigh_hosts(self, num, request_spec, hosts): """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format.""" - return [] + return [dict(weight=1, hostname=hostname) for host, caps in hosts] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 2fc5f1f8..614b1bb8 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -59,20 +59,25 @@ class ZoneAwareScheduler(driver.Scheduler): for item in build_plan: self.provision_instance(context, topic, item) + # Returning None short-circuits the routing to Compute (since + # we've already done it here) + return None + def provision_instance(context, topic, item): """Create the requested instance in this Zone or a child zone.""" - pass + return None def select(self, context, request_spec, *args, **kwargs): """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal anything about the children.""" - return self._schedule(context, "compute", request_spec, *args, **kwargs) + return self._schedule(context, "compute", request_spec, + *args, **kwargs) # TODO(sandy): We're only focused on compute instances right now, # so we don't implement the default "schedule()" method required - # of Schedulers. + # of Schedulers. def schedule(self, context, topic, request_spec, *args, **kwargs): """The schedule() contract requires we return the one best-suited host for this request. diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index c029d41e..dd2325cc 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -85,9 +85,9 @@ class HostFilterTestCase(test.TestCase): 'nova.scheduler.host_filter.AllHostsFilter') # Test valid driver ... driver = host_filter.choose_driver( - 'nova.scheduler.host_filter.FlavorFilter') + 'nova.scheduler.host_filter.InstanceTypeFilter') self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.FlavorFilter') + 'nova.scheduler.host_filter.InstanceTypeFilter') # Test invalid driver ... 
try: host_filter.choose_driver('does not exist') @@ -103,11 +103,12 @@ class HostFilterTestCase(test.TestCase): for host, capabilities in hosts: self.assertTrue(host.startswith('host')) - def test_flavor_driver(self): - driver = host_filter.FlavorFilter() + def test_instance_type_driver(self): + driver = host_filter.InstanceTypeFilter() # filter all hosts that can support 50 ram and 500 disk name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name) + self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', + name) hosts = driver.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] From dde0f19290d99893b33be6e46ea9e5847f17b399 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 17 May 2011 05:43:06 -0700 Subject: [PATCH 11/65] ugh, fixed again --- nova/tests/test_zone_aware_scheduler.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py index fdcde34c..37169fb9 100644 --- a/nova/tests/test_zone_aware_scheduler.py +++ b/nova/tests/test_zone_aware_scheduler.py @@ -116,4 +116,6 @@ class ZoneAwareSchedulerTestCase(test.TestCase): sched.set_zone_manager(zm) fake_context = {} - self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {}) + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, 1, + dict(host_filter=None, instance_type={})) From c68d446324824f6833686d73c6a19a318f7339c9 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:38:44 +0400 Subject: [PATCH 12/65] Moved memcached connection in AuthManager to thread-local storage. Added caching of LDAP connection in thread-local storage. Optimized LDAP queries, added similar memcached support to LDAPDriver. Add "per-driver-request" caching of LDAP results. (should be per-api-request) --- nova/auth/ldapdriver.py | 93 +++++++++++++++++++++++++++++++++++++---- nova/auth/manager.py | 20 ++++++--- 2 files changed, 98 insertions(+), 15 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 3f843285..7849d941 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -24,7 +24,9 @@ other backends by creating another class that exposes the same public methods. 
""" +import functools import sys +import threading from nova import exception from nova import flags @@ -85,6 +87,7 @@ def _clean(attr): def sanitize(fn): """Decorator to sanitize all args""" + @functools.wraps(fn) def _wrapped(self, *args, **kwargs): args = [_clean(x) for x in args] kwargs = dict((k, _clean(v)) for (k, v) in kwargs) @@ -103,29 +106,74 @@ class LdapDriver(object): isadmin_attribute = 'isNovaAdmin' project_attribute = 'owner' project_objectclass = 'groupOfNames' + __local = threading.local() def __init__(self): """Imports the LDAP module""" self.ldap = __import__('ldap') - self.conn = None if FLAGS.ldap_schema_version == 1: LdapDriver.project_pattern = '(objectclass=novaProject)' LdapDriver.isadmin_attribute = 'isAdmin' LdapDriver.project_attribute = 'projectManager' LdapDriver.project_objectclass = 'novaProject' + self.__cache = None def __enter__(self): """Creates the connection to LDAP""" - self.conn = self.ldap.initialize(FLAGS.ldap_url) - self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + # TODO(yorik-sar): Should be per-request cache, not per-driver-request + self.__cache = {} return self def __exit__(self, exc_type, exc_value, traceback): """Destroys the connection to LDAP""" - self.conn.unbind_s() + self.__cache = None return False + def __local_cache(key_fmt): + """Wrap function to cache it's result in self.__cache. + Works only with functions with one fixed argument. + """ + def do_wrap(fn): + @functools.wraps(fn) + def inner(self, arg, **kwargs): + cache_key = key_fmt % (arg,) + try: + res = self.__cache[cache_key] + LOG.debug('Local cache hit for %s by key %s' % + (fn.__name__, cache_key)) + return res + except KeyError: + res = fn(self, arg, **kwargs) + self.__cache[cache_key] = res + return res + return inner + return do_wrap + + @property + def conn(self): + try: + return self.__local.conn + except AttributeError: + conn = self.ldap.initialize(FLAGS.ldap_url) + conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + self.__local.conn = conn + return conn + + @property + def mc(self): + try: + return self.__local.mc + except AttributeError: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + mc = memcache.Client(FLAGS.memcached_servers, debug=0) + self.__local.mc = mc + return mc + @sanitize + @__local_cache('uid_user-%s') def get_user(self, uid): """Retrieve user by id""" attr = self.__get_ldap_user(uid) @@ -134,15 +182,30 @@ class LdapDriver(object): @sanitize def get_user_from_access_key(self, access): """Retrieve user by access key""" + cache_key = 'uak_dn_%s'%(access,) + user_dn = self.mc.get(cache_key) + if user_dn: + user = self.__to_user( + self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE)) + if user: + if user['access'] == access: + return user + else: + self.mc.set(cache_key, None) query = '(accessKey=%s)' % access dn = FLAGS.ldap_user_subtree - return self.__to_user(self.__find_object(dn, query)) + user_obj = self.__find_object(dn, query) + user = self.__to_user(user_obj) + if user: + self.mc.set(cache_key, user_obj['dn'][0]) + return user @sanitize + @__local_cache('pid_project-%s') def get_project(self, pid): """Retrieve project by id""" - dn = self.__project_to_dn(pid) - attr = self.__find_object(dn, LdapDriver.project_pattern) + dn = self.__project_to_dn(pid, search=False) + attr = self.__find_object(dn, LdapDriver.project_pattern, scope=self.ldap.SCOPE_BASE) return self.__to_project(attr) @sanitize @@ -395,6 +458,7 @@ class LdapDriver(object): """Check if project 
exists""" return self.get_project(project_id) is not None + @__local_cache('uid_attrs-%s') def __get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" dn = FLAGS.ldap_user_subtree @@ -426,12 +490,20 @@ class LdapDriver(object): if scope is None: # One of the flags is 0! scope = self.ldap.SCOPE_SUBTREE + if query is None: + query = "(objectClass=*)" try: res = self.conn.search_s(dn, scope, query) except self.ldap.NO_SUCH_OBJECT: return [] # Just return the attributes - return [attributes for dn, attributes in res] + # FIXME(yorik-sar): Whole driver should be refactored to + # prevent this hack + res1 = [] + for dn, attrs in res: + attrs['dn'] = [dn] + res1.append(attrs) + return res1 def __find_role_dns(self, tree): """Find dns of role objects in given tree""" @@ -564,6 +636,7 @@ class LdapDriver(object): 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} + @__local_cache('uid_dn-%s') def __uid_to_dn(self, uid, search=True): """Convert uid to dn""" # By default return a generated DN @@ -576,6 +649,7 @@ class LdapDriver(object): userdn = user[0] return userdn + @__local_cache('pid_dn-%s') def __project_to_dn(self, pid, search=True): """Convert pid to dn""" # By default return a generated DN @@ -603,10 +677,11 @@ class LdapDriver(object): else: return None + @__local_cache('dn_uid-%s') def __dn_to_uid(self, dn): """Convert user dn to uid""" query = '(objectclass=novaUser)' - user = self.__find_object(dn, query) + user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE) return user[FLAGS.ldap_user_id_attribute][0] diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 07235a2a..c71f0f16 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,6 +23,7 @@ Nova authentication management import os import shutil import string # pylint: disable=W0402 +import threading import tempfile import uuid import zipfile @@ -206,6 +207,7 @@ class AuthManager(object): """ _instance = None + __local = threading.local() def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" @@ -223,12 +225,18 @@ class AuthManager(object): if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache - self.mc = memcache.Client(FLAGS.memcached_servers, - debug=0) + @property + def mc(self): + try: + return self.__local.mc + except AttributeError: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + mc = memcache.Client(FLAGS.memcached_servers, debug=0) + self.__local.mc = mc + return mc def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', From 5f7650996157839beb5dcca5e4955675381e1b9d Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:39:19 +0400 Subject: [PATCH 13/65] Fixed mistyped key, caused huge performance leak. 
--- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index 51979364..5c536f6d 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues): return name in self.__dict__['__dirty'] def ClearDirty(self): - self.__dict__['__is_dirty'] = [] + self.__dict__['__dirty'] = [] def WasAlreadyParsed(self): return self.__dict__['__was_already_parsed'] From b7293a358b59ba473e565d24307e6f33d04aa1c2 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 17 May 2011 17:45:48 +0400 Subject: [PATCH 14/65] PEP8 fixes. --- nova/auth/ldapdriver.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 7849d941..9fe0165a 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -182,7 +182,7 @@ class LdapDriver(object): @sanitize def get_user_from_access_key(self, access): """Retrieve user by access key""" - cache_key = 'uak_dn_%s'%(access,) + cache_key = 'uak_dn_%s' % (access,) user_dn = self.mc.get(cache_key) if user_dn: user = self.__to_user( @@ -205,7 +205,8 @@ class LdapDriver(object): def get_project(self, pid): """Retrieve project by id""" dn = self.__project_to_dn(pid, search=False) - attr = self.__find_object(dn, LdapDriver.project_pattern, scope=self.ldap.SCOPE_BASE) + attr = self.__find_object(dn, LdapDriver.project_pattern, + scope=self.ldap.SCOPE_BASE) return self.__to_project(attr) @sanitize From 85f32a99d048a7eb6f10d1605ff35ada22c0e711 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 17 May 2011 07:49:12 -0700 Subject: [PATCH 15/65] provision working correctly now --- nova/scheduler/host_filter.py | 19 ++++++++++--------- nova/scheduler/zone_aware_scheduler.py | 26 +++++++++++++++++++++----- 2 files changed, 31 insertions(+), 14 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 2b0d9af7..92ec827d 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -99,9 +99,10 @@ class InstanceTypeFilter(HostFilter): capabilities = services.get('compute', {}) host_ram_mb = capabilities['host_memory_free'] disk_bytes = capabilities['disk_available'] - if host_ram_mb >= instance_type['memory_mb'] and \ - disk_bytes >= instance_type['local_gb']: - selected_hosts.append((host, capabilities)) + spec_ram = instance_type['memory_mb'] + spec_disk = instance_type['local_gb'] + if host_ram_mb >= spec_ram and disk_bytes >= spec_disk: + selected_hosts.append((host, capabilities)) return selected_hosts #host entries (currently) are like: @@ -110,15 +111,15 @@ class InstanceTypeFilter(HostFilter): # 'host_memory_total': 8244539392, # 'host_memory_overhead': 184225792, # 'host_memory_free': 3868327936, -# 'host_memory_free_computed': 3840843776}, -# 'host_other-config': {}, +# 'host_memory_free_computed': 3840843776, +# 'host_other_config': {}, # 'host_ip_address': '192.168.1.109', # 'host_cpu_info': {}, # 'disk_available': 32954957824, # 'disk_total': 50394562560, -# 'disk_used': 17439604736}, +# 'disk_used': 17439604736, # 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', -# 'host_name-label': 'xs-mini'} +# 'host_name_label': 'xs-mini'} # instance_type table has: #name = Column(String(255), unique=True) @@ -307,10 +308,10 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): # currently. Later we'll need to snoop for more detailed # host filter requests. 
instance_type = request_spec['instance_type'] - query = driver.instance_type_to_filter(instance_type) + name, query = driver.instance_type_to_filter(instance_type) return driver.filter_hosts(self.zone_manager, query) def weigh_hosts(self, num, request_spec, hosts): """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format.""" - return [dict(weight=1, hostname=hostname) for host, caps in hosts] + return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 614b1bb8..3ebb4cae 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -22,6 +22,8 @@ across zones. There are two expansion points to this class for: import operator +from nova import db +from nova import rpc from nova import log as logging from nova.scheduler import api from nova.scheduler import driver @@ -49,7 +51,8 @@ class ZoneAwareScheduler(driver.Scheduler): # TODO(sandy): We'll have to look for richer specs at some point. if 'blob' in request_spec: - return self.provision_instance(context, topic, request_spec) + return self.provision_resource(context, request_spec, + instance_id, kwargs) # Create build plan and provision ... build_plan = self.select(context, request_spec) @@ -57,14 +60,28 @@ class ZoneAwareScheduler(driver.Scheduler): raise driver.NoValidHost(_('No hosts were available')) for item in build_plan: - self.provision_instance(context, topic, item) + self.provision_resource(context, item, instance_id, kwargs) # Returning None short-circuits the routing to Compute (since # we've already done it here) return None - def provision_instance(context, topic, item): - """Create the requested instance in this Zone or a child zone.""" + def provision_resource(self, context, item, instance_id, kwargs): + """Create the requested resource in this Zone or a child zone.""" + if "hostname" in item: + host = item['hostname'] + kwargs['instance_id'] = instance_id + rpc.cast(context, + db.queue_get_for(context, "compute", host), + {"method": "run_instance", + "args": kwargs}) + LOG.debug(_("Casted to compute %(host)s for run_instance") + % locals()) + else: + # TODO(sandy) Provision in child zone ... + LOG.warning(_("Provision to Child Zone not supported (yet)") + % locals()) + pass return None def select(self, context, request_spec, *args, **kwargs): @@ -93,7 +110,6 @@ class ZoneAwareScheduler(driver.Scheduler): raise NotImplemented(_("Zone Aware Scheduler only understands " "Compute nodes (for now)")) - LOG.debug("specs = %s, ARGS = %s" % (request_spec, args, )) #TODO(sandy): how to infer this from OS API params? num_instances = 1 From 83a86dadd49272723b29a022dd757555f617caca Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 17 May 2011 07:52:02 -0700 Subject: [PATCH 16/65] provision_resource no longer returns value --- nova/scheduler/zone_aware_scheduler.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 3ebb4cae..2050c891 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -51,8 +51,8 @@ class ZoneAwareScheduler(driver.Scheduler): # TODO(sandy): We'll have to look for richer specs at some point. 
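        # (Illustrative aside: a request_spec that already carries a 'blob'
        # is taken to be an answer handed back from a child zone -- the
        # encrypted host selection that select() alludes to -- so it is
        # provisioned directly below rather than re-running the
        # filter/weigh passes on it.)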
if 'blob' in request_spec: - return self.provision_resource(context, request_spec, - instance_id, kwargs) + self.provision_resource(context, request_spec, instance_id, kwargs) + return None # Create build plan and provision ... build_plan = self.select(context, request_spec) @@ -82,7 +82,6 @@ class ZoneAwareScheduler(driver.Scheduler): LOG.warning(_("Provision to Child Zone not supported (yet)") % locals()) pass - return None def select(self, context, request_spec, *args, **kwargs): """Select returns a list of weights and zone/host information From bd5e1b6ff28d3285b6d83c9d0104da3f74c28b13 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 10:45:19 -0500 Subject: [PATCH 17/65] First cut at least cost scheduler --- nova/scheduler/least_cost.py | 79 +++++++++++++++++++++++++ nova/tests/test_least_cost_scheduler.py | 39 ++++++++++++ 2 files changed, 118 insertions(+) create mode 100644 nova/scheduler/least_cost.py create mode 100644 nova/tests/test_least_cost_scheduler.py diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py new file mode 100644 index 00000000..75dde81c --- /dev/null +++ b/nova/scheduler/least_cost.py @@ -0,0 +1,79 @@ +import collections + +# TODO(sirp): this should be just `zone_aware` to match naming scheme +# TODO(sirp): perhaps all zone-aware stuff should go under a `zone_aware` +# module +from nova.scheduler import zone_aware_scheduler + +class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): + def get_cost_fns(self): + """Returns a list of tuples containing weights and cost functions to + use for weighing hosts + """ + cost_fns = [] + + return cost_fns + + def weigh_hosts(self, num, specs, hosts): + """ + Returns a list of dictionaries of form: + [ {weight: weight, hostname: hostname} ] + """ + # FIXME(sirp): weigh_hosts should handle more than just instances + cost_fns = [] + hosts = [] + cost_hosts = weighted_sum(domain=hosts, weighted_fns=self.get_cost_fns()) + + # TODO convert hosts back to hostnames + weight_hostnames = [] + return weight_hostnames + +def normalize_list(L): + """Normalize an array of numbers such that each element satisfies: + 0 <= e <= 1 + """ + if not L: + return L + max_ = max(L) + if max_ > 0: + return [(float(e) / max_) for e in L] + return L + +def weighted_sum(domain, weighted_fns, normalize=True): + """ + Use the weighted-sum method to compute a score for an array of objects. + Normalize the results of the objective-functions so that the weights are + meaningful regardless of objective-function's range. + + domain - input to be scored + weighted_fns - list of weights and functions like: + [(weight, objective-functions)] + + Returns an unsorted list like: [(score, elem)] + """ + # Table of form: + # { domain1: [score1, score2, ..., scoreM] + # ... 
+ # domainN: [score1, score2, ..., scoreM] } + score_table = collections.defaultdict(list) + + for weight, fn in weighted_fns: + scores = [fn(elem) for elem in domain] + + if normalize: + norm_scores = normalize_list(scores) + else: + norm_scores = scores + + for idx, score in enumerate(norm_scores): + weighted_score = score * weight + score_table[idx].append(weighted_score) + + # Sum rows in table to compute score for each element in domain + domain_scores = [] + for idx in sorted(score_table): + elem_score = sum(score_table[idx]) + elem = domain[idx] + domain_scores.append(elem_score) + + return domain_scores diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py new file mode 100644 index 00000000..a3a18a09 --- /dev/null +++ b/nova/tests/test_least_cost_scheduler.py @@ -0,0 +1,39 @@ +from nova import test +from nova.scheduler import least_cost + +MB = 1024 * 1024 + +class FakeHost(object): + def __init__(self, host_id, free_ram, io): + self.id = host_id + self.free_ram = free_ram + self.io = io + +class WeightedSumTest(test.TestCase): + def test_empty_domain(self): + domain = [] + weighted_fns = [] + result = least_cost.weighted_sum(domain, weighted_fns) + expected = [] + self.assertEqual(expected, result) + + def test_basic_costing(self): + hosts = [ + FakeHost(1, 512 * MB, 100), + FakeHost(2, 256 * MB, 400), + FakeHost(3, 512 * MB, 100) + ] + + weighted_fns = [ + (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* + (2, lambda h: h.io), # Avoid high I/O + ] + + costs = least_cost.weighted_sum(domain=hosts, weighted_fns=weighted_fns) + + # Each 256 MB unit of free-ram contributes 0.5 points by way of: + # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 + # Each 100 iops of IO adds 0.5 points by way of: + # cost = 2 * (100/400) = 2 * 0.25 = 0.5 + expected = [1.5, 2.5, 1.5] + self.assertEqual(expected, costs) From c3a46866cca19c76d14d0c7e94f3a3f976a1cd6e Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 17 May 2011 13:01:28 -0700 Subject: [PATCH 18/65] pep8 --- nova/scheduler/zone_aware_scheduler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 2050c891..dc18fc42 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -78,7 +78,7 @@ class ZoneAwareScheduler(driver.Scheduler): LOG.debug(_("Casted to compute %(host)s for run_instance") % locals()) else: - # TODO(sandy) Provision in child zone ... + # TODO(sandy) Provision in child zone ... 
LOG.warning(_("Provision to Child Zone not supported (yet)") % locals()) pass From 293187354b3fbd809acc1996f8fc5b54effefce5 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 17:27:04 -0500 Subject: [PATCH 19/65] Adding fill first cost function --- nova/scheduler/least_cost.py | 12 +++++++++ nova/tests/test_least_cost_scheduler.py | 33 +++++++++++++++++++++++-- 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index e47951f1..79376c35 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -34,6 +34,8 @@ flags.DEFINE_list('least_cost_scheduler_cost_functions', 'Which cost functions the LeastCostScheduler should use.') +# TODO(sirp): Once we have enough of these rules, we can break them out into a +# cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, 'How much weight to give the noop cost function') def noop_cost_fn(host): @@ -41,6 +43,16 @@ def noop_cost_fn(host): return 1 +flags.DEFINE_integer('fill_first_cost_fn_weight', 1, + 'How much weight to give the fill-first cost function') +def fill_first_cost_fn(host): + """Prefer hosts that have less ram available, filter_hosts will exclude + hosts that don't have enough ram""" + hostname, caps = host + free_mem = caps['compute']['host_memory_free'] + return free_mem + + class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): def get_cost_fns(self): """Returns a list of tuples containing weights and cost functions to diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py index b2318a3b..b7bcd2f0 100644 --- a/nova/tests/test_least_cost_scheduler.py +++ b/nova/tests/test_least_cost_scheduler.py @@ -119,7 +119,7 @@ class LeastCostSchedulerTestCase(test.TestCase): def assertWeights(self, expected, num, request_spec, hosts): weighted = self.sched.weigh_hosts(num, request_spec, hosts) - self.assertDictListMatch(weighted, expected) + self.assertDictListMatch(weighted, expected, approx_equal=True) def test_no_hosts(self): num = 1 @@ -137,12 +137,41 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) expected = [ dict(weight=1, hostname=hostname) for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) + def test_cost_fn_weights(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] FLAGS.noop_cost_fn_weight = 2 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + expected = [ dict(weight=2, hostname=hostname) for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) + + def test_fill_first_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.fill_first_cost_fn' + ] + FLAGS.fill_first_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [] + for idx, (hostname, caps) in enumerate(hosts): + # Costs are normalized so over 10 hosts, each host with increasing + # free ram will cost 1/N more. 
Since the lowest cost host has some + # free ram, we add in the 1/N for the base_cost + weight = 0.1 + (0.1 * idx) + weight_dict = dict(weight=weight, hostname=hostname) + expected.append(weight_dict) + + self.assertWeights(expected, num, request_spec, hosts) From 2000f6da7b164152dd17273b0498081dffc2a953 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 17:44:08 -0500 Subject: [PATCH 20/65] Using import_class to import filter_host driver --- nova/scheduler/host_filter.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 7cb41a43..117f0824 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -42,6 +42,7 @@ from nova import exception from nova import flags from nova import log as logging from nova.scheduler import zone_aware_scheduler +from nova import utils LOG = logging.getLogger('nova.scheduler.host_filter') @@ -283,11 +284,13 @@ def choose_driver(driver_name=None): if not driver_name: driver_name = FLAGS.default_host_filter_driver - # FIXME(sirp): use utils.import_class here - for driver in DRIVERS: - if "%s.%s" % (driver.__module__, driver.__name__) == driver_name: - return driver() - raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) + + try: + driver = utils.import_object(driver_name) + return driver + except exception.ClassNotFound: + raise exception.SchedulerHostFilterDriverNotFound( + driver_name=driver_name) class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): From f0996829157d87cd46991c499fdfbea597e50c6d Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:15:31 -0500 Subject: [PATCH 21/65] Pep8 fixes --- nova/scheduler/least_cost.py | 30 +++++++++++++++++-------- nova/scheduler/zone_aware_scheduler.py | 11 ++++----- nova/tests/test_least_cost_scheduler.py | 18 +++++++++------ 3 files changed, 38 insertions(+), 21 deletions(-) diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 79376c35..629fe2e4 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -13,16 +13,19 @@ # License for the specific language governing permissions and limitations # under the License. """ -Helpful docstring here +Least Cost Scheduler is a mechanism for choosing which host machines to +provision a set of resources to. The input of the least-cost-scheduler is a +set of objective-functions, called the 'cost-functions', a weight for each +cost-function, and a list of candidate hosts (gathered via FilterHosts). + +The cost-function and weights are tabulated, and the host with the least cost +is then selected for provisioning. 
""" import collections from nova import flags from nova import log as logging -# TODO(sirp): this should be just `zone_aware` to match naming scheme -# TODO(sirp): perhaps all zone-aware stuff should go under a `zone_aware` -# module from nova.scheduler import zone_aware_scheduler from nova import utils @@ -38,6 +41,8 @@ flags.DEFINE_list('least_cost_scheduler_cost_functions', # cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, 'How much weight to give the noop cost function') + + def noop_cost_fn(host): """Return a pre-weight cost of 1 for each host""" return 1 @@ -45,6 +50,8 @@ def noop_cost_fn(host): flags.DEFINE_integer('fill_first_cost_fn_weight', 1, 'How much weight to give the fill-first cost function') + + def fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude hosts that don't have enough ram""" @@ -68,7 +75,7 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): except exception.ClassNotFound: raise exception.SchedulerCostFunctionNotFound( cost_fn_str=cost_fn_str) - + try: weight = getattr(FLAGS, "%s_weight" % cost_fn.__name__) except AttributeError: @@ -82,17 +89,22 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): def weigh_hosts(self, num, request_spec, hosts): """Returns a list of dictionaries of form: [ {weight: weight, hostname: hostname} ]""" + # FIXME(sirp): weigh_hosts should handle more than just instances - hostnames = [hostname for hostname, _ in hosts] + hostnames = [hostname for hostname, caps in hosts] cost_fns = self.get_cost_fns() costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) - + weighted = [] + weight_log = [] for cost, hostname in zip(costs, hostnames): + weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) weight_dict = dict(weight=cost, hostname=hostname) weighted.append(weight_dict) - return weighted + + LOG.debug(_("Weighted Costs => %s") % weight_log) + return weighted def normalize_list(L): @@ -110,7 +122,7 @@ def weighted_sum(domain, weighted_fns, normalize=True): """Use the weighted-sum method to compute a score for an array of objects. Normalize the results of the objective-functions so that the weights are meaningful regardless of objective-function's range. - + domain - input to be scored weighted_fns - list of weights and functions like: [(weight, objective-functions)] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index fa5b3b1b..a1a68ce5 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -80,7 +80,7 @@ class ZoneAwareScheduler(driver.Scheduler): LOG.debug(_("Casted to compute %(host)s for run_instance") % locals()) else: - # TODO(sandy) Provision in child zone ... + # TODO(sandy) Provision in child zone ... LOG.warning(_("Provision to Child Zone not supported (yet)") % locals()) pass @@ -117,11 +117,11 @@ class ZoneAwareScheduler(driver.Scheduler): # Filter local hosts based on requirements ... host_list = self.filter_hosts(num_instances, request_spec) - # then weigh the selected hosts. - # weighted = [{weight=weight, name=hostname}, ...] - # TODO(sirp): weigh_hosts should also be a function of 'topic' or # resources, so that we can apply different objective functions to it + + # then weigh the selected hosts. + # weighted = [{weight=weight, name=hostname}, ...] weighted = self.weigh_hosts(num_instances, request_spec, host_list) # Next, tack on the best weights from the child zones ... 
@@ -145,8 +145,9 @@ class ZoneAwareScheduler(driver.Scheduler): """Derived classes must override this method and return a list of hosts in [(hostname, capability_dict)] format.""" # NOTE(sirp): The default logic is the equivalent to AllHostsFilter + service_states = self.zone_manager.service_states return [(host, services) - for host, services in self.zone_manager.service_states.iteritems()] + for host, services in service_states.iteritems()] def weigh_hosts(self, num, request_spec, hosts): """Derived classes may override this to provide more sophisticated diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py index b7bcd2f0..c8ce7892 100644 --- a/nova/tests/test_least_cost_scheduler.py +++ b/nova/tests/test_least_cost_scheduler.py @@ -30,6 +30,7 @@ class FakeHost(object): self.free_ram = free_ram self.io = io + class WeightedSumTestCase(test.TestCase): def test_empty_domain(self): domain = [] @@ -50,14 +51,15 @@ class WeightedSumTestCase(test.TestCase): (2, lambda h: h.io), # Avoid high I/O ] - costs = least_cost.weighted_sum(domain=hosts, weighted_fns=weighted_fns) + costs = least_cost.weighted_sum( + domain=hosts, weighted_fns=weighted_fns) # Each 256 MB unit of free-ram contributes 0.5 points by way of: # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 # Each 100 iops of IO adds 0.5 points by way of: # cost = 2 * (100/400) = 2 * 0.25 = 0.5 expected = [1.5, 2.5, 1.5] - self.assertEqual(expected, costs) + self.assertEqual(expected, costs) # TODO(sirp): unify this with test_host_filter tests? possibility of sharing @@ -65,6 +67,7 @@ class WeightedSumTestCase(test.TestCase): class FakeZoneManager: pass + class LeastCostSchedulerTestCase(test.TestCase): def _host_caps(self, multiplier): # Returns host capabilities in the following way: @@ -116,7 +119,6 @@ class LeastCostSchedulerTestCase(test.TestCase): #FLAGS.default_host_filter_driver = self.old_flag super(LeastCostSchedulerTestCase, self).tearDown() - def assertWeights(self, expected, num, request_spec, hosts): weighted = self.sched.weigh_hosts(num, request_spec, hosts) self.assertDictListMatch(weighted, expected, approx_equal=True) @@ -138,8 +140,9 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - - expected = [ dict(weight=1, hostname=hostname) for hostname, caps in hosts] + + expected = [dict(weight=1, hostname=hostname) + for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) def test_cost_fn_weights(self): @@ -152,7 +155,8 @@ class LeastCostSchedulerTestCase(test.TestCase): request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - expected = [ dict(weight=2, hostname=hostname) for hostname, caps in hosts] + expected = [dict(weight=2, hostname=hostname) + for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) def test_fill_first_cost_fn(self): @@ -164,7 +168,7 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 request_spec = {} hosts = self.sched.filter_hosts(num, request_spec) - + expected = [] for idx, (hostname, caps) in enumerate(hosts): # Costs are normalized so over 10 hosts, each host with increasing From 39c566af8673098c7736f7d5803b69ae4bbe9d53 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:32:56 -0500 Subject: [PATCH 22/65] Moving tests into scheduler subdirectory --- nova/scheduler/host_filter.py | 5 - .../test_zone_aware_scheduler.py | 0 nova/tests/test_host_filter.py | 211 ---- 
 nova/tests/test_least_cost_scheduler.py |  181 ---
 nova/tests/test_scheduler.py            | 1118 -----
 5 files changed, 1515 deletions(-)
 rename nova/tests/{ => scheduler}/test_zone_aware_scheduler.py (100%)
 delete mode 100644 nova/tests/test_host_filter.py
 delete mode 100644 nova/tests/test_least_cost_scheduler.py
 delete mode 100644 nova/tests/test_scheduler.py

diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index 117f0824..79e9f315 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -313,8 +313,3 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
         instance_type = request_spec['instance_type']
         name, query = driver.instance_type_to_filter(instance_type)
         return driver.filter_hosts(self.zone_manager, query)
-
-    def weigh_hosts(self, num, request_spec, hosts):
-        """Derived classes must override this method and return
-        a lists of hosts in [{weight, hostname}] format."""
-        return [dict(weight=1, hostname=host) for host, caps in hosts]
diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py
similarity index 100%
rename from nova/tests/test_zone_aware_scheduler.py
rename to nova/tests/scheduler/test_zone_aware_scheduler.py
diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py
deleted file mode 100644
index 1a2a86a7..00000000
--- a/nova/tests/test_host_filter.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Scheduler Host Filter Drivers.
-"""
-
-import json
-
-from nova import exception
-from nova import flags
-from nova import test
-from nova.scheduler import host_filter
-
-FLAGS = flags.FLAGS
-
-
-class FakeZoneManager:
-    pass
-
-
-class HostFilterTestCase(test.TestCase):
-    """Test case for host filter drivers."""
-
-    def _host_caps(self, multiplier):
-        # Returns host capabilities in the following way:
-        # host1 = memory:free 10 (100max)
-        #         disk:available 100 (1000max)
-        # hostN = memory:free 10 + 10N
-        #         disk:available 100 + 100N
-        # in other words: hostN has more resources than host0
-        # which means ... don't go above 10 hosts.
- return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(HostFilterTestCase, self).setUp() - self.old_flag = FLAGS.default_host_filter_driver - FLAGS.default_host_filter_driver = \ - 'nova.scheduler.host_filter.AllHostsFilter' - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200) - - self.zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - self.zone_manager.service_states = states - - def tearDown(self): - FLAGS.default_host_filter_driver = self.old_flag - super(HostFilterTestCase, self).tearDown() - - def test_choose_driver(self): - # Test default driver ... - driver = host_filter.choose_driver() - self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid driver ... - driver = host_filter.choose_driver( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') - # Test invalid driver ... - try: - host_filter.choose_driver('does not exist') - self.fail("Should not find driver") - except exception.SchedulerHostFilterDriverNotFound: - pass - - def test_all_host_driver(self): - driver = host_filter.AllHostsFilter() - cooked = driver.instance_type_to_filter(self.instance_type) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(10, len(hosts)) - for host, capabilities in hosts: - self.assertTrue(host.startswith('host')) - - def test_instance_type_driver(self): - driver = host_filter.InstanceTypeFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - def test_json_driver(self): - driver = host_filter.JsonFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = driver.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - # Try some custom queries - - raw = ['or', - ['and', - ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300] - ], - ['and', - ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700] - ] - ] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for 
index, host in zip([1, 2, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['not', - ['=', '$compute.host_memory_free', 30], - ] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(9, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] - cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([2, 4, 6, 8, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - # Try some bogus input ... - raw = ['unknown command', ] - cooked = json.dumps(raw) - try: - driver.filter_hosts(self.zone_manager, cooked) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False] - ))) - - try: - driver.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False - )) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$foo', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$.....', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] - ))) - - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', {}, ['>', '$missing....foo']] - ))) diff --git a/nova/tests/test_least_cost_scheduler.py b/nova/tests/test_least_cost_scheduler.py deleted file mode 100644 index c8ce7892..00000000 --- a/nova/tests/test_least_cost_scheduler.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
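
The JSON queries exercised in the deleted tests above read as prefix expressions: the first element of each list names an operator, the remaining elements are operands, and '$'-prefixed strings are resolved against a host's reported capabilities. A toy evaluator makes the behavior concrete (an editor's sketch only; the flat caps dict is an assumption, and the real JsonFilter resolves dotted '$compute....' paths and differs in corner cases such as multi-operand 'not'):

    def evaluate(query, caps):
        """Recursively evaluate a JsonFilter-style query for one host."""
        ops = {
            '=': lambda a: a[0] == a[1],
            '<': lambda a: a[0] < a[1],
            '>': lambda a: a[0] > a[1],
            '<=': lambda a: a[0] <= a[1],
            '>=': lambda a: a[0] >= a[1],
            'in': lambda a: a[0] in a[1:],
            'not': lambda a: not a[0],
            'or': lambda a: any(a),
            'and': lambda a: all(a),
        }
        if isinstance(query, list):
            op = query[0]  # unknown op -> KeyError, as the tests expect
            args = [evaluate(arg, caps) for arg in query[1:]]
            return ops[op](args)
        if isinstance(query, basestring) and query.startswith('$'):
            return caps.get(query[1:])  # unresolved variables become None
        return query

Under this reading, ['unknown command'] raising KeyError and ['=', '$foo', 100] matching no host (None == 100 is False) both fall out naturally.
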
-""" -Tests For Least Cost Scheduler -""" - -from nova import flags -from nova import test -from nova.scheduler import least_cost - -MB = 1024 * 1024 -FLAGS = flags.FLAGS - - -class FakeHost(object): - def __init__(self, host_id, free_ram, io): - self.id = host_id - self.free_ram = free_ram - self.io = io - - -class WeightedSumTestCase(test.TestCase): - def test_empty_domain(self): - domain = [] - weighted_fns = [] - result = least_cost.weighted_sum(domain, weighted_fns) - expected = [] - self.assertEqual(expected, result) - - def test_basic_costing(self): - hosts = [ - FakeHost(1, 512 * MB, 100), - FakeHost(2, 256 * MB, 400), - FakeHost(3, 512 * MB, 100) - ] - - weighted_fns = [ - (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* - (2, lambda h: h.io), # Avoid high I/O - ] - - costs = least_cost.weighted_sum( - domain=hosts, weighted_fns=weighted_fns) - - # Each 256 MB unit of free-ram contributes 0.5 points by way of: - # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 - # Each 100 iops of IO adds 0.5 points by way of: - # cost = 2 * (100/400) = 2 * 0.25 = 0.5 - expected = [1.5, 2.5, 1.5] - self.assertEqual(expected, costs) - - -# TODO(sirp): unify this with test_host_filter tests? possibility of sharing -# test setup code -class FakeZoneManager: - pass - - -class LeastCostSchedulerTestCase(test.TestCase): - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(LeastCostSchedulerTestCase, self).setUp() - #self.old_flag = FLAGS.default_host_filter_driver - #FLAGS.default_host_filter_driver = \ - # 'nova.scheduler.host_filter.AllHostsFilter' - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200) - - zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - zone_manager.service_states = states - - self.sched = least_cost.LeastCostScheduler() - self.sched.zone_manager = zone_manager - - def tearDown(self): - #FLAGS.default_host_filter_driver = self.old_flag - super(LeastCostSchedulerTestCase, self).tearDown() - - def assertWeights(self, expected, num, request_spec, hosts): - weighted = self.sched.weigh_hosts(num, request_spec, hosts) - self.assertDictListMatch(weighted, expected, approx_equal=True) - - def test_no_hosts(self): - num = 1 - request_spec = {} - hosts = [] - - expected = [] - self.assertWeights(expected, num, request_spec, hosts) - - def test_noop_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn' - ] - FLAGS.noop_cost_fn_weight = 1 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - 
expected = [dict(weight=1, hostname=hostname) - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) - - def test_cost_fn_weights(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn' - ] - FLAGS.noop_cost_fn_weight = 2 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [dict(weight=2, hostname=hostname) - for hostname, caps in hosts] - self.assertWeights(expected, num, request_spec, hosts) - - def test_fill_first_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.fill_first_cost_fn' - ] - FLAGS.fill_first_cost_fn_weight = 1 - - num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) - - expected = [] - for idx, (hostname, caps) in enumerate(hosts): - # Costs are normalized so over 10 hosts, each host with increasing - # free ram will cost 1/N more. Since the lowest cost host has some - # free ram, we add in the 1/N for the base_cost - weight = 0.1 + (0.1 * idx) - weight_dict = dict(weight=weight, hostname=hostname) - expected.append(weight_dict) - - self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py deleted file mode 100644 index 54b3f80f..00000000 --- a/nova/tests/test_scheduler.py +++ /dev/null @@ -1,1118 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
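
The fill-first expectations deleted above (and re-created under nova/tests/scheduler/ in the next patch) are plain arithmetic once the normalization is spelled out: host N advertises 10 + 10N units of free RAM, the maximum over the ten fake hosts is 100, and with a weight of 1 each host's cost is its free RAM divided by that maximum. A quick check (editor's sketch using the _host_caps layout above):

    free_ram = [10 + 10 * n for n in range(10)]        # host01 .. host10
    max_score = float(max(free_ram))                   # 100
    costs = [score / max_score for score in free_ram]  # weight is 1
    expected = [0.1 + (0.1 * idx) for idx in range(10)]
    assert all(abs(c - e) < 1e-9 for c, e in zip(costs, expected))

This is also why the least-loaded host comes in at 0.1 rather than 0.0: even host01 reports some free RAM, so its normalized score is nonzero.
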
-""" -Tests For Scheduler -""" - -import datetime -import mox -import novaclient.exceptions -import stubout -import webob - -from mox import IgnoreArg -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import service -from nova import test -from nova import rpc -from nova import utils -from nova.auth import manager as auth_manager -from nova.scheduler import api -from nova.scheduler import manager -from nova.scheduler import driver -from nova.compute import power_state -from nova.db.sqlalchemy import models - - -FLAGS = flags.FLAGS -flags.DECLARE('max_cores', 'nova.scheduler.simple') -flags.DECLARE('stub_network', 'nova.compute.manager') -flags.DECLARE('instances_path', 'nova.compute.manager') - - -class TestDriver(driver.Scheduler): - """Scheduler Driver for Tests""" - def schedule(context, topic, *args, **kwargs): - return 'fallback_host' - - def schedule_named_method(context, topic, num): - return 'named_host' - - -class SchedulerTestCase(test.TestCase): - """Test case for scheduler""" - def setUp(self): - super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') - - def _create_compute_service(self): - """Create compute-manager(ComputeNode and Service record).""" - ctxt = context.get_admin_context() - dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': 'dummyzone'} - s_ref = db.service_create(ctxt, dic) - - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - db.compute_node_create(ctxt, dic) - - return db.service_get(ctxt, s_ref['id']) - - def _create_instance(self, **kwargs): - """Create a test instance""" - ctxt = context.get_admin_context() - inst = {} - inst['user_id'] = 'admin' - inst['project_id'] = kwargs.get('project_id', 'fake') - inst['host'] = kwargs.get('host', 'dummy') - inst['vcpus'] = kwargs.get('vcpus', 1) - inst['memory_mb'] = kwargs.get('memory_mb', 10) - inst['local_gb'] = kwargs.get('local_gb', 20) - return db.instance_create(ctxt, inst) - - def test_fallback(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.fallback_host', - {'method': 'noexist', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.noexist(ctxt, 'topic', num=7) - - def test_named_method(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.named_host', - {'method': 'named_method', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.named_method(ctxt, 'topic', num=7) - - def test_show_host_resources_host_not_exit(self): - """A host given as an argument does not exists.""" - - scheduler = manager.SchedulerManager() - dest = 'dummydest' - ctxt = context.get_admin_context() - - self.assertRaises(exception.NotFound, scheduler.show_host_resources, - ctxt, dest) - #TODO(bcwaldon): reimplement this functionality - #c1 = (e.message.find(_("does not exist or is not a " - # "compute node.")) >= 0) - - def _dic_is_equal(self, dic1, dic2, keys=None): - """Compares 2 dictionary contents(Helper method)""" - if not keys: - keys = ['vcpus', 'memory_mb', 'local_gb', - 'vcpus_used', 'memory_mb_used', 'local_gb_used'] - - for key 
in keys: - if not (dic1[key] == dic2[key]): - return False - return True - - def test_show_host_resources_no_project(self): - """No instance are running on the given host.""" - - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - s_ref = self._create_compute_service() - - result = scheduler.show_host_resources(ctxt, s_ref['host']) - - # result checking - c1 = ('resource' in result and 'usage' in result) - compute_node = s_ref['compute_node'][0] - c2 = self._dic_is_equal(result['resource'], compute_node) - c3 = result['usage'] == {} - self.assertTrue(c1 and c2 and c3) - db.service_destroy(ctxt, s_ref['id']) - - def test_show_host_resources_works_correctly(self): - """Show_host_resources() works correctly as expected.""" - - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - s_ref = self._create_compute_service() - i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host']) - i_ref2 = self._create_instance(project_id='p-02', vcpus=3, - host=s_ref['host']) - - result = scheduler.show_host_resources(ctxt, s_ref['host']) - - c1 = ('resource' in result and 'usage' in result) - compute_node = s_ref['compute_node'][0] - c2 = self._dic_is_equal(result['resource'], compute_node) - c3 = result['usage'].keys() == ['p-01', 'p-02'] - keys = ['vcpus', 'memory_mb', 'local_gb'] - c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys) - c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys) - self.assertTrue(c1 and c2 and c3 and c4 and c5) - - db.service_destroy(ctxt, s_ref['id']) - db.instance_destroy(ctxt, i_ref1['id']) - db.instance_destroy(ctxt, i_ref2['id']) - - -class ZoneSchedulerTestCase(test.TestCase): - """Test case for zone scheduler""" - def setUp(self): - super(ZoneSchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler') - - def _create_service_model(self, **kwargs): - service = db.sqlalchemy.models.Service() - service.host = kwargs['host'] - service.disabled = False - service.deleted = False - service.report_count = 0 - service.binary = 'nova-compute' - service.topic = 'compute' - service.id = kwargs['id'] - service.availability_zone = kwargs['zone'] - service.created_at = datetime.datetime.utcnow() - return service - - def test_with_two_zones(self): - scheduler = manager.SchedulerManager() - ctxt = context.get_admin_context() - service_list = [self._create_service_model(id=1, - host='host1', - zone='zone1'), - self._create_service_model(id=2, - host='host2', - zone='zone2'), - self._create_service_model(id=3, - host='host3', - zone='zone2'), - self._create_service_model(id=4, - host='host4', - zone='zone2'), - self._create_service_model(id=5, - host='host5', - zone='zone2')] - self.mox.StubOutWithMock(db, 'service_get_all_by_topic') - arg = IgnoreArg() - db.service_get_all_by_topic(arg, arg).AndReturn(service_list) - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - rpc.cast(ctxt, - 'compute.host1', - {'method': 'run_instance', - 'args': {'instance_id': 'i-ffffffff', - 'availability_zone': 'zone1'}}) - self.mox.ReplayAll() - scheduler.run_instance(ctxt, - 'compute', - instance_id='i-ffffffff', - availability_zone='zone1') - - -class SimpleDriverTestCase(test.TestCase): - """Test case for simple driver""" - def setUp(self): - super(SimpleDriverTestCase, self).setUp() - self.flags(connection_type='fake', - stub_network=True, - max_cores=4, - max_gigabytes=4, - network_manager='nova.network.manager.FlatManager', - 
volume_driver='nova.volume.driver.FakeISCSIDriver', - scheduler_driver='nova.scheduler.simple.SimpleScheduler') - self.scheduler = manager.SchedulerManager() - self.manager = auth_manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.get_admin_context() - - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - super(SimpleDriverTestCase, self).tearDown() - - def _create_instance(self, **kwargs): - """Create a test instance""" - inst = {} - inst['image_id'] = 1 - inst['reservation_id'] = 'r-fakeres' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id - inst['instance_type_id'] = '1' - inst['mac_address'] = utils.generate_mac() - inst['vcpus'] = kwargs.get('vcpus', 1) - inst['ami_launch_index'] = 0 - inst['availability_zone'] = kwargs.get('availability_zone', None) - inst['host'] = kwargs.get('host', 'dummy') - inst['memory_mb'] = kwargs.get('memory_mb', 20) - inst['local_gb'] = kwargs.get('local_gb', 30) - inst['launched_on'] = kwargs.get('launghed_on', 'dummy') - inst['state_description'] = kwargs.get('state_description', 'running') - inst['state'] = kwargs.get('state', power_state.RUNNING) - return db.instance_create(self.context, inst)['id'] - - def _create_volume(self): - """Create a test volume""" - vol = {} - vol['size'] = 1 - vol['availability_zone'] = 'test' - return db.volume_create(self.context, vol)['id'] - - def _create_compute_service(self, **kwargs): - """Create a compute service.""" - - dic = {'binary': 'nova-compute', 'topic': 'compute', - 'report_count': 0, 'availability_zone': 'dummyzone'} - dic['host'] = kwargs.get('host', 'dummy') - s_ref = db.service_create(self.context, dic) - if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): - t = datetime.datetime.utcnow() - datetime.timedelta(0) - dic['created_at'] = kwargs.get('created_at', t) - dic['updated_at'] = kwargs.get('updated_at', t) - db.service_update(self.context, s_ref['id'], dic) - - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32) - dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu') - dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003) - db.compute_node_create(self.context, dic) - return db.service_get(self.context, s_ref['id']) - - def test_doesnt_report_disabled_hosts_as_up(self): - """Ensures driver doesn't find hosts before they are enabled""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - db.service_update(self.context, s2['id'], {'disabled': True}) - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(0, len(hosts)) - compute1.kill() - compute2.kill() - - def test_reports_enabled_hosts_as_up(self): - """Ensures driver can find the hosts that are up""" - # 
NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(2, len(hosts)) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_specific_host_gets_instance(self): - """Ensures if you set availability_zone it launches on that zone""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) - past = now - delta - db.service_update(self.context, s1['id'], {'updated_at': past}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - self.assertRaises(driver.WillNotSchedule, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id2) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_ids1 = [] - 
instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - db.volume_destroy(self.context, volume_id2) - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} - - def test_doesnt_report_disabled_hosts_as_up(self): - """Ensures driver doesn't find hosts before they are enabled""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - db.service_update(self.context, s2['id'], {'disabled': True}) - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(0, len(hosts)) - compute1.kill() - compute2.kill() - - def test_reports_enabled_hosts_as_up(self): - """Ensures driver can find the hosts that are up""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(2, len(hosts)) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_specific_host_gets_instance(self): - """Ensures if you set availability_zone it launches on that zone""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = 
self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = self.start_service('compute', host='host1') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) - past = now - delta - db.service_update(self.context, s1['id'], {'updated_at': past}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - self.assertRaises(driver.WillNotSchedule, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id2) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = self.start_service('compute', host='host1') - s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - db.service_update(self.context, s1['id'], {'disabled': True}) - instance_id2 = self._create_instance(availability_zone='nova:host1') - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual('host1', host) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = self.start_service('compute', host='host1') - compute2 = self.start_service('compute', host='host2') - instance_ids1 = [] - instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - db.instance_destroy(self.context, instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = self.start_service('volume', host='host1') - volume2 = self.start_service('volume', host='host2') - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - db.volume_destroy(self.context, volume_id2) - volume1.kill() - volume2.kill() - - def test_too_many_gigabytes(self): - """Ensures we don't go over max gigabytes""" - volume1 = self.start_service('volume', host='host1') - volume2 = self.start_service('volume', host='host2') - volume_ids1 = [] - volume_ids2 = [] - for index in xrange(FLAGS.max_gigabytes): - volume_id = self._create_volume() - volume1.create_volume(self.context, volume_id) - volume_ids1.append(volume_id) - volume_id = self._create_volume() - volume2.create_volume(self.context, volume_id) - volume_ids2.append(volume_id) - volume_id = 
self._create_volume() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_create_volume, - self.context, - volume_id) - for volume_id in volume_ids1: - volume1.delete_volume(self.context, volume_id) - for volume_id in volume_ids2: - volume2.delete_volume(self.context, volume_id) - volume1.kill() - volume2.kill() - - def test_scheduler_live_migration_with_volume(self): - """scheduler_live_migration() works correctly as expected. - - Also, checks instance state is changed from 'running' -> 'migrating'. - - """ - - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - dic = {'instance_id': instance_id, 'size': 1} - v_ref = db.volume_create(self.context, dic) - - # cannot check 2nd argument b/c the addresses of instance object - # is different. - driver_i = self.scheduler.driver - nocare = mox.IgnoreArg() - self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') - self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') - self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') - driver_i._live_migration_src_check(nocare, nocare) - driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) - driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} - rpc.cast(self.context, - db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), - {"method": 'live_migration', "args": kwargs}) - - self.mox.ReplayAll() - self.scheduler.live_migration(self.context, FLAGS.compute_topic, - instance_id=instance_id, - dest=i_ref['host']) - - i_ref = db.instance_get(self.context, instance_id) - self.assertTrue(i_ref['state_description'] == 'migrating') - db.instance_destroy(self.context, instance_id) - db.volume_destroy(self.context, v_ref['id']) - - def test_live_migration_src_check_instance_not_running(self): - """The instance given by instance_id is not running.""" - - instance_id = self._create_instance(state_description='migrating') - i_ref = db.instance_get(self.context, instance_id) - - try: - self.scheduler.driver._live_migration_src_check(self.context, - i_ref) - except exception.Invalid, e: - c = (e.message.find('is not running') > 0) - - self.assertTrue(c) - db.instance_destroy(self.context, instance_id) - - def test_live_migration_src_check_volume_node_not_alive(self): - """Raise exception when volume node is not alive.""" - - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - dic = {'instance_id': instance_id, 'size': 1} - v_ref = db.volume_create(self.context, {'instance_id': instance_id, - 'size': 1}) - t1 = datetime.datetime.utcnow() - datetime.timedelta(1) - dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', - 'topic': 'volume', 'report_count': 0} - s_ref = db.service_create(self.context, dic) - - self.assertRaises(exception.VolumeServiceUnavailable, - self.scheduler.driver.schedule_live_migration, - self.context, instance_id, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.volume_destroy(self.context, v_ref['id']) - - def test_live_migration_src_check_compute_node_not_alive(self): - """Confirms src-compute node is alive.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t, updated_at=t, 
- host=i_ref['host']) - - self.assertRaises(exception.ComputeServiceUnavailable, - self.scheduler.driver._live_migration_src_check, - self.context, i_ref) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_src_check_works_correctly(self): - """Confirms this method finishes with no error.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host=i_ref['host']) - - ret = self.scheduler.driver._live_migration_src_check(self.context, - i_ref) - - self.assertTrue(ret is None) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_not_alive(self): - """Confirms exception raises in case dest host does not exist.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t, updated_at=t, - host=i_ref['host']) - - self.assertRaises(exception.ComputeServiceUnavailable, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_same_host(self): - """Confirms exceptioin raises in case dest and src is same host.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host=i_ref['host']) - - self.assertRaises(exception.UnableToMigrateToSelf, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_lack_memory(self): - """Confirms exception raises when dest doesn't have enough memory.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=12) - - self.assertRaises(exception.MigrationError, - self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, 'somewhere') - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_dest_check_service_works_correctly(self): - """Confirms method finishes with no error.""" - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=5) - - ret = self.scheduler.driver._live_migration_dest_check(self.context, - i_ref, - 'somewhere') - self.assertTrue(ret is None) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_common_check_service_orig_not_exists(self): - """Destination host does not exist.""" - - dest = 'dummydest' - # mocks for live_migration_common_check() - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - t1 = datetime.datetime.utcnow() - datetime.timedelta(10) - s_ref = self._create_compute_service(created_at=t1, updated_at=t1, - host=dest) - - # mocks for mounted_on_same_shared_storage() - fpath = '/test/20110127120000' - self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) - topic = FLAGS.compute_topic - 
driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(self.context, topic, dest), - {"method": 'create_shared_storage_test_file'}).AndReturn(fpath) - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']), - {"method": 'check_shared_storage_test_file', - "args": {'filename': fpath}}) - driver.rpc.call(mox.IgnoreArg(), - db.queue_get_for(mox.IgnoreArg(), topic, dest), - {"method": 'cleanup_shared_storage_test_file', - "args": {'filename': fpath}}) - - self.mox.ReplayAll() - self.assertRaises(exception.SourceHostUnavailable, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - - def test_live_migration_common_check_service_different_hypervisor(self): - """Original host and dest host has different hypervisor type.""" - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen') - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - - self.mox.ReplayAll() - self.assertRaises(exception.InvalidHypervisorType, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - def test_live_migration_common_check_service_different_version(self): - """Original host and dest host has different hypervisor version.""" - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest, - hypervisor_version=12002) - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - - self.mox.ReplayAll() - self.assertRaises(exception.DestinationHypervisorTooOld, - self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) - - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - def test_live_migration_common_check_checking_cpuinfo_fail(self): - """Raise excetion when original host doen't have compatible cpu.""" - - dest = 'dummydest' - instance_id = self._create_instance() - i_ref = db.instance_get(self.context, instance_id) - - # compute service for destination - s_ref = self._create_compute_service(host=i_ref['host']) - # compute service for original host - s_ref2 = self._create_compute_service(host=dest) - - # mocks - driver = self.scheduler.driver - self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage') - driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) - self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True) - rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), - {"method": 'compare_cpu', - "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ - AndRaise(rpc.RemoteError("doesn't have 
compatibility to", "", "")) - - self.mox.ReplayAll() - try: - self.scheduler.driver._live_migration_common_check(self.context, - i_ref, - dest) - except rpc.RemoteError, e: - c = (e.message.find(_("doesn't have compatibility to")) >= 0) - - self.assertTrue(c) - db.instance_destroy(self.context, instance_id) - db.service_destroy(self.context, s_ref['id']) - db.service_destroy(self.context, s_ref2['id']) - - -class FakeZone(object): - def __init__(self, id, api_url, username, password): - self.id = id - self.api_url = api_url - self.username = username - self.password = password - - -def zone_get_all(context): - return [ - FakeZone(1, 'http://example.com', 'bob', 'xxx'), - ] - - -class FakeRerouteCompute(api.reroute_compute): - def _call_child_zones(self, zones, function): - return [] - - def get_collection_context_and_id(self, args, kwargs): - return ("servers", None, 1) - - def unmarshall_result(self, zone_responses): - return dict(magic="found me") - - -def go_boom(self, context, instance): - raise exception.InstanceNotFound(instance_id=instance) - - -def found_instance(self, context, instance): - return dict(name='myserver') - - -class FakeResource(object): - def __init__(self, attribute_dict): - for k, v in attribute_dict.iteritems(): - setattr(self, k, v) - - def pause(self): - pass - - -class ZoneRedirectTest(test.TestCase): - def setUp(self): - super(ZoneRedirectTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - - self.stubs.Set(db, 'zone_get_all', zone_get_all) - - self.enable_zone_routing = FLAGS.enable_zone_routing - FLAGS.enable_zone_routing = True - - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.enable_zone_routing = self.enable_zone_routing - super(ZoneRedirectTest, self).tearDown() - - def test_trap_found_locally(self): - decorator = FakeRerouteCompute("foo") - try: - result = decorator(found_instance)(None, None, 1) - except api.RedirectResult, e: - self.fail(_("Successful database hit should succeed")) - - def test_trap_not_found_locally(self): - decorator = FakeRerouteCompute("foo") - try: - result = decorator(go_boom)(None, None, 1) - self.assertFail(_("Should have rerouted.")) - except api.RedirectResult, e: - self.assertEquals(e.results['magic'], 'found me') - - def test_routing_flags(self): - FLAGS.enable_zone_routing = False - decorator = FakeRerouteCompute("foo") - self.assertRaises(exception.InstanceNotFound, decorator(go_boom), - None, None, 1) - - def test_get_collection_context_and_id(self): - decorator = api.reroute_compute("foo") - self.assertEquals(decorator.get_collection_context_and_id( - (None, 10, 20), {}), ("servers", 10, 20)) - self.assertEquals(decorator.get_collection_context_and_id( - (None, 11,), dict(instance_id=21)), ("servers", 11, 21)) - self.assertEquals(decorator.get_collection_context_and_id( - (None,), dict(context=12, instance_id=22)), ("servers", 12, 22)) - - def test_unmarshal_single_server(self): - decorator = api.reroute_compute("foo") - self.assertEquals(decorator.unmarshall_result([]), {}) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, b=2)), ]), - dict(server=dict(a=1, b=2))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, _b=2)), ]), - dict(server=dict(a=1,))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(a=1, manager=2)), ]), - dict(server=dict(a=1,))) - self.assertEquals(decorator.unmarshall_result( - [FakeResource(dict(_a=1, manager=2)), ]), - dict(server={})) - - -class FakeServerCollection(object): - def get(self, instance_id): - 
return FakeResource(dict(a=10, b=20)) - - def find(self, name): - return FakeResource(dict(a=11, b=22)) - - -class FakeEmptyServerCollection(object): - def get(self, f): - raise novaclient.NotFound(1) - - def find(self, name): - raise novaclient.NotFound(2) - - -class FakeNovaClient(object): - def __init__(self, collection): - self.servers = collection - - -class DynamicNovaClientTest(test.TestCase): - def test_issue_novaclient_command_found(self): - zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "get", 100).a, 10) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "find", "name").b, 22) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeServerCollection()), - zone, "servers", "pause", 100), None) - - def test_issue_novaclient_command_not_found(self): - zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "get", 100), None) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "find", "name"), None) - - self.assertEquals(api._issue_novaclient_command( - FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "any", "name"), None) - - -class FakeZonesProxy(object): - def do_something(*args, **kwargs): - return 42 - - def raises_exception(*args, **kwargs): - raise Exception('testing') - - -class FakeNovaClientOpenStack(object): - def __init__(self, *args, **kwargs): - self.zones = FakeZonesProxy() - - def authenticate(self): - pass - - -class CallZoneMethodTest(test.TestCase): - def setUp(self): - super(CallZoneMethodTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - self.stubs.Set(db, 'zone_get_all', zone_get_all) - self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) - - def tearDown(self): - self.stubs.UnsetAll() - super(CallZoneMethodTest, self).tearDown() - - def test_call_zone_method(self): - context = {} - method = 'do_something' - results = api.call_zone_method(context, method) - expected = [(1, 42)] - self.assertEqual(expected, results) - - def test_call_zone_method_not_present(self): - context = {} - method = 'not_present' - self.assertRaises(AttributeError, api.call_zone_method, - context, method) - - def test_call_zone_method_generates_exception(self): - context = {} - method = 'raises_exception' - results = api.call_zone_method(context, method) - - # FIXME(sirp): for now the _error_trap code is catching errors and - # converting them to a ("ERROR", "string") tuples. The code (and this - # test) should eventually handle real exceptions. 
- expected = [(1, ('ERROR', 'testing'))] - self.assertEqual(expected, results) From ccaef1a0c524660b7173420fffe12dbec6a2e49f Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:49:21 -0500 Subject: [PATCH 23/65] Moving into scheduler subdir and refactoring out common code --- nova/tests/scheduler/__init__.py | 0 nova/tests/scheduler/test_host_filter.py | 189 ++++++++++++++++++ .../scheduler/test_least_cost_scheduler.py | 146 ++++++++++++++ .../scheduler/test_zone_aware_scheduler.py | 31 +++ 4 files changed, 366 insertions(+) create mode 100644 nova/tests/scheduler/__init__.py create mode 100644 nova/tests/scheduler/test_host_filter.py create mode 100644 nova/tests/scheduler/test_least_cost_scheduler.py diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/scheduler/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py new file mode 100644 index 00000000..c3af50a6 --- /dev/null +++ b/nova/tests/scheduler/test_host_filter.py @@ -0,0 +1,189 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Scheduler Host Filter Drivers. +""" + +import json + +from nova import exception +from nova import flags +from nova import test +from nova.scheduler import host_filter +from nova.tests.scheduler import test_zone_aware_scheduler + +FLAGS = flags.FLAGS + + +class FakeZoneManager: + pass + + +class HostFilterTestCase(test.TestCase): + """Test case for host filter drivers.""" + + def setUp(self): + super(HostFilterTestCase, self).setUp() + self.old_flag = FLAGS.default_host_filter_driver + FLAGS.default_host_filter_driver = \ + 'nova.scheduler.host_filter.AllHostsFilter' + self.instance_type = dict(name='tiny', + memory_mb=50, + vcpus=10, + local_gb=500, + flavorid=1, + swap=500, + rxtx_quota=30000, + rxtx_cap=200) + + self.zone_manager = FakeZoneManager() + + states = test_zone_aware_scheduler.fake_zone_manager_service_states( + num_hosts=10) + self.zone_manager.service_states = states + + def tearDown(self): + FLAGS.default_host_filter_driver = self.old_flag + super(HostFilterTestCase, self).tearDown() + + def test_choose_driver(self): + # Test default driver ... + driver = host_filter.choose_driver() + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.AllHostsFilter') + # Test valid driver ... + driver = host_filter.choose_driver( + 'nova.scheduler.host_filter.InstanceTypeFilter') + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.InstanceTypeFilter') + # Test invalid driver ... 
+ try: + host_filter.choose_driver('does not exist') + self.fail("Should not find driver") + except exception.SchedulerHostFilterDriverNotFound: + pass + + def test_all_host_driver(self): + driver = host_filter.AllHostsFilter() + cooked = driver.instance_type_to_filter(self.instance_type) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(10, len(hosts)) + for host, capabilities in hosts: + self.assertTrue(host.startswith('host')) + + def test_instance_type_driver(self): + driver = host_filter.InstanceTypeFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', + name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + def test_json_driver(self): + driver = host_filter.JsonFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + # Try some custom queries + + raw = ['or', + ['and', + ['<', '$compute.host_memory_free', 30], + ['<', '$compute.disk_available', 300] + ], + ['and', + ['>', '$compute.host_memory_free', 70], + ['>', '$compute.disk_available', 700] + ] + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['not', + ['=', '$compute.host_memory_free', 30], + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(9, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([2, 4, 6, 8, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + # Try some bogus input ... 
+ raw = ['unknown command', ] + cooked = json.dumps(raw) + try: + driver.filter_hosts(self.zone_manager, cooked) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( + ['not', True, False, True, False] + ))) + + try: + driver.filter_hosts(self.zone_manager, json.dumps( + 'not', True, False, True, False + )) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$foo', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$.....', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] + ))) + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', {}, ['>', '$missing....foo']] + ))) diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py new file mode 100644 index 00000000..e0ed6141 --- /dev/null +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -0,0 +1,146 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Least Cost Scheduler +""" + +from nova import flags +from nova import test +from nova.scheduler import least_cost +from nova.tests.scheduler import test_zone_aware_scheduler + +MB = 1024 * 1024 +FLAGS = flags.FLAGS + + +class FakeHost(object): + def __init__(self, host_id, free_ram, io): + self.id = host_id + self.free_ram = free_ram + self.io = io + + +class WeightedSumTestCase(test.TestCase): + def test_empty_domain(self): + domain = [] + weighted_fns = [] + result = least_cost.weighted_sum(domain, weighted_fns) + expected = [] + self.assertEqual(expected, result) + + def test_basic_costing(self): + hosts = [ + FakeHost(1, 512 * MB, 100), + FakeHost(2, 256 * MB, 400), + FakeHost(3, 512 * MB, 100) + ] + + weighted_fns = [ + (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* + (2, lambda h: h.io), # Avoid high I/O + ] + + costs = least_cost.weighted_sum( + domain=hosts, weighted_fns=weighted_fns) + + # Each 256 MB unit of free-ram contributes 0.5 points by way of: + # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 + # Each 100 iops of IO adds 0.5 points by way of: + # cost = 2 * (100/400) = 2 * 0.25 = 0.5 + expected = [1.5, 2.5, 1.5] + self.assertEqual(expected, costs) + + +# TODO(sirp): unify this with test_host_filter tests? 
possibility of sharing +# test setup code +class FakeZoneManager: + pass + + +class LeastCostSchedulerTestCase(test.TestCase): + def setUp(self): + super(LeastCostSchedulerTestCase, self).setUp() + zone_manager = FakeZoneManager() + + states = test_zone_aware_scheduler.fake_zone_manager_service_states( + num_hosts=10) + zone_manager.service_states = states + + self.sched = least_cost.LeastCostScheduler() + self.sched.zone_manager = zone_manager + + def tearDown(self): + super(LeastCostSchedulerTestCase, self).tearDown() + + def assertWeights(self, expected, num, request_spec, hosts): + weighted = self.sched.weigh_hosts(num, request_spec, hosts) + self.assertDictListMatch(weighted, expected, approx_equal=True) + + def test_no_hosts(self): + num = 1 + request_spec = {} + hosts = [] + + expected = [] + self.assertWeights(expected, num, request_spec, hosts) + + def test_noop_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=1, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_cost_fn_weights(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 2 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=2, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_fill_first_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.fill_first_cost_fn' + ] + FLAGS.fill_first_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [] + for idx, (hostname, caps) in enumerate(hosts): + # Costs are normalized so over 10 hosts, each host with increasing + # free ram will cost 1/N more. Since the lowest cost host has some + # free ram, we add in the 1/N for the base_cost + weight = 0.1 + (0.1 * idx) + weight_dict = dict(weight=weight, hostname=hostname) + expected.append(weight_dict) + + self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py index 37169fb9..b2cc4fe2 100644 --- a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -22,6 +22,37 @@ from nova.scheduler import zone_aware_scheduler from nova.scheduler import zone_manager +def _host_caps(multiplier): + # Returns host capabilities in the following way: + # host1 = memory:free 10 (100max) + # disk:available 100 (1000max) + # hostN = memory:free 10 + 10N + # disk:available 100 + 100N + # in other words: hostN has more resources than host0 + # which means ... don't go above 10 hosts. 
+ return {'host_name-description': 'XenServer %s' % multiplier, + 'host_hostname': 'xs-%s' % multiplier, + 'host_memory_total': 100, + 'host_memory_overhead': 10, + 'host_memory_free': 10 + multiplier * 10, + 'host_memory_free-computed': 10 + multiplier * 10, + 'host_other-config': {}, + 'host_ip_address': '192.168.1.%d' % (100 + multiplier), + 'host_cpu_info': {}, + 'disk_available': 100 + multiplier * 100, + 'disk_total': 1000, + 'disk_used': 0, + 'host_uuid': 'xxx-%d' % multiplier, + 'host_name-label': 'xs-%s' % multiplier} + + +def fake_zone_manager_service_states(num_hosts): + states = {} + for x in xrange(num_hosts): + states['host%02d' % (x + 1)] = {'compute': _host_caps(x)} + return states + + class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): def filter_hosts(self, num, specs): # NOTE(sirp): this is returning [(hostname, services)] From 53be0b03d0a112aec55ac09a6dc0720389298145 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 17 May 2011 18:53:00 -0500 Subject: [PATCH 24/65] Small cleanups --- nova/tests/scheduler/test_host_filter.py | 7 +++---- nova/tests/scheduler/test_least_cost_scheduler.py | 10 ++++------ 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index c3af50a6..edbab7ab 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -27,10 +27,6 @@ from nova.tests.scheduler import test_zone_aware_scheduler FLAGS = flags.FLAGS -class FakeZoneManager: - pass - - class HostFilterTestCase(test.TestCase): """Test case for host filter drivers.""" @@ -48,6 +44,9 @@ class HostFilterTestCase(test.TestCase): rxtx_quota=30000, rxtx_cap=200) + class FakeZoneManager: + pass + self.zone_manager = FakeZoneManager() states = test_zone_aware_scheduler.fake_zone_manager_service_states( diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index e0ed6141..506fa62f 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -63,15 +63,13 @@ class WeightedSumTestCase(test.TestCase): self.assertEqual(expected, costs) -# TODO(sirp): unify this with test_host_filter tests? possibility of sharing -# test setup code -class FakeZoneManager: - pass - - class LeastCostSchedulerTestCase(test.TestCase): def setUp(self): super(LeastCostSchedulerTestCase, self).setUp() + + class FakeZoneManager: + pass + zone_manager = FakeZoneManager() states = test_zone_aware_scheduler.fake_zone_manager_service_states( From 56a579444470516fc0bfef1c51c7ceb5473f5e9c Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 18 May 2011 03:51:25 -0400 Subject: [PATCH 25/65] Removed all utils.import_object(FLAGS.image_service) and replaced with utils.get_default_image_service(). 
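Stepping back to the least-cost scheduler tests in patch 23 above: the cost comments there (cost = weight * (score/max_score)) are easier to verify with the normalization written out end to end. Below is a hedged reimplementation that matches the tests' arithmetic; it is a sketch of the behavior the tests pin down, not necessarily least_cost.py's actual code.

def weighted_sum(domain, weighted_fns):
    # For each (weight, fn) pair: score every element of the domain,
    # normalize by the largest score, scale by the weight, and sum
    # the per-element results across all cost functions.
    if not domain:
        return []
    costs = [0.0] * len(domain)
    for weight, fn in weighted_fns:
        scores = [float(fn(elem)) for elem in domain]
        max_score = max(scores) or 1.0   # guard the divide-by-zero case
        for idx, score in enumerate(scores):
            costs[idx] += weight * (score / max_score)
    return costs

Fed the three FakeHost objects from test_basic_costing, this returns [1.5, 2.5, 1.5]: host 2 scores worst (2.5) because its I/O term, under the doubled weight, dominates its cheaper memory term.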
--- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index db964064..3f3fd72a 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -905,7 +905,7 @@ class ImageCommands(object): """Methods for dealing with a cloud in an odd state""" def __init__(self, *args, **kwargs): - self.image_service = utils.import_object(FLAGS.image_service) + self.image_service = utils.get_default_image_service() def _register(self, container_format, disk_format, path, owner, name=None, is_public='T', From f168997b0816005e049b31c111f9c081fea5f348 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 09:50:18 -0400 Subject: [PATCH 26/65] get integrated server_tests passing --- nova/flags.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/flags.py b/nova/flags.py index 51979364..2481a10a 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -362,6 +362,9 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') +DEFINE_string('glance_image_service', 'nova.image.local.LocalImageService', + 'The service to use for retrieving and searching for ' + + 'glance images.') DEFINE_string('host', socket.gethostname(), 'name of this node') From 857b5903f13849ff4478ee4608258c71fbf99738 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 10:41:33 -0400 Subject: [PATCH 27/65] fixed QuotaTestCases --- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index 2481a10a..d3f72d41 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -362,7 +362,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') -DEFINE_string('glance_image_service', 'nova.image.local.LocalImageService', +DEFINE_string('glance_image_service', 'nova.image.glance.GlanceImageService', 'The service to use for retrieving and searching for ' + 'glance images.') From 5ff9e61184e007eb30e71f09d0a49bf8338af91a Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 18 May 2011 11:13:22 -0400 Subject: [PATCH 28/65] fix pep8 issues --- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index d3f72d41..b45d252c 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -363,7 +363,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') DEFINE_string('glance_image_service', 'nova.image.glance.GlanceImageService', - 'The service to use for retrieving and searching for ' + + 'The service to use for retrieving and searching for ' + 'glance images.') DEFINE_string('host', socket.gethostname(), From 03fbd4e1bdd39f47f1b42cb14c4206cf8730da1d Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Fri, 20 May 2011 04:14:02 -0400 Subject: [PATCH 29/65] Fixed some tests. 
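A side note on the flag definitions in patches 26 through 28 above: nova.flags wraps python-gflags, so a DEFINE_string call registers a module-level option that becomes readable from FLAGS once argv has been parsed. A minimal standalone sketch of that cycle, assuming the python-gflags package is available (the flag name here is illustrative, not one of Nova's):

import sys

import gflags

FLAGS = gflags.FLAGS

# Adjacent Python string literals concatenate at compile time, so
# help text split across lines needs no '+' between the pieces.
gflags.DEFINE_string('demo_image_service',
                     'nova.image.local.LocalImageService',
                     'The service to use for retrieving and '
                     'searching for images.')

if __name__ == '__main__':
    argv = FLAGS(sys.argv)   # parses --demo_image_service=... from argv
    print(FLAGS.demo_image_service)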
--- nova/flags.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index ee5adae3..32cb6efa 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -362,9 +362,6 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.local.LocalImageService', 'The service to use for retrieving and searching for images.') -DEFINE_string('glance_image_service', 'nova.image.glance.GlanceImageService', - 'The service to use for retrieving and searching for ' + - 'glance images.') DEFINE_string('host', socket.gethostname(), 'name of this node') From 88db79bbf7c51ebdbc49e0f854f1e5d7fc4dec10 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 23 May 2011 10:28:04 -0400 Subject: [PATCH 30/65] moved utils functions into nova/image/ --- bin/nova-manage | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 3f3fd72a..8a9be5d8 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -78,6 +78,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import image from nova import log as logging from nova import quota from nova import rpc @@ -905,7 +906,7 @@ class ImageCommands(object): """Methods for dealing with a cloud in an odd state""" def __init__(self, *args, **kwargs): - self.image_service = utils.get_default_image_service() + self.image_service = image.get_default_image_service() def _register(self, container_format, disk_format, path, owner, name=None, is_public='T', From 775ec812668ffdb0e3b97bf7828bddf4b4ca015e Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 23 May 2011 10:39:50 -0700 Subject: [PATCH 31/65] get rid of all mention of drivers ... it's filter only now --- nova/scheduler/host_filter.py | 55 ++++++++++---------- nova/tests/test_host_filter.py | 93 ++++++++++++++++------------------ 2 files changed, 73 insertions(+), 75 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 92ec827d..d9771754 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -14,8 +14,8 @@ # under the License. """ -Host Filter is a driver mechanism for requesting instance resources. -Three drivers are included: AllHosts, Flavor & JSON. AllHosts just +Host Filter is a mechanism for requesting instance resources. +Three filters are included: AllHosts, Flavor & JSON. AllHosts just returns the full, unfiltered list of hosts. Flavor is a hard coded matching mechanism based on flavor criteria and JSON is an ad-hoc filter grammar. 
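# To make the ad-hoc JSON grammar concrete: a query is a nested list
# whose first element is an operator ('=', '<', '>', '<=', '>=',
# 'in', 'not', 'or', 'and') and whose '$'-prefixed strings are
# capability lookups of the form '$service.capability', resolved
# against each host's reported capabilities. An illustrative query
# in the same shape the tests in this series exercise; the
# capability names are real, the thresholds are made up.

import json

# Hosts with at least 50 units of free memory whose available disk
# is not one of the listed values.
raw = ['and',
       ['>=', '$compute.host_memory_free', 50],
       ['not', ['in', '$compute.disk_available', 100, 200, 300]]]

cooked = json.dumps(raw)   # filter_hosts() takes the serialized form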
@@ -47,13 +47,13 @@ from nova.scheduler import zone_aware_scheduler LOG = logging.getLogger('nova.scheduler.host_filter') FLAGS = flags.FLAGS -flags.DEFINE_string('default_host_filter_driver', +flags.DEFINE_string('default_host_filter', 'nova.scheduler.host_filter.AllHostsFilter', - 'Which driver to use for filtering hosts.') + 'Which filter to use for filtering hosts.') class HostFilter(object): - """Base class for host filter drivers.""" + """Base class for host filters.""" def instance_type_to_filter(self, instance_type): """Convert instance_type into a filter for most common use-case.""" @@ -64,12 +64,12 @@ class HostFilter(object): raise NotImplementedError() def _full_name(self): - """module.classname of the filter driver""" + """module.classname of the filter.""" return "%s.%s" % (self.__module__, self.__class__.__name__) class AllHostsFilter(HostFilter): - """NOP host filter driver. Returns all hosts in ZoneManager. + """NOP host filter. Returns all hosts in ZoneManager. This essentially does what the old Scheduler+Chance used to give us.""" @@ -85,7 +85,7 @@ class AllHostsFilter(HostFilter): class InstanceTypeFilter(HostFilter): - """HostFilter driver hard-coded to work with InstanceType records.""" + """HostFilter hard-coded to work with InstanceType records.""" def instance_type_to_filter(self, instance_type): """Use instance_type to filter hosts.""" @@ -133,7 +133,7 @@ class InstanceTypeFilter(HostFilter): class JsonFilter(HostFilter): - """Host Filter driver to allow simple JSON-based grammar for + """Host Filter to allow simple JSON-based grammar for selecting hosts.""" def _equals(self, args): @@ -273,43 +273,44 @@ class JsonFilter(HostFilter): return hosts -DRIVERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] +FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] -def choose_driver(driver_name=None): - """Since the caller may specify which driver to use we need +def choose_host_filter(filter_name=None): + """Since the caller may specify which filter to use we need to have an authoritative list of what is permissible. This - function checks the driver name against a predefined set - of acceptable drivers.""" + function checks the filter name against a predefined set + of acceptable filters.""" - if not driver_name: - driver_name = FLAGS.default_host_filter_driver - for driver in DRIVERS: - if "%s.%s" % (driver.__module__, driver.__name__) == driver_name: - return driver() - raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) + if not filter_name: + filter_name = FLAGS.default_host_filter + for filter_class in FILTERS: + if "%s.%s" % (filter_class.__module__, filter_class.__name__) == \ + filter_name: + return filter_class() + raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): - """The HostFilterScheduler uses the HostFilter drivers to filter - hosts for weighing. The particular driver used may be passed in + """The HostFilterScheduler uses the HostFilter to filter + hosts for weighing. The particular filter used may be passed in as an argument or the default will be used. 
- request_spec = {'filter_driver': , + request_spec = {'filter_name': , 'instance_type': } """ def filter_hosts(self, num, request_spec): """Filter the full host list (from the ZoneManager)""" - driver_name = request_spec.get('filter_driver', None) - driver = choose_driver(driver_name) + filter_name = request_spec.get('filter_name', None) + host_filter = choose_host_filter(filter_name) # TODO(sandy): We're only using InstanceType-based specs # currently. Later we'll need to snoop for more detailed # host filter requests. instance_type = request_spec['instance_type'] - name, query = driver.instance_type_to_filter(instance_type) - return driver.filter_hosts(self.zone_manager, query) + name, query = host_filter.instance_type_to_filter(instance_type) + return host_filter.filter_hosts(self.zone_manager, query) def weigh_hosts(self, num, request_spec, hosts): """Derived classes must override this method and return diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index dd2325cc..07817cc5 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. """ -Tests For Scheduler Host Filter Drivers. +Tests For Scheduler Host Filters. """ import json @@ -31,7 +31,7 @@ class FakeZoneManager: class HostFilterTestCase(test.TestCase): - """Test case for host filter drivers.""" + """Test case for host filters.""" def _host_caps(self, multiplier): # Returns host capabilities in the following way: @@ -57,8 +57,8 @@ class HostFilterTestCase(test.TestCase): 'host_name-label': 'xs-%s' % multiplier} def setUp(self): - self.old_flag = FLAGS.default_host_filter_driver - FLAGS.default_host_filter_driver = \ + self.old_flag = FLAGS.default_host_filter + FLAGS.default_host_filter = \ 'nova.scheduler.host_filter.AllHostsFilter' self.instance_type = dict(name='tiny', memory_mb=50, @@ -76,52 +76,52 @@ class HostFilterTestCase(test.TestCase): self.zone_manager.service_states = states def tearDown(self): - FLAGS.default_host_filter_driver = self.old_flag + FLAGS.default_host_filter = self.old_flag - def test_choose_driver(self): - # Test default driver ... - driver = host_filter.choose_driver() - self.assertEquals(driver._full_name(), + def test_choose_filter(self): + # Test default filter ... + hf = host_filter.choose_host_filter() + self.assertEquals(hf._full_name(), 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid driver ... - driver = host_filter.choose_driver( + # Test valid filter ... + hf = host_filter.choose_host_filter( 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(driver._full_name(), + self.assertEquals(hf._full_name(), 'nova.scheduler.host_filter.InstanceTypeFilter') - # Test invalid driver ... + # Test invalid filter ... 
try: - host_filter.choose_driver('does not exist') - self.fail("Should not find driver") - except exception.SchedulerHostFilterDriverNotFound: + host_filter.choose_host_filter('does not exist') + self.fail("Should not find host filter.") + except exception.SchedulerHostFilterNotFound: pass - def test_all_host_driver(self): - driver = host_filter.AllHostsFilter() - cooked = driver.instance_type_to_filter(self.instance_type) - hosts = driver.filter_hosts(self.zone_manager, cooked) + def test_all_host_filter(self): + hf = host_filter.AllHostsFilter() + cooked = hf.instance_type_to_filter(self.instance_type) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(10, len(hosts)) for host, capabilities in hosts: self.assertTrue(host.startswith('host')) - def test_instance_type_driver(self): - driver = host_filter.InstanceTypeFilter() + def test_instance_type_filter(self): + hf = host_filter.InstanceTypeFilter() # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) + name, cooked = hf.instance_type_to_filter(self.instance_type) self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', name) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() self.assertEquals('host05', just_hosts[0]) self.assertEquals('host10', just_hosts[5]) - def test_json_driver(self): - driver = host_filter.JsonFilter() + def test_json_filter(self): + hf = host_filter.JsonFilter() # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) + name, cooked = hf.instance_type_to_filter(self.instance_type) self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() @@ -141,7 +141,7 @@ class HostFilterTestCase(test.TestCase): ] ] cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -153,7 +153,7 @@ class HostFilterTestCase(test.TestCase): ['=', '$compute.host_memory_free', 30], ] cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(9, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -163,7 +163,7 @@ class HostFilterTestCase(test.TestCase): raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -175,35 +175,32 @@ class HostFilterTestCase(test.TestCase): raw = ['unknown command', ] cooked = json.dumps(raw) try: - driver.filter_hosts(self.zone_manager, cooked) + hf.filter_hosts(self.zone_manager, cooked) self.fail("Should give KeyError") except KeyError, e: pass - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( + self.assertTrue(hf.filter_hosts(self.zone_manager, 
json.dumps([]))) + self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({}))) + self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps( ['not', True, False, True, False] ))) try: - driver.filter_hosts(self.zone_manager, json.dumps( + hf.filter_hosts(self.zone_manager, json.dumps( 'not', True, False, True, False )) self.fail("Should give KeyError") except KeyError, e: pass - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$foo', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$.....', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] - ))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', '$foo', 100]))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', '$.....', 100]))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps( + ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', {}, ['>', '$missing....foo']] - ))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', {}, ['>', '$missing....foo']]))) From 3c31cc3a3ca5724c65685cb21eb940eb0b9f202a Mon Sep 17 00:00:00 2001 From: William Wolf Date: Mon, 23 May 2011 22:47:44 -0400 Subject: [PATCH 33/65] make image_ref and image_id usage more consistant, eliminate redundancy in compute_api.create() call --- nova/tests/test_cloud.py | 4 ++-- nova/tests/test_compute.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454d..3aaca683 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -302,7 +302,7 @@ class CloudTestCase(test.TestCase): def test_console_output(self): instance_type = FLAGS.default_instance_type max_count = 1 - kwargs = {'image_id': 'ami-1', + kwargs = {'image_ref': 'ami-1', 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) @@ -318,7 +318,7 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) def test_ajax_console(self): - kwargs = {'image_id': 'ami-1'} + kwargs = {'image_ref': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] greenthread.sleep(0.3) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 9170837b..b02b99f6 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -150,7 +150,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_ref=None, security_group=['testgroup']) try: self.assertEqual(len(db.security_group_get_by_instance( @@ -168,7 +168,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_ref=None, security_group=['testgroup']) try: db.instance_destroy(self.context, ref[0]['id']) @@ -184,7 +184,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_ref=None, security_group=['testgroup']) try: From 293284e9eeb57b18727ac3e6c2388f51fe24ebcb Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 25 May 2011 03:29:16 -0400 Subject: [PATCH 34/65] Don't 
need to import json. --- nova/tests/test_notifier.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 14bef79b..523f38f2 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import json - import stubout import nova From a5e81ac4bef0743fa0595f000fe0cce11ef492d1 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Wed, 25 May 2011 17:51:30 +0900 Subject: [PATCH 35/65] Add unittests for cloning volumes. --- nova/tests/test_cloud.py | 19 +++++++++++++++++++ nova/tests/test_volume.py | 20 +++++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index d9169a64..8c7520fe 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -171,6 +171,25 @@ class CloudTestCase(test.TestCase): db.volume_destroy(self.context, vol1['id']) db.volume_destroy(self.context, vol2['id']) + def test_create_volume_from_snapshot(self): + """Makes sure create_volume works when we specify a snapshot.""" + vol = db.volume_create(self.context, {'size': 1}) + snap = db.snapshot_create(self.context, {'volume_id': vol['id'], + 'volume_size': vol['size'], + 'status': "available"}) + snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') + + result = self.cloud.create_volume(self.context, + snapshot_id=snapshot_id) + volume_id = result['volumeId'] + result = self.cloud.describe_volumes(self.context) + self.assertEqual(len(result['volumeSet']), 2) + self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id) + + db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id)) + db.snapshot_destroy(self.context, snap['id']) + db.volume_destroy(self.context, vol['id']) + def test_describe_availability_zones(self): """Makes sure describe_availability_zones works and filters results.""" service1 = db.service_create(self.context, {'host': 'host1_zones', diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index c66b6695..1c25d601 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -45,10 +45,11 @@ class VolumeTestCase(test.TestCase): self.context = context.get_admin_context() @staticmethod - def _create_volume(size='0'): + def _create_volume(size='0', snapshot_id=None): """Create a volume object.""" vol = {} vol['size'] = size + vol['snapshot_id'] = snapshot_id vol['user_id'] = 'fake' vol['project_id'] = 'fake' vol['availability_zone'] = FLAGS.storage_availability_zone @@ -69,6 +70,23 @@ class VolumeTestCase(test.TestCase): self.context, volume_id) + def test_create_volume_from_snapshot(self): + """Test volume can be created from a snapshot.""" + volume_src_id = self._create_volume() + self.volume.create_volume(self.context, volume_src_id) + snapshot_id = self._create_snapshot(volume_src_id) + self.volume.create_snapshot(self.context, volume_src_id, snapshot_id) + volume_dst_id = self._create_volume(0, snapshot_id) + self.volume.create_volume(self.context, volume_dst_id, snapshot_id) + self.assertEqual(volume_dst_id, db.volume_get(context.get_admin_context(), + volume_dst_id).id) + self.assertEqual(snapshot_id, db.volume_get(context.get_admin_context(), + volume_dst_id).snapshot_id) + + self.volume.delete_volume(self.context, volume_dst_id) + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume_src_id) + def test_too_big_volume(self): """Ensure 
failure if a too large of a volume is requested.""" # FIXME(vish): validation needs to move into the data layer in From 088664d007b2423bef2ce44f487857ba0c255dfe Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 25 May 2011 19:57:04 -0400 Subject: [PATCH 36/65] Renamed image_ref variables to image_href. Since the convention is that x_ref vars may imply that they are db objects. --- nova/tests/test_cloud.py | 4 ++-- nova/tests/test_compute.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 3aaca683..1219d600 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -302,7 +302,7 @@ class CloudTestCase(test.TestCase): def test_console_output(self): instance_type = FLAGS.default_instance_type max_count = 1 - kwargs = {'image_ref': 'ami-1', + kwargs = {'image_href': 'ami-1', 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) @@ -318,7 +318,7 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) def test_ajax_console(self): - kwargs = {'image_ref': 'ami-1'} + kwargs = {'image_href': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] greenthread.sleep(0.3) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index b02b99f6..b4097660 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -150,7 +150,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_ref=None, + image_href=None, security_group=['testgroup']) try: self.assertEqual(len(db.security_group_get_by_instance( @@ -168,7 +168,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_ref=None, + image_href=None, security_group=['testgroup']) try: db.instance_destroy(self.context, ref[0]['id']) @@ -184,7 +184,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_ref=None, + image_href=None, security_group=['testgroup']) try: From 0f1938cc07072bbb31171651ccbb64e6921d3d66 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 26 May 2011 11:21:28 -0400 Subject: [PATCH 37/65] Fix test_cloud tests. 
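One mechanical detail from the volume-from-snapshot tests in patch 35 above is worth spelling out: the EC2-visible identifier is just the integer database id rendered through a printf-style template, and going back the other way is a base-16 parse of the tail. Plain Python, independent of the ec2utils helpers the tests call:

# id_to_ec2_id(snap['id'], 'snap-%08x') in the test boils down to
# template formatting; the reverse direction is a hex parse.
snapshot_db_id = 5
ec2_id = 'snap-%08x' % snapshot_db_id
print(ec2_id)                              # snap-00000005
print(int(ec2_id.split('-', 1)[1], 16))    # 5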
--- nova/tests/test_cloud.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 1219d600..54c0454d 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -302,7 +302,7 @@ class CloudTestCase(test.TestCase): def test_console_output(self): instance_type = FLAGS.default_instance_type max_count = 1 - kwargs = {'image_href': 'ami-1', + kwargs = {'image_id': 'ami-1', 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) @@ -318,7 +318,7 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) def test_ajax_console(self): - kwargs = {'image_href': 'ami-1'} + kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] greenthread.sleep(0.3) From 074e7da80b08b82fc3d8672c25df364dc7d14444 Mon Sep 17 00:00:00 2001 From: John Tran Date: Thu, 26 May 2011 10:22:45 -0700 Subject: [PATCH 38/65] instance obj returned is not a hash, instead is sqlalchemy obj and hostname attr is what the logic is looking for --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 26c0d776..51373d28 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -536,7 +536,7 @@ class FloatingIpCommands(object): for floating_ip in floating_ips: instance = None if floating_ip['fixed_ip']: - instance = floating_ip['fixed_ip']['instance']['ec2_id'] + instance = floating_ip['fixed_ip']['instance'].hostname print "%s\t%s\t%s" % (floating_ip['host'], floating_ip['address'], instance) From 19668fd79ce200538784762a96e7446ac5190315 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Thu, 26 May 2011 10:53:48 -0700 Subject: [PATCH 39/65] missed a driver reference --- nova/scheduler/host_filter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index d9771754..ed76c90b 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -296,13 +296,13 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): hosts for weighing. The particular filter used may be passed in as an argument or the default will be used. - request_spec = {'filter_name': , + request_spec = {'filter': , 'instance_type': } """ def filter_hosts(self, num, request_spec): """Filter the full host list (from the ZoneManager)""" - filter_name = request_spec.get('filter_name', None) + filter_name = request_spec.get('filter', None) host_filter = choose_host_filter(filter_name) # TODO(sandy): We're only using InstanceType-based specs From b75ee2e531ecf14b2518eed2a4ec14764e65b32b Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 26 May 2011 18:14:38 -0400 Subject: [PATCH 40/65] Rename instances.image_id to instances.image_ref. 
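With patch 39's key rename in place, the request_spec a caller hands HostFilterScheduler takes the shape below. The keys come from the scheduler docstring; the instance_type fields are trimmed from the dicts the host-filter tests build, and the values are illustrative:

request_spec = {
    'filter': 'nova.scheduler.host_filter.JsonFilter',
    'instance_type': {'name': 'tiny',
                      'memory_mb': 50,
                      'vcpus': 10,
                      'local_gb': 500,
                      'flavorid': 1},
}
# filter_hosts() pulls 'filter' (falling back to the FLAGS default
# when absent) and feeds 'instance_type' to instance_type_to_filter().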
--- nova/tests/test_cloud.py | 6 +++--- nova/tests/test_compute.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454d..eefab58d 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -191,10 +191,10 @@ class CloudTestCase(test.TestCase): def test_describe_instances(self): """Makes sure describe_instances works and filters results.""" inst1 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host1'}) inst2 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host2'}) comp1 = db.service_create(self.context, {'host': 'host1', 'availability_zone': 'zone1', @@ -390,7 +390,7 @@ class CloudTestCase(test.TestCase): def test_terminate_instances(self): inst1 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host1'}) terminate_instances = self.cloud.terminate_instances # valid instance_id diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index b4097660..25454087 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -84,7 +84,7 @@ class ComputeTestCase(test.TestCase): def _create_instance(self, params={}): """Create a test instance""" inst = {} - inst['image_id'] = 1 + inst['image_ref'] = 1 inst['reservation_id'] = 'r-fakeres' inst['launch_time'] = '10' inst['user_id'] = self.user.id From d62487d377e3c299e87c97e9e4e8a1067f39b7e8 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Fri, 27 May 2011 14:13:17 +0900 Subject: [PATCH 41/65] Fix pep8 errors. --- nova/tests/test_volume.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 8d58b313..4f10ee6a 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -78,10 +78,12 @@ class VolumeTestCase(test.TestCase): self.volume.create_snapshot(self.context, volume_src_id, snapshot_id) volume_dst_id = self._create_volume(0, snapshot_id) self.volume.create_volume(self.context, volume_dst_id, snapshot_id) - self.assertEqual(volume_dst_id, db.volume_get(context.get_admin_context(), - volume_dst_id).id) - self.assertEqual(snapshot_id, db.volume_get(context.get_admin_context(), - volume_dst_id).snapshot_id) + self.assertEqual(volume_dst_id, db.volume_get( + context.get_admin_context(), + volume_dst_id).id) + self.assertEqual(snapshot_id, db.volume_get( + context.get_admin_context(), + volume_dst_id).snapshot_id) self.volume.delete_volume(self.context, volume_dst_id) self.volume.delete_snapshot(self.context, snapshot_id) From 19b1e2c5615684bd3a6310be9e8704db6f46017d Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 27 May 2011 05:01:42 -0700 Subject: [PATCH 42/65] fixed docstrings and general tidying --- nova/scheduler/host_filter.py | 41 ++++++++++++++++---------- nova/scheduler/zone_aware_scheduler.py | 33 +++++++++++++-------- 2 files changed, 47 insertions(+), 27 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index ed76c90b..89faace4 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -69,9 +69,11 @@ class HostFilter(object): class AllHostsFilter(HostFilter): - """NOP host filter. Returns all hosts in ZoneManager. + """ + NOP host filter. Returns all hosts in ZoneManager. This essentially does what the old Scheduler+Chance used - to give us.""" + to give us. 
+ """ def instance_type_to_filter(self, instance_type): """Return anything to prevent base-class from raising @@ -133,8 +135,10 @@ class InstanceTypeFilter(HostFilter): class JsonFilter(HostFilter): - """Host Filter to allow simple JSON-based grammar for - selecting hosts.""" + """ + Host Filter to allow simple JSON-based grammar for + selecting hosts. + """ def _equals(self, args): """First term is == all the other terms.""" @@ -229,8 +233,10 @@ class JsonFilter(HostFilter): return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): - """Strings prefixed with $ are capability lookups in the - form '$service.capability[.subcap*]'""" + """ + Strings prefixed with $ are capability lookups in the + form '$service.capability[.subcap*]' + """ if not string: return None if string[0] != '$': @@ -277,22 +283,25 @@ FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] def choose_host_filter(filter_name=None): - """Since the caller may specify which filter to use we need - to have an authoritative list of what is permissible. This - function checks the filter name against a predefined set - of acceptable filters.""" + """ + Since the caller may specify which filter to use we need + to have an authoritative list of what is permissible. This + function checks the filter name against a predefined set + of acceptable filters. + """ if not filter_name: filter_name = FLAGS.default_host_filter for filter_class in FILTERS: - if "%s.%s" % (filter_class.__module__, filter_class.__name__) == \ - filter_name: + host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) + if host_match == filter_name: return filter_class() raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): - """The HostFilterScheduler uses the HostFilter to filter + """ + The HostFilterScheduler uses the HostFilter to filter hosts for weighing. The particular filter used may be passed in as an argument or the default will be used. @@ -313,6 +322,8 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): return host_filter.filter_hosts(self.zone_manager, query) def weigh_hosts(self, num, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format.""" + """ + Derived classes must override this method and return + a lists of hosts in [{weight, hostname}] format. + """ return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index dc18fc42..23690762 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -40,13 +40,15 @@ class ZoneAwareScheduler(driver.Scheduler): def schedule_run_instance(self, context, instance_id, request_spec, *args, **kwargs): - """This method is called from nova.compute.api to provision + """ + This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being passed in to see if this is a request to: 1. Create a Build Plan and then provision, or 2. Use the Build Plan information in the request parameters to simply create the instance (either in this zone or - a child zone).""" + a child zone). + """ # TODO(sandy): We'll have to look for richer specs at some point. @@ -79,15 +81,16 @@ class ZoneAwareScheduler(driver.Scheduler): % locals()) else: # TODO(sandy) Provision in child zone ... 
- LOG.warning(_("Provision to Child Zone not supported (yet)") - % locals()) + LOG.warning(_("Provision to Child Zone not supported (yet)")) pass def select(self, context, request_spec, *args, **kwargs): - """Select returns a list of weights and zone/host information + """ + Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal - anything about the children.""" + anything about the children. + """ return self._schedule(context, "compute", request_spec, *args, **kwargs) @@ -95,13 +98,15 @@ class ZoneAwareScheduler(driver.Scheduler): # so we don't implement the default "schedule()" method required # of Schedulers. def schedule(self, context, topic, request_spec, *args, **kwargs): - """The schedule() contract requires we return the one + """ + The schedule() contract requires we return the one best-suited host for this request. """ raise driver.NoValidHost(_('No hosts were available')) def _schedule(self, context, topic, request_spec, *args, **kwargs): - """Returns a list of hosts that meet the required specs, + """ + Returns a list of hosts that meet the required specs, ordered by their fitness. """ @@ -137,11 +142,15 @@ class ZoneAwareScheduler(driver.Scheduler): return weighted def filter_hosts(self, num, request_spec): - """Derived classes must override this method and return - a list of hosts in [(hostname, capability_dict)] format.""" + """ + Derived classes must override this method and return + a list of hosts in [(hostname, capability_dict)] format. + """ raise NotImplemented() def weigh_hosts(self, num, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format.""" + """ + Derived classes must override this method and return + a lists of hosts in [{weight, hostname}] format. + """ raise NotImplemented() From 69cd97189f366da9d8d25507f9a975166ddde232 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 27 May 2011 07:24:02 -0700 Subject: [PATCH 43/65] docstrings again and import ordering --- nova/scheduler/host_filter.py | 18 ++++++------------ nova/scheduler/zone_aware_scheduler.py | 20 +++++++------------- 2 files changed, 13 insertions(+), 25 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 89faace4..4260cbf4 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -69,8 +69,7 @@ class HostFilter(object): class AllHostsFilter(HostFilter): - """ - NOP host filter. Returns all hosts in ZoneManager. + """ NOP host filter. Returns all hosts in ZoneManager. This essentially does what the old Scheduler+Chance used to give us. """ @@ -135,8 +134,7 @@ class InstanceTypeFilter(HostFilter): class JsonFilter(HostFilter): - """ - Host Filter to allow simple JSON-based grammar for + """Host Filter to allow simple JSON-based grammar for selecting hosts. 
""" @@ -233,8 +231,7 @@ class JsonFilter(HostFilter): return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): - """ - Strings prefixed with $ are capability lookups in the + """Strings prefixed with $ are capability lookups in the form '$service.capability[.subcap*]' """ if not string: @@ -283,8 +280,7 @@ FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] def choose_host_filter(filter_name=None): - """ - Since the caller may specify which filter to use we need + """Since the caller may specify which filter to use we need to have an authoritative list of what is permissible. This function checks the filter name against a predefined set of acceptable filters. @@ -300,8 +296,7 @@ def choose_host_filter(filter_name=None): class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): - """ - The HostFilterScheduler uses the HostFilter to filter + """The HostFilterScheduler uses the HostFilter to filter hosts for weighing. The particular filter used may be passed in as an argument or the default will be used. @@ -322,8 +317,7 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): return host_filter.filter_hosts(self.zone_manager, query) def weigh_hosts(self, num, request_spec, hosts): - """ - Derived classes must override this method and return + """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format. """ return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 23690762..bc67c779 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -23,8 +23,8 @@ across zones. There are two expansion points to this class for: import operator from nova import db -from nova import rpc from nova import log as logging +from nova import rpc from nova.scheduler import api from nova.scheduler import driver @@ -40,8 +40,7 @@ class ZoneAwareScheduler(driver.Scheduler): def schedule_run_instance(self, context, instance_id, request_spec, *args, **kwargs): - """ - This method is called from nova.compute.api to provision + """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being passed in to see if this is a request to: 1. Create a Build Plan and then provision, or @@ -85,8 +84,7 @@ class ZoneAwareScheduler(driver.Scheduler): pass def select(self, context, request_spec, *args, **kwargs): - """ - Select returns a list of weights and zone/host information + """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal anything about the children. @@ -98,15 +96,13 @@ class ZoneAwareScheduler(driver.Scheduler): # so we don't implement the default "schedule()" method required # of Schedulers. def schedule(self, context, topic, request_spec, *args, **kwargs): - """ - The schedule() contract requires we return the one + """The schedule() contract requires we return the one best-suited host for this request. """ raise driver.NoValidHost(_('No hosts were available')) def _schedule(self, context, topic, request_spec, *args, **kwargs): - """ - Returns a list of hosts that meet the required specs, + """Returns a list of hosts that meet the required specs, ordered by their fitness. 
""" @@ -142,15 +138,13 @@ class ZoneAwareScheduler(driver.Scheduler): return weighted def filter_hosts(self, num, request_spec): - """ - Derived classes must override this method and return + """Derived classes must override this method and return a list of hosts in [(hostname, capability_dict)] format. """ raise NotImplemented() def weigh_hosts(self, num, request_spec, hosts): - """ - Derived classes must override this method and return + """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format. """ raise NotImplemented() From 2b7b5a8332a0994699c34bd54432013dae9226e5 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 27 May 2011 15:48:40 -0400 Subject: [PATCH 44/65] Glance client updates for xenapi and vmware API to work with image refs. --- nova/tests/test_vmwareapi.py | 5 ++--- nova/tests/test_xenapi.py | 29 +++++++++++++---------------- nova/tests/vmwareapi/db_fakes.py | 2 +- 3 files changed, 16 insertions(+), 20 deletions(-) diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py index 22b66010..e5ebd160 100644 --- a/nova/tests/test_vmwareapi.py +++ b/nova/tests/test_vmwareapi.py @@ -55,8 +55,7 @@ class VMWareAPIVMTestCase(test.TestCase): vmwareapi_fake.reset() db_fakes.stub_out_db_instance_api(self.stubs) stubs.set_stubs(self.stubs) - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) self.conn = vmwareapi_conn.get_connection(False) def _create_instance_in_the_db(self): @@ -64,7 +63,7 @@ class VMWareAPIVMTestCase(test.TestCase): 'id': 1, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': "1", + 'image_ref': "1", 'kernel_id': "1", 'ramdisk_id': "1", 'instance_type': 'm1.large', diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 18a26789..56e1e47a 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -79,7 +79,7 @@ class XenAPIVolumeTestCase(test.TestCase): self.values = {'id': 1, 'project_id': 'fake', 'user_id': 'fake', - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ -193,8 +193,7 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_is_vdi_pv(self.stubs) self.stubs.Set(VMOps, 'reset_network', reset_network) stubs.stub_out_vm_methods(self.stubs) - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) fake_utils.stub_out_utils_execute(self.stubs) self.context = context.RequestContext('fake', 'fake', False) self.conn = xenapi_conn.get_connection(False) @@ -207,7 +206,7 @@ class XenAPIVMTestCase(test.TestCase): 'id': id, 'project_id': proj, 'user_id': user, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ -351,14 +350,14 @@ class XenAPIVMTestCase(test.TestCase): self.assertEquals(self.vm['HVM_boot_params'], {}) self.assertEquals(self.vm['HVM_boot_policy'], '') - def _test_spawn(self, image_id, kernel_id, ramdisk_id, + def _test_spawn(self, image_ref, kernel_id, ramdisk_id, instance_type_id="3", os_type="linux", instance_id=1, check_injection=False): stubs.stubout_loopingcall_start(self.stubs) values = {'id': instance_id, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': image_id, + 'image_ref': image_ref, 'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id, 'instance_type_id': instance_type_id, @@ -567,7 +566,7 @@ class XenAPIVMTestCase(test.TestCase): 'id': 1, 'project_id': self.project.id, 
'user_id': self.user.id, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ -623,7 +622,7 @@ class XenAPIMigrateInstance(test.TestCase): self.values = {'id': 1, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': None, 'ramdisk_id': None, 'local_gb': 5, @@ -634,8 +633,7 @@ class XenAPIMigrateInstance(test.TestCase): fake_utils.stub_out_utils_execute(self.stubs) stubs.stub_out_migration_methods(self.stubs) stubs.stubout_get_this_vm_uuid(self.stubs) - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) def tearDown(self): super(XenAPIMigrateInstance, self).tearDown() @@ -661,8 +659,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): """Unit tests for code that detects the ImageType.""" def setUp(self): super(XenAPIDetermineDiskImageTestCase, self).setUp() - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) class FakeInstance(object): pass @@ -679,7 +676,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): def test_instance_disk(self): """If a kernel is specified, the image type is DISK (aka machine).""" FLAGS.xenapi_image_service = 'objectstore' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL self.assert_disk_type(vm_utils.ImageType.DISK) @@ -689,7 +686,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): DISK_RAW is assumed. """ FLAGS.xenapi_image_service = 'objectstore' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -699,7 +696,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): this case will be 'raw'. """ FLAGS.xenapi_image_service = 'glance' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -709,7 +706,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): this case will be 'vhd'. """ FLAGS.xenapi_image_service = 'glance' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_VHD) diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index 0addd557..764de42d 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -61,7 +61,7 @@ def stub_out_db_instance_api(stubs): 'name': values['name'], 'id': values['id'], 'reservation_id': utils.generate_uid('r'), - 'image_id': values['image_id'], + 'image_ref': values['image_ref'], 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'state_description': 'scheduling', From fa306338966270b4d4eaebb45a493505ee930b7a Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 28 May 2011 21:10:57 -0400 Subject: [PATCH 45/65] Cleanup instances_path in test_libvirt test_spawn_with_network_info test. 
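The XenAPIDetermineDiskImageTestCase docstrings above encode a small decision table that reads more easily flattened out. A sketch distilled from those docstrings, with strings standing in for the real ImageType constants; this is a summary of what the tests assert, not vm_utils' actual code:

def determine_disk_image_type(glance_disk_format, kernel_id):
    # A separate kernel/ramdisk pair means a machine-style image.
    if kernel_id:
        return 'DISK'
    # No kernel: the image's own format decides.
    if glance_disk_format == 'vhd':
        return 'DISK_VHD'
    # Raw (or unspecified, under the objectstore service) images
    # boot whole-disk.
    return 'DISK_RAW'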
--- nova/tests/test_libvirt.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 4efdd6ae..1fac4e4e 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -18,6 +18,7 @@ import eventlet import mox import os import re +import shutil import sys from xml.etree.ElementTree import fromstring as xml_to_tree @@ -645,6 +646,8 @@ class LibvirtConnTestCase(test.TestCase): except Exception, e: count = (0 <= str(e.message).find('Unexpected method call')) + shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name)) + self.assertTrue(count) def test_get_host_ip_addr(self): From 9e2cadbb3dccccda39de8f28016dd9ffa1d3e40b Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Tue, 31 May 2011 23:36:49 +0400 Subject: [PATCH 46/65] Moved everything from thread-local storage to class attributes --- nova/auth/ldapdriver.py | 38 +++++++++++--------------------------- nova/auth/manager.py | 14 +++----------- 2 files changed, 14 insertions(+), 38 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 9fe0165a..91f412ba 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -26,7 +26,6 @@ public methods. import functools import sys -import threading from nova import exception from nova import flags @@ -106,7 +105,8 @@ class LdapDriver(object): isadmin_attribute = 'isNovaAdmin' project_attribute = 'owner' project_objectclass = 'groupOfNames' - __local = threading.local() + conn = None + mc = None def __init__(self): """Imports the LDAP module""" @@ -117,15 +117,22 @@ class LdapDriver(object): LdapDriver.project_attribute = 'projectManager' LdapDriver.project_objectclass = 'novaProject' self.__cache = None + if LdapDriver.conn is None: + LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) + LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + if LdapDriver.mc is None: + if FLAGS.memcached_servers: + import memcache + else: + from nova import fakememcache as memcache + LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def __enter__(self): - """Creates the connection to LDAP""" # TODO(yorik-sar): Should be per-request cache, not per-driver-request self.__cache = {} return self def __exit__(self, exc_type, exc_value, traceback): - """Destroys the connection to LDAP""" self.__cache = None return False @@ -149,29 +156,6 @@ class LdapDriver(object): return inner return do_wrap - @property - def conn(self): - try: - return self.__local.conn - except AttributeError: - conn = self.ldap.initialize(FLAGS.ldap_url) - conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) - self.__local.conn = conn - return conn - - @property - def mc(self): - try: - return self.__local.mc - except AttributeError: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache - mc = memcache.Client(FLAGS.memcached_servers, debug=0) - self.__local.mc = mc - return mc - @sanitize @__local_cache('uid_user-%s') def get_user(self, uid): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c71f0f16..c887297f 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -23,7 +23,6 @@ Nova authentication management import os import shutil import string # pylint: disable=W0402 -import threading import tempfile import uuid import zipfile @@ -207,7 +206,7 @@ class AuthManager(object): """ _instance = None - __local = threading.local() + mc = None def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" @@ -224,19 
+223,12 @@ class AuthManager(object): self.network_manager = utils.import_object(FLAGS.network_manager) if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) - - @property - def mc(self): - try: - return self.__local.mc - except AttributeError: + if AuthManager.mc is None: if FLAGS.memcached_servers: import memcache else: from nova import fakememcache as memcache - mc = memcache.Client(FLAGS.memcached_servers, debug=0) - self.__local.mc = mc - return mc + AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', From 3380c79339b50e4c7a66502ac3738083ca1ecd17 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 1 Jun 2011 10:17:00 -0400 Subject: [PATCH 47/65] pep8 fixes --- nova/log.py | 2 +- nova/tests/test_notifier.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/log.py b/nova/log.py index 960598b1..6909916a 100644 --- a/nova/log.py +++ b/nova/log.py @@ -272,7 +272,7 @@ class PublishErrorsHandler(logging.Handler): def emit(self, record): nova.notifier.api.notify('nova.error.publisher', 'error_notification', nova.notifier.api.ERROR, dict(error=record.msg)) - + def handle_exception(type, value, tb): extra = {} diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index 523f38f2..64b799a2 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -122,12 +122,13 @@ class NotifierTestCase(test.TestCase): self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True) LOG = log.getLogger('nova') LOG.setup_from_flags() - msgs = [] + def mock_cast(context, topic, data): msgs.append(data) - self.stubs.Set(nova.rpc, 'cast', mock_cast) - LOG.error('foo'); + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + LOG.error('foo') self.assertEqual(1, len(msgs)) msg = msgs[0] self.assertEqual(msg['event_type'], 'error_notification') From f2afb04293a77c5899556503b76efca1ffdd5f37 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 1 Jun 2011 18:32:49 +0400 Subject: [PATCH 48/65] Moved memcached driver import to the top of modules. --- nova/auth/ldapdriver.py | 10 ++++++---- nova/auth/manager.py | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 91f412ba..e26a360a 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -69,6 +69,12 @@ flags.DEFINE_string('ldap_developer', LOG = logging.getLogger("nova.ldapdriver") +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + # TODO(vish): make an abstract base class with the same public methods # to define a set interface for AuthDrivers. 
I'm delaying # creating this now because I'm expecting an auth refactor @@ -121,10 +127,6 @@ class LdapDriver(object): LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) if LdapDriver.mc is None: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def __enter__(self): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index c887297f..98c7dd26 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -73,6 +73,12 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', LOG = logging.getLogger('nova.auth.manager') +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + class AuthBase(object): """Base class for objects relating to auth @@ -224,10 +230,6 @@ class AuthManager(object): if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) if AuthManager.mc is None: - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def authenticate(self, access, signature, params, verb='GET', From 684fe7f0ce2a070acfdbbd2854093ed77e72026c Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 1 Jun 2011 18:34:54 +0400 Subject: [PATCH 49/65] PEP8 fix. --- nova/auth/ldapdriver.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index e26a360a..95e31ae3 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -125,7 +125,8 @@ class LdapDriver(object): self.__cache = None if LdapDriver.conn is None: LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) - LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, + FLAGS.ldap_password) if LdapDriver.mc is None: LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) From ecb4ca1fe5c4833f4afc6066b95767905e9e6d58 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 1 Jun 2011 10:37:54 -0400 Subject: [PATCH 50/65] fix pep8 issues --- nova/scheduler/host_filter.py | 3 +-- nova/tests/test_host_filter.py | 14 +++++--------- nova/tests/test_zone_aware_scheduler.py | 10 +++------- 3 files changed, 9 insertions(+), 18 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 4260cbf4..8827db4d 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -226,8 +226,7 @@ class JsonFilter(HostFilter): required_disk = instance_type['local_gb'] query = ['and', ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk] - ] + ['>=', '$compute.disk_available', required_disk]] return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index 07817cc5..098ebff3 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -133,13 +133,11 @@ class HostFilterTestCase(test.TestCase): raw = ['or', ['and', ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300] - ], + ['<', '$compute.disk_available', 300]], ['and', ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700] - ] - ] + ['>', '$compute.disk_available', 700]]] + cooked = 
json.dumps(raw) hosts = hf.filter_hosts(self.zone_manager, cooked) @@ -183,13 +181,11 @@ class HostFilterTestCase(test.TestCase): self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([]))) self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({}))) self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False] - ))) + ['not', True, False, True, False]))) try: hf.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False - )) + 'not', True, False, True, False)) self.fail("Should give KeyError") except KeyError, e: pass diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py index 37169fb9..90ae427e 100644 --- a/nova/tests/test_zone_aware_scheduler.py +++ b/nova/tests/test_zone_aware_scheduler.py @@ -39,15 +39,11 @@ class FakeZoneManager(zone_manager.ZoneManager): def __init__(self): self.service_states = { 'host1': { - 'compute': {'ram': 1000} - }, + 'compute': {'ram': 1000}}, 'host2': { - 'compute': {'ram': 2000} - }, + 'compute': {'ram': 2000}}, 'host3': { - 'compute': {'ram': 3000} - } - } + 'compute': {'ram': 3000}}} class FakeEmptyZoneManager(zone_manager.ZoneManager): From 72126d1aa859e9e72f3fcb378e136ea2ce7d6368 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Wed, 1 Jun 2011 10:58:17 -0400 Subject: [PATCH 51/65] updates to keep things looking better --- nova/scheduler/host_filter.py | 3 ++- nova/tests/test_host_filter.py | 7 +++++-- nova/tests/test_zone_aware_scheduler.py | 16 ++++++++++------ 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 8827db4d..7d6ee0ee 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -226,7 +226,8 @@ class JsonFilter(HostFilter): required_disk = instance_type['local_gb'] query = ['and', ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk]] + ['>=', '$compute.disk_available', required_disk], + ] return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index 098ebff3..3361c7b7 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -133,10 +133,13 @@ class HostFilterTestCase(test.TestCase): raw = ['or', ['and', ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300]], + ['<', '$compute.disk_available', 300], + ], ['and', ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700]]] + ['>', '$compute.disk_available', 700], + ], + ] cooked = json.dumps(raw) hosts = hf.filter_hosts(self.zone_manager, cooked) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py index 90ae427e..72b74be2 100644 --- a/nova/tests/test_zone_aware_scheduler.py +++ b/nova/tests/test_zone_aware_scheduler.py @@ -38,12 +38,16 @@ class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): class FakeZoneManager(zone_manager.ZoneManager): def __init__(self): self.service_states = { - 'host1': { - 'compute': {'ram': 1000}}, - 'host2': { - 'compute': {'ram': 2000}}, - 'host3': { - 'compute': {'ram': 3000}}} + 'host1': { + 'compute': {'ram': 1000}, + }, + 'host2': { + 'compute': {'ram': 2000}, + }, + 'host3': { + 'compute': {'ram': 3000}, + }, + } class FakeEmptyZoneManager(zone_manager.ZoneManager): From 471dde477e03347bd8e1305ece692b90ddea4e7d Mon Sep 17 00:00:00 2001 From: Josh Kearney 
Date: Wed, 1 Jun 2011 14:46:05 -0500 Subject: [PATCH 52/65] Allow SSL AMQP connections. --- nova/flags.py | 1 + nova/rpc.py | 1 + 2 files changed, 2 insertions(+) diff --git a/nova/flags.py b/nova/flags.py index 9eaac559..d5090edb 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -296,6 +296,7 @@ DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses') DEFINE_string('rabbit_host', 'localhost', 'rabbit host') DEFINE_integer('rabbit_port', 5672, 'rabbit port') +DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') diff --git a/nova/rpc.py b/nova/rpc.py index c5277c6a..2e78a31e 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -65,6 +65,7 @@ class Connection(carrot_connection.BrokerConnection): if new or not hasattr(cls, '_instance'): params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, + ssl=FLAGS.rabbit_use_ssl, userid=FLAGS.rabbit_userid, password=FLAGS.rabbit_password, virtual_host=FLAGS.rabbit_virtual_host) From 568b57fffb38cc3014c6f313dd1d11bd9139e8d4 Mon Sep 17 00:00:00 2001 From: John Tran Date: Wed, 1 Jun 2011 16:01:41 -0700 Subject: [PATCH 53/65] fixed as per peer review to make more consistent --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 51373d28..5de4d9e8 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -536,7 +536,7 @@ class FloatingIpCommands(object): for floating_ip in floating_ips: instance = None if floating_ip['fixed_ip']: - instance = floating_ip['fixed_ip']['instance'].hostname + instance = floating_ip['fixed_ip']['instance']['hostname'] print "%s\t%s\t%s" % (floating_ip['host'], floating_ip['address'], instance) From 570c89c76884ff611698e847a6ad154860b07db4 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 1 Jun 2011 16:51:26 -0700 Subject: [PATCH 54/65] fix novarc to work on mac and zsh --- nova/auth/novarc.template | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 8170fcaf..4a1f4180 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,4 +1,5 @@ -NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE})) +NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE-0}) +NOVA_KEY_DIR=$(dirname ${NOVARC}) export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" From b935a69d4c362725a195ea5a0fc19195bf712df8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 1 Jun 2011 18:55:41 -0700 Subject: [PATCH 55/65] missed a couple chars --- nova/auth/novarc.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 4a1f4180..92eed352 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,4 +1,4 @@ -NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE-0}) +NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE:-$0}) NOVA_KEY_DIR=$(dirname ${NOVARC}) export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" From d7166b8a09363db6a7eeda0e4db8a05cacb1bcef Mon Sep 17 00:00:00 2001 From: Mike Scherbakov Date: Thu, 2 Jun 2011 10:29:58 +0400 Subject: [PATCH 56/65] 
Refactored after review, fixed merge. --- nova/tests/test_libvirt.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index d9316ab4..d008a149 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -328,14 +328,14 @@ class LibvirtConnTestCase(test.TestCase): # To work with it from snapshot, the single image_service is needed recv_meta = image_service.create(context, sent_meta) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - libvirt_conn.LibvirtConnection._conn.lookupByName = fake_lookup - self.mox.StubOutWithMock(libvirt_conn.utils, 'execute') - libvirt_conn.utils.execute = fake_execute + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + connection.LibvirtConnection._conn.lookupByName = fake_lookup + self.mox.StubOutWithMock(connection.utils, 'execute') + connection.utils.execute = fake_execute self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) conn.snapshot(instance_ref, recv_meta['id']) snapshot = image_service.show(context, recv_meta['id']) From 67415f2d7567a7f4c9f331f51288eb525bd607c6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 10:20:26 -0700 Subject: [PATCH 57/65] don't use python if readlink is available --- nova/auth/novarc.template | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 92eed352..d30bd849 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,5 +1,6 @@ -NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' ${BASH_SOURCE:-$0}) -NOVA_KEY_DIR=$(dirname ${NOVARC}) +NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) || + NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}") +NOVA_KEY_DIR=${NOVARC%/*} export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" From f4ad2f8b27e783fc202a3914480d176a2ec2a44d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 11:28:41 -0700 Subject: [PATCH 58/65] use %% because % is a replacement string character --- nova/auth/novarc.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index d30bd849..eba3a853 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,6 +1,6 @@ NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) || NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}") -NOVA_KEY_DIR=${NOVARC%/*} +NOVA_KEY_DIR=${NOVARC%%/*} export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" From ab598f596ffc64cef8fac7d1ec754730fd244977 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 12:01:49 -0700 Subject: [PATCH 59/65] Tests to assure all exceptions can be raised as well as fixing NotAuthorized --- nova/tests/test_misc.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index cf8f4c05..c5875a84 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -21,11 +21,24 @@ import select from eventlet import greenpool from eventlet import greenthread +from nova import exception from nova import test from nova import utils from nova.utils import 
parse_mailmap, str_dict_replace +class ExceptionTestCase(test.TestCase): + @staticmethod + def _raise_exc(exc): + raise exc() + + def test_exceptions_raise(self): + for name in dir(exception): + exc = getattr(exception, name) + if isinstance(exc, type): + self.assertRaises(exc, self._raise_exc, exc) + + class ProjectTestCase(test.TestCase): def test_authors_up_to_date(self): topdir = os.path.normpath(os.path.dirname(__file__) + '/../../') From 5ba52276ca130cf2c0920705323a41bea45dbf72 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 14:23:05 -0700 Subject: [PATCH 60/65] make all uses of utcnow use our testable utils.utcnow --- bin/nova-manage | 3 +-- nova/notifier/api.py | 7 +++---- nova/scheduler/simple.py | 11 +++++------ nova/tests/test_compute.py | 5 ++--- nova/tests/test_console.py | 2 -- nova/tests/test_middleware.py | 1 - nova/tests/test_scheduler.py | 16 ++++++++-------- 7 files changed, 19 insertions(+), 26 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 5de4d9e8..b545c424 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -53,7 +53,6 @@ CLI interface for nova management. """ -import datetime import gettext import glob import json @@ -689,7 +688,7 @@ class ServiceCommands(object): """Show a list of all running services. Filter by host & service name. args: [host] [service]""" ctxt = context.get_admin_context() - now = datetime.datetime.utcnow() + now = utils.utcnow() services = db.service_get_all(ctxt) if host: services = [s for s in services if s['host'] == host] diff --git a/nova/notifier/api.py b/nova/notifier/api.py index a3e7a039..d49517c8 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -11,9 +11,8 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. -import datetime import uuid from nova import flags @@ -64,7 +63,7 @@ def notify(publisher_id, event_type, priority, payload): {'message_id': str(uuid.uuid4()), 'publisher_id': 'compute.host1', - 'timestamp': datetime.datetime.utcnow(), + 'timestamp': utils.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', 'payload': {'instance_id': 12, ... 
}} @@ -79,5 +78,5 @@ def notify(publisher_id, event_type, priority, payload): event_type=event_type, priority=priority, payload=payload, - timestamp=str(datetime.datetime.utcnow())) + timestamp=str(utils.utcnow())) driver.notify(msg) diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index dd568d2c..87cdef11 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -21,10 +21,9 @@ Simple Scheduler """ -import datetime - from nova import db from nova import flags +from nova import utils from nova.scheduler import driver from nova.scheduler import chance @@ -54,7 +53,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': host, 'scheduled_at': now}) return host @@ -66,7 +65,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': service['host'], @@ -90,7 +89,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': host, 'scheduled_at': now}) return host @@ -103,7 +102,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': service['host'], diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 9170837b..c726080e 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -19,7 +19,6 @@ Tests For Compute """ -import datetime import mox import stubout @@ -217,12 +216,12 @@ class ComputeTestCase(test.TestCase): instance_ref = db.instance_get(self.context, instance_id) self.assertEqual(instance_ref['launched_at'], None) self.assertEqual(instance_ref['deleted_at'], None) - launch = datetime.datetime.utcnow() + launch = utils.utcnow() self.compute.run_instance(self.context, instance_id) instance_ref = db.instance_get(self.context, instance_id) self.assert_(instance_ref['launched_at'] > launch) self.assertEqual(instance_ref['deleted_at'], None) - terminate = datetime.datetime.utcnow() + terminate = utils.utcnow() self.compute.terminate_instance(self.context, instance_id) self.context = self.context.elevated(True) instance_ref = db.instance_get(self.context, instance_id) diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py index 1a9a867e..831e7670 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -20,8 +20,6 @@ Tests For Console proxy. """ -import datetime - from nova import context from nova import db from nova import exception diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py index 6564a695..40d117c4 100644 --- a/nova/tests/test_middleware.py +++ b/nova/tests/test_middleware.py @@ -16,7 +16,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import datetime import webob import webob.dec import webob.exc diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 54b3f80f..1cf6bbfb 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -196,7 +196,7 @@ class ZoneSchedulerTestCase(test.TestCase): service.topic = 'compute' service.id = kwargs['id'] service.availability_zone = kwargs['zone'] - service.created_at = datetime.datetime.utcnow() + service.created_at = utils.utcnow() return service def test_with_two_zones(self): @@ -290,7 +290,7 @@ class SimpleDriverTestCase(test.TestCase): dic['host'] = kwargs.get('host', 'dummy') s_ref = db.service_create(self.context, dic) if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): - t = datetime.datetime.utcnow() - datetime.timedelta(0) + t = utils.utcnow() - datetime.timedelta(0) dic['created_at'] = kwargs.get('created_at', t) dic['updated_at'] = kwargs.get('updated_at', t) db.service_update(self.context, s_ref['id'], dic) @@ -401,7 +401,7 @@ class SimpleDriverTestCase(test.TestCase): FLAGS.compute_manager) compute1.start() s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -542,7 +542,7 @@ class SimpleDriverTestCase(test.TestCase): def test_wont_sechedule_if_specified_host_is_down(self): compute1 = self.start_service('compute', host='host1') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -692,7 +692,7 @@ class SimpleDriverTestCase(test.TestCase): dic = {'instance_id': instance_id, 'size': 1} v_ref = db.volume_create(self.context, {'instance_id': instance_id, 'size': 1}) - t1 = datetime.datetime.utcnow() - datetime.timedelta(1) + t1 = utils.utcnow() - datetime.timedelta(1) dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', 'topic': 'volume', 'report_count': 0} s_ref = db.service_create(self.context, dic) @@ -709,7 +709,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms src-compute node is alive.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -737,7 +737,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms exception raises in case dest host does not exist.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -796,7 +796,7 @@ class SimpleDriverTestCase(test.TestCase): # mocks for live_migration_common_check() instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t1 = datetime.datetime.utcnow() - datetime.timedelta(10) + t1 = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t1, updated_at=t1, host=dest) From ed226cc01893c0d25393c48459871779de49787c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 2 Jun 2011 
14:51:30 -0700 Subject: [PATCH 61/65] switch zones to use utcnow --- nova/scheduler/zone_manager.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index 3ddf6f3c..3f483adf 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -17,16 +17,17 @@ ZoneManager oversees all communications with child Zones. """ +import datetime import novaclient import thread import traceback -from datetime import datetime from eventlet import greenpool from nova import db from nova import flags from nova import log as logging +from nova import utils FLAGS = flags.FLAGS flags.DEFINE_integer('zone_db_check_interval', 60, @@ -42,7 +43,7 @@ class ZoneState(object): self.name = None self.capabilities = None self.attempt = 0 - self.last_seen = datetime.min + self.last_seen = datetime.datetime.min self.last_exception = None self.last_exception_time = None @@ -56,7 +57,7 @@ class ZoneState(object): def update_metadata(self, zone_metadata): """Update zone metadata after successful communications with child zone.""" - self.last_seen = datetime.now() + self.last_seen = utils.utcnow() self.attempt = 0 self.name = zone_metadata.get("name", "n/a") self.capabilities = ", ".join(["%s=%s" % (k, v) @@ -72,7 +73,7 @@ class ZoneState(object): """Something went wrong. Check to see if zone should be marked as offline.""" self.last_exception = exception - self.last_exception_time = datetime.now() + self.last_exception_time = utils.utcnow() api_url = self.api_url logging.warning(_("'%(exception)s' error talking to " "zone %(api_url)s") % locals()) @@ -104,7 +105,7 @@ def _poll_zone(zone): class ZoneManager(object): """Keeps the zone states updated.""" def __init__(self): - self.last_zone_db_check = datetime.min + self.last_zone_db_check = datetime.datetime.min self.zone_states = {} # { : ZoneState } self.service_states = {} # { : { : { cap k : v }}} self.green_pool = greenpool.GreenPool() @@ -158,10 +159,10 @@ class ZoneManager(object): def ping(self, context=None): """Ping should be called periodically to update zone status.""" - diff = datetime.now() - self.last_zone_db_check + diff = utils.utcnow() - self.last_zone_db_check if diff.seconds >= FLAGS.zone_db_check_interval: logging.debug(_("Updating zone cache from db.")) - self.last_zone_db_check = datetime.now() + self.last_zone_db_check = utils.utcnow() self._refresh_from_db(context) self._poll_zones(context) From 9e3a7d0ed8cf5457d3774c3340eb369b07696526 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Fri, 3 Jun 2011 12:37:58 +0400 Subject: [PATCH 62/65] Fixed FakeLdapDriver, made it call LdapDriver.__init__ --- nova/auth/ldapdriver.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 95e31ae3..183f7a98 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -676,6 +676,7 @@ class LdapDriver(object): class FakeLdapDriver(LdapDriver): """Fake Ldap Auth driver""" - def __init__(self): # pylint: disable=W0231 - __import__('nova.auth.fakeldap') - self.ldap = sys.modules['nova.auth.fakeldap'] + def __init__(self): + import nova.auth.fakeldap + sys.modules['ldap'] = nova.auth.fakeldap + super(FakeLdapDriver, self).__init__() From 0dfe9c5e2ee7b98f66404099291b71e8afa85494 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Fri, 3 Jun 2011 13:39:22 +0400 Subject: [PATCH 63/65] Flush AuthManager's cache before each test. 
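AuthManager is a singleton and, since the move of its memcache client from thread-local storage to a class attribute in PATCH 46, every instance shares one client, so values cached during one test were leaking into the next. The setUp addition below resets that shared cache. It relies on the tests running with memcached_servers unset, which selects nova.fakememcache: the fake client keeps its data in a plain dict attribute named cache, roughly like this sketch (simplified for illustration, not a verbatim copy of nova/fakememcache.py):

    class Client(object):
        """In-process stand-in for memcache.Client, backed by a dict."""

        def __init__(self, addresses=None, debug=0):
            # Exposed so tests can reset shared state: client.cache = {}
            self.cache = {}

        def get(self, key):
            return self.cache.get(key)

        def set(self, key, value, time=0):
            self.cache[key] = value
            return True

Against a real memcached deployment the equivalent reset would be mc.flush_all(); assigning a fresh dict only works for the fake driver.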
---
 nova/tests/test_auth.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index f02dd94b..7d00bddf 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -86,6 +86,7 @@ class _AuthManagerBaseTestCase(test.TestCase):
         super(_AuthManagerBaseTestCase, self).setUp()
         self.flags(connection_type='fake')
         self.manager = manager.AuthManager(new=True)
+        self.manager.mc.cache = {}
 
     def test_create_and_find_user(self):
         with user_generator(self.manager):

From 0b58c7b95f4fd91431b15cb39d933d68d79df37f Mon Sep 17 00:00:00 2001
From: Justin Shepherd
Date: Fri, 3 Jun 2011 13:20:34 -0500
Subject: [PATCH 65/65] added 'nova-manage config list', which lists all of the flags and their values. Also alphabetized the list of available categories.

---
 bin/nova-manage | 37 ++++++++++++++++++++++++-------------
 1 file changed, 24 insertions(+), 13 deletions(-)

diff --git a/bin/nova-manage b/bin/nova-manage
index 5de4d9e8..fb381077 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -1081,24 +1081,35 @@ class ImageCommands(object):
         self._convert_images(machine_images)
 
 
+class ConfigCommands(object):
+    """Class for exposing the flags defined by flag_file(s)."""
+
+    def __init__(self):
+        pass
+
+    def list(self):
+        print FLAGS.FlagsIntoString()
+
+
 CATEGORIES = [
-    ('user', UserCommands),
     ('account', AccountCommands),
-    ('project', ProjectCommands),
-    ('role', RoleCommands),
-    ('shell', ShellCommands),
-    ('vpn', VpnCommands),
-    ('fixed', FixedIpCommands),
-    ('floating', FloatingIpCommands),
-    ('network', NetworkCommands),
-    ('vm', VmCommands),
-    ('service', ServiceCommands),
+    ('config', ConfigCommands),
     ('db', DbCommands),
-    ('volume', VolumeCommands),
+    ('fixed', FixedIpCommands),
+    ('flavor', InstanceTypeCommands),
+    ('floating', FloatingIpCommands),
     ('instance_type', InstanceTypeCommands),
     ('image', ImageCommands),
-    ('flavor', InstanceTypeCommands),
-    ('version', VersionCommands)]
+    ('network', NetworkCommands),
+    ('project', ProjectCommands),
+    ('role', RoleCommands),
+    ('service', ServiceCommands),
+    ('shell', ShellCommands),
+    ('user', UserCommands),
+    ('version', VersionCommands),
+    ('vm', VmCommands),
+    ('volume', VolumeCommands),
+    ('vpn', VpnCommands)]
 
 
 def lazy_match(name, key_value_tuples):