From ddab1d537b3b754e5706bcd984ca9795695c290c Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Thu, 11 Aug 2011 21:03:37 -0700
Subject: [PATCH 01/27] add keystone middlewares for ec2 api

---
 nova/tests/test_api.py |  3 ++-
 nova/wsgi.py           | 12 ------------
 2 files changed, 2 insertions(+), 13 deletions(-)

diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 2011ae756..526d1c490 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -32,6 +32,7 @@ from nova import context
 from nova import exception
 from nova import test
 from nova import wsgi
+from nova.api import auth
 from nova.api import ec2
 from nova.api.ec2 import apirequest
 from nova.api.ec2 import cloud
@@ -199,7 +200,7 @@ class ApiEc2TestCase(test.TestCase):
         # NOTE(vish): skipping the Authorizer
         roles = ['sysadmin', 'netadmin']
         ctxt = context.RequestContext('fake', 'fake', roles=roles)
-        self.app = wsgi.InjectContext(ctxt,
+        self.app = auth.InjectContext(ctxt,
                 ec2.Requestify(ec2.Authorizer(ec2.Executor()),
                                'nova.api.ec2.cloud.CloudController'))

diff --git a/nova/wsgi.py b/nova/wsgi.py
index c8ddb97d7..eae3afcb4 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -274,18 +274,6 @@ class Middleware(Application):
         return self.process_response(response)


-class InjectContext(Middleware):
-    """Add a 'nova.context' to WSGI environ."""
-    def __init__(self, context, *args, **kwargs):
-        self.context = context
-        super(InjectContext, self).__init__(*args, **kwargs)
-
-    @webob.dec.wsgify(RequestClass=Request)
-    def __call__(self, req):
-        req.environ['nova.context'] = self.context
-        return self.application
-
-
 class Debug(Middleware):
     """Helper class for debugging a WSGI application.

From cef339db31aa32f90a6326e6246f885c6bdb246f Mon Sep 17 00:00:00 2001
From: Ed Leafe
Date: Fri, 12 Aug 2011 10:01:04 -0500
Subject: [PATCH 02/27] start of day

---
 nova/scheduler/abstract_scheduler.py | 172 ++++--------
 nova/scheduler/base_scheduler.py     | 403 +++++++++++++++++++++++++++
 2 files changed, 455 insertions(+), 120 deletions(-)
 create mode 100644 nova/scheduler/base_scheduler.py

diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py
index eb924732a..a6457cc50 100644
--- a/nova/scheduler/abstract_scheduler.py
+++ b/nova/scheduler/abstract_scheduler.py
@@ -14,10 +14,10 @@
 # under the License.

 """
-The AbsractScheduler is a base class Scheduler for creating instances
-across zones. There are two expansion points to this class for:
-1. Assigning Weights to hosts for requested instances
-2. Filtering Hosts based on required instance capabilities
+The AbstractScheduler is an abstract class Scheduler for creating instances
+locally or across zones. Two methods should be overridden in order to
+customize the behavior: filter_hosts() and weigh_hosts(). The default
+behavior is to simply select all hosts and weight them the same.
""" import operator @@ -185,13 +185,11 @@ class AbstractScheduler(driver.Scheduler): for zone_id, result in child_results: if not result: continue - assert isinstance(zone_id, int) for zone_rec in zones: if zone_rec['id'] != zone_id: continue - for item in result: try: offset = zone_rec['weight_offset'] @@ -202,10 +200,10 @@ class AbstractScheduler(driver.Scheduler): item['raw_weight'] = raw_weight except KeyError: LOG.exception(_("Bad child zone scaling values " - "for Zone: %(zone_id)s") % locals()) + "for Zone: %(zone_id)s") % locals()) def schedule_run_instance(self, context, instance_id, request_spec, - *args, **kwargs): + *args, **kwargs): """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being passed in to see if this is a request to: @@ -214,13 +212,11 @@ class AbstractScheduler(driver.Scheduler): to simply create the instance (either in this zone or a child zone). """ - # TODO(sandy): We'll have to look for richer specs at some point. - blob = request_spec.get('blob') if blob: self._provision_resource(context, request_spec, instance_id, - request_spec, kwargs) + request_spec, kwargs) return None num_instances = request_spec.get('num_instances', 1) @@ -238,7 +234,7 @@ class AbstractScheduler(driver.Scheduler): build_plan_item = build_plan.pop(0) self._provision_resource(context, build_plan_item, instance_id, - request_spec, kwargs) + request_spec, kwargs) # Returning None short-circuits the routing to Compute (since # we've already done it here) @@ -251,58 +247,49 @@ class AbstractScheduler(driver.Scheduler): anything about the children. """ return self._schedule(context, "compute", request_spec, - *args, **kwargs) + *args, **kwargs) - # TODO(sandy): We're only focused on compute instances right now, - # so we don't implement the default "schedule()" method required - # of Schedulers. def schedule(self, context, topic, request_spec, *args, **kwargs): """The schedule() contract requires we return the one best-suited host for this request. """ - raise driver.NoValidHost(_('No hosts were available')) + # TODO(sandy): We're only focused on compute instances right now, + # so we don't implement the default "schedule()" method required + # of Schedulers. + msg = _("No host selection for %s defined." % topic) + raise driver.NoValidHost(msg) def _schedule(self, context, topic, request_spec, *args, **kwargs): """Returns a list of hosts that meet the required specs, ordered by their fitness. """ - if topic != "compute": - raise NotImplementedError(_("Scheduler only understands" - " Compute nodes (for now)")) + msg = _("Scheduler only understands Compute nodes (for now)") + raise NotImplementedError(msg) - num_instances = request_spec.get('num_instances', 1) - instance_type = request_spec['instance_type'] + # Get all available hosts. + all_hosts = self.zone_manager.service_states.iteritems() + print "-"*88 + ss = self.zone_manager.service_states + print ss + print "KEYS", ss.keys() + print "-"*88 - weighted = [] - host_list = None + unfiltered_hosts = [(host, services[host]) + for host, services in all_hosts + if topic in services[host]] - for i in xrange(num_instances): - # Filter local hosts based on requirements ... - # - # The first pass through here will pass 'None' as the - # host_list.. which tells the filter to build the full - # list of hosts. - # On a 2nd pass, the filter can modify the host_list with - # any updates it needs to make based on resources that - # may have been consumed from a previous build.. 
- host_list = self.filter_hosts(topic, request_spec, host_list) - if not host_list: - LOG.warn(_("Filter returned no hosts after processing " - "%(i)d of %(num_instances)d instances") % locals()) - break + # Filter local hosts based on requirements ... + filtered_hosts = self.filter_hosts(topic, request_spec, host_list) + if not filtered_hosts: + LOG.warn(_("No hosts available")) + return [] - # then weigh the selected hosts. - # weighted = [{weight=weight, hostname=hostname, - # capabilities=capabs}, ...] - weights = self.weigh_hosts(topic, request_spec, host_list) - weights.sort(key=operator.itemgetter('weight')) - best_weight = weights[0] - weighted.append(best_weight) - self.consume_resources(topic, best_weight['capabilities'], - instance_type) - - # Next, tack on the best weights from the child zones ... + # weigh the selected hosts. + # weighted_hosts = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] + weighted_hosts = self.weigh_hosts(topic, request_spec, filtered_hosts) + # Next, tack on the host weights from the child zones json_spec = json.dumps(request_spec) all_zones = db.zone_get_all(context) child_results = self._call_zone_method(context, "select", @@ -314,14 +301,13 @@ class AbstractScheduler(driver.Scheduler): # it later if needed. This implicitly builds a zone # path structure. host_dict = {"weight": weighting["weight"], - "child_zone": child_zone, - "child_blob": weighting["blob"]} - weighted.append(host_dict) + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted_hosts.append(host_dict) + weighted_hosts.sort(key=operator.itemgetter('weight')) + return weighted_hosts - weighted.sort(key=operator.itemgetter('weight')) - return weighted - - def compute_filter(self, hostname, capabilities, request_spec): + def basic_ram_filter(self, hostname, capabilities, request_spec): """Return whether or not we can schedule to this compute node. Derived classes should override this and return True if the host is acceptable for scheduling. @@ -330,74 +316,20 @@ class AbstractScheduler(driver.Scheduler): requested_mem = instance_type['memory_mb'] * 1024 * 1024 return capabilities['host_memory_free'] >= requested_mem - def hold_filter_hosts(self, topic, request_spec, hosts=None): - """Filter the full host list (from the ZoneManager)""" - # NOTE(dabo): The logic used by the current _schedule() method - # is incorrect. Since this task is just to refactor the classes, - # I'm not fixing the logic now - that will be the next task. - # So for now this method is just renamed; afterwards this will - # become the filter_hosts() method, and the one below will - # be removed. - filter_name = request_spec.get('filter', None) - # Make sure that the requested filter is legitimate. - selected_filter = host_filter.choose_host_filter(filter_name) - - # TODO(sandy): We're only using InstanceType-based specs - # currently. Later we'll need to snoop for more detailed - # host filter requests. - instance_type = request_spec['instance_type'] - name, query = selected_filter.instance_type_to_filter(instance_type) - return selected_filter.filter_hosts(self.zone_manager, query) - def filter_hosts(self, topic, request_spec, host_list=None): - """Return a list of hosts which are acceptable for scheduling. - Return value should be a list of (hostname, capability_dict)s. - Derived classes may override this, but may find the - '_filter' function more appropriate. + """Filter the full host list returned from the ZoneManager. 
By default, + this method only applies the basic_ram_filter(), meaning all hosts + with at least enough RAM for the requested instance are returned. + + Override in subclasses to provide greater selectivity. """ - def _default_filter(self, hostname, capabilities, request_spec): - """Default filter function if there's no _filter""" - # NOTE(sirp): The default logic is the equivalent to - # AllHostsFilter - return True - - filter_func = getattr(self, '%s_filter' % topic, _default_filter) - - if host_list is None: - first_run = True - host_list = self.zone_manager.service_states.iteritems() - else: - first_run = False - - filtered_hosts = [] - for host, services in host_list: - if first_run: - if topic not in services: - continue - services = services[topic] - if filter_func(host, services, request_spec): - filtered_hosts.append((host, services)) - return filtered_hosts + return [(host, services) for host, services in host_list + if basic_ram_filter(host, services, request_spec)] def weigh_hosts(self, topic, request_spec, hosts): - """Derived classes may override this to provide more sophisticated - scheduling objectives + """This version assigns a weight of 1 to all hosts, making selection + of any host basically a random event. Override this method in your + subclass to add logic to prefer one potential host over another. """ - # NOTE(sirp): The default logic is the same as the NoopCostFunction return [dict(weight=1, hostname=hostname, capabilities=capabilities) for hostname, capabilities in hosts] - - def compute_consume(self, capabilities, instance_type): - """Consume compute resources for selected host""" - - requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 - capabilities['host_memory_free'] -= requested_mem - - def consume_resources(self, topic, capabilities, instance_type): - """Consume resources for a specific host. 'host' is a tuple - of the hostname and the services""" - - consume_func = getattr(self, '%s_consume' % topic, None) - if not consume_func: - return - consume_func(capabilities, instance_type) diff --git a/nova/scheduler/base_scheduler.py b/nova/scheduler/base_scheduler.py new file mode 100644 index 000000000..43a6ab2b1 --- /dev/null +++ b/nova/scheduler/base_scheduler.py @@ -0,0 +1,403 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The BaseScheduler is the base class Scheduler for creating instances +across zones. There are two expansion points to this class for: +1. Assigning Weights to hosts for requested instances +2. 
Filtering Hosts based on required instance capabilities +""" + +import operator +import json + +import M2Crypto + +from novaclient import v1_1 as novaclient +from novaclient import exceptions as novaclient_exceptions + +from nova import crypto +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import rpc + +from nova.compute import api as compute_api +from nova.scheduler import api +from nova.scheduler import driver + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.scheduler.abstract_scheduler') + + +class InvalidBlob(exception.NovaException): + message = _("Ill-formed or incorrectly routed 'blob' data sent " + "to instance create request.") + + +class AbstractScheduler(driver.Scheduler): + """Base class for creating Schedulers that can work across any nova + deployment, from simple designs to multiply-nested zones. + """ + + def _call_zone_method(self, context, method, specs, zones): + """Call novaclient zone method. Broken out for testing.""" + return api.call_zone_method(context, method, specs=specs, zones=zones) + + def _provision_resource_locally(self, context, build_plan_item, + request_spec, kwargs): + """Create the requested resource in this Zone.""" + host = build_plan_item['hostname'] + base_options = request_spec['instance_properties'] + image = request_spec['image'] + + # TODO(sandy): I guess someone needs to add block_device_mapping + # support at some point? Also, OS API has no concept of security + # groups. + instance = compute_api.API().create_db_entry_for_new_instance(context, + image, base_options, None, []) + + instance_id = instance['id'] + kwargs['instance_id'] = instance_id + + rpc.cast(context, + db.queue_get_for(context, "compute", host), + {"method": "run_instance", + "args": kwargs}) + LOG.debug(_("Provisioning locally via compute node %(host)s") + % locals()) + + def _decrypt_blob(self, blob): + """Returns the decrypted blob or None if invalid. Broken out + for testing.""" + decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) + try: + json_entry = decryptor(blob) + return json.dumps(json_entry) + except M2Crypto.EVP.EVPError: + pass + return None + + def _ask_child_zone_to_create_instance(self, context, zone_info, + request_spec, kwargs): + """Once we have determined that the request should go to one + of our children, we need to fabricate a new POST /servers/ + call with the same parameters that were passed into us. + + Note that we have to reverse engineer from our args to get back the + image, flavor, ipgroup, etc. since the original call could have + come in from EC2 (which doesn't use these things).""" + + instance_type = request_spec['instance_type'] + instance_properties = request_spec['instance_properties'] + + name = instance_properties['display_name'] + image_ref = instance_properties['image_ref'] + meta = instance_properties['metadata'] + flavor_id = instance_type['flavorid'] + reservation_id = instance_properties['reservation_id'] + + files = kwargs['injected_files'] + ipgroup = None # Not supported in OS API ... yet + + child_zone = zone_info['child_zone'] + child_blob = zone_info['child_blob'] + zone = db.zone_get(context, child_zone) + url = zone.api_url + LOG.debug(_("Forwarding instance create call to child zone %(url)s" + ". 
ReservationID=%(reservation_id)s") + % locals()) + nova = None + try: + nova = novaclient.Client(zone.username, zone.password, None, url) + nova.authenticate() + except novaclient_exceptions.BadRequest, e: + raise exception.NotAuthorized(_("Bad credentials attempting " + "to talk to zone at %(url)s.") % locals()) + + nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, + child_blob, reservation_id=reservation_id) + + def _provision_resource_from_blob(self, context, build_plan_item, + instance_id, request_spec, kwargs): + """Create the requested resource locally or in a child zone + based on what is stored in the zone blob info. + + Attempt to decrypt the blob to see if this request is: + 1. valid, and + 2. intended for this zone or a child zone. + + Note: If we have "blob" that means the request was passed + into us from a parent zone. If we have "child_blob" that + means we gathered the info from one of our children. + It's possible that, when we decrypt the 'blob' field, it + contains "child_blob" data. In which case we forward the + request.""" + + host_info = None + if "blob" in build_plan_item: + # Request was passed in from above. Is it for us? + host_info = self._decrypt_blob(build_plan_item['blob']) + elif "child_blob" in build_plan_item: + # Our immediate child zone provided this info ... + host_info = build_plan_item + + if not host_info: + raise InvalidBlob() + + # Valid data ... is it for us? + if 'child_zone' in host_info and 'child_blob' in host_info: + self._ask_child_zone_to_create_instance(context, host_info, + request_spec, kwargs) + else: + self._provision_resource_locally(context, host_info, request_spec, + kwargs) + + def _provision_resource(self, context, build_plan_item, instance_id, + request_spec, kwargs): + """Create the requested resource in this Zone or a child zone.""" + if "hostname" in build_plan_item: + self._provision_resource_locally(context, build_plan_item, + request_spec, kwargs) + return + + self._provision_resource_from_blob(context, build_plan_item, + instance_id, request_spec, kwargs) + + def _adjust_child_weights(self, child_results, zones): + """Apply the Scale and Offset values from the Zone definition + to adjust the weights returned from the child zones. Alters + child_results in place. + """ + for zone_id, result in child_results: + if not result: + continue + + assert isinstance(zone_id, int) + + for zone_rec in zones: + if zone_rec['id'] != zone_id: + continue + + for item in result: + try: + offset = zone_rec['weight_offset'] + scale = zone_rec['weight_scale'] + raw_weight = item['weight'] + cooked_weight = offset + scale * raw_weight + item['weight'] = cooked_weight + item['raw_weight'] = raw_weight + except KeyError: + LOG.exception(_("Bad child zone scaling values " + "for Zone: %(zone_id)s") % locals()) + + def schedule_run_instance(self, context, instance_id, request_spec, + *args, **kwargs): + """This method is called from nova.compute.api to provision + an instance. However we need to look at the parameters being + passed in to see if this is a request to: + 1. Create a Build Plan and then provision, or + 2. Use the Build Plan information in the request parameters + to simply create the instance (either in this zone or + a child zone). + """ + + # TODO(sandy): We'll have to look for richer specs at some point. 
+ + blob = request_spec.get('blob') + if blob: + self._provision_resource(context, request_spec, instance_id, + request_spec, kwargs) + return None + + num_instances = request_spec.get('num_instances', 1) + LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % + locals()) + + # Create build plan and provision ... + build_plan = self.select(context, request_spec) + if not build_plan: + raise driver.NoValidHost(_('No hosts were available')) + + for num in xrange(num_instances): + if not build_plan: + break + + build_plan_item = build_plan.pop(0) + self._provision_resource(context, build_plan_item, instance_id, + request_spec, kwargs) + + # Returning None short-circuits the routing to Compute (since + # we've already done it here) + return None + + def select(self, context, request_spec, *args, **kwargs): + """Select returns a list of weights and zone/host information + corresponding to the best hosts to service the request. Any + child zone information has been encrypted so as not to reveal + anything about the children. + """ + return self._schedule(context, "compute", request_spec, + *args, **kwargs) + + # TODO(sandy): We're only focused on compute instances right now, + # so we don't implement the default "schedule()" method required + # of Schedulers. + def schedule(self, context, topic, request_spec, *args, **kwargs): + """The schedule() contract requires we return the one + best-suited host for this request. + """ + raise driver.NoValidHost(_('No hosts were available')) + + def _schedule(self, context, topic, request_spec, *args, **kwargs): + """Returns a list of hosts that meet the required specs, + ordered by their fitness. + """ + + if topic != "compute": + raise NotImplementedError(_("Scheduler only understands" + " Compute nodes (for now)")) + + num_instances = request_spec.get('num_instances', 1) + instance_type = request_spec['instance_type'] + + weighted = [] + host_list = None + + for i in xrange(num_instances): + # Filter local hosts based on requirements ... + # + # The first pass through here will pass 'None' as the + # host_list.. which tells the filter to build the full + # list of hosts. + # On a 2nd pass, the filter can modify the host_list with + # any updates it needs to make based on resources that + # may have been consumed from a previous build.. + host_list = self.filter_hosts(topic, request_spec, host_list) + if not host_list: + LOG.warn(_("Filter returned no hosts after processing " + "%(i)d of %(num_instances)d instances") % locals()) + break + + # then weigh the selected hosts. + # weighted = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] + weights = self.weigh_hosts(topic, request_spec, host_list) + weights.sort(key=operator.itemgetter('weight')) + best_weight = weights[0] + weighted.append(best_weight) + self.consume_resources(topic, best_weight['capabilities'], + instance_type) + + # Next, tack on the best weights from the child zones ... + json_spec = json.dumps(request_spec) + all_zones = db.zone_get_all(context) + child_results = self._call_zone_method(context, "select", + specs=json_spec, zones=all_zones) + self._adjust_child_weights(child_results, all_zones) + for child_zone, result in child_results: + for weighting in result: + # Remember the child_zone so we can get back to + # it later if needed. This implicitly builds a zone + # path structure. 
+ host_dict = {"weight": weighting["weight"], + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted.append(host_dict) + + weighted.sort(key=operator.itemgetter('weight')) + return weighted + + def compute_filter(self, hostname, capabilities, request_spec): + """Return whether or not we can schedule to this compute node. + Derived classes should override this and return True if the host + is acceptable for scheduling. + """ + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem + + def hold_filter_hosts(self, topic, request_spec, hosts=None): + """Filter the full host list (from the ZoneManager)""" + # NOTE(dabo): The logic used by the current _schedule() method + # is incorrect. Since this task is just to refactor the classes, + # I'm not fixing the logic now - that will be the next task. + # So for now this method is just renamed; afterwards this will + # become the filter_hosts() method, and the one below will + # be removed. + filter_name = request_spec.get('filter', None) + # Make sure that the requested filter is legitimate. + selected_filter = host_filter.choose_host_filter(filter_name) + + # TODO(sandy): We're only using InstanceType-based specs + # currently. Later we'll need to snoop for more detailed + # host filter requests. + instance_type = request_spec['instance_type'] + name, query = selected_filter.instance_type_to_filter(instance_type) + return selected_filter.filter_hosts(self.zone_manager, query) + + def filter_hosts(self, topic, request_spec, host_list=None): + """Return a list of hosts which are acceptable for scheduling. + Return value should be a list of (hostname, capability_dict)s. + Derived classes may override this, but may find the + '_filter' function more appropriate. + """ + def _default_filter(self, hostname, capabilities, request_spec): + """Default filter function if there's no _filter""" + # NOTE(sirp): The default logic is the equivalent to + # AllHostsFilter + return True + + filter_func = getattr(self, '%s_filter' % topic, _default_filter) + + if host_list is None: + first_run = True + host_list = self.zone_manager.service_states.iteritems() + else: + first_run = False + + filtered_hosts = [] + for host, services in host_list: + if first_run: + if topic not in services: + continue + services = services[topic] + if filter_func(host, services, request_spec): + filtered_hosts.append((host, services)) + return filtered_hosts + + def weigh_hosts(self, topic, request_spec, hosts): + """Derived classes may override this to provide more sophisticated + scheduling objectives + """ + # NOTE(sirp): The default logic is the same as the NoopCostFunction + return [dict(weight=1, hostname=hostname, capabilities=capabilities) + for hostname, capabilities in hosts] + + def compute_consume(self, capabilities, instance_type): + """Consume compute resources for selected host""" + + requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 + capabilities['host_memory_free'] -= requested_mem + + def consume_resources(self, topic, capabilities, instance_type): + """Consume resources for a specific host. 
'host' is a tuple + of the hostname and the services""" + + consume_func = getattr(self, '%s_consume' % topic, None) + if not consume_func: + return + consume_func(capabilities, instance_type) From bc295c6cb1a2a9505ee1678883ca8e3814c7d773 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 12 Aug 2011 13:58:26 -0500 Subject: [PATCH 03/27] Created the filters directory in nova/scheduler --- nova/scheduler/__init__.py | 2 + nova/scheduler/abstract_scheduler.py | 30 +- nova/scheduler/base_scheduler.py | 312 +---------------- nova/scheduler/filters/__init__.py | 18 + nova/scheduler/filters/abstract_filter.py | 87 +++++ nova/scheduler/filters/all_hosts_filter.py | 31 ++ .../scheduler/filters/instance_type_filter.py | 86 +++++ nova/scheduler/filters/json_filter.py | 141 ++++++++ nova/scheduler/host_filter.py | 314 ------------------ .../scheduler/test_abstract_scheduler.py | 3 + nova/tests/scheduler/test_host_filter.py | 4 +- .../scheduler/test_least_cost_scheduler.py | 7 +- 12 files changed, 391 insertions(+), 644 deletions(-) create mode 100644 nova/scheduler/filters/__init__.py create mode 100644 nova/scheduler/filters/abstract_filter.py create mode 100644 nova/scheduler/filters/all_hosts_filter.py create mode 100644 nova/scheduler/filters/instance_type_filter.py create mode 100644 nova/scheduler/filters/json_filter.py delete mode 100644 nova/scheduler/host_filter.py diff --git a/nova/scheduler/__init__.py b/nova/scheduler/__init__.py index 8359a7aeb..25078f015 100644 --- a/nova/scheduler/__init__.py +++ b/nova/scheduler/__init__.py @@ -21,5 +21,7 @@ .. automodule:: nova.scheduler :platform: Unix :synopsis: Module that picks a compute node to run a VM instance. +.. moduleauthor:: Sandy Walsh +.. moduleauthor:: Ed Leafe .. moduleauthor:: Chris Behrens """ diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index a6457cc50..a0734f322 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -269,18 +269,13 @@ class AbstractScheduler(driver.Scheduler): # Get all available hosts. all_hosts = self.zone_manager.service_states.iteritems() - print "-"*88 - ss = self.zone_manager.service_states - print ss - print "KEYS", ss.keys() - print "-"*88 - - unfiltered_hosts = [(host, services[host]) + unfiltered_hosts = [(host, services[topic]) for host, services in all_hosts - if topic in services[host]] + if topic in services] # Filter local hosts based on requirements ... - filtered_hosts = self.filter_hosts(topic, request_spec, host_list) + filtered_hosts = self.filter_hosts(topic, request_spec, + unfiltered_hosts) if not filtered_hosts: LOG.warn(_("No hosts available")) return [] @@ -307,22 +302,19 @@ class AbstractScheduler(driver.Scheduler): weighted_hosts.sort(key=operator.itemgetter('weight')) return weighted_hosts - def basic_ram_filter(self, hostname, capabilities, request_spec): - """Return whether or not we can schedule to this compute node. - Derived classes should override this and return True if the host - is acceptable for scheduling. - """ - instance_type = request_spec['instance_type'] - requested_mem = instance_type['memory_mb'] * 1024 * 1024 - return capabilities['host_memory_free'] >= requested_mem - - def filter_hosts(self, topic, request_spec, host_list=None): + def filter_hosts(self, topic, request_spec, host_list): """Filter the full host list returned from the ZoneManager. 
By default, this method only applies the basic_ram_filter(), meaning all hosts with at least enough RAM for the requested instance are returned. Override in subclasses to provide greater selectivity. """ + def basic_ram_filter(hostname, capabilities, request_spec): + """Only return hosts with sufficient available RAM.""" + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem + return [(host, services) for host, services in host_list if basic_ram_filter(host, services, request_spec)] diff --git a/nova/scheduler/base_scheduler.py b/nova/scheduler/base_scheduler.py index 43a6ab2b1..e14ee349e 100644 --- a/nova/scheduler/base_scheduler.py +++ b/nova/scheduler/base_scheduler.py @@ -20,324 +20,22 @@ across zones. There are two expansion points to this class for: 2. Filtering Hosts based on required instance capabilities """ -import operator -import json - -import M2Crypto - -from novaclient import v1_1 as novaclient -from novaclient import exceptions as novaclient_exceptions - -from nova import crypto -from nova import db -from nova import exception from nova import flags from nova import log as logging -from nova import rpc -from nova.compute import api as compute_api -from nova.scheduler import api -from nova.scheduler import driver +from nova.scheduler import abstract_scheduler +from nova.scheduler import host_filter FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.scheduler.abstract_scheduler') +LOG = logging.getLogger('nova.scheduler.base_scheduler') -class InvalidBlob(exception.NovaException): - message = _("Ill-formed or incorrectly routed 'blob' data sent " - "to instance create request.") - - -class AbstractScheduler(driver.Scheduler): +class BaseScheduler(abstract_scheduler.AbstractScheduler): """Base class for creating Schedulers that can work across any nova deployment, from simple designs to multiply-nested zones. """ - - def _call_zone_method(self, context, method, specs, zones): - """Call novaclient zone method. Broken out for testing.""" - return api.call_zone_method(context, method, specs=specs, zones=zones) - - def _provision_resource_locally(self, context, build_plan_item, - request_spec, kwargs): - """Create the requested resource in this Zone.""" - host = build_plan_item['hostname'] - base_options = request_spec['instance_properties'] - image = request_spec['image'] - - # TODO(sandy): I guess someone needs to add block_device_mapping - # support at some point? Also, OS API has no concept of security - # groups. - instance = compute_api.API().create_db_entry_for_new_instance(context, - image, base_options, None, []) - - instance_id = instance['id'] - kwargs['instance_id'] = instance_id - - rpc.cast(context, - db.queue_get_for(context, "compute", host), - {"method": "run_instance", - "args": kwargs}) - LOG.debug(_("Provisioning locally via compute node %(host)s") - % locals()) - - def _decrypt_blob(self, blob): - """Returns the decrypted blob or None if invalid. Broken out - for testing.""" - decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) - try: - json_entry = decryptor(blob) - return json.dumps(json_entry) - except M2Crypto.EVP.EVPError: - pass - return None - - def _ask_child_zone_to_create_instance(self, context, zone_info, - request_spec, kwargs): - """Once we have determined that the request should go to one - of our children, we need to fabricate a new POST /servers/ - call with the same parameters that were passed into us. 
- - Note that we have to reverse engineer from our args to get back the - image, flavor, ipgroup, etc. since the original call could have - come in from EC2 (which doesn't use these things).""" - - instance_type = request_spec['instance_type'] - instance_properties = request_spec['instance_properties'] - - name = instance_properties['display_name'] - image_ref = instance_properties['image_ref'] - meta = instance_properties['metadata'] - flavor_id = instance_type['flavorid'] - reservation_id = instance_properties['reservation_id'] - - files = kwargs['injected_files'] - ipgroup = None # Not supported in OS API ... yet - - child_zone = zone_info['child_zone'] - child_blob = zone_info['child_blob'] - zone = db.zone_get(context, child_zone) - url = zone.api_url - LOG.debug(_("Forwarding instance create call to child zone %(url)s" - ". ReservationID=%(reservation_id)s") - % locals()) - nova = None - try: - nova = novaclient.Client(zone.username, zone.password, None, url) - nova.authenticate() - except novaclient_exceptions.BadRequest, e: - raise exception.NotAuthorized(_("Bad credentials attempting " - "to talk to zone at %(url)s.") % locals()) - - nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, - child_blob, reservation_id=reservation_id) - - def _provision_resource_from_blob(self, context, build_plan_item, - instance_id, request_spec, kwargs): - """Create the requested resource locally or in a child zone - based on what is stored in the zone blob info. - - Attempt to decrypt the blob to see if this request is: - 1. valid, and - 2. intended for this zone or a child zone. - - Note: If we have "blob" that means the request was passed - into us from a parent zone. If we have "child_blob" that - means we gathered the info from one of our children. - It's possible that, when we decrypt the 'blob' field, it - contains "child_blob" data. In which case we forward the - request.""" - - host_info = None - if "blob" in build_plan_item: - # Request was passed in from above. Is it for us? - host_info = self._decrypt_blob(build_plan_item['blob']) - elif "child_blob" in build_plan_item: - # Our immediate child zone provided this info ... - host_info = build_plan_item - - if not host_info: - raise InvalidBlob() - - # Valid data ... is it for us? - if 'child_zone' in host_info and 'child_blob' in host_info: - self._ask_child_zone_to_create_instance(context, host_info, - request_spec, kwargs) - else: - self._provision_resource_locally(context, host_info, request_spec, - kwargs) - - def _provision_resource(self, context, build_plan_item, instance_id, - request_spec, kwargs): - """Create the requested resource in this Zone or a child zone.""" - if "hostname" in build_plan_item: - self._provision_resource_locally(context, build_plan_item, - request_spec, kwargs) - return - - self._provision_resource_from_blob(context, build_plan_item, - instance_id, request_spec, kwargs) - - def _adjust_child_weights(self, child_results, zones): - """Apply the Scale and Offset values from the Zone definition - to adjust the weights returned from the child zones. Alters - child_results in place. 
- """ - for zone_id, result in child_results: - if not result: - continue - - assert isinstance(zone_id, int) - - for zone_rec in zones: - if zone_rec['id'] != zone_id: - continue - - for item in result: - try: - offset = zone_rec['weight_offset'] - scale = zone_rec['weight_scale'] - raw_weight = item['weight'] - cooked_weight = offset + scale * raw_weight - item['weight'] = cooked_weight - item['raw_weight'] = raw_weight - except KeyError: - LOG.exception(_("Bad child zone scaling values " - "for Zone: %(zone_id)s") % locals()) - - def schedule_run_instance(self, context, instance_id, request_spec, - *args, **kwargs): - """This method is called from nova.compute.api to provision - an instance. However we need to look at the parameters being - passed in to see if this is a request to: - 1. Create a Build Plan and then provision, or - 2. Use the Build Plan information in the request parameters - to simply create the instance (either in this zone or - a child zone). - """ - - # TODO(sandy): We'll have to look for richer specs at some point. - - blob = request_spec.get('blob') - if blob: - self._provision_resource(context, request_spec, instance_id, - request_spec, kwargs) - return None - - num_instances = request_spec.get('num_instances', 1) - LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % - locals()) - - # Create build plan and provision ... - build_plan = self.select(context, request_spec) - if not build_plan: - raise driver.NoValidHost(_('No hosts were available')) - - for num in xrange(num_instances): - if not build_plan: - break - - build_plan_item = build_plan.pop(0) - self._provision_resource(context, build_plan_item, instance_id, - request_spec, kwargs) - - # Returning None short-circuits the routing to Compute (since - # we've already done it here) - return None - - def select(self, context, request_spec, *args, **kwargs): - """Select returns a list of weights and zone/host information - corresponding to the best hosts to service the request. Any - child zone information has been encrypted so as not to reveal - anything about the children. - """ - return self._schedule(context, "compute", request_spec, - *args, **kwargs) - - # TODO(sandy): We're only focused on compute instances right now, - # so we don't implement the default "schedule()" method required - # of Schedulers. - def schedule(self, context, topic, request_spec, *args, **kwargs): - """The schedule() contract requires we return the one - best-suited host for this request. - """ - raise driver.NoValidHost(_('No hosts were available')) - - def _schedule(self, context, topic, request_spec, *args, **kwargs): - """Returns a list of hosts that meet the required specs, - ordered by their fitness. - """ - - if topic != "compute": - raise NotImplementedError(_("Scheduler only understands" - " Compute nodes (for now)")) - - num_instances = request_spec.get('num_instances', 1) - instance_type = request_spec['instance_type'] - - weighted = [] - host_list = None - - for i in xrange(num_instances): - # Filter local hosts based on requirements ... - # - # The first pass through here will pass 'None' as the - # host_list.. which tells the filter to build the full - # list of hosts. - # On a 2nd pass, the filter can modify the host_list with - # any updates it needs to make based on resources that - # may have been consumed from a previous build.. 
- host_list = self.filter_hosts(topic, request_spec, host_list) - if not host_list: - LOG.warn(_("Filter returned no hosts after processing " - "%(i)d of %(num_instances)d instances") % locals()) - break - - # then weigh the selected hosts. - # weighted = [{weight=weight, hostname=hostname, - # capabilities=capabs}, ...] - weights = self.weigh_hosts(topic, request_spec, host_list) - weights.sort(key=operator.itemgetter('weight')) - best_weight = weights[0] - weighted.append(best_weight) - self.consume_resources(topic, best_weight['capabilities'], - instance_type) - - # Next, tack on the best weights from the child zones ... - json_spec = json.dumps(request_spec) - all_zones = db.zone_get_all(context) - child_results = self._call_zone_method(context, "select", - specs=json_spec, zones=all_zones) - self._adjust_child_weights(child_results, all_zones) - for child_zone, result in child_results: - for weighting in result: - # Remember the child_zone so we can get back to - # it later if needed. This implicitly builds a zone - # path structure. - host_dict = {"weight": weighting["weight"], - "child_zone": child_zone, - "child_blob": weighting["blob"]} - weighted.append(host_dict) - - weighted.sort(key=operator.itemgetter('weight')) - return weighted - - def compute_filter(self, hostname, capabilities, request_spec): - """Return whether or not we can schedule to this compute node. - Derived classes should override this and return True if the host - is acceptable for scheduling. - """ - instance_type = request_spec['instance_type'] - requested_mem = instance_type['memory_mb'] * 1024 * 1024 - return capabilities['host_memory_free'] >= requested_mem - - def hold_filter_hosts(self, topic, request_spec, hosts=None): + def filter_hosts(self, topic, request_spec, hosts=None): """Filter the full host list (from the ZoneManager)""" - # NOTE(dabo): The logic used by the current _schedule() method - # is incorrect. Since this task is just to refactor the classes, - # I'm not fixing the logic now - that will be the next task. - # So for now this method is just renamed; afterwards this will - # become the filter_hosts() method, and the one below will - # be removed. filter_name = request_spec.get('filter', None) # Make sure that the requested filter is legitimate. selected_filter = host_filter.choose_host_filter(filter_name) diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py new file mode 100644 index 000000000..27160ca0a --- /dev/null +++ b/nova/scheduler/filters/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from all_hosts_filter import AllHostsFilter
+from instance_type_filter import InstanceTypeFilter
+from json_filter import JsonFilter
diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py
new file mode 100644
index 000000000..05982820f
--- /dev/null
+++ b/nova/scheduler/filters/abstract_filter.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The Host Filter classes are a way to ensure that only hosts that are
+appropriate are considered when creating a new instance. Hosts that are
+either incompatible or insufficient to accept a newly-requested instance
+are removed by Host Filter classes from consideration. Those that pass
+the filter are then passed on for weighting or other process for ordering.
+
+Three filters are included: AllHosts, Flavor & JSON. AllHosts just
+returns the full, unfiltered list of hosts. Flavor is a hard coded
+matching mechanism based on flavor criteria and JSON is an ad-hoc
+filter grammar.
+
+Why JSON? The requests for instances may come in through the
+REST interface from a user or a parent Zone.
+Currently Flavors and/or InstanceTypes are used for
+specifying the type of instance desired. Specific Nova users have
+noted a need for a more expressive way of specifying instances.
+Since we don't want to get into building a full DSL, this is a simple
+form as an example of how this could be done. In reality, most
+consumers will use the more rigid filters such as FlavorFilter.
+"""
+
+import json
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+
+import nova.scheduler
+
+
+LOG = logging.getLogger('nova.scheduler.host_filter')
+FLAGS = flags.FLAGS
+flags.DEFINE_string('default_host_filter',
+                    'nova.scheduler.host_filter.AllHostsFilter',
+                    'Which filter to use for filtering hosts')
+
+
+class AbstractHostFilter(object):
+    """Base class for host filters."""
+    def instance_type_to_filter(self, instance_type):
+        """Convert instance_type into a filter for most common use-case."""
+        raise NotImplementedError()
+
+    def filter_hosts(self, zone_manager, query):
+        """Return a list of hosts that fulfill the filter."""
+        raise NotImplementedError()
+
+    def _full_name(self):
+        """module.classname of the filter."""
+        return "%s.%s" % (self.__module__, self.__class__.__name__)
+
+
+def _get_filters():
+    from nova.scheduler import filters
+    return [itm for itm in dir(filters)
+            if issubclass(itm, AbstractHostFilter)]
+
+
+def choose_host_filter(filter_name=None):
+    """Since the caller may specify which filter to use we need
+    to have an authoritative list of what is permissible. This
+    function checks the filter name against a predefined set
+    of acceptable filters.
+ """ + if not filter_name: + filter_name = FLAGS.default_host_filter + for filter_class in _get_filters(): + host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) + if host_match == filter_name: + return filter_class() + raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) diff --git a/nova/scheduler/filters/all_hosts_filter.py b/nova/scheduler/filters/all_hosts_filter.py new file mode 100644 index 000000000..bc4acfd1a --- /dev/null +++ b/nova/scheduler/filters/all_hosts_filter.py @@ -0,0 +1,31 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import nova.scheduler + + +class AllHostsFilter(nova.scheduler.host_filter.AbstractHostFilter): + """NOP host filter. Returns all hosts in ZoneManager.""" + def instance_type_to_filter(self, instance_type): + """Return anything to prevent base-class from raising + exception. + """ + return (self._full_name(), instance_type) + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts from ZoneManager list.""" + return [(host, services) + for host, services in zone_manager.service_states.iteritems()] diff --git a/nova/scheduler/filters/instance_type_filter.py b/nova/scheduler/filters/instance_type_filter.py new file mode 100644 index 000000000..03ffc46c6 --- /dev/null +++ b/nova/scheduler/filters/instance_type_filter.py @@ -0,0 +1,86 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from nova.scheduler import host_filter + + +class InstanceTypeFilter(host_filter.AbstractHostFilter): + """HostFilter hard-coded to work with InstanceType records.""" + def instance_type_to_filter(self, instance_type): + """Use instance_type to filter hosts.""" + return (self._full_name(), instance_type) + + def _satisfies_extra_specs(self, capabilities, instance_type): + """Check that the capabilities provided by the compute service + satisfy the extra specs associated with the instance type""" + if 'extra_specs' not in instance_type: + return True + # NOTE(lorinh): For now, we are just checking exact matching on the + # values. 
Later on, we want to handle numerical + # values so we can represent things like number of GPU cards + try: + for key, value in instance_type['extra_specs'].iteritems(): + if capabilities[key] != value: + return False + except KeyError: + return False + return True + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that can create instance_type.""" + instance_type = query + selected_hosts = [] + for host, services in zone_manager.service_states.iteritems(): + capabilities = services.get('compute', {}) + if not capabilities: + continue + host_ram_mb = capabilities['host_memory_free'] + disk_bytes = capabilities['disk_available'] + spec_ram = instance_type['memory_mb'] + spec_disk = instance_type['local_gb'] + extra_specs = instance_type['extra_specs'] + + if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and + self._satisfies_extra_specs(capabilities, instance_type)): + selected_hosts.append((host, capabilities)) + return selected_hosts + + +# host entries (currently) are like: +# {'host_name-description': 'Default install of XenServer', +# 'host_hostname': 'xs-mini', +# 'host_memory_total': 8244539392, +# 'host_memory_overhead': 184225792, +# 'host_memory_free': 3868327936, +# 'host_memory_free_computed': 3840843776, +# 'host_other_config': {}, +# 'host_ip_address': '192.168.1.109', +# 'host_cpu_info': {}, +# 'disk_available': 32954957824, +# 'disk_total': 50394562560, +# 'disk_used': 17439604736, +# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', +# 'host_name_label': 'xs-mini'} + +# instance_type table has: +# name = Column(String(255), unique=True) +# memory_mb = Column(Integer) +# vcpus = Column(Integer) +# local_gb = Column(Integer) +# flavorid = Column(Integer, unique=True) +# swap = Column(Integer, nullable=False, default=0) +# rxtx_quota = Column(Integer, nullable=False, default=0) +# rxtx_cap = Column(Integer, nullable=False, default=0) diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py new file mode 100644 index 000000000..358abdc4d --- /dev/null +++ b/nova/scheduler/filters/json_filter.py @@ -0,0 +1,141 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import operator + +from nova.scheduler import host_filter + + +class JsonFilter(host_filter.AbstractHostFilter): + """Host Filter to allow simple JSON-based grammar for + selecting hosts. + """ + def _op_comp(self, args, op): + """Returns True if the specified operator can successfully + compare the first item in the args with all the rest. Will + return False if only one item is in the list. 
+ """ + if len(args) < 2: + return False + bad = [arg for arg in args[1:] + if not op(args[0], arg)] + return not bool(bad) + + def _equals(self, args): + """First term is == all the other terms.""" + return self._op_comp(args, operator.eq) + + def _less_than(self, args): + """First term is < all the other terms.""" + return self._op_comp(args, operator.lt) + + def _greater_than(self, args): + """First term is > all the other terms.""" + return self._op_comp(args, operator.gt) + + def _in(self, args): + """First term is in set of remaining terms""" + return self._op_comp(args, operator.contains) + + def _less_than_equal(self, args): + """First term is <= all the other terms.""" + return self._op_comp(args, operator.le) + + def _greater_than_equal(self, args): + """First term is >= all the other terms.""" + return self._op_comp(args, operator.ge) + + def _not(self, args): + """Flip each of the arguments.""" + return [not arg for arg in args] + + def _or(self, args): + """True if any arg is True.""" + return any(args) + + def _and(self, args): + """True if all args are True.""" + return all(args) + + commands = { + '=': _equals, + '<': _less_than, + '>': _greater_than, + 'in': _in, + '<=': _less_than_equal, + '>=': _greater_than_equal, + 'not': _not, + 'or': _or, + 'and': _and, + } + + def instance_type_to_filter(self, instance_type): + """Convert instance_type into JSON filter object.""" + required_ram = instance_type['memory_mb'] + required_disk = instance_type['local_gb'] + query = ['and', + ['>=', '$compute.host_memory_free', required_ram], + ['>=', '$compute.disk_available', required_disk]] + return (self._full_name(), json.dumps(query)) + + def _parse_string(self, string, host, services): + """Strings prefixed with $ are capability lookups in the + form '$service.capability[.subcap*]'. + """ + if not string: + return None + if not string.startswith("$"): + return string + + path = string[1:].split(".") + for item in path: + services = services.get(item, None) + if not services: + return None + return services + + def _process_filter(self, zone_manager, query, host, services): + """Recursively parse the query structure.""" + if not query: + return True + cmd = query[0] + method = self.commands[cmd] + cooked_args = [] + for arg in query[1:]: + if isinstance(arg, list): + arg = self._process_filter(zone_manager, arg, host, services) + elif isinstance(arg, basestring): + arg = self._parse_string(arg, host, services) + if arg is not None: + cooked_args.append(arg) + result = method(self, cooked_args) + return result + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that can fulfill the requirements + specified in the query. + """ + expanded = json.loads(query) + filtered_hosts = [] + for host, services in zone_manager.service_states.iteritems(): + result = self._process_filter(zone_manager, expanded, host, + services) + if isinstance(result, list): + # If any succeeded, include the host + result = any(result) + if result: + filtered_hosts.append((host, services)) + return filtered_hosts diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py deleted file mode 100644 index 45a8f40d8..000000000 --- a/nova/scheduler/host_filter.py +++ /dev/null @@ -1,314 +0,0 @@ -# Copyright (c) 2011 Openstack, LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -The Host Filter classes are a way to ensure that only hosts that are -appropriate are considered when creating a new instance. Hosts that are -either incompatible or insufficient to accept a newly-requested instance -are removed by Host Filter classes from consideration. Those that pass -the filter are then passed on for weighting or other process for ordering. - -Three filters are included: AllHosts, Flavor & JSON. AllHosts just -returns the full, unfiltered list of hosts. Flavor is a hard coded -matching mechanism based on flavor criteria and JSON is an ad-hoc -filter grammar. - -Why JSON? The requests for instances may come in through the -REST interface from a user or a parent Zone. -Currently Flavors and/or InstanceTypes are used for -specifing the type of instance desired. Specific Nova users have -noted a need for a more expressive way of specifying instances. -Since we don't want to get into building full DSL this is a simple -form as an example of how this could be done. In reality, most -consumers will use the more rigid filters such as FlavorFilter. -""" - -import json - -from nova import exception -from nova import flags -from nova import log as logging -from nova import utils - -LOG = logging.getLogger('nova.scheduler.host_filter') - -FLAGS = flags.FLAGS -flags.DEFINE_string('default_host_filter', - 'nova.scheduler.host_filter.AllHostsFilter', - 'Which filter to use for filtering hosts.') - - -class HostFilter(object): - """Base class for host filters.""" - - def instance_type_to_filter(self, instance_type): - """Convert instance_type into a filter for most common use-case.""" - raise NotImplementedError() - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that fulfill the filter.""" - raise NotImplementedError() - - def _full_name(self): - """module.classname of the filter.""" - return "%s.%s" % (self.__module__, self.__class__.__name__) - - -class AllHostsFilter(HostFilter): - """ NOP host filter. Returns all hosts in ZoneManager. - This essentially does what the old Scheduler+Chance used - to give us. - """ - - def instance_type_to_filter(self, instance_type): - """Return anything to prevent base-class from raising - exception.""" - return (self._full_name(), instance_type) - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts from ZoneManager list.""" - return [(host, services) - for host, services in zone_manager.service_states.iteritems()] - - -class InstanceTypeFilter(HostFilter): - """HostFilter hard-coded to work with InstanceType records.""" - - def instance_type_to_filter(self, instance_type): - """Use instance_type to filter hosts.""" - return (self._full_name(), instance_type) - - def _satisfies_extra_specs(self, capabilities, instance_type): - """Check that the capabilities provided by the compute service - satisfy the extra specs associated with the instance type""" - - if 'extra_specs' not in instance_type: - return True - - # Note(lorinh): For now, we are just checking exact matching on the - # values. 
Later on, we want to handle numerical - # values so we can represent things like number of GPU cards - - try: - for key, value in instance_type['extra_specs'].iteritems(): - if capabilities[key] != value: - return False - except KeyError: - return False - - return True - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that can create instance_type.""" - instance_type = query - selected_hosts = [] - for host, services in zone_manager.service_states.iteritems(): - capabilities = services.get('compute', {}) - host_ram_mb = capabilities['host_memory_free'] - disk_bytes = capabilities['disk_available'] - spec_ram = instance_type['memory_mb'] - spec_disk = instance_type['local_gb'] - extra_specs = instance_type['extra_specs'] - - if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and - self._satisfies_extra_specs(capabilities, instance_type)): - selected_hosts.append((host, capabilities)) - return selected_hosts - -#host entries (currently) are like: -# {'host_name-description': 'Default install of XenServer', -# 'host_hostname': 'xs-mini', -# 'host_memory_total': 8244539392, -# 'host_memory_overhead': 184225792, -# 'host_memory_free': 3868327936, -# 'host_memory_free_computed': 3840843776, -# 'host_other_config': {}, -# 'host_ip_address': '192.168.1.109', -# 'host_cpu_info': {}, -# 'disk_available': 32954957824, -# 'disk_total': 50394562560, -# 'disk_used': 17439604736, -# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', -# 'host_name_label': 'xs-mini'} - -# instance_type table has: -#name = Column(String(255), unique=True) -#memory_mb = Column(Integer) -#vcpus = Column(Integer) -#local_gb = Column(Integer) -#flavorid = Column(Integer, unique=True) -#swap = Column(Integer, nullable=False, default=0) -#rxtx_quota = Column(Integer, nullable=False, default=0) -#rxtx_cap = Column(Integer, nullable=False, default=0) - - -class JsonFilter(HostFilter): - """Host Filter to allow simple JSON-based grammar for - selecting hosts. 
- """ - - def _equals(self, args): - """First term is == all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs != rhs: - return False - return True - - def _less_than(self, args): - """First term is < all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs >= rhs: - return False - return True - - def _greater_than(self, args): - """First term is > all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs <= rhs: - return False - return True - - def _in(self, args): - """First term is in set of remaining terms""" - if len(args) < 2: - return False - return args[0] in args[1:] - - def _less_than_equal(self, args): - """First term is <= all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs > rhs: - return False - return True - - def _greater_than_equal(self, args): - """First term is >= all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs < rhs: - return False - return True - - def _not(self, args): - """Flip each of the arguments.""" - if len(args) == 0: - return False - return [not arg for arg in args] - - def _or(self, args): - """True if any arg is True.""" - return True in args - - def _and(self, args): - """True if all args are True.""" - return False not in args - - commands = { - '=': _equals, - '<': _less_than, - '>': _greater_than, - 'in': _in, - '<=': _less_than_equal, - '>=': _greater_than_equal, - 'not': _not, - 'or': _or, - 'and': _and, - } - - def instance_type_to_filter(self, instance_type): - """Convert instance_type into JSON filter object.""" - required_ram = instance_type['memory_mb'] - required_disk = instance_type['local_gb'] - query = ['and', - ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk]] - return (self._full_name(), json.dumps(query)) - - def _parse_string(self, string, host, services): - """Strings prefixed with $ are capability lookups in the - form '$service.capability[.subcap*]' - """ - if not string: - return None - if string[0] != '$': - return string - - path = string[1:].split('.') - for item in path: - services = services.get(item, None) - if not services: - return None - return services - - def _process_filter(self, zone_manager, query, host, services): - """Recursively parse the query structure.""" - if len(query) == 0: - return True - cmd = query[0] - method = self.commands[cmd] # Let exception fly. - cooked_args = [] - for arg in query[1:]: - if isinstance(arg, list): - arg = self._process_filter(zone_manager, arg, host, services) - elif isinstance(arg, basestring): - arg = self._parse_string(arg, host, services) - if arg != None: - cooked_args.append(arg) - result = method(self, cooked_args) - return result - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that can fulfill filter.""" - expanded = json.loads(query) - hosts = [] - for host, services in zone_manager.service_states.iteritems(): - r = self._process_filter(zone_manager, expanded, host, services) - if isinstance(r, list): - r = True in r - if r: - hosts.append((host, services)) - return hosts - - -FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] - - -def choose_host_filter(filter_name=None): - """Since the caller may specify which filter to use we need - to have an authoritative list of what is permissible. 
This - function checks the filter name against a predefined set - of acceptable filters. - """ - if not filter_name: - filter_name = FLAGS.default_host_filter - for filter_class in FILTERS: - host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) - if host_match == filter_name: - return filter_class() - raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) diff --git a/nova/tests/scheduler/test_abstract_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py index f4f5cc233..aa97e2344 100644 --- a/nova/tests/scheduler/test_abstract_scheduler.py +++ b/nova/tests/scheduler/test_abstract_scheduler.py @@ -77,6 +77,9 @@ class FakeZoneManager(zone_manager.ZoneManager): 'host3': { 'compute': {'host_memory_free': 3221225472}, }, + 'host4': { + 'compute': {'host_memory_free': 999999999}, + }, } diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index 7e664d3f9..818be2f45 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -20,7 +20,7 @@ import json from nova import exception from nova import test -from nova.scheduler import host_filter +from nova.scheduler import filters class FakeZoneManager: @@ -55,7 +55,7 @@ class HostFilterTestCase(test.TestCase): def setUp(self): super(HostFilterTestCase, self).setUp() - default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter' + default_host_filter = 'nova.scheduler.filteris.AllHostsFilter' self.flags(default_host_filter=default_host_filter) self.instance_type = dict(name='tiny', memory_mb=50, diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index de7581d0a..16ec4420b 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -122,11 +122,14 @@ class LeastCostSchedulerTestCase(test.TestCase): self.flags(least_cost_scheduler_cost_functions=[ 'nova.scheduler.least_cost.compute_fill_first_cost_fn'], compute_fill_first_cost_fn_weight=1) - num = 1 instance_type = {'memory_mb': 1024} request_spec = {'instance_type': instance_type} - hosts = self.sched.filter_hosts('compute', request_spec, None) + all_hosts = self.sched.zone_manager.service_states.iteritems() + all_hosts = [(host, services["compute"]) + for host, services in all_hosts + if "compute" in services] + hosts = self.sched.filter_hosts('compute', request_spec, host_list) expected = [] for idx, (hostname, caps) in enumerate(hosts): From 43697e7dea0c1ffb3c0ebbb7b955bf511705bb0b Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 12 Aug 2011 16:19:46 -0500 Subject: [PATCH 04/27] end of day --- nova/scheduler/filters/__init__.py | 1 + nova/scheduler/filters/abstract_filter.py | 54 +------------ nova/scheduler/filters/all_hosts_filter.py | 3 +- .../scheduler/filters/instance_type_filter.py | 5 +- nova/scheduler/filters/json_filter.py | 39 +++++++--- nova/scheduler/host_filter.py | 75 +++++++++++++++++++ nova/tests/scheduler/test_host_filter.py | 34 +++++---- 7 files changed, 129 insertions(+), 82 deletions(-) create mode 100644 nova/scheduler/host_filter.py diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py index 27160ca0a..4c9187c5a 100644 --- a/nova/scheduler/filters/__init__.py +++ b/nova/scheduler/filters/__init__.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+from abstract_filter import AbstractHostFilter from all_hosts_filter import AllHostsFilter from instance_type_filter import InstanceTypeFilter from json_filter import JsonFilter diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py index 05982820f..fe5610923 100644 --- a/nova/scheduler/filters/abstract_filter.py +++ b/nova/scheduler/filters/abstract_filter.py @@ -13,44 +13,15 @@ # License for the specific language governing permissions and limitations # under the License. -""" -The Host Filter classes are a way to ensure that only hosts that are -appropriate are considered when creating a new instance. Hosts that are -either incompatible or insufficient to accept a newly-requested instance -are removed by Host Filter classes from consideration. Those that pass -the filter are then passed on for weighting or other process for ordering. - -Three filters are included: AllHosts, Flavor & JSON. AllHosts just -returns the full, unfiltered list of hosts. Flavor is a hard coded -matching mechanism based on flavor criteria and JSON is an ad-hoc -filter grammar. - -Why JSON? The requests for instances may come in through the -REST interface from a user or a parent Zone. -Currently Flavors and/or InstanceTypes are used for -specifing the type of instance desired. Specific Nova users have -noted a need for a more expressive way of specifying instances. -Since we don't want to get into building full DSL this is a simple -form as an example of how this could be done. In reality, most -consumers will use the more rigid filters such as FlavorFilter. -""" - -import json - -from nova import exception -from nova import flags -from nova import log as logging import nova.scheduler +from nova import flags - -LOG = logging.getLogger('nova.scheduler.host_filter') FLAGS = flags.FLAGS flags.DEFINE_string('default_host_filter', - 'nova.scheduler.host_filter.AllHostsFilter', + 'nova.scheduler.filters.AllHostsFilter', 'Which filter to use for filtering hosts') - class AbstractHostFilter(object): """Base class for host filters.""" def instance_type_to_filter(self, instance_type): @@ -64,24 +35,3 @@ class AbstractHostFilter(object): def _full_name(self): """module.classname of the filter.""" return "%s.%s" % (self.__module__, self.__class__.__name__) - - -def _get_filters(): - from nova.scheduler import filters - return [itm for itm in dir(filters) - if issubclass(itm, AbstractHostFilter)] - - -def choose_host_filter(filter_name=None): - """Since the caller may specify which filter to use we need - to have an authoritative list of what is permissible. This - function checks the filter name against a predefined set - of acceptable filters. - """ - if not filter_name: - filter_name = FLAGS.default_host_filter - for filter_class in _get_filters(): - host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) - if host_match == filter_name: - return filter_class() - raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) diff --git a/nova/scheduler/filters/all_hosts_filter.py b/nova/scheduler/filters/all_hosts_filter.py index bc4acfd1a..e80d829ca 100644 --- a/nova/scheduler/filters/all_hosts_filter.py +++ b/nova/scheduler/filters/all_hosts_filter.py @@ -15,9 +15,10 @@ import nova.scheduler +from nova.scheduler.filters import abstract_filter -class AllHostsFilter(nova.scheduler.host_filter.AbstractHostFilter): +class AllHostsFilter(abstract_filter.AbstractHostFilter): """NOP host filter. 
Returns all hosts in ZoneManager.""" def instance_type_to_filter(self, instance_type): """Return anything to prevent base-class from raising diff --git a/nova/scheduler/filters/instance_type_filter.py b/nova/scheduler/filters/instance_type_filter.py index 03ffc46c6..62b9ee414 100644 --- a/nova/scheduler/filters/instance_type_filter.py +++ b/nova/scheduler/filters/instance_type_filter.py @@ -14,10 +14,11 @@ # under the License. -from nova.scheduler import host_filter +import nova.scheduler +from nova.scheduler.filters import abstract_filter -class InstanceTypeFilter(host_filter.AbstractHostFilter): +class InstanceTypeFilter(abstract_filter.AbstractHostFilter): """HostFilter hard-coded to work with InstanceType records.""" def instance_type_to_filter(self, instance_type): """Use instance_type to filter hosts.""" diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py index 358abdc4d..889b96915 100644 --- a/nova/scheduler/filters/json_filter.py +++ b/nova/scheduler/filters/json_filter.py @@ -14,49 +14,64 @@ # under the License. +import json import operator -from nova.scheduler import host_filter +import nova.scheduler +from nova.scheduler.filters import abstract_filter + +def debug(*args): + with file("/tmp/debug", "a") as dbg: + msg = " ".join([str(arg) for arg in args]) + dbg.write("%s\n" % msg) -class JsonFilter(host_filter.AbstractHostFilter): +class JsonFilter(abstract_filter.AbstractHostFilter): """Host Filter to allow simple JSON-based grammar for selecting hosts. """ - def _op_comp(self, args, op): + def _op_compare(self, args, op): """Returns True if the specified operator can successfully compare the first item in the args with all the rest. Will return False if only one item is in the list. """ if len(args) < 2: return False - bad = [arg for arg in args[1:] - if not op(args[0], arg)] + if op is operator.contains: + debug("ARGS", type(args), args) + debug("op", op) + debug("REVERSED!!!") + # operator.contains reverses the param order. + bad = [arg for arg in args[1:] + if not op(args, args[0])] + else: + bad = [arg for arg in args[1:] + if not op(args[0], arg)] return not bool(bad) def _equals(self, args): """First term is == all the other terms.""" - return self._op_comp(args, operator.eq) + return self._op_compare(args, operator.eq) def _less_than(self, args): """First term is < all the other terms.""" - return self._op_comp(args, operator.lt) + return self._op_compare(args, operator.lt) def _greater_than(self, args): """First term is > all the other terms.""" - return self._op_comp(args, operator.gt) + return self._op_compare(args, operator.gt) def _in(self, args): """First term is in set of remaining terms""" - return self._op_comp(args, operator.contains) + return self._op_compare(args, operator.contains) def _less_than_equal(self, args): """First term is <= all the other terms.""" - return self._op_comp(args, operator.le) + return self._op_compare(args, operator.le) def _greater_than_equal(self, args): """First term is >= all the other terms.""" - return self._op_comp(args, operator.ge) + return self._op_compare(args, operator.ge) def _not(self, args): """Flip each of the arguments.""" @@ -129,6 +144,8 @@ class JsonFilter(host_filter.AbstractHostFilter): specified in the query. 
""" expanded = json.loads(query) + + debug("expanded", type(expanded), expanded) filtered_hosts = [] for host, services in zone_manager.service_states.iteritems(): result = self._process_filter(zone_manager, expanded, host, diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py new file mode 100644 index 000000000..f5191f5c9 --- /dev/null +++ b/nova/scheduler/host_filter.py @@ -0,0 +1,75 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The Host Filter classes are a way to ensure that only hosts that are +appropriate are considered when creating a new instance. Hosts that are +either incompatible or insufficient to accept a newly-requested instance +are removed by Host Filter classes from consideration. Those that pass +the filter are then passed on for weighting or other process for ordering. + +Three filters are included: AllHosts, Flavor & JSON. AllHosts just +returns the full, unfiltered list of hosts. Flavor is a hard coded +matching mechanism based on flavor criteria and JSON is an ad-hoc +filter grammar. + +Why JSON? The requests for instances may come in through the +REST interface from a user or a parent Zone. +Currently Flavors and/or InstanceTypes are used for +specifing the type of instance desired. Specific Nova users have +noted a need for a more expressive way of specifying instances. +Since we don't want to get into building full DSL this is a simple +form as an example of how this could be done. In reality, most +consumers will use the more rigid filters such as FlavorFilter. +""" + +import json +import types + +from nova import exception +from nova import flags +from nova import log as logging + +import nova.scheduler + + +LOG = logging.getLogger('nova.scheduler.host_filter') +FLAGS = flags.FLAGS + + +def _get_filters(): + from nova.scheduler import filters + def get_itm(nm): + return getattr(filters, nm) + + return [get_itm(itm) for itm in dir(filters) + if (type(get_itm(itm)) is types.TypeType) + and issubclass(get_itm(itm), filters.AbstractHostFilter)] + + +def choose_host_filter(filter_name=None): + """Since the caller may specify which filter to use we need + to have an authoritative list of what is permissible. This + function checks the filter name against a predefined set + of acceptable filters. 
+ """ + if not filter_name: + filter_name = FLAGS.default_host_filter + for filter_class in _get_filters(): + host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) + if (host_match.startswith("nova.scheduler.filters") and + (host_match.split(".")[-1] == filter_name)): + return filter_class() + raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index 818be2f45..a64b25138 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -20,6 +20,7 @@ import json from nova import exception from nova import test +from nova.scheduler import host_filter from nova.scheduler import filters @@ -55,7 +56,7 @@ class HostFilterTestCase(test.TestCase): def setUp(self): super(HostFilterTestCase, self).setUp() - default_host_filter = 'nova.scheduler.filteris.AllHostsFilter' + default_host_filter = 'AllHostsFilter' self.flags(default_host_filter=default_host_filter) self.instance_type = dict(name='tiny', memory_mb=50, @@ -98,13 +99,10 @@ class HostFilterTestCase(test.TestCase): def test_choose_filter(self): # Test default filter ... hf = host_filter.choose_host_filter() - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') + self.assertEquals(hf._full_name().split(".")[-1], 'AllHostsFilter') # Test valid filter ... - hf = host_filter.choose_host_filter( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') + hf = host_filter.choose_host_filter('InstanceTypeFilter') + self.assertEquals(hf._full_name().split(".")[-1], 'InstanceTypeFilter') # Test invalid filter ... try: host_filter.choose_host_filter('does not exist') @@ -113,7 +111,7 @@ class HostFilterTestCase(test.TestCase): pass def test_all_host_filter(self): - hf = host_filter.AllHostsFilter() + hf = filters.AllHostsFilter() cooked = hf.instance_type_to_filter(self.instance_type) hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(10, len(hosts)) @@ -121,11 +119,10 @@ class HostFilterTestCase(test.TestCase): self.assertTrue(host.startswith('host')) def test_instance_type_filter(self): - hf = host_filter.InstanceTypeFilter() + hf = filters.InstanceTypeFilter() # filter all hosts that can support 50 ram and 500 disk name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) + self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter') hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -134,21 +131,20 @@ class HostFilterTestCase(test.TestCase): self.assertEquals('host10', just_hosts[5]) def test_instance_type_filter_extra_specs(self): - hf = host_filter.InstanceTypeFilter() + hf = filters.InstanceTypeFilter() # filter all hosts that can support 50 ram and 500 disk name, cooked = hf.instance_type_to_filter(self.gpu_instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) + self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter') hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(1, len(hosts)) just_hosts = [host for host, caps in hosts] self.assertEquals('host07', just_hosts[0]) def test_json_filter(self): - hf = host_filter.JsonFilter() + hf = filters.JsonFilter() # filter all hosts that can support 50 ram and 500 disk name, cooked = 
hf.instance_type_to_filter(self.instance_type)
-        self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
+        self.assertEquals(name.split(".")[-1], 'JsonFilter')
         hosts = hf.filter_hosts(self.zone_manager, cooked)
         self.assertEquals(6, len(hosts))
         just_hosts = [host for host, caps in hosts]
@@ -191,6 +187,12 @@ class HostFilterTestCase(test.TestCase):
 
         raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
         cooked = json.dumps(raw)
+        def debug(*args):
+            with file("/tmp/debug", "a") as dbg:
+                msg = " ".join([str(arg) for arg in args])
+                dbg.write("%s\n" % msg)
+
+        debug("cooked", cooked, type(cooked))
         hosts = hf.filter_hosts(self.zone_manager, cooked)
 
         self.assertEquals(5, len(hosts))

From d390b24edefd8ec8791f9978da28be67d3cbd216 Mon Sep 17 00:00:00 2001
From: John Tran
Date: Mon, 15 Aug 2011 13:48:09 -0700
Subject: [PATCH 05/27] Add a cloud unit test for describe_instances to ensure
 it does not return deleted instances

---
 nova/tests/test_cloud.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index b2afc53c9..07a35c447 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -487,6 +487,16 @@ class CloudTestCase(test.TestCase):
         db.service_destroy(self.context, comp1['id'])
         db.service_destroy(self.context, comp2['id'])
 
+    def test_describe_instances_deleted(self):
+        args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
+        inst1 = db.instance_create(self.context, args)
+        inst2 = db.instance_create(self.context, args)
+        db.instance_destroy(self.context, inst1.id)
+        result = self.cloud.describe_instances(self.context)
+        result = result['reservationSet'][0]['instancesSet']
+        print result
+        self.assertEqual(1, len(result))
+
     def _block_device_mapping_create(self, instance_id, mappings):
         volumes = []
         for bdm in mappings:

From 99f47aaf834dd50344dae7a5a50033f4e3fb4d67 Mon Sep 17 00:00:00 2001
From: John Tran
Date: Mon, 15 Aug 2011 13:58:44 -0700
Subject: [PATCH 06/27] Add sqlalchemy api tests for instance_get_all_by_filters
 to ensure deleted instances are not returned

---
 nova/tests/test_cloud.py  |  1 -
 nova/tests/test_db_api.py | 15 +++++++++++++++
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 07a35c447..39358eeff 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -494,7 +494,6 @@ class CloudTestCase(test.TestCase):
         db.instance_destroy(self.context, inst1.id)
         result = self.cloud.describe_instances(self.context)
         result = result['reservationSet'][0]['instancesSet']
-        print result
         self.assertEqual(1, len(result))
 
     def _block_device_mapping_create(self, instance_id, mappings):
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 0c07cbb7c..ed363d1be 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -76,3 +76,18 @@ class DbApiTestCase(test.TestCase):
         self.assertEqual(instance['id'], result['id'])
         self.assertEqual(result['fixed_ips'][0]['floating_ips'][0].address,
                          '1.2.1.2')
+
+    def test_instance_get_all_by_filters(self):
+        args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
+        inst1 = db.instance_create(self.context, args)
+        inst2 = db.instance_create(self.context, args)
+        result = db.instance_get_all_by_filters(self.context, {})
+        self.assertEqual(2, len(result))
+
+    def test_instance_get_all_by_filters_deleted(self):
+        args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
+        inst1 = db.instance_create(self.context, args)
+        inst2 = 
db.instance_create(self.context, args) + db.instance_destroy(self.context, inst1.id) + result = db.instance_get_all_by_filters(self.context, {}) + self.assertTrue(1, len(result)) From 34d853b66e0c3ef87ea3ca7c25c2d92869fc7188 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Mon, 15 Aug 2011 17:09:39 -0500 Subject: [PATCH 07/27] got tests passing with logic changes --- nova/scheduler/abstract_scheduler.py | 53 ++--- nova/scheduler/base_scheduler.py | 50 +---- nova/scheduler/filters/__init__.py | 17 ++ nova/scheduler/filters/abstract_filter.py | 2 +- nova/scheduler/filters/json_filter.py | 14 +- nova/scheduler/host_filter.py | 25 +-- nova/scheduler/least_cost.py | 158 +++++++------- nova/tests/scheduler/test_host_filter.py | 2 - .../scheduler/test_least_cost_scheduler.py | 16 +- nova/tests/test_host_filter.py | 200 ------------------ 10 files changed, 137 insertions(+), 400 deletions(-) delete mode 100644 nova/tests/test_host_filter.py diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index a0734f322..2f1ede0a4 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -45,20 +45,19 @@ LOG = logging.getLogger('nova.scheduler.abstract_scheduler') class InvalidBlob(exception.NovaException): message = _("Ill-formed or incorrectly routed 'blob' data sent " - "to instance create request.") + "to instance create request.") class AbstractScheduler(driver.Scheduler): """Base class for creating Schedulers that can work across any nova deployment, from simple designs to multiply-nested zones. """ - def _call_zone_method(self, context, method, specs, zones): """Call novaclient zone method. Broken out for testing.""" return api.call_zone_method(context, method, specs=specs, zones=zones) def _provision_resource_locally(self, context, build_plan_item, - request_spec, kwargs): + request_spec, kwargs): """Create the requested resource in this Zone.""" host = build_plan_item['hostname'] base_options = request_spec['instance_properties'] @@ -68,21 +67,21 @@ class AbstractScheduler(driver.Scheduler): # support at some point? Also, OS API has no concept of security # groups. instance = compute_api.API().create_db_entry_for_new_instance(context, - image, base_options, None, []) + image, base_options, None, []) instance_id = instance['id'] kwargs['instance_id'] = instance_id - rpc.cast(context, - db.queue_get_for(context, "compute", host), - {"method": "run_instance", - "args": kwargs}) + queue = db.queue_get_for(context, "compute", host) + params = {"method": "run_instance", "args": kwargs} + rpc.cast(context, queue, params) LOG.debug(_("Provisioning locally via compute node %(host)s") - % locals()) + % locals()) def _decrypt_blob(self, blob): """Returns the decrypted blob or None if invalid. Broken out - for testing.""" + for testing. + """ decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) try: json_entry = decryptor(blob) @@ -92,15 +91,15 @@ class AbstractScheduler(driver.Scheduler): return None def _ask_child_zone_to_create_instance(self, context, zone_info, - request_spec, kwargs): + request_spec, kwargs): """Once we have determined that the request should go to one of our children, we need to fabricate a new POST /servers/ call with the same parameters that were passed into us. Note that we have to reverse engineer from our args to get back the image, flavor, ipgroup, etc. since the original call could have - come in from EC2 (which doesn't use these things).""" - + come in from EC2 (which doesn't use these things). 
+ """ instance_type = request_spec['instance_type'] instance_properties = request_spec['instance_properties'] @@ -109,30 +108,26 @@ class AbstractScheduler(driver.Scheduler): meta = instance_properties['metadata'] flavor_id = instance_type['flavorid'] reservation_id = instance_properties['reservation_id'] - files = kwargs['injected_files'] ipgroup = None # Not supported in OS API ... yet - child_zone = zone_info['child_zone'] child_blob = zone_info['child_blob'] zone = db.zone_get(context, child_zone) url = zone.api_url LOG.debug(_("Forwarding instance create call to child zone %(url)s" - ". ReservationID=%(reservation_id)s") - % locals()) + ". ReservationID=%(reservation_id)s") % locals()) nova = None try: nova = novaclient.Client(zone.username, zone.password, None, url) nova.authenticate() except novaclient_exceptions.BadRequest, e: raise exception.NotAuthorized(_("Bad credentials attempting " - "to talk to zone at %(url)s.") % locals()) - + "to talk to zone at %(url)s.") % locals()) nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, - child_blob, reservation_id=reservation_id) + child_blob, reservation_id=reservation_id) def _provision_resource_from_blob(self, context, build_plan_item, - instance_id, request_spec, kwargs): + instance_id, request_spec, kwargs): """Create the requested resource locally or in a child zone based on what is stored in the zone blob info. @@ -145,8 +140,8 @@ class AbstractScheduler(driver.Scheduler): means we gathered the info from one of our children. It's possible that, when we decrypt the 'blob' field, it contains "child_blob" data. In which case we forward the - request.""" - + request. + """ host_info = None if "blob" in build_plan_item: # Request was passed in from above. Is it for us? @@ -161,21 +156,20 @@ class AbstractScheduler(driver.Scheduler): # Valid data ... is it for us? if 'child_zone' in host_info and 'child_blob' in host_info: self._ask_child_zone_to_create_instance(context, host_info, - request_spec, kwargs) + request_spec, kwargs) else: self._provision_resource_locally(context, host_info, request_spec, - kwargs) + kwargs) def _provision_resource(self, context, build_plan_item, instance_id, - request_spec, kwargs): + request_spec, kwargs): """Create the requested resource in this Zone or a child zone.""" if "hostname" in build_plan_item: self._provision_resource_locally(context, build_plan_item, - request_spec, kwargs) + request_spec, kwargs) return - self._provision_resource_from_blob(context, build_plan_item, - instance_id, request_spec, kwargs) + instance_id, request_spec, kwargs) def _adjust_child_weights(self, child_results, zones): """Apply the Scale and Offset values from the Zone definition @@ -231,7 +225,6 @@ class AbstractScheduler(driver.Scheduler): for num in xrange(num_instances): if not build_plan: break - build_plan_item = build_plan.pop(0) self._provision_resource(context, build_plan_item, instance_id, request_spec, kwargs) diff --git a/nova/scheduler/base_scheduler.py b/nova/scheduler/base_scheduler.py index e14ee349e..35e5af035 100644 --- a/nova/scheduler/base_scheduler.py +++ b/nova/scheduler/base_scheduler.py @@ -43,40 +43,13 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler): # TODO(sandy): We're only using InstanceType-based specs # currently. Later we'll need to snoop for more detailed # host filter requests. 
-        instance_type = request_spec['instance_type']
+        instance_type = request_spec.get("instance_type", None)
+        if instance_type is None:
+            # No way to select; return the specified hosts
+            return hosts or []
         name, query = selected_filter.instance_type_to_filter(instance_type)
         return selected_filter.filter_hosts(self.zone_manager, query)
 
-    def filter_hosts(self, topic, request_spec, host_list=None):
-        """Return a list of hosts which are acceptable for scheduling.
-        Return value should be a list of (hostname, capability_dict)s.
-        Derived classes may override this, but may find the
-        '_filter' function more appropriate.
-        """
-        def _default_filter(self, hostname, capabilities, request_spec):
-            """Default filter function if there's no _filter"""
-            # NOTE(sirp): The default logic is the equivalent to
-            # AllHostsFilter
-            return True
-
-        filter_func = getattr(self, '%s_filter' % topic, _default_filter)
-
-        if host_list is None:
-            first_run = True
-            host_list = self.zone_manager.service_states.iteritems()
-        else:
-            first_run = False
-
-        filtered_hosts = []
-        for host, services in host_list:
-            if first_run:
-                if topic not in services:
-                    continue
-                services = services[topic]
-            if filter_func(host, services, request_spec):
-                filtered_hosts.append((host, services))
-        return filtered_hosts
-
     def weigh_hosts(self, topic, request_spec, hosts):
         """Derived classes may override this to provide more sophisticated
         scheduling objectives
@@ -84,18 +57,3 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler):
         # NOTE(sirp): The default logic is the same as the NoopCostFunction
         return [dict(weight=1, hostname=hostname, capabilities=capabilities)
                 for hostname, capabilities in hosts]
-
-    def compute_consume(self, capabilities, instance_type):
-        """Consume compute resources for selected host"""
-
-        requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
-        capabilities['host_memory_free'] -= requested_mem
-
-    def consume_resources(self, topic, capabilities, instance_type):
-        """Consume resources for a specific host. 'host' is a tuple
-        of the hostname and the services"""
-
-        consume_func = getattr(self, '%s_consume' % topic, None)
-        if not consume_func:
-            return
-        consume_func(capabilities, instance_type)
diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py
index 4c9187c5a..b86fb795f 100644
--- a/nova/scheduler/filters/__init__.py
+++ b/nova/scheduler/filters/__init__.py
@@ -13,6 +13,23 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+"""
+There are three filters included: AllHosts, InstanceType & JSON.
+
+AllHosts just returns the full, unfiltered list of hosts.
+InstanceType is a hard-coded matching mechanism based on flavor criteria.
+JSON is an ad-hoc filter grammar.
+
+Why JSON? The requests for instances may come in through the
+REST interface from a user or a parent Zone.
+Currently InstanceTypes are used for specifying the type of instance desired.
+Specific Nova users have noted a need for a more expressive way of specifying
+instance requirements. Since we don't want to get into building a full DSL,
+this filter is a simple form as an example of how this could be done.
+In reality, most consumers will use the more rigid filters such as the
+InstanceType filter.
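+
+As an illustration of the grammar (the capability thresholds below are
+made-up example values), a query such as:
+
+    ['and',
+     ['>=', '$compute.host_memory_free', 1024],
+     ['>=', '$compute.disk_available', 200]]
+
+is serialized with json.dumps() and passed to JsonFilter.filter_hosts(),
+which keeps only the hosts whose reported compute capabilities satisfy
+both clauses.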
+""" + from abstract_filter import AbstractHostFilter from all_hosts_filter import AllHostsFilter from instance_type_filter import InstanceTypeFilter diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py index fe5610923..d9d272130 100644 --- a/nova/scheduler/filters/abstract_filter.py +++ b/nova/scheduler/filters/abstract_filter.py @@ -19,7 +19,7 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('default_host_filter', - 'nova.scheduler.filters.AllHostsFilter', + 'AllHostsFilter', 'Which filter to use for filtering hosts') class AbstractHostFilter(object): diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py index 889b96915..caf22f5d5 100644 --- a/nova/scheduler/filters/json_filter.py +++ b/nova/scheduler/filters/json_filter.py @@ -20,11 +20,6 @@ import operator import nova.scheduler from nova.scheduler.filters import abstract_filter -def debug(*args): - with file("/tmp/debug", "a") as dbg: - msg = " ".join([str(arg) for arg in args]) - dbg.write("%s\n" % msg) - class JsonFilter(abstract_filter.AbstractHostFilter): """Host Filter to allow simple JSON-based grammar for @@ -38,12 +33,7 @@ class JsonFilter(abstract_filter.AbstractHostFilter): if len(args) < 2: return False if op is operator.contains: - debug("ARGS", type(args), args) - debug("op", op) - debug("REVERSED!!!") - # operator.contains reverses the param order. - bad = [arg for arg in args[1:] - if not op(args, args[0])] + bad = not args[0] in args[1:] else: bad = [arg for arg in args[1:] if not op(args[0], arg)] @@ -144,8 +134,6 @@ class JsonFilter(abstract_filter.AbstractHostFilter): specified in the query. """ expanded = json.loads(query) - - debug("expanded", type(expanded), expanded) filtered_hosts = [] for host, services in zone_manager.service_states.iteritems(): result = self._process_filter(zone_manager, expanded, host, diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index f5191f5c9..be618f3f3 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -20,43 +20,32 @@ either incompatible or insufficient to accept a newly-requested instance are removed by Host Filter classes from consideration. Those that pass the filter are then passed on for weighting or other process for ordering. -Three filters are included: AllHosts, Flavor & JSON. AllHosts just -returns the full, unfiltered list of hosts. Flavor is a hard coded -matching mechanism based on flavor criteria and JSON is an ad-hoc -filter grammar. - -Why JSON? The requests for instances may come in through the -REST interface from a user or a parent Zone. -Currently Flavors and/or InstanceTypes are used for -specifing the type of instance desired. Specific Nova users have -noted a need for a more expressive way of specifying instances. -Since we don't want to get into building full DSL this is a simple -form as an example of how this could be done. In reality, most -consumers will use the more rigid filters such as FlavorFilter. +Filters are in the 'filters' directory that is off the 'scheduler' +directory of nova. Additional filters can be created and added to that +directory; be sure to add them to the filters/__init__.py file so that +they are part of the nova.schedulers.filters namespace. 
""" -import json import types from nova import exception from nova import flags -from nova import log as logging - import nova.scheduler -LOG = logging.getLogger('nova.scheduler.host_filter') FLAGS = flags.FLAGS def _get_filters(): + # Imported here to avoid circular imports from nova.scheduler import filters def get_itm(nm): return getattr(filters, nm) return [get_itm(itm) for itm in dir(filters) if (type(get_itm(itm)) is types.TypeType) - and issubclass(get_itm(itm), filters.AbstractHostFilter)] + and issubclass(get_itm(itm), filters.AbstractHostFilter) + and get_itm(itm) is not filters.AbstractHostFilter] def choose_host_filter(filter_name=None): diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index a58b11289..903d786cd 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -22,14 +22,12 @@ The cost-function and weights are tabulated, and the host with the least cost is then selected for provisioning. """ -# TODO(dabo): This class will be removed in the next merge prop; it remains now -# because much of the code will be refactored into different classes. import collections from nova import flags from nova import log as logging -from nova.scheduler import abstract_scheduler +from nova.scheduler import base_scheduler from nova import utils from nova import exception @@ -37,14 +35,16 @@ LOG = logging.getLogger('nova.scheduler.least_cost') FLAGS = flags.FLAGS flags.DEFINE_list('least_cost_scheduler_cost_functions', - ['nova.scheduler.least_cost.noop_cost_fn'], - 'Which cost functions the LeastCostScheduler should use.') + ['nova.scheduler.least_cost.noop_cost_fn'], + 'Which cost functions the LeastCostScheduler should use.') # TODO(sirp): Once we have enough of these rules, we can break them out into a # cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, - 'How much weight to give the noop cost function') + 'How much weight to give the noop cost function') +flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, + 'How much weight to give the fill-first cost function') def noop_cost_fn(host): @@ -52,87 +52,20 @@ def noop_cost_fn(host): return 1 -flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, - 'How much weight to give the fill-first cost function') - - def compute_fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude - hosts that don't have enough ram""" - hostname, caps = host - free_mem = caps['host_memory_free'] + hosts that don't have enough ram. + """ + hostname, service = host + caps = service.get("compute", {}) + free_mem = caps.get("host_memory_free", 0) return free_mem -class LeastCostScheduler(abstract_scheduler.AbstractScheduler): - def __init__(self, *args, **kwargs): - self.cost_fns_cache = {} - super(LeastCostScheduler, self).__init__(*args, **kwargs) - - def get_cost_fns(self, topic): - """Returns a list of tuples containing weights and cost functions to - use for weighing hosts - """ - - if topic in self.cost_fns_cache: - return self.cost_fns_cache[topic] - - cost_fns = [] - for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions: - if '.' 
in cost_fn_str: - short_name = cost_fn_str.split('.')[-1] - else: - short_name = cost_fn_str - cost_fn_str = "%s.%s.%s" % ( - __name__, self.__class__.__name__, short_name) - - if not (short_name.startswith('%s_' % topic) or - short_name.startswith('noop')): - continue - - try: - # NOTE(sirp): import_class is somewhat misnamed since it can - # any callable from a module - cost_fn = utils.import_class(cost_fn_str) - except exception.ClassNotFound: - raise exception.SchedulerCostFunctionNotFound( - cost_fn_str=cost_fn_str) - - try: - flag_name = "%s_weight" % cost_fn.__name__ - weight = getattr(FLAGS, flag_name) - except AttributeError: - raise exception.SchedulerWeightFlagNotFound( - flag_name=flag_name) - - cost_fns.append((weight, cost_fn)) - - self.cost_fns_cache[topic] = cost_fns - return cost_fns - - def weigh_hosts(self, topic, request_spec, hosts): - """Returns a list of dictionaries of form: - [ {weight: weight, hostname: hostname, capabilities: capabs} ] - """ - - cost_fns = self.get_cost_fns(topic) - costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) - - weighted = [] - weight_log = [] - for cost, (hostname, caps) in zip(costs, hosts): - weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) - weight_dict = dict(weight=cost, hostname=hostname, - capabilities=caps) - weighted.append(weight_dict) - - LOG.debug(_("Weighted Costs => %s") % weight_log) - return weighted - - def normalize_list(L): """Normalize an array of numbers such that each element satisfies: - 0 <= e <= 1""" + 0 <= e <= 1 + """ if not L: return L max_ = max(L) @@ -160,12 +93,10 @@ def weighted_sum(domain, weighted_fns, normalize=True): score_table = collections.defaultdict(list) for weight, fn in weighted_fns: scores = [fn(elem) for elem in domain] - if normalize: norm_scores = normalize_list(scores) else: norm_scores = scores - for idx, score in enumerate(norm_scores): weighted_score = score * weight score_table[idx].append(weighted_score) @@ -175,5 +106,66 @@ def weighted_sum(domain, weighted_fns, normalize=True): for idx in sorted(score_table): elem_score = sum(score_table[idx]) domain_scores.append(elem_score) - return domain_scores + + +class LeastCostScheduler(base_scheduler.BaseScheduler): + def __init__(self, *args, **kwargs): + self.cost_fns_cache = {} + super(LeastCostScheduler, self).__init__(*args, **kwargs) + + def get_cost_fns(self, topic): + """Returns a list of tuples containing weights and cost functions to + use for weighing hosts + """ + if topic in self.cost_fns_cache: + return self.cost_fns_cache[topic] + cost_fns = [] + for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions: + if '.' 
in cost_fn_str: + short_name = cost_fn_str.split('.')[-1] + else: + short_name = cost_fn_str + cost_fn_str = "%s.%s.%s" % ( + __name__, self.__class__.__name__, short_name) + if not (short_name.startswith('%s_' % topic) or + short_name.startswith('noop')): + continue + + try: + # NOTE(sirp): import_class is somewhat misnamed since it can + # any callable from a module + cost_fn = utils.import_class(cost_fn_str) + except exception.ClassNotFound: + raise exception.SchedulerCostFunctionNotFound( + cost_fn_str=cost_fn_str) + + try: + flag_name = "%s_weight" % cost_fn.__name__ + weight = getattr(FLAGS, flag_name) + except AttributeError: + raise exception.SchedulerWeightFlagNotFound( + flag_name=flag_name) + cost_fns.append((weight, cost_fn)) + + self.cost_fns_cache[topic] = cost_fns + return cost_fns + + def weigh_hosts(self, topic, request_spec, hosts): + """Returns a list of dictionaries of form: + [ {weight: weight, hostname: hostname, capabilities: capabs} ] + """ + cost_fns = self.get_cost_fns(topic) + costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) + + weighted = [] + weight_log = [] + for cost, (hostname, service) in zip(costs, hosts): + caps = service[topic] + weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) + weight_dict = dict(weight=cost, hostname=hostname, + capabilities=caps) + weighted.append(weight_dict) + + LOG.debug(_("Weighted Costs => %s") % weight_log) + return weighted diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index a64b25138..a961b1b06 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -192,9 +192,7 @@ class HostFilterTestCase(test.TestCase): msg = " ".join([str(arg) for arg in args]) dbg.write("%s\n" % msg) - debug("cooked", cooked, type(cooked)) hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index 16ec4420b..d6eaaa223 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -15,6 +15,7 @@ """ Tests For Least Cost Scheduler """ +import copy from nova import test from nova.scheduler import least_cost @@ -81,7 +82,7 @@ class LeastCostSchedulerTestCase(test.TestCase): super(LeastCostSchedulerTestCase, self).tearDown() def assertWeights(self, expected, num, request_spec, hosts): - weighted = self.sched.weigh_hosts(num, request_spec, hosts) + weighted = self.sched.weigh_hosts("compute", request_spec, hosts) self.assertDictListMatch(weighted, expected, approx_equal=True) def test_no_hosts(self): @@ -125,19 +126,20 @@ class LeastCostSchedulerTestCase(test.TestCase): num = 1 instance_type = {'memory_mb': 1024} request_spec = {'instance_type': instance_type} - all_hosts = self.sched.zone_manager.service_states.iteritems() + svc_states = self.sched.zone_manager.service_states.iteritems() all_hosts = [(host, services["compute"]) - for host, services in all_hosts + for host, services in svc_states if "compute" in services] - hosts = self.sched.filter_hosts('compute', request_spec, host_list) + hosts = self.sched.filter_hosts('compute', request_spec, all_hosts) expected = [] - for idx, (hostname, caps) in enumerate(hosts): + for idx, (hostname, services) in enumerate(hosts): + caps = copy.deepcopy(services["compute"]) # Costs are normalized so over 10 hosts, each host with increasing # 
free ram will cost 1/N more. Since the lowest cost host has some # free ram, we add in the 1/N for the base_cost weight = 0.1 + (0.1 * idx) - weight_dict = dict(weight=weight, hostname=hostname) - expected.append(weight_dict) + wtd_dict = dict(hostname=hostname, weight=weight, capabilities=caps) + expected.append(wtd_dict) self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py deleted file mode 100644 index 3a1389a49..000000000 --- a/nova/tests/test_host_filter.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler Host Filters. -""" - -import json - -from nova import exception -from nova import test -from nova.scheduler import host_filter - - -class FakeZoneManager: - pass - - -class HostFilterTestCase(test.TestCase): - """Test case for host filters.""" - - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(HostFilterTestCase, self).setUp() - default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter' - self.flags(default_host_filter=default_host_filter) - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200, - extra_specs={}) - - self.zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - self.zone_manager.service_states = states - - def test_choose_filter(self): - # Test default filter ... - hf = host_filter.choose_host_filter() - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid filter ... - hf = host_filter.choose_host_filter( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') - # Test invalid filter ... 
- try: - host_filter.choose_host_filter('does not exist') - self.fail("Should not find host filter.") - except exception.SchedulerHostFilterNotFound: - pass - - def test_all_host_filter(self): - hf = host_filter.AllHostsFilter() - cooked = hf.instance_type_to_filter(self.instance_type) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(10, len(hosts)) - for host, capabilities in hosts: - self.assertTrue(host.startswith('host')) - - def test_instance_type_filter(self): - hf = host_filter.InstanceTypeFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - def test_json_filter(self): - hf = host_filter.JsonFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - # Try some custom queries - - raw = ['or', - ['and', - ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300], - ], - ['and', - ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700], - ], - ] - - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['not', - ['=', '$compute.host_memory_free', 30], - ] - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(9, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([2, 4, 6, 8, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - # Try some bogus input ... 
- raw = ['unknown command', ] - cooked = json.dumps(raw) - try: - hf.filter_hosts(self.zone_manager, cooked) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False]))) - - try: - hf.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False)) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', '$foo', 100]))) - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', '$.....', 100]))) - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]))) - - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', {}, ['>', '$missing....foo']]))) From 03f7c3127257c3f1d08c75e858499b6c1b724894 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Mon, 15 Aug 2011 17:31:24 -0500 Subject: [PATCH 08/27] pep8 cleanup --- nova/scheduler/abstract_scheduler.py | 6 +++--- nova/scheduler/filters/abstract_filter.py | 4 ++-- nova/scheduler/host_filter.py | 1 + nova/tests/scheduler/test_host_filter.py | 5 ----- nova/tests/scheduler/test_least_cost_scheduler.py | 3 ++- 5 files changed, 8 insertions(+), 11 deletions(-) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index 2f1ede0a4..77db67773 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -15,7 +15,7 @@ """ The AbsractScheduler is an abstract class Scheduler for creating instances -locally or across zones. Two methods should be overridden in order to +locally or across zones. Two methods should be overridden in order to customize the behavior: filter_hosts() and weigh_hosts(). The default behavior is to simply select all hosts and weight them the same. """ @@ -298,8 +298,8 @@ class AbstractScheduler(driver.Scheduler): def filter_hosts(self, topic, request_spec, host_list): """Filter the full host list returned from the ZoneManager. By default, this method only applies the basic_ram_filter(), meaning all hosts - with at least enough RAM for the requested instance are returned. - + with at least enough RAM for the requested instance are returned. + Override in subclasses to provide greater selectivity. 
""" def basic_ram_filter(hostname, capabilities, request_spec): diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py index d9d272130..a1d00d562 100644 --- a/nova/scheduler/filters/abstract_filter.py +++ b/nova/scheduler/filters/abstract_filter.py @@ -18,10 +18,10 @@ import nova.scheduler from nova import flags FLAGS = flags.FLAGS -flags.DEFINE_string('default_host_filter', - 'AllHostsFilter', +flags.DEFINE_string('default_host_filter', 'AllHostsFilter', 'Which filter to use for filtering hosts') + class AbstractHostFilter(object): """Base class for host filters.""" def instance_type_to_filter(self, instance_type): diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index be618f3f3..4bc5158cc 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -39,6 +39,7 @@ FLAGS = flags.FLAGS def _get_filters(): # Imported here to avoid circular imports from nova.scheduler import filters + def get_itm(nm): return getattr(filters, nm) diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index a961b1b06..17431fc7e 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -187,11 +187,6 @@ class HostFilterTestCase(test.TestCase): raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] cooked = json.dumps(raw) - def debug(*args): - with file("/tmp/debug", "a") as dbg: - msg = " ".join([str(arg) for arg in args]) - dbg.write("%s\n" % msg) - hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index d6eaaa223..af58de527 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -139,7 +139,8 @@ class LeastCostSchedulerTestCase(test.TestCase): # free ram will cost 1/N more. Since the lowest cost host has some # free ram, we add in the 1/N for the base_cost weight = 0.1 + (0.1 * idx) - wtd_dict = dict(hostname=hostname, weight=weight, capabilities=caps) + wtd_dict = dict(hostname=hostname, weight=weight, + capabilities=caps) expected.append(wtd_dict) self.assertWeights(expected, num, request_spec, hosts) From d869ea7a28a77e5bb7f4b3d3d882f8f8745e965b Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 16 Aug 2011 12:02:39 -0400 Subject: [PATCH 09/27] Cleanup the '_base' directory in libvirt tests. 
--- nova/tests/test_libvirt.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 688518bb8..6a213b4f0 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -836,6 +836,7 @@ class LibvirtConnTestCase(test.TestCase): count = (0 <= str(e.message).find('Unexpected method call')) shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name)) + shutil.rmtree(os.path.join(FLAGS.instances_path, '_base')) self.assertTrue(count) From 31e8286e3395576048c1246002026b9a8f134950 Mon Sep 17 00:00:00 2001 From: John Tran Date: Tue, 16 Aug 2011 09:18:13 -0700 Subject: [PATCH 10/27] test improvements per peer review --- nova/tests/test_cloud.py | 10 ++++++---- nova/tests/test_db_api.py | 12 +++++++----- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 39358eeff..0793784f8 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -488,13 +488,15 @@ class CloudTestCase(test.TestCase): db.service_destroy(self.context, comp2['id']) def test_describe_instances_deleted(self): - args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} - inst1 = db.instance_create(self.context, args) - inst2 = db.instance_create(self.context, args) + args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} + inst1 = db.instance_create(self.context, args1) + args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'} + inst2 = db.instance_create(self.context, args2) db.instance_destroy(self.context, inst1.id) result = self.cloud.describe_instances(self.context) result = result['reservationSet'][0]['instancesSet'] - self.assertEqual(1, len(result)) + self.assertEqual(result[0]['instanceId'], + ec2utils.id_to_ec2_id(inst2.id)) def _block_device_mapping_create(self, instance_id, mappings): volumes = [] diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py index ed363d1be..038c07f40 100644 --- a/nova/tests/test_db_api.py +++ b/nova/tests/test_db_api.py @@ -85,9 +85,11 @@ class DbApiTestCase(test.TestCase): self.assertTrue(2, len(result)) def test_instance_get_all_by_filters_deleted(self): - args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} - inst1 = db.instance_create(self.context, args) - inst2 = db.instance_create(self.context, args) + args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} + inst1 = db.instance_create(self.context, args1) + args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'} + inst2 = db.instance_create(self.context, args2) db.instance_destroy(self.context, inst1.id) - result = db.instance_get_all_by_filters(self.context, {}) - self.assertTrue(1, len(result)) + result = db.instance_get_all_by_filters(self.context.elevated(), {}) + self.assertEqual(1, len(result)) + self.assertEqual(result[0].id, inst2.id) From 1e8ddeeab4d60aa0d8dda91045cf42aa5dcc80ed Mon Sep 17 00:00:00 2001 From: Troy Toman Date: Wed, 17 Aug 2011 02:41:17 -0500 Subject: [PATCH 11/27] Changed return code to 413 for metadata, personality and instance quota issues --- Authors | 1 + nova/quota.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Authors b/Authors index 02fe46c79..864679929 100644 --- a/Authors +++ b/Authors @@ -101,6 +101,7 @@ Stephanie Reese Thierry Carrez Todd Willey Trey Morris +Troy Toman Tushar Patil Vasiliy Shlykov Vishvananda Ishaya diff --git a/nova/quota.py b/nova/quota.py index 58766e846..48e598659 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -164,5 +164,5 @@ def 
allowed_injected_file_path_bytes(context): class QuotaError(exception.ApiError): - """Quota Exceeeded.""" + """Quota Exceeded.""" pass From 6ee3bfbe706c0294fac1670c588c5586160f78ab Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 17 Aug 2011 16:25:53 -0700 Subject: [PATCH 13/27] Make all services use the same launching strategy --- bin/nova-api | 44 +++++++++++++++++--------------------------- nova/service.py | 47 ++++++++++++++++++++++++++++------------------- nova/utils.py | 41 +++-------------------------------------- nova/wsgi.py | 3 --- 4 files changed, 48 insertions(+), 87 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index fe8e83366..d2086dc92 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -19,12 +19,15 @@ """Starter script for Nova API. -Starts both the EC2 and OpenStack APIs in separate processes. +Starts both the EC2 and OpenStack APIs in separate greenthreads. """ +import eventlet +eventlet.monkey_patch() + +import gettext import os -import signal import sys @@ -33,32 +36,19 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath( if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): sys.path.insert(0, possible_topdir) -import nova.service -import nova.utils +gettext.install('nova', unicode=1) from nova import flags - - -FLAGS = flags.FLAGS - - -def main(): - """Launch EC2 and OSAPI services.""" - nova.utils.Bootstrapper.bootstrap_binary(sys.argv) - - launcher = nova.service.Launcher() - - for api in FLAGS.enabled_apis: - service = nova.service.WSGIService(api) - launcher.launch_service(service) - - signal.signal(signal.SIGTERM, lambda *_: launcher.stop()) - - try: - launcher.wait() - except KeyboardInterrupt: - launcher.stop() - +from nova import log as logging +from nova import service +from nova import utils if __name__ == '__main__': - sys.exit(main()) + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + services = [] + for api in flags.FLAGS.enabled_apis: + services.append(service.WSGIService(api)) + service.serve(*services) + service.wait() diff --git a/nova/service.py b/nova/service.py index 6e9eddc5a..e0735d26f 100644 --- a/nova/service.py +++ b/nova/service.py @@ -20,13 +20,12 @@ """Generic Node baseclass for all workers that run on hosts.""" import inspect -import multiprocessing import os +import signal +import eventlet import greenlet -from eventlet import greenthread - from nova import context from nova import db from nova import exception @@ -77,10 +76,7 @@ class Launcher(object): """ service.start() - try: - service.wait() - except KeyboardInterrupt: - service.stop() + service.wait() def launch_service(self, service): """Load and start the given service. @@ -89,10 +85,8 @@ class Launcher(object): :returns: None """ - process = multiprocessing.Process(target=self.run_service, - args=(service,)) - process.start() - self._services.append(process) + gt = eventlet.spawn(self.run_service, service) + self._services.append(gt) def stop(self): """Stop all services which are currently running. @@ -101,8 +95,7 @@ class Launcher(object): """ for service in self._services: - if service.is_alive(): - service.terminate() + service.kill() def wait(self): """Waits until all services have been stopped, and then returns. 
@@ -111,7 +104,10 @@ class Launcher(object): """ for service in self._services: - service.join() + try: + service.wait() + except greenlet.GreenletExit: + pass class Service(object): @@ -121,6 +117,7 @@ class Service(object): periodic_interval=None, *args, **kwargs): self.host = host self.binary = binary + self.name = binary self.topic = topic self.manager_class_name = manager manager_class = utils.import_class(self.manager_class_name) @@ -173,7 +170,7 @@ class Service(object): finally: consumer_set.close() - self.consumer_set_thread = greenthread.spawn(_wait) + self.consumer_set_thread = eventlet.spawn(_wait) if self.report_interval: pulse = utils.LoopingCall(self.report_state) @@ -339,7 +336,17 @@ class WSGIService(object): self.server.wait() +# NOTE(vish): the global launcher is to maintain the existing +# functionality of calling service.serve + +# service.wait +_launcher = None + + def serve(*services): + global _launcher + if not _launcher: + _launcher = Launcher() + signal.signal(signal.SIGTERM, lambda *args: _launcher.stop()) try: if not services: services = [Service.create()] @@ -354,7 +361,7 @@ def serve(*services): flags.DEFINE_flag(flags.HelpXMLFlag()) FLAGS.ParseNewFlags() - name = '_'.join(x.binary for x in services) + name = '_'.join(x.name for x in services) logging.debug(_('Serving %s'), name) logging.debug(_('Full set of FLAGS:')) for flag in FLAGS: @@ -362,9 +369,11 @@ def serve(*services): logging.debug('%(flag)s : %(flag_get)s' % locals()) for x in services: - x.start() + _launcher.launch_service(x) def wait(): - while True: - greenthread.sleep(5) + try: + _launcher.wait() + except KeyboardInterrupt: + _launcher.stop() diff --git a/nova/utils.py b/nova/utils.py index 7276b6bd5..54126f644 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -260,8 +260,9 @@ def default_flagfile(filename='nova.conf', args=None): filename = "./nova.conf" if not os.path.exists(filename): filename = '/etc/nova/nova.conf' - flagfile = '--flagfile=%s' % filename - args.insert(1, flagfile) + if os.path.exists(filename): + flagfile = '--flagfile=%s' % filename + args.insert(1, flagfile) def debug(arg): @@ -837,39 +838,3 @@ def bool_from_str(val): return True if int(val) else False except ValueError: return val.lower() == 'true' - - -class Bootstrapper(object): - """Provides environment bootstrapping capabilities for entry points.""" - - @staticmethod - def bootstrap_binary(argv): - """Initialize the Nova environment using command line arguments.""" - Bootstrapper.setup_flags(argv) - Bootstrapper.setup_logging() - Bootstrapper.log_flags() - - @staticmethod - def setup_logging(): - """Initialize logging and log a message indicating the Nova version.""" - logging.setup() - logging.audit(_("Nova Version (%s)") % - version.version_string_with_vcs()) - - @staticmethod - def setup_flags(input_flags): - """Initialize flags, load flag file, and print help if needed.""" - default_flagfile(args=input_flags) - FLAGS(input_flags or []) - flags.DEFINE_flag(flags.HelpFlag()) - flags.DEFINE_flag(flags.HelpshortFlag()) - flags.DEFINE_flag(flags.HelpXMLFlag()) - FLAGS.ParseNewFlags() - - @staticmethod - def log_flags(): - """Log the list of all active flags being used.""" - logging.audit(_("Currently active flags:")) - for key in FLAGS: - value = FLAGS.get(key, None) - logging.audit(_("%(key)s : %(value)s" % locals())) diff --git a/nova/wsgi.py b/nova/wsgi.py index c8ddb97d7..f2846aa73 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -39,9 +39,6 @@ from nova import log as logging from nova import utils 
-eventlet.patcher.monkey_patch(socket=True, time=True) - - FLAGS = flags.FLAGS LOG = logging.getLogger('nova.wsgi') From fe91d51a8ef384cdaee3734be802b256e7236f00 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Wed, 17 Aug 2011 22:00:38 -0700 Subject: [PATCH 14/27] bug #828429: remove references to interface in nova-dhcpbridge --- bin/nova-dhcpbridge | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index a47ea7a76..c2fd8994d 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -52,7 +52,7 @@ flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager') LOG = logging.getLogger('nova.dhcpbridge') -def add_lease(mac, ip_address, _interface): +def add_lease(mac, ip_address): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: LOG.debug(_("leasing ip")) @@ -66,13 +66,13 @@ def add_lease(mac, ip_address, _interface): "args": {"address": ip_address}}) -def old_lease(mac, ip_address, interface): +def old_lease(mac, ip_address): """Update just as add lease.""" LOG.debug(_("Adopted old lease or got a change of mac")) - add_lease(mac, ip_address, interface) + add_lease(mac, ip_address) -def del_lease(mac, ip_address, _interface): +def del_lease(mac, ip_address): """Called when a lease expires.""" if FLAGS.fake_rabbit: LOG.debug(_("releasing ip")) @@ -116,9 +116,9 @@ def main(): mac = argv[2] ip = argv[3] msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s" - " on interface %(interface)s") % locals() + " for network %(network_id)s") % locals() LOG.debug(msg) - globals()[action + '_lease'](mac, ip, interface) + globals()[action + '_lease'](mac, ip) else: print init_leases(network_id) From 771c41f94f0d41faf705d0489b193f1d7cf2d4c9 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Wed, 17 Aug 2011 22:29:04 -0700 Subject: [PATCH 15/27] in dhcpbridge, only grab network id from env if needed --- bin/nova-dhcpbridge | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index c2fd8994d..afafca548 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -99,8 +99,6 @@ def main(): utils.default_flagfile(flagfile) argv = FLAGS(sys.argv) logging.setup() - # check ENV first so we don't break any older deploys - network_id = int(os.environ.get('NETWORK_ID')) if int(os.environ.get('TESTING', '0')): from nova.tests import fake_flags @@ -115,11 +113,11 @@ def main(): if action in ['add', 'del', 'old']: mac = argv[2] ip = argv[3] - msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s" - " for network %(network_id)s") % locals() + msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s") % locals() LOG.debug(msg) globals()[action + '_lease'](mac, ip) else: + network_id = int(os.environ.get('NETWORK_ID')) print init_leases(network_id) if __name__ == "__main__": From 3c33432a1187cc16dd25de6512986499402acb5f Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 18 Aug 2011 16:22:56 +0000 Subject: [PATCH 17/27] Corrected the hardcoded filter path. 
Also simplified the filter matching code in host_filter.py --- nova/scheduler/host_filter.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 4bc5158cc..826a99b0a 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -58,8 +58,6 @@ def choose_host_filter(filter_name=None): if not filter_name: filter_name = FLAGS.default_host_filter for filter_class in _get_filters(): - host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) - if (host_match.startswith("nova.scheduler.filters") and - (host_match.split(".")[-1] == filter_name)): + if filter_class.__name__ == filter_name: return filter_class() raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) From 52588623725dada2442eafdff755cd7595770455 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 18 Aug 2011 16:40:41 +0000 Subject: [PATCH 18/27] Added the fix for the missing parameter for the call to create_db_entry_for_new_instance() --- nova/scheduler/abstract_scheduler.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index 77db67773..3930148e2 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -62,12 +62,13 @@ class AbstractScheduler(driver.Scheduler): host = build_plan_item['hostname'] base_options = request_spec['instance_properties'] image = request_spec['image'] + instance_type = request_spec['instance_type'] # TODO(sandy): I guess someone needs to add block_device_mapping # support at some point? Also, OS API has no concept of security # groups. instance = compute_api.API().create_db_entry_for_new_instance(context, - image, base_options, None, []) + instance_type, image, base_options, None, []) instance_id = instance['id'] kwargs['instance_id'] = instance_id @@ -158,8 +159,8 @@ class AbstractScheduler(driver.Scheduler): self._ask_child_zone_to_create_instance(context, host_info, request_spec, kwargs) else: - self._provision_resource_locally(context, host_info, request_spec, - kwargs) + self._provision_resource_locally(context, instance_type, host_info, + request_spec, kwargs) def _provision_resource(self, context, build_plan_item, instance_id, request_spec, kwargs): From 2d2f20976f4c1c087042d6c0da8c3365b2936d26 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 18 Aug 2011 09:50:24 -0700 Subject: [PATCH 19/27] dhcpbridge: add better error if NETWORK_ID is not set, convert locals() to static dict --- bin/nova-dhcpbridge | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index afafca548..1c9ae951e 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -113,11 +113,19 @@ def main(): if action in ['add', 'del', 'old']: mac = argv[2] ip = argv[3] - msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s") % locals() + msg = _("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'") % \ + {"action": action, + "mac": mac, + "ip": ip} LOG.debug(msg) globals()[action + '_lease'](mac, ip) else: - network_id = int(os.environ.get('NETWORK_ID')) + try: + network_id = int(os.environ.get('NETWORK_ID')) + except TypeError: + LOG.error(_("Environment variable 'NETWORK_ID' must be set.")) + sys.exit(1) + print init_leases(network_id) if __name__ == "__main__": From 71e930f56079a2f11f9d97e38ede678cdb16d8c8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 
10:55:39 -0700 Subject: [PATCH 20/27] more cleanup of binaries per review --- bin/nova-ajax-console-proxy | 7 +++---- bin/nova-api | 8 +++----- bin/nova-compute | 5 ++--- bin/nova-console | 5 ++--- bin/nova-direct-api | 11 +++++++---- bin/nova-network | 5 ++--- bin/nova-objectstore | 14 +++++++------- bin/nova-scheduler | 5 ++--- bin/nova-vncproxy | 15 ++++++--------- bin/nova-volume | 5 ++--- 10 files changed, 36 insertions(+), 44 deletions(-) diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy index 2329581a2..0a789b4b9 100755 --- a/bin/nova-ajax-console-proxy +++ b/bin/nova-ajax-console-proxy @@ -24,7 +24,6 @@ from eventlet import greenthread from eventlet.green import urllib2 import exceptions -import gettext import os import sys import time @@ -38,11 +37,11 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging from nova import rpc +from nova import service from nova import utils from nova import wsgi @@ -141,5 +140,5 @@ if __name__ == '__main__': acp = AjaxConsoleProxy() acp.register_listeners() server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port) - server.start() - server.wait() + service.serve(server) + service.wait() diff --git a/bin/nova-api b/bin/nova-api index d2086dc92..38e2624d8 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -26,7 +26,6 @@ Starts both the EC2 and OpenStack APIs in separate greenthreads. import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -36,7 +35,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath( if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -47,8 +45,8 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - services = [] + servers = [] for api in flags.FLAGS.enabled_apis: - services.append(service.WSGIService(api)) - service.serve(*services) + servers.append(service.WSGIService(api)) + service.serve(*servers) service.wait() diff --git a/bin/nova-compute b/bin/nova-compute index cd7c78def..9aef201e6 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -22,7 +22,6 @@ import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -34,7 +33,6 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')): sys.path.insert(0, POSSIBLE_TOPDIR) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -45,5 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() + server = service.Server(binary='nova-compute') + service.serve(server) service.wait() diff --git a/bin/nova-console b/bin/nova-console index 40608b995..7f76fdc29 100755 --- a/bin/nova-console +++ b/bin/nova-console @@ -21,7 +21,6 @@ import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -33,7 +32,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -44,5 +42,6 
@@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() + server = service.Server(binary='nova-console') + service.serve(server) service.wait() diff --git a/bin/nova-direct-api b/bin/nova-direct-api index c6cf9b2ff..106e89ba9 100755 --- a/bin/nova-direct-api +++ b/bin/nova-direct-api @@ -20,7 +20,9 @@ """Starter script for Nova Direct API.""" -import gettext +import eventlet +eventlet.monkey_patch() + import os import sys @@ -32,12 +34,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import compute from nova import flags from nova import log as logging from nova import network +from nova import service from nova import utils from nova import volume from nova import wsgi @@ -97,5 +99,6 @@ if __name__ == '__main__': with_auth, host=FLAGS.direct_host, port=FLAGS.direct_port) - server.start() - server.wait() + + service.serve(server) + service.wait() diff --git a/bin/nova-network b/bin/nova-network index 101761ef7..ce93e9354 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -22,7 +22,6 @@ import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -34,7 +33,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -45,5 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() + server = service.Server(binary='nova-compute') + service.serve(server) service.wait() diff --git a/bin/nova-objectstore b/bin/nova-objectstore index 4d5aec445..c7a76e120 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -17,11 +17,11 @@ # License for the specific language governing permissions and limitations # under the License. -""" - Daemon for nova objectstore. Supports S3 API. -""" +"""Daemon for nova objectstore. 
Supports S3 API.""" + +import eventlet +eventlet.monkey_patch() -import gettext import os import sys @@ -33,10 +33,10 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging +from nova import service from nova import utils from nova import wsgi from nova.objectstore import s3server @@ -54,5 +54,5 @@ if __name__ == '__main__': router, port=FLAGS.s3_port, host=FLAGS.s3_host) - server.start() - server.wait() + service.serve(server) + service.wait() diff --git a/bin/nova-scheduler b/bin/nova-scheduler index 0c205a80f..07d1c55e6 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -22,7 +22,6 @@ import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -34,7 +33,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -45,5 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() + server = service.Server(binary='nova-compute') + service.serve(server) service.wait() diff --git a/bin/nova-vncproxy b/bin/nova-vncproxy index bdbb30a7f..dc08e2433 100755 --- a/bin/nova-vncproxy +++ b/bin/nova-vncproxy @@ -19,7 +19,8 @@ """VNC Console Proxy Server.""" import eventlet -import gettext +eventlet.monkey_patch() + import os import sys @@ -29,7 +30,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -41,7 +41,7 @@ from nova.vnc import auth from nova.vnc import proxy -LOG = logging.getLogger('nova.vnc-proxy') +LOG = logging.getLogger('nova.vncproxy') FLAGS = flags.FLAGS @@ -81,7 +81,7 @@ if __name__ == "__main__": FLAGS(sys.argv) logging.setup() - LOG.audit(_("Starting nova-vnc-proxy node (version %s)"), + LOG.audit(_("Starting nova-vncproxy node (version %s)"), version.version_string_with_vcs()) if not (os.path.exists(FLAGS.vncproxy_wwwroot) and @@ -107,13 +107,10 @@ if __name__ == "__main__": else: with_auth = auth.VNCNovaAuthMiddleware(with_logging) - service.serve() - server = wsgi.Server("VNC Proxy", with_auth, host=FLAGS.vncproxy_host, port=FLAGS.vncproxy_port) - server.start() server.start_tcp(handle_flash_socket_policy, 843, host=FLAGS.vncproxy_host) - - server.wait() + service.serve(server) + service.wait() diff --git a/bin/nova-volume b/bin/nova-volume index 8dcdbc500..1451de44a 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -22,7 +22,6 @@ import eventlet eventlet.monkey_patch() -import gettext import os import sys @@ -34,7 +33,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) from nova import flags from nova import log as logging @@ -45,5 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() + server = service.Server(binary='nova-volume') + service.serve(server) service.wait() From 
aab9a58a4940c7c9fa9d0488bfc16a3276c94523 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 10:56:14 -0700 Subject: [PATCH 21/27] add separate api binaries --- bin/nova-api-ec2 | 50 ++++++++++++++++++++++++++++++++++++++++++++++++ bin/nova-api-os | 50 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 100 insertions(+) create mode 100755 bin/nova-api-ec2 create mode 100755 bin/nova-api-os diff --git a/bin/nova-api-ec2 b/bin/nova-api-ec2 new file mode 100755 index 000000000..9fac7b63a --- /dev/null +++ b/bin/nova-api-ec2 @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for Nova API. + +Starts both the EC2 and OpenStack APIs in separate greenthreads. + +""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): + sys.path.insert(0, possible_topdir) + + +from nova import flags +from nova import log as logging +from nova import service +from nova import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + server = service.WSGIService('ec2') + service.serve(server) + service.wait() diff --git a/bin/nova-api-os b/bin/nova-api-os new file mode 100755 index 000000000..9d9a7b05e --- /dev/null +++ b/bin/nova-api-os @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for Nova API. + +Starts both the EC2 and OpenStack APIs in separate greenthreads. 
+ +""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): + sys.path.insert(0, possible_topdir) + + +from nova import flags +from nova import log as logging +from nova import service +from nova import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + server = service.WSGIService('osapi') + service.serve(server) + service.wait() From cf0916773edad1bed9b7d7a54632d23e3b74cf31 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 11:00:47 -0700 Subject: [PATCH 22/27] remove signal handling and clean up service.serve --- nova/service.py | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/nova/service.py b/nova/service.py index e0735d26f..8ffd39629 100644 --- a/nova/service.py +++ b/nova/service.py @@ -21,7 +21,6 @@ import inspect import os -import signal import eventlet import greenlet @@ -346,33 +345,21 @@ def serve(*services): global _launcher if not _launcher: _launcher = Launcher() - signal.signal(signal.SIGTERM, lambda *args: _launcher.stop()) - try: - if not services: - services = [Service.create()] - except Exception: - logging.exception('in Service.create()') - raise - finally: - # After we've loaded up all our dynamic bits, check - # whether we should print help - flags.DEFINE_flag(flags.HelpFlag()) - flags.DEFINE_flag(flags.HelpshortFlag()) - flags.DEFINE_flag(flags.HelpXMLFlag()) - FLAGS.ParseNewFlags() - - name = '_'.join(x.name for x in services) - logging.debug(_('Serving %s'), name) - logging.debug(_('Full set of FLAGS:')) - for flag in FLAGS: - flag_get = FLAGS.get(flag, None) - logging.debug('%(flag)s : %(flag_get)s' % locals()) - for x in services: _launcher.launch_service(x) def wait(): + # After we've loaded up all our dynamic bits, check + # whether we should print help + flags.DEFINE_flag(flags.HelpFlag()) + flags.DEFINE_flag(flags.HelpshortFlag()) + flags.DEFINE_flag(flags.HelpXMLFlag()) + FLAGS.ParseNewFlags() + logging.debug(_('Full set of FLAGS:')) + for flag in FLAGS: + flag_get = FLAGS.get(flag, None) + logging.debug('%(flag)s : %(flag_get)s' % locals()) try: _launcher.wait() except KeyboardInterrupt: From b7ca51be5727e693ad6eb252c8f8a32bcf842c81 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 11:28:02 -0700 Subject: [PATCH 23/27] fix typo --- bin/nova-compute | 2 +- bin/nova-console | 2 +- bin/nova-network | 2 +- bin/nova-scheduler | 2 +- bin/nova-volume | 2 +- nova/service.py | 35 +++++++++++++++++++---------------- 6 files changed, 24 insertions(+), 21 deletions(-) diff --git a/bin/nova-compute b/bin/nova-compute index 9aef201e6..5239fae72 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -43,6 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - server = service.Server(binary='nova-compute') + server = service.Service.create(binary='nova-compute') service.serve(server) service.wait() diff --git a/bin/nova-console b/bin/nova-console index 7f76fdc29..22f6ef171 100755 --- a/bin/nova-console +++ b/bin/nova-console @@ -42,6 +42,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - server = service.Server(binary='nova-console') + server = service.Service.create(binary='nova-console') service.serve(server) service.wait() diff 
--git a/bin/nova-network b/bin/nova-network
index ce93e9354..57759d30a 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -43,6 +43,6 @@ if __name__ == '__main__':
     utils.default_flagfile()
     flags.FLAGS(sys.argv)
     logging.setup()
-    server = service.Server(binary='nova-compute')
+    server = service.Service.create(binary='nova-network')
     service.serve(server)
     service.wait()
diff --git a/bin/nova-scheduler b/bin/nova-scheduler
index 07d1c55e6..3b627e62d 100755
--- a/bin/nova-scheduler
+++ b/bin/nova-scheduler
@@ -43,6 +43,6 @@ if __name__ == '__main__':
     utils.default_flagfile()
     flags.FLAGS(sys.argv)
     logging.setup()
-    server = service.Server(binary='nova-compute')
+    server = service.Service.create(binary='nova-compute')
     service.serve(server)
     service.wait()
diff --git a/bin/nova-volume b/bin/nova-volume
index 1451de44a..5405aebbb 100755
--- a/bin/nova-volume
+++ b/bin/nova-volume
@@ -43,6 +43,6 @@ if __name__ == '__main__':
     utils.default_flagfile()
     flags.FLAGS(sys.argv)
     logging.setup()
-    server = service.Server(binary='nova-volume')
+    server = service.Service.create(binary='nova-volume')
     service.serve(server)
     service.wait()
diff --git a/nova/service.py b/nova/service.py
index 8ffd39629..959e79052 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -67,24 +67,24 @@ class Launcher(object):
         self._services = []

     @staticmethod
-    def run_service(service):
-        """Start and wait for a service to finish.
+    def run_server(server):
+        """Start and wait for a server to finish.

-        :param service: Service to run and wait for.
+        :param server: Server to run and wait for.
         :returns: None

         """
-        service.start()
-        service.wait()
+        server.start()
+        server.wait()

-    def launch_service(self, service):
-        """Load and start the given service.
+    def launch_server(self, server):
+        """Load and start the given server.

-        :param service: The service you would like to start.
+        :param server: The server you would like to start.
         :returns: None

         """
-        gt = eventlet.spawn(self.run_service, service)
+        gt = eventlet.spawn(self.run_server, server)
         self._services.append(gt)

     def stop(self):
@@ -110,13 +110,16 @@ class Launcher(object):


 class Service(object):
-    """Base class for workers that run on hosts."""
+    """Service object for binaries running on hosts.
+
+    A service takes a manager and enables rpc by listening to queues based
+    on topic. It also periodically runs tasks on the manager and reports
+    its state to the database services table."""

     def __init__(self, host, binary, topic, manager, report_interval=None,
                  periodic_interval=None, *args, **kwargs):
         self.host = host
         self.binary = binary
-        self.name = binary
         self.topic = topic
         self.manager_class_name = manager
         manager_class = utils.import_class(self.manager_class_name)
@@ -289,9 +292,9 @@ class WSGIService(object):
     """Provides ability to launch API from a 'paste' configuration."""

     def __init__(self, name, loader=None):
-        """Initialize, but do not start the WSGI service.
+        """Initialize, but do not start the WSGI server.

-        :param name: The name of the WSGI service given to the loader.
+        :param name: The name of the WSGI server given to the loader.
         :param loader: Loads the WSGI application using the given name.
:returns: None @@ -341,12 +344,12 @@ class WSGIService(object): _launcher = None -def serve(*services): +def serve(*servers): global _launcher if not _launcher: _launcher = Launcher() - for x in services: - _launcher.launch_service(x) + for server in servers: + _launcher.launch_server(server) def wait(): From 10192235b5f0330b6074d2c0eb49fc0bf05bd2f6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 11:28:43 -0700 Subject: [PATCH 24/27] one more --- bin/nova-scheduler | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-scheduler b/bin/nova-scheduler index 3b627e62d..2e168cbc6 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -43,6 +43,6 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - server = service.Service.create(binary='nova-compute') + server = service.Service.create(binary='nova-scheduler') service.serve(server) service.wait() From cd72896fad3990b2faf0c2e9f8448e88549e14fa Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 18 Aug 2011 11:31:28 -0700 Subject: [PATCH 25/27] fix docstrings in new api bins --- bin/nova-api-ec2 | 6 +----- bin/nova-api-os | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/bin/nova-api-ec2 b/bin/nova-api-ec2 index 9fac7b63a..df50f713d 100755 --- a/bin/nova-api-ec2 +++ b/bin/nova-api-ec2 @@ -17,11 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Starter script for Nova API. - -Starts both the EC2 and OpenStack APIs in separate greenthreads. - -""" +"""Starter script for Nova EC2 API.""" import eventlet eventlet.monkey_patch() diff --git a/bin/nova-api-os b/bin/nova-api-os index 9d9a7b05e..374e850ea 100755 --- a/bin/nova-api-os +++ b/bin/nova-api-os @@ -17,11 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Starter script for Nova API. - -Starts both the EC2 and OpenStack APIs in separate greenthreads. - -""" +"""Starter script for Nova OS API.""" import eventlet eventlet.monkey_patch() From 1184f2b3c564e8be8dc78e1e751cd3933849f05d Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 18 Aug 2011 14:39:25 -0500 Subject: [PATCH 26/27] Updated the distributed scheduler docs with the latest changes to the classes. --- doc/source/devref/distributed_scheduler.rst | 56 ++++++++++---------- doc/source/images/base_scheduler.png | Bin 0 -> 17068 bytes doc/source/images/zone_overview.png | Bin 0 -> 51587 bytes 3 files changed, 27 insertions(+), 29 deletions(-) create mode 100644 doc/source/images/base_scheduler.png create mode 100755 doc/source/images/zone_overview.png diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst index e33fda4d2..c63e62f7f 100644 --- a/doc/source/devref/distributed_scheduler.rst +++ b/doc/source/devref/distributed_scheduler.rst @@ -31,9 +31,9 @@ This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capab So, how does this all work? -This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the :doc:`devguide/zones` documentation before reading this. +This document will explain the strategy employed by the `BaseScheduler`, which is the base for all schedulers designed to work across zones, and its derivations. You should read the :doc:`devguide/zones` documentation before reading this. - .. image:: /images/zone_aware_scheduler.png + .. 
image:: /images/base_scheduler.png

Costs & Weights
---------------
@@ -52,32 +52,32 @@ This Weight is computed for each Instance requested. If the customer asked for 1

   .. image:: /images/costs_weights.png

-nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler
+nova.scheduler.base_scheduler.BaseScheduler
------------------------------------------------------
-As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions.
+As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `BaseScheduler` uses this information to make its decisions.

Here is how it works:

    1. The compute nodes are filtered and the nodes remaining are weighed.
-    2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
+    2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
    3. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
    4. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
    5. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
    6. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.

-   .. image:: /images/zone_aware_overview.png
+   .. image:: /images/zone_overview.png

-`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which host filtering and weighing strategy will be used.
+`BaseScheduler` by itself is not capable of handling all the provisioning. You should also specify the filter classes and weighting classes to be used in determining which host is selected for new instance creation.

Filtering and Weighing
----------------------
-The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible.
+The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `BaseScheduler` are flexible and extensible.

   .. image:: /images/filtering.png

Requesting a new instance
-------------------------
-Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in.
The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.
+Prior to the `BaseScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.

`nova.compute.api.create()` performed the following actions:
  1. it validated all the fields passed into it.
@@ -89,11 +89,11 @@ Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to

   .. image:: /images/nova.compute.api.create.png

-Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones.
+Generally, the simplest schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones.

The problem with this approach is each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possibility of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.

-For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
+For the `BaseScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
  1. it validates all the fields passed into it.
  2. it creates a single `reservation_id` for all of the instances created. This is a UUID.
  3. it creates a single `run_instance` request in the scheduler queue
@@ -109,21 +109,19 @@ For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to

The Catch
---------

-This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
+This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
-When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
+When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many, many `select` calls issued to child Zones asking for estimates.

-Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key`
+Instead, we take a rather innovative approach to the problem. We encrypt all the child Zone internal details and pass them back to the parent Zone. In the case of a nested Zone layout, each nesting layer will encrypt the data from all of its children and pass that to its parent Zone. In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent. Every Zone interface adds another layer of encryption, using its unique key.

-In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent.
+Once a host is selected, it will either be local to the Zone that received the initial API call, or one of its child Zones. In the latter case, the parent Zone simply passes the encrypted data for the selected host back to each of its child Zones during the `POST /servers` call as an extra parameter. If the child Zone can decrypt the data, then it is the correct Zone for the selected host; all other Zones will not be able to decrypt the data and will discard the request. This is why it is critical that each Zone has a unique value specified in its config in `--build_plan_encryption_key`: it controls the ability to locate the selected host without having to hard-code path information or other identifying information.
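+
+To make the mechanics concrete, here is a minimal sketch of how such an
+opaque blob could be produced and consumed (illustrative only; this is
+not Nova's actual implementation, and it assumes PyCrypto's AES with a
+16-, 24- or 32-byte key derived from the `--build_plan_encryption_key`
+value)::
+
+    import json
+    import os
+
+    from Crypto.Cipher import AES  # PyCrypto; an assumed dependency
+
+    def _pad(text, block=16):
+        # PKCS7-style padding so the payload is a whole number of blocks.
+        pad = block - len(text) % block
+        return text + chr(pad) * pad
+
+    def encrypt_blob(key, host_info):
+        # A zone serializes and encrypts its host selection...
+        iv = os.urandom(16)
+        cipher = AES.new(key, AES.MODE_CBC, iv)
+        data = iv + cipher.encrypt(_pad(json.dumps(host_info)))
+        return data.encode('base64')
+
+    def decrypt_blob(key, blob):
+        # ...and only a zone holding the same key can recover it.
+        raw = blob.decode('base64')
+        cipher = AES.new(key, AES.MODE_CBC, raw[:16])
+        plain = cipher.decrypt(raw[16:])
+        return json.loads(plain[:-ord(plain[-1])])
+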
The child Zone can then act on the decrypted data and either go directly to the Compute node previously selected if it is located in that Zone, or repeat the process with its child Zones until the target Zone containing the selected host is reached.

-Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.zone_aware_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use.
+Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.base_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use.

Reservation IDs
---------------

-NOTE: The features described in this section are related to the up-coming 'merge-4' branch.
-
The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created.

NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would not be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
@@ -137,23 +135,23 @@ Finally, we need to give the user a way to get information on each of the instan

Host Filter
-----------

-As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.
+As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To facilitate this the `nova.scheduler.filters` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.

-The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others:
+The filter used is determined by the `--default_host_filters` flag, which points to a Python Class. By default this flag is set to `[AllHostsFilter]` which simply returns all available hosts. But there are others:

- * `nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.
+ * `InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.

- * `nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples.
+ * `JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples.

-To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be.
+To create your own `HostFilter` the user simply has to derive from `nova.scheduler.filters.AbstractHostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of available hosts is in the `host_list` parameter passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be. By default, it is the capabilities reported by the host.

Cost Scheduler Weighing
-----------------------

-Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled.
+Every `BaseScheduler` subclass should also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted; this will be done by the `BaseScheduler` when all the results have been assembled.

-Simple Zone Aware Scheduling
-----------------------------
+Simple Scheduling Across Zones
+------------------------------
-The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
-Simple Zone Aware Scheduling
+Simple Scheduling Across Zones
 ----------------------------

-The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
+The `BaseScheduler` uses the default `filter_hosts` method, which will use either any filters specified in the request's `filter` parameter or, if that is not specified, the filters named in the `FLAGS.default_host_filters` setting. Its `weigh_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.

 The `--scheduler_driver` flag is how you specify the scheduler class name.

@@ -168,14 +166,14 @@ All this Zone and Distributed Scheduler stuff can seem a little daunting to conf
     --enable_zone_routing=true
     --zone_name=zone1
     --build_plan_encryption_key=c286696d887c9aa0611bbb3e2025a45b
-    --scheduler_driver=nova.scheduler.host_filter.HostFilterScheduler
-    --default_host_filter=nova.scheduler.host_filter.AllHostsFilter
+    --scheduler_driver=nova.scheduler.base_scheduler.BaseScheduler
+    --default_host_filters=nova.scheduler.filters.AllHostsFilter

 `--allow_admin_api` must be set for OS API to enable the new `/zones/*` commands.

 `--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances.

 `--zone_name` is only required in child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue.

 `build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys.

-`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler`.
+`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.base_scheduler.BaseScheduler`.

-`default_host_filter` is the host filter to be used for filtering candidate Compute nodes.
+`default_host_filters` is the list of host filters to be used for filtering candidate Compute nodes.
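There is no registry behind `--scheduler_driver`; the class is resolved by a plain dynamic import. A rough sketch of the mechanism, assuming nova's `utils.import_object` helper (see `nova.scheduler.manager` for the authoritative wiring)::

    from nova import flags
    from nova import utils

    FLAGS = flags.FLAGS

    # Instantiate whatever class the flag names; each Zone is therefore
    # free to run a different scheduler than its parent.
    driver = utils.import_object(FLAGS.scheduler_driver)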
 Some optional flags which are handy for debugging are:

diff --git a/doc/source/images/base_scheduler.png b/doc/source/images/base_scheduler.png
new file mode 100644
index 0000000000000000000000000000000000000000..75d029338b5e3d81f7e251677777bbab34580e6f
GIT binary patch
[binary data omitted: new PNG image doc/source/images/base_scheduler.png, literal 17068 bytes]
diff --git a/doc/source/images/zone_overview.png b/doc/source/images/zone_overview.png
new file mode 100755
index 0000000000000000000000000000000000000000..cc891df0a4c7b2c1d2a16e61de04570c790a7617
GIT binary patch
[binary data omitted: new PNG image doc/source/images/zone_overview.png, literal 51587 bytes]
zrrSB|()!}Fo@7{s3@%68Okhr|V66>b!wU5vI?pQAp^dQcM`Y%Fld&>Xs~8=FjH|J` ztt<_DUcm^TYn6%YjX|L$i0A#epK*FBL@i>rc-bWe@2%ixmxC7r0dY0tU}u0&e=hpC z6Nm%{V{ck{WZhk<=#%)^p_z$n;Nto9lBFczuSz|sEa#B~Mjg|7eQop%?oow|ZVcd2 z9D@f=ukX&;LWrg$8LI{4hEYocl^LkZ4;u`inak*Rl6K;m{sE64 zxHllq?mJPIBsCVfQ~(qIyEQ$2x-4L#W62G3sip%<(LSZBOy6?La|NZ1U7*%peIJ^5 zo$1nl`|76W&Had4a7cC=g8w!w+kZ0wrxG!{l5zOxHuf>IsUoDRb8t0lZ-ceP$$MoE zqWgN+w)|$&2gg&lW}Ma3?pJ}w1M_hOR?c#~+>Lp?iNxz!pJ;Y|jpvG&mp)+D4t@lE z>KMZKZTCJO0rPlZthuugI9~CN4dJE$S=0nd8G6o~P(k?wbE#Oqs(yN((w0^O0}A&^^5n7;C!F zt2Hv)!o5c2mhr(G9DliOq^NtG$VkP6LafQE0VXeVeTr+?(Tt(a2_9^-Yhd@rlCw2=x~3{hZ3^0yTPD z?vc;V(xj73{1L%|ujoo}TQ!2sP7MGingVMYu9g#5p-`%AiX6ktb?lfM`j SC^-C$1eh3F7?AZIM*RcSexW}A literal 0 HcmV?d00001 From 77e47abb16ae42bc8f5757e3eab6505c7fc6ae33 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Thu, 18 Aug 2011 21:38:29 +0000 Subject: [PATCH 27/27] Removed extra parameter from the call to _provision_resource_locally() --- nova/scheduler/abstract_scheduler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index 3930148e2..e8c343a4b 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -62,7 +62,7 @@ class AbstractScheduler(driver.Scheduler): host = build_plan_item['hostname'] base_options = request_spec['instance_properties'] image = request_spec['image'] - instance_type = request_spec['instance_type'] + instance_type = request_spec.get('instance_type') # TODO(sandy): I guess someone needs to add block_device_mapping # support at some point? Also, OS API has no concept of security @@ -159,8 +159,8 @@ class AbstractScheduler(driver.Scheduler): self._ask_child_zone_to_create_instance(context, host_info, request_spec, kwargs) else: - self._provision_resource_locally(context, instance_type, host_info, - request_spec, kwargs) + self._provision_resource_locally(context, host_info, request_spec, + kwargs) def _provision_resource(self, context, build_plan_item, instance_id, request_spec, kwargs):