Merged with trunkw
This commit is contained in:
commit
4e1c4c1e7b
1
Authors
1
Authors
@ -101,6 +101,7 @@ Stephanie Reese <reese.sm@gmail.com>
|
||||
Thierry Carrez <thierry@openstack.org>
|
||||
Todd Willey <todd@ansolabs.com>
|
||||
Trey Morris <trey.morris@rackspace.com>
|
||||
Troy Toman <troy.toman@rackspace.com>
|
||||
Tushar Patil <tushar.vitthal.patil@gmail.com>
|
||||
Vasiliy Shlykov <vash@vasiliyshlykov.org>
|
||||
Vishvananda Ishaya <vishvananda@gmail.com>
|
||||
|
@ -24,7 +24,6 @@ from eventlet import greenthread
|
||||
from eventlet.green import urllib2
|
||||
|
||||
import exceptions
|
||||
import gettext
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
@ -38,11 +37,11 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
gettext.install('nova', unicode=1)
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import rpc
|
||||
from nova import service
|
||||
from nova import utils
|
||||
from nova import wsgi
|
||||
|
||||
@ -141,5 +140,5 @@ if __name__ == '__main__':
|
||||
acp = AjaxConsoleProxy()
|
||||
acp.register_listeners()
|
||||
server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
|
||||
server.start()
|
||||
server.wait()
|
||||
service.serve(server)
|
||||
service.wait()
|
||||
|
43
bin/nova-api
43
bin/nova-api
@ -19,12 +19,14 @@
|
||||
|
||||
"""Starter script for Nova API.
|
||||
|
||||
Starts both the EC2 and OpenStack APIs in separate processes.
|
||||
Starts both the EC2 and OpenStack APIs in separate greenthreads.
|
||||
|
||||
"""
|
||||
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
|
||||
|
||||
@ -33,32 +35,19 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
|
||||
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
import nova.service
|
||||
import nova.utils
|
||||
|
||||
from nova import flags
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
def main():
|
||||
"""Launch EC2 and OSAPI services."""
|
||||
nova.utils.Bootstrapper.bootstrap_binary(sys.argv)
|
||||
nova.utils.monkey_patch()
|
||||
launcher = nova.service.Launcher()
|
||||
|
||||
for api in FLAGS.enabled_apis:
|
||||
service = nova.service.WSGIService(api)
|
||||
launcher.launch_service(service)
|
||||
|
||||
signal.signal(signal.SIGTERM, lambda *_: launcher.stop())
|
||||
|
||||
try:
|
||||
launcher.wait()
|
||||
except KeyboardInterrupt:
|
||||
launcher.stop()
|
||||
|
||||
from nova import log as logging
|
||||
from nova import service
|
||||
from nova import utils
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
utils.default_flagfile()
|
||||
flags.FLAGS(sys.argv)
|
||||
logging.setup()
|
||||
utils.monkey_patch()
|
||||
servers = []
|
||||
for api in flags.FLAGS.enabled_apis:
|
||||
servers.append(service.WSGIService(api))
|
||||
service.serve(*servers)
|
||||
service.wait()
|
||||
|
47
bin/nova-api-ec2
Executable file
47
bin/nova-api-ec2
Executable file
@ -0,0 +1,47 @@
|
||||
#!/usr/bin/env python
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Starter script for Nova EC2 API."""
|
||||
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
|
||||
sys.argv[0]), os.pardir, os.pardir))
|
||||
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import service
|
||||
from nova import utils
|
||||
|
||||
if __name__ == '__main__':
|
||||
utils.default_flagfile()
|
||||
flags.FLAGS(sys.argv)
|
||||
logging.setup()
|
||||
utils.monkey_patch()
|
||||
server = service.WSGIService('ec2')
|
||||
service.serve(server)
|
||||
service.wait()
|
47
bin/nova-api-os
Executable file
47
bin/nova-api-os
Executable file
@ -0,0 +1,47 @@
|
||||
#!/usr/bin/env python
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Starter script for Nova OS API."""
|
||||
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
|
||||
sys.argv[0]), os.pardir, os.pardir))
|
||||
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import service
|
||||
from nova import utils
|
||||
|
||||
if __name__ == '__main__':
|
||||
utils.default_flagfile()
|
||||
flags.FLAGS(sys.argv)
|
||||
logging.setup()
|
||||
utils.monkey_patch()
|
||||
server = service.WSGIService('osapi')
|
||||
service.serve(server)
|
||||
service.wait()
|
@ -22,7 +22,6 @@
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import gettext
|
||||
import os
|
||||
import sys
|
||||
|
||||
@ -34,7 +33,6 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
|
||||
sys.path.insert(0, POSSIBLE_TOPDIR)
|
||||
|
||||
gettext.install('nova', unicode=1)
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
@ -46,5 +44,6 @@ if __name__ == '__main__':
|
||||
flags.FLAGS(sys.argv)
|
||||
logging.setup()
|
||||
utils.monkey_patch()
|
||||
service.serve()
|
||||
server = service.Service.create(binary='nova-compute')
|
||||
service.serve(server)
|
||||
service.wait()
|
||||
|
@ -21,7 +21,6 @@
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import gettext
|
||||
import os
|
||||
import sys
|
||||
|
||||
@ -33,7 +32,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
gettext.install('nova', unicode=1)
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
@ -44,5 +42,6 @@ if __name__ == '__main__':
|
||||
utils.default_flagfile()
|
||||
flags.FLAGS(sys.argv)
|
||||
logging.setup()
|
||||
service.serve()
|
||||
server = service.Service.create(binary='nova-console')
|
||||
service.serve(server)
|
||||
service.wait()
|
||||
|
@ -52,7 +52,7 @@ flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
|
||||
LOG = logging.getLogger('nova.dhcpbridge')
|
||||
|
||||
|
||||
def add_lease(mac, ip_address, _interface):
|
||||
def add_lease(mac, ip_address):
|
||||
"""Set the IP that was assigned by the DHCP server."""
|
||||
if FLAGS.fake_rabbit:
|
||||
LOG.debug(_("leasing ip"))
|
||||
@ -66,13 +66,13 @@ def add_lease(mac, ip_address, _interface):
|
||||
"args": {"address": ip_address}})
|
||||
|
||||
|
||||
def old_lease(mac, ip_address, interface):
|
||||
def old_lease(mac, ip_address):
|
||||
"""Update just as add lease."""
|
||||
LOG.debug(_("Adopted old lease or got a change of mac"))
|
||||
add_lease(mac, ip_address, interface)
|
||||
add_lease(mac, ip_address)
|
||||
|
||||
|
||||
def del_lease(mac, ip_address, _interface):
|
||||
def del_lease(mac, ip_address):
|
||||
"""Called when a lease expires."""
|
||||
if FLAGS.fake_rabbit:
|
||||
LOG.debug(_("releasing ip"))
|
||||
@ -99,8 +99,6 @@ def main():
|
||||
utils.default_flagfile(flagfile)
|
||||
argv = FLAGS(sys.argv)
|
||||
logging.setup()
|
||||
# check ENV first so we don't break any older deploys
|
||||
network_id = int(os.environ.get('NETWORK_ID'))
|
||||
|
||||
if int(os.environ.get('TESTING', '0')):
|
||||
from nova.tests import fake_flags
|
||||
@ -115,11 +113,19 @@ def main():
|
||||
if action in ['add', 'del', 'old']:
|
||||
mac = argv[2]
|
||||
ip = argv[3]
|
||||
msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s"
|
||||
" on interface %(interface)s") % locals()
|
||||
msg = _("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'") % \
|
||||
{"action": action,
|
||||
"mac": mac,
|
||||
"ip": ip}
|
||||
LOG.debug(msg)
|
||||
globals()[action + '_lease'](mac, ip, interface)
|
||||
globals()[action + '_lease'](mac, ip)
|
||||
else:
|
||||
try:
|
||||
network_id = int(os.environ.get('NETWORK_ID'))
|
||||
except TypeError:
|
||||
LOG.error(_("Environment variable 'NETWORK_ID' must be set."))
|
||||
sys.exit(1)
|
||||
|
||||
print init_leases(network_id)
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
@ -20,7 +20,9 @@
|
||||
|
||||
"""Starter script for Nova Direct API."""
|
||||
|
||||
import gettext
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
@ -32,12 +34,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
gettext.install('nova', unicode=1)
|
||||
|
||||
from nova import compute
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import network
|
||||
from nova import service
|
||||
from nova import utils
|
||||
from nova import volume
|
||||
from nova import wsgi
|
||||
@ -97,5 +99,6 @@ if __name__ == '__main__':
|
||||
with_auth,
|
||||
host=FLAGS.direct_host,
|
||||
port=FLAGS.direct_port)
|
||||
server.start()
|
||||
server.wait()
|
||||
|
||||
service.serve(server)
|
||||
service.wait()
|
||||
|
@ -22,7 +22,6 @@
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import gettext
|
||||
import os
|
||||
import sys
|
||||
|
||||
@ -34,7 +33,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
gettext.install('nova', unicode=1)
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
@ -46,5 +44,6 @@ if __name__ == '__main__':
|
||||
flags.FLAGS(sys.argv)
|
||||
logging.setup()
|
||||
utils.monkey_patch()
|
||||
service.serve()
|
||||
server = service.Service.create(binary='nova-network')
|
||||
service.serve(server)
|
||||
service.wait()
|
||||
|
@ -17,11 +17,11 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Daemon for nova objectstore. Supports S3 API.
|
||||
"""
|
||||
"""Daemon for nova objectstore. Supports S3 API."""
|
||||
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import gettext
|
||||
import os
|
||||
import sys
|
||||
|
||||
@ -33,10 +33,10 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
gettext.install('nova', unicode=1)
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import service
|
||||
from nova import utils
|
||||
from nova import wsgi
|
||||
from nova.objectstore import s3server
|
||||
@ -55,5 +55,5 @@ if __name__ == '__main__':
|
||||
router,
|
||||
port=FLAGS.s3_port,
|
||||
host=FLAGS.s3_host)
|
||||
server.start()
|
||||
server.wait()
|
||||
service.serve(server)
|
||||
service.wait()
|
||||
|
@ -19,7 +19,8 @@
|
||||
"""VNC Console Proxy Server."""
|
||||
|
||||
import eventlet
|
||||
import gettext
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
@ -29,7 +30,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
gettext.install('nova', unicode=1)
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
@ -41,7 +41,7 @@ from nova.vnc import auth
|
||||
from nova.vnc import proxy
|
||||
|
||||
|
||||
LOG = logging.getLogger('nova.vnc-proxy')
|
||||
LOG = logging.getLogger('nova.vncproxy')
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
@ -81,7 +81,7 @@ if __name__ == "__main__":
|
||||
FLAGS(sys.argv)
|
||||
logging.setup()
|
||||
|
||||
LOG.audit(_("Starting nova-vnc-proxy node (version %s)"),
|
||||
LOG.audit(_("Starting nova-vncproxy node (version %s)"),
|
||||
version.version_string_with_vcs())
|
||||
|
||||
if not (os.path.exists(FLAGS.vncproxy_wwwroot) and
|
||||
@ -107,13 +107,10 @@ if __name__ == "__main__":
|
||||
else:
|
||||
with_auth = auth.VNCNovaAuthMiddleware(with_logging)
|
||||
|
||||
service.serve()
|
||||
|
||||
server = wsgi.Server("VNC Proxy",
|
||||
with_auth,
|
||||
host=FLAGS.vncproxy_host,
|
||||
port=FLAGS.vncproxy_port)
|
||||
server.start()
|
||||
server.start_tcp(handle_flash_socket_policy, 843, host=FLAGS.vncproxy_host)
|
||||
|
||||
server.wait()
|
||||
service.serve(server)
|
||||
service.wait()
|
||||
|
@ -22,7 +22,6 @@
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import gettext
|
||||
import os
|
||||
import sys
|
||||
|
||||
@ -34,7 +33,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
|
||||
sys.path.insert(0, possible_topdir)
|
||||
|
||||
gettext.install('nova', unicode=1)
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
@ -45,5 +43,7 @@ if __name__ == '__main__':
|
||||
utils.default_flagfile()
|
||||
flags.FLAGS(sys.argv)
|
||||
logging.setup()
|
||||
service.serve()
|
||||
utils.monkey_patch()
|
||||
server = service.Service.create(binary='nova-volume')
|
||||
service.serve(server)
|
||||
service.wait()
|
||||
|
@ -31,9 +31,9 @@ This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capab
|
||||
|
||||
So, how does this all work?
|
||||
|
||||
This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the :doc:`devguide/zones` documentation before reading this.
|
||||
This document will explain the strategy employed by the `BaseScheduler`, which is the base for all schedulers designed to work across zones, and its derivations. You should read the :doc:`devguide/zones` documentation before reading this.
|
||||
|
||||
.. image:: /images/zone_aware_scheduler.png
|
||||
.. image:: /images/base_scheduler.png
|
||||
|
||||
Costs & Weights
|
||||
---------------
|
||||
@ -52,32 +52,32 @@ This Weight is computed for each Instance requested. If the customer asked for 1
|
||||
|
||||
.. image:: /images/costs_weights.png
|
||||
|
||||
nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler
|
||||
nova.scheduler.base_scheduler.BaseScheduler
|
||||
------------------------------------------------------
|
||||
As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions.
|
||||
As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `BaseScheduler` uses this information to make its decisions.
|
||||
|
||||
Here is how it works:
|
||||
|
||||
1. The compute nodes are filtered and the nodes remaining are weighed.
|
||||
2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
|
||||
2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
|
||||
3. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
|
||||
4. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
|
||||
5. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
|
||||
6. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.
|
||||
|
||||
.. image:: /images/zone_aware_overview.png
|
||||
.. image:: /images/zone_overview.png
|
||||
|
||||
`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which host filtering and weighing strategy will be used.
|
||||
`BaseScheduler` by itself is not capable of handling all the provisioning itself. You should also specify the filter classes and weighting classes to be used in determining which host is selected for new instance creation.
|
||||
|
||||
Filtering and Weighing
|
||||
----------------------
|
||||
The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible.
|
||||
The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `BaseScheduler` are flexible and extensible.
|
||||
|
||||
.. image:: /images/filtering.png
|
||||
|
||||
Requesting a new instance
|
||||
-------------------------
|
||||
Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.
|
||||
Prior to the `BaseScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.
|
||||
|
||||
`nova.compute.api.create()` performed the following actions:
|
||||
1. it validated all the fields passed into it.
|
||||
@ -89,11 +89,11 @@ Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to
|
||||
|
||||
.. image:: /images/nova.compute.api.create.png
|
||||
|
||||
Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones.
|
||||
Generally, the simplest schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones.
|
||||
|
||||
The problem with this approach is each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possability of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.
|
||||
|
||||
For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
|
||||
For the `BaseScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
|
||||
1. it validates all the fields passed into it.
|
||||
2. it creates a single `reservation_id` for all of instances created. This is a UUID.
|
||||
3. it creates a single `run_instance` request in the scheduler queue
|
||||
@ -109,21 +109,19 @@ For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to
|
||||
|
||||
The Catch
|
||||
---------
|
||||
This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
|
||||
This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
|
||||
|
||||
When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
|
||||
When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many, many `select` calls issued to child Zones asking for estimates.
|
||||
|
||||
Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key`
|
||||
Instead, we take a rather innovative approach to the problem. We encrypt all the child Zone internal details and pass them back the to parent Zone. In the case of a nested Zone layout, each nesting layer will encrypt the data from all of its children and pass that to its parent Zone. In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent. Every Zone interface adds another layer of encryption, using its unique key.
|
||||
|
||||
In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent.
|
||||
Once a host is selected, it will either be local to the Zone that received the initial API call, or one of its child Zones. In the latter case, the parent Zone it simply passes the encrypted data for the selected host back to each of its child Zones during the `POST /servers` call as an extra parameter. If the child Zone can decrypt the data, then it is the correct Zone for the selected host; all other Zones will not be able to decrypt the data and will discard the request. This is why it is critical that each Zone has a unique value specified in its config in `--build_plan_encryption_key`: it controls the ability to locate the selected host without having to hard-code path information or other identifying information. The child Zone can then act on the decrypted data and either go directly to the Compute node previously selected if it is located in that Zone, or repeat the process with its child Zones until the target Zone containing the selected host is reached.
|
||||
|
||||
Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.zone_aware_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use.
|
||||
Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.base_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use.
|
||||
|
||||
Reservation IDs
|
||||
---------------
|
||||
|
||||
NOTE: The features described in this section are related to the up-coming 'merge-4' branch.
|
||||
|
||||
The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created.
|
||||
|
||||
NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would not be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
|
||||
@ -137,23 +135,23 @@ Finally, we need to give the user a way to get information on each of the instan
|
||||
Host Filter
|
||||
-----------
|
||||
|
||||
As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.
|
||||
As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.filters` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.
|
||||
|
||||
The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others:
|
||||
The filter used is determined by the `--default_host_filters` flag, which points to a Python Class. By default this flag is set to `[AllHostsFilter]` which simply returns all available hosts. But there are others:
|
||||
|
||||
* `nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.
|
||||
* `InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.
|
||||
|
||||
* `nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples.
|
||||
* `JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples.
|
||||
|
||||
To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be.
|
||||
To create your own `HostFilter` the user simply has to derive from `nova.scheduler.filters.AbstractHostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of available hosts is in the `host_list` parameter passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be. By default, it is the capabilities reported by the host.
|
||||
|
||||
Cost Scheduler Weighing
|
||||
-----------------------
|
||||
Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled.
|
||||
Every `BaseScheduler` subclass should also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `BaseScheduler` when all the results have been assembled.
|
||||
|
||||
Simple Zone Aware Scheduling
|
||||
Simple Scheduling Across Zones
|
||||
----------------------------
|
||||
The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
|
||||
The `BaseScheduler` uses the default `filter_hosts` method, which will use either any filters specified in the request's `filter` parameter, or, if that is not specified, the filters specified in the `FLAGS.default_host_filters` setting. Its `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
|
||||
|
||||
The `--scheduler_driver` flag is how you specify the scheduler class name.
|
||||
|
||||
@ -168,14 +166,14 @@ All this Zone and Distributed Scheduler stuff can seem a little daunting to conf
|
||||
--enable_zone_routing=true
|
||||
--zone_name=zone1
|
||||
--build_plan_encryption_key=c286696d887c9aa0611bbb3e2025a45b
|
||||
--scheduler_driver=nova.scheduler.host_filter.HostFilterScheduler
|
||||
--default_host_filter=nova.scheduler.host_filter.AllHostsFilter
|
||||
--scheduler_driver=nova.scheduler.base_scheduler.BaseScheduler
|
||||
--default_host_filter=nova.scheduler.filters.AllHostsFilter
|
||||
|
||||
`--allow_admin_api` must be set for OS API to enable the new `/zones/*` commands.
|
||||
`--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances.
|
||||
`--zone_name` is only required in child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue.
|
||||
`build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys.
|
||||
`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler`.
|
||||
`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.base_scheduler.BaseScheduler`.
|
||||
`default_host_filter` is the host filter to be used for filtering candidate Compute nodes.
|
||||
|
||||
Some optional flags which are handy for debugging are:
|
||||
|
BIN
doc/source/images/base_scheduler.png
Normal file
BIN
doc/source/images/base_scheduler.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 17 KiB |
BIN
doc/source/images/zone_overview.png
Executable file
BIN
doc/source/images/zone_overview.png
Executable file
Binary file not shown.
After Width: | Height: | Size: 50 KiB |
@ -164,5 +164,5 @@ def allowed_injected_file_path_bytes(context):
|
||||
|
||||
|
||||
class QuotaError(exception.ApiError):
|
||||
"""Quota Exceeeded."""
|
||||
"""Quota Exceeded."""
|
||||
pass
|
||||
|
@ -21,5 +21,7 @@
|
||||
.. automodule:: nova.scheduler
|
||||
:platform: Unix
|
||||
:synopsis: Module that picks a compute node to run a VM instance.
|
||||
.. moduleauthor:: Sandy Walsh <sandy.walsh@rackspace.com>
|
||||
.. moduleauthor:: Ed Leafe <ed@leafe.com>
|
||||
.. moduleauthor:: Chris Behrens <cbehrens@codestud.com>
|
||||
"""
|
||||
|
@ -14,10 +14,10 @@
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
The AbsractScheduler is a base class Scheduler for creating instances
|
||||
across zones. There are two expansion points to this class for:
|
||||
1. Assigning Weights to hosts for requested instances
|
||||
2. Filtering Hosts based on required instance capabilities
|
||||
The AbsractScheduler is an abstract class Scheduler for creating instances
|
||||
locally or across zones. Two methods should be overridden in order to
|
||||
customize the behavior: filter_hosts() and weigh_hosts(). The default
|
||||
behavior is to simply select all hosts and weight them the same.
|
||||
"""
|
||||
|
||||
import operator
|
||||
@ -45,44 +45,44 @@ LOG = logging.getLogger('nova.scheduler.abstract_scheduler')
|
||||
|
||||
class InvalidBlob(exception.NovaException):
|
||||
message = _("Ill-formed or incorrectly routed 'blob' data sent "
|
||||
"to instance create request.")
|
||||
"to instance create request.")
|
||||
|
||||
|
||||
class AbstractScheduler(driver.Scheduler):
|
||||
"""Base class for creating Schedulers that can work across any nova
|
||||
deployment, from simple designs to multiply-nested zones.
|
||||
"""
|
||||
|
||||
def _call_zone_method(self, context, method, specs, zones):
|
||||
"""Call novaclient zone method. Broken out for testing."""
|
||||
return api.call_zone_method(context, method, specs=specs, zones=zones)
|
||||
|
||||
def _provision_resource_locally(self, context, build_plan_item,
|
||||
request_spec, kwargs):
|
||||
request_spec, kwargs):
|
||||
"""Create the requested resource in this Zone."""
|
||||
host = build_plan_item['hostname']
|
||||
base_options = request_spec['instance_properties']
|
||||
image = request_spec['image']
|
||||
instance_type = request_spec.get('instance_type')
|
||||
|
||||
# TODO(sandy): I guess someone needs to add block_device_mapping
|
||||
# support at some point? Also, OS API has no concept of security
|
||||
# groups.
|
||||
instance = compute_api.API().create_db_entry_for_new_instance(context,
|
||||
image, base_options, None, [])
|
||||
instance_type, image, base_options, None, [])
|
||||
|
||||
instance_id = instance['id']
|
||||
kwargs['instance_id'] = instance_id
|
||||
|
||||
rpc.cast(context,
|
||||
db.queue_get_for(context, "compute", host),
|
||||
{"method": "run_instance",
|
||||
"args": kwargs})
|
||||
queue = db.queue_get_for(context, "compute", host)
|
||||
params = {"method": "run_instance", "args": kwargs}
|
||||
rpc.cast(context, queue, params)
|
||||
LOG.debug(_("Provisioning locally via compute node %(host)s")
|
||||
% locals())
|
||||
% locals())
|
||||
|
||||
def _decrypt_blob(self, blob):
|
||||
"""Returns the decrypted blob or None if invalid. Broken out
|
||||
for testing."""
|
||||
for testing.
|
||||
"""
|
||||
decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
|
||||
try:
|
||||
json_entry = decryptor(blob)
|
||||
@ -92,15 +92,15 @@ class AbstractScheduler(driver.Scheduler):
|
||||
return None
|
||||
|
||||
def _ask_child_zone_to_create_instance(self, context, zone_info,
|
||||
request_spec, kwargs):
|
||||
request_spec, kwargs):
|
||||
"""Once we have determined that the request should go to one
|
||||
of our children, we need to fabricate a new POST /servers/
|
||||
call with the same parameters that were passed into us.
|
||||
|
||||
Note that we have to reverse engineer from our args to get back the
|
||||
image, flavor, ipgroup, etc. since the original call could have
|
||||
come in from EC2 (which doesn't use these things)."""
|
||||
|
||||
come in from EC2 (which doesn't use these things).
|
||||
"""
|
||||
instance_type = request_spec['instance_type']
|
||||
instance_properties = request_spec['instance_properties']
|
||||
|
||||
@ -109,30 +109,26 @@ class AbstractScheduler(driver.Scheduler):
|
||||
meta = instance_properties['metadata']
|
||||
flavor_id = instance_type['flavorid']
|
||||
reservation_id = instance_properties['reservation_id']
|
||||
|
||||
files = kwargs['injected_files']
|
||||
ipgroup = None # Not supported in OS API ... yet
|
||||
|
||||
child_zone = zone_info['child_zone']
|
||||
child_blob = zone_info['child_blob']
|
||||
zone = db.zone_get(context, child_zone)
|
||||
url = zone.api_url
|
||||
LOG.debug(_("Forwarding instance create call to child zone %(url)s"
|
||||
". ReservationID=%(reservation_id)s")
|
||||
% locals())
|
||||
". ReservationID=%(reservation_id)s") % locals())
|
||||
nova = None
|
||||
try:
|
||||
nova = novaclient.Client(zone.username, zone.password, None, url)
|
||||
nova.authenticate()
|
||||
except novaclient_exceptions.BadRequest, e:
|
||||
raise exception.NotAuthorized(_("Bad credentials attempting "
|
||||
"to talk to zone at %(url)s.") % locals())
|
||||
|
||||
"to talk to zone at %(url)s.") % locals())
|
||||
nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
|
||||
child_blob, reservation_id=reservation_id)
|
||||
child_blob, reservation_id=reservation_id)
|
||||
|
||||
def _provision_resource_from_blob(self, context, build_plan_item,
|
||||
instance_id, request_spec, kwargs):
|
||||
instance_id, request_spec, kwargs):
|
||||
"""Create the requested resource locally or in a child zone
|
||||
based on what is stored in the zone blob info.
|
||||
|
||||
@ -145,8 +141,8 @@ class AbstractScheduler(driver.Scheduler):
|
||||
means we gathered the info from one of our children.
|
||||
It's possible that, when we decrypt the 'blob' field, it
|
||||
contains "child_blob" data. In which case we forward the
|
||||
request."""
|
||||
|
||||
request.
|
||||
"""
|
||||
host_info = None
|
||||
if "blob" in build_plan_item:
|
||||
# Request was passed in from above. Is it for us?
|
||||
@ -161,21 +157,20 @@ class AbstractScheduler(driver.Scheduler):
|
||||
# Valid data ... is it for us?
|
||||
if 'child_zone' in host_info and 'child_blob' in host_info:
|
||||
self._ask_child_zone_to_create_instance(context, host_info,
|
||||
request_spec, kwargs)
|
||||
request_spec, kwargs)
|
||||
else:
|
||||
self._provision_resource_locally(context, host_info, request_spec,
|
||||
kwargs)
|
||||
kwargs)
|
||||
|
||||
def _provision_resource(self, context, build_plan_item, instance_id,
|
||||
request_spec, kwargs):
|
||||
request_spec, kwargs):
|
||||
"""Create the requested resource in this Zone or a child zone."""
|
||||
if "hostname" in build_plan_item:
|
||||
self._provision_resource_locally(context, build_plan_item,
|
||||
request_spec, kwargs)
|
||||
request_spec, kwargs)
|
||||
return
|
||||
|
||||
self._provision_resource_from_blob(context, build_plan_item,
|
||||
instance_id, request_spec, kwargs)
|
||||
instance_id, request_spec, kwargs)
|
||||
|
||||
def _adjust_child_weights(self, child_results, zones):
|
||||
"""Apply the Scale and Offset values from the Zone definition
|
||||
@ -185,13 +180,11 @@ class AbstractScheduler(driver.Scheduler):
|
||||
for zone_id, result in child_results:
|
||||
if not result:
|
||||
continue
|
||||
|
||||
assert isinstance(zone_id, int)
|
||||
|
||||
for zone_rec in zones:
|
||||
if zone_rec['id'] != zone_id:
|
||||
continue
|
||||
|
||||
for item in result:
|
||||
try:
|
||||
offset = zone_rec['weight_offset']
|
||||
@ -202,10 +195,10 @@ class AbstractScheduler(driver.Scheduler):
|
||||
item['raw_weight'] = raw_weight
|
||||
except KeyError:
|
||||
LOG.exception(_("Bad child zone scaling values "
|
||||
"for Zone: %(zone_id)s") % locals())
|
||||
"for Zone: %(zone_id)s") % locals())
|
||||
|
||||
def schedule_run_instance(self, context, instance_id, request_spec,
|
||||
*args, **kwargs):
|
||||
*args, **kwargs):
|
||||
"""This method is called from nova.compute.api to provision
|
||||
an instance. However we need to look at the parameters being
|
||||
passed in to see if this is a request to:
|
||||
@ -214,13 +207,11 @@ class AbstractScheduler(driver.Scheduler):
|
||||
to simply create the instance (either in this zone or
|
||||
a child zone).
|
||||
"""
|
||||
|
||||
# TODO(sandy): We'll have to look for richer specs at some point.
|
||||
|
||||
blob = request_spec.get('blob')
|
||||
if blob:
|
||||
self._provision_resource(context, request_spec, instance_id,
|
||||
request_spec, kwargs)
|
||||
request_spec, kwargs)
|
||||
return None
|
||||
|
||||
num_instances = request_spec.get('num_instances', 1)
|
||||
@ -235,10 +226,9 @@ class AbstractScheduler(driver.Scheduler):
|
||||
for num in xrange(num_instances):
|
||||
if not build_plan:
|
||||
break
|
||||
|
||||
build_plan_item = build_plan.pop(0)
|
||||
self._provision_resource(context, build_plan_item, instance_id,
|
||||
request_spec, kwargs)
|
||||
request_spec, kwargs)
|
||||
|
||||
# Returning None short-circuits the routing to Compute (since
|
||||
# we've already done it here)
|
||||
@ -251,58 +241,44 @@ class AbstractScheduler(driver.Scheduler):
|
||||
anything about the children.
|
||||
"""
|
||||
return self._schedule(context, "compute", request_spec,
|
||||
*args, **kwargs)
|
||||
*args, **kwargs)
|
||||
|
||||
# TODO(sandy): We're only focused on compute instances right now,
|
||||
# so we don't implement the default "schedule()" method required
|
||||
# of Schedulers.
|
||||
def schedule(self, context, topic, request_spec, *args, **kwargs):
|
||||
"""The schedule() contract requires we return the one
|
||||
best-suited host for this request.
|
||||
"""
|
||||
raise driver.NoValidHost(_('No hosts were available'))
|
||||
# TODO(sandy): We're only focused on compute instances right now,
|
||||
# so we don't implement the default "schedule()" method required
|
||||
# of Schedulers.
|
||||
msg = _("No host selection for %s defined." % topic)
|
||||
raise driver.NoValidHost(msg)
|
||||
|
||||
def _schedule(self, context, topic, request_spec, *args, **kwargs):
|
||||
"""Returns a list of hosts that meet the required specs,
|
||||
ordered by their fitness.
|
||||
"""
|
||||
|
||||
if topic != "compute":
|
||||
raise NotImplementedError(_("Scheduler only understands"
|
||||
" Compute nodes (for now)"))
|
||||
msg = _("Scheduler only understands Compute nodes (for now)")
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
num_instances = request_spec.get('num_instances', 1)
|
||||
instance_type = request_spec['instance_type']
|
||||
# Get all available hosts.
|
||||
all_hosts = self.zone_manager.service_states.iteritems()
|
||||
unfiltered_hosts = [(host, services[topic])
|
||||
for host, services in all_hosts
|
||||
if topic in services]
|
||||
|
||||
weighted = []
|
||||
host_list = None
|
||||
# Filter local hosts based on requirements ...
|
||||
filtered_hosts = self.filter_hosts(topic, request_spec,
|
||||
unfiltered_hosts)
|
||||
if not filtered_hosts:
|
||||
LOG.warn(_("No hosts available"))
|
||||
return []
|
||||
|
||||
for i in xrange(num_instances):
|
||||
# Filter local hosts based on requirements ...
|
||||
#
|
||||
# The first pass through here will pass 'None' as the
|
||||
# host_list.. which tells the filter to build the full
|
||||
# list of hosts.
|
||||
# On a 2nd pass, the filter can modify the host_list with
|
||||
# any updates it needs to make based on resources that
|
||||
# may have been consumed from a previous build..
|
||||
host_list = self.filter_hosts(topic, request_spec, host_list)
|
||||
if not host_list:
|
||||
LOG.warn(_("Filter returned no hosts after processing "
|
||||
"%(i)d of %(num_instances)d instances") % locals())
|
||||
break
|
||||
|
||||
# then weigh the selected hosts.
|
||||
# weighted = [{weight=weight, hostname=hostname,
|
||||
# capabilities=capabs}, ...]
|
||||
weights = self.weigh_hosts(topic, request_spec, host_list)
|
||||
weights.sort(key=operator.itemgetter('weight'))
|
||||
best_weight = weights[0]
|
||||
weighted.append(best_weight)
|
||||
self.consume_resources(topic, best_weight['capabilities'],
|
||||
instance_type)
|
||||
|
||||
# Next, tack on the best weights from the child zones ...
|
||||
# weigh the selected hosts.
|
||||
# weighted_hosts = [{weight=weight, hostname=hostname,
|
||||
# capabilities=capabs}, ...]
|
||||
weighted_hosts = self.weigh_hosts(topic, request_spec, filtered_hosts)
|
||||
# Next, tack on the host weights from the child zones
|
||||
json_spec = json.dumps(request_spec)
|
||||
all_zones = db.zone_get_all(context)
|
||||
child_results = self._call_zone_method(context, "select",
|
||||
@ -314,90 +290,32 @@ class AbstractScheduler(driver.Scheduler):
|
||||
# it later if needed. This implicitly builds a zone
|
||||
# path structure.
|
||||
host_dict = {"weight": weighting["weight"],
|
||||
"child_zone": child_zone,
|
||||
"child_blob": weighting["blob"]}
|
||||
weighted.append(host_dict)
|
||||
"child_zone": child_zone,
|
||||
"child_blob": weighting["blob"]}
|
||||
weighted_hosts.append(host_dict)
|
||||
weighted_hosts.sort(key=operator.itemgetter('weight'))
|
||||
return weighted_hosts
|
||||
|
||||
weighted.sort(key=operator.itemgetter('weight'))
|
||||
return weighted
|
||||
def filter_hosts(self, topic, request_spec, host_list):
|
||||
"""Filter the full host list returned from the ZoneManager. By default,
|
||||
this method only applies the basic_ram_filter(), meaning all hosts
|
||||
with at least enough RAM for the requested instance are returned.
|
||||
|
||||
def compute_filter(self, hostname, capabilities, request_spec):
|
||||
"""Return whether or not we can schedule to this compute node.
|
||||
Derived classes should override this and return True if the host
|
||||
is acceptable for scheduling.
|
||||
Override in subclasses to provide greater selectivity.
|
||||
"""
|
||||
instance_type = request_spec['instance_type']
|
||||
requested_mem = instance_type['memory_mb'] * 1024 * 1024
|
||||
return capabilities['host_memory_free'] >= requested_mem
|
||||
def basic_ram_filter(hostname, capabilities, request_spec):
|
||||
"""Only return hosts with sufficient available RAM."""
|
||||
instance_type = request_spec['instance_type']
|
||||
requested_mem = instance_type['memory_mb'] * 1024 * 1024
|
||||
return capabilities['host_memory_free'] >= requested_mem
|
||||
|
||||
def hold_filter_hosts(self, topic, request_spec, hosts=None):
|
||||
"""Filter the full host list (from the ZoneManager)"""
|
||||
# NOTE(dabo): The logic used by the current _schedule() method
|
||||
# is incorrect. Since this task is just to refactor the classes,
|
||||
# I'm not fixing the logic now - that will be the next task.
|
||||
# So for now this method is just renamed; afterwards this will
|
||||
# become the filter_hosts() method, and the one below will
|
||||
# be removed.
|
||||
filter_name = request_spec.get('filter', None)
|
||||
# Make sure that the requested filter is legitimate.
|
||||
selected_filter = host_filter.choose_host_filter(filter_name)
|
||||
|
||||
# TODO(sandy): We're only using InstanceType-based specs
|
||||
# currently. Later we'll need to snoop for more detailed
|
||||
# host filter requests.
|
||||
instance_type = request_spec['instance_type']
|
||||
name, query = selected_filter.instance_type_to_filter(instance_type)
|
||||
return selected_filter.filter_hosts(self.zone_manager, query)
|
||||
|
||||
def filter_hosts(self, topic, request_spec, host_list=None):
|
||||
"""Return a list of hosts which are acceptable for scheduling.
|
||||
Return value should be a list of (hostname, capability_dict)s.
|
||||
Derived classes may override this, but may find the
|
||||
'<topic>_filter' function more appropriate.
|
||||
"""
|
||||
def _default_filter(self, hostname, capabilities, request_spec):
|
||||
"""Default filter function if there's no <topic>_filter"""
|
||||
# NOTE(sirp): The default logic is the equivalent to
|
||||
# AllHostsFilter
|
||||
return True
|
||||
|
||||
filter_func = getattr(self, '%s_filter' % topic, _default_filter)
|
||||
|
||||
if host_list is None:
|
||||
first_run = True
|
||||
host_list = self.zone_manager.service_states.iteritems()
|
||||
else:
|
||||
first_run = False
|
||||
|
||||
filtered_hosts = []
|
||||
for host, services in host_list:
|
||||
if first_run:
|
||||
if topic not in services:
|
||||
continue
|
||||
services = services[topic]
|
||||
if filter_func(host, services, request_spec):
|
||||
filtered_hosts.append((host, services))
|
||||
return filtered_hosts
|
||||
return [(host, services) for host, services in host_list
|
||||
if basic_ram_filter(host, services, request_spec)]
|
||||
|
||||
def weigh_hosts(self, topic, request_spec, hosts):
|
||||
"""Derived classes may override this to provide more sophisticated
|
||||
scheduling objectives
|
||||
"""This version assigns a weight of 1 to all hosts, making selection
|
||||
of any host basically a random event. Override this method in your
|
||||
subclass to add logic to prefer one potential host over another.
|
||||
"""
|
||||
# NOTE(sirp): The default logic is the same as the NoopCostFunction
|
||||
return [dict(weight=1, hostname=hostname, capabilities=capabilities)
|
||||
for hostname, capabilities in hosts]
|
||||
|
||||
def compute_consume(self, capabilities, instance_type):
|
||||
"""Consume compute resources for selected host"""
|
||||
|
||||
requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
|
||||
capabilities['host_memory_free'] -= requested_mem
|
||||
|
||||
def consume_resources(self, topic, capabilities, instance_type):
|
||||
"""Consume resources for a specific host. 'host' is a tuple
|
||||
of the hostname and the services"""
|
||||
|
||||
consume_func = getattr(self, '%s_consume' % topic, None)
|
||||
if not consume_func:
|
||||
return
|
||||
consume_func(capabilities, instance_type)
|
||||
|
59
nova/scheduler/base_scheduler.py
Normal file
59
nova/scheduler/base_scheduler.py
Normal file
@ -0,0 +1,59 @@
|
||||
# Copyright (c) 2011 Openstack, LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
The BaseScheduler is the base class Scheduler for creating instances
|
||||
across zones. There are two expansion points to this class for:
|
||||
1. Assigning Weights to hosts for requested instances
|
||||
2. Filtering Hosts based on required instance capabilities
|
||||
"""
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
|
||||
from nova.scheduler import abstract_scheduler
|
||||
from nova.scheduler import host_filter
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.scheduler.base_scheduler')
|
||||
|
||||
|
||||
class BaseScheduler(abstract_scheduler.AbstractScheduler):
|
||||
"""Base class for creating Schedulers that can work across any nova
|
||||
deployment, from simple designs to multiply-nested zones.
|
||||
"""
|
||||
def filter_hosts(self, topic, request_spec, hosts=None):
|
||||
"""Filter the full host list (from the ZoneManager)"""
|
||||
filter_name = request_spec.get('filter', None)
|
||||
# Make sure that the requested filter is legitimate.
|
||||
selected_filter = host_filter.choose_host_filter(filter_name)
|
||||
|
||||
# TODO(sandy): We're only using InstanceType-based specs
|
||||
# currently. Later we'll need to snoop for more detailed
|
||||
# host filter requests.
|
||||
instance_type = request_spec.get("instance_type", None)
|
||||
if instance_type is None:
|
||||
# No way to select; return the specified hosts
|
||||
return hosts or []
|
||||
name, query = selected_filter.instance_type_to_filter(instance_type)
|
||||
return selected_filter.filter_hosts(self.zone_manager, query)
|
||||
|
||||
def weigh_hosts(self, topic, request_spec, hosts):
|
||||
"""Derived classes may override this to provide more sophisticated
|
||||
scheduling objectives
|
||||
"""
|
||||
# NOTE(sirp): The default logic is the same as the NoopCostFunction
|
||||
return [dict(weight=1, hostname=hostname, capabilities=capabilities)
|
||||
for hostname, capabilities in hosts]
|
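A rough standalone sketch of what BaseScheduler.filter_hosts does with an incoming request spec; pick_hosts and its arguments are illustrative wrappers, not new API:

from nova.scheduler import host_filter


def pick_hosts(zone_manager, request_spec):
    """Sketch of the filter_hosts() flow above for a compute request."""
    selected_filter = host_filter.choose_host_filter(
        request_spec.get('filter'))
    instance_type = request_spec.get('instance_type')
    if instance_type is None:
        # Nothing to select on; the scheduler falls back to the given hosts.
        return []
    _name, query = selected_filter.instance_type_to_filter(instance_type)
    return selected_filter.filter_hosts(zone_manager, query)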
36
nova/scheduler/filters/__init__.py
Normal file
@ -0,0 +1,36 @@
|
||||
# Copyright (c) 2011 Openstack, LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
There are three filters included: AllHosts, InstanceType & JSON.
|
||||
|
||||
AllHosts just returns the full, unfiltered list of hosts.
|
||||
InstanceType is a hard coded matching mechanism based on flavor criteria.
|
||||
JSON is an ad-hoc filter grammar.
|
||||
|
||||
Why JSON? The requests for instances may come in through the
|
||||
REST interface from a user or a parent Zone.
|
||||
Currently InstanceTypes are used for specifying the type of instance desired.
Specific Nova users have noted a need for a more expressive way of specifying
instance requirements. Since we don't want to get into building a full DSL,
this filter is a simple form offered as an example of how this could be done.
In reality, most consumers will use the more rigid filters such as the
InstanceType filter.
|
||||
"""
|
||||
|
||||
from abstract_filter import AbstractHostFilter
|
||||
from all_hosts_filter import AllHostsFilter
|
||||
from instance_type_filter import InstanceTypeFilter
|
||||
from json_filter import JsonFilter
|
37
nova/scheduler/filters/abstract_filter.py
Normal file
@ -0,0 +1,37 @@
|
||||
# Copyright (c) 2011 Openstack, LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import nova.scheduler
|
||||
from nova import flags
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_string('default_host_filter', 'AllHostsFilter',
|
||||
'Which filter to use for filtering hosts')
|
||||
|
||||
|
||||
class AbstractHostFilter(object):
|
||||
"""Base class for host filters."""
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Convert instance_type into a filter for most common use-case."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def filter_hosts(self, zone_manager, query):
|
||||
"""Return a list of hosts that fulfill the filter."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def _full_name(self):
|
||||
"""module.classname of the filter."""
|
||||
return "%s.%s" % (self.__module__, self.__class__.__name__)
|
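A new filter only needs to implement these two methods and be exported from nova/scheduler/filters/__init__.py so that choose_host_filter() can discover it. A sketch of a hypothetical filter; the 'allowed_projects' capability and the class itself are invented for illustration:

from nova.scheduler.filters import abstract_filter


class ProjectAffinityFilter(abstract_filter.AbstractHostFilter):
    """Hypothetical filter keeping hosts tagged for the requesting project."""

    def instance_type_to_filter(self, instance_type):
        return (self._full_name(), instance_type)

    def filter_hosts(self, zone_manager, query):
        project_id = query.get('project_id')
        return [(host, services)
                for host, services in zone_manager.service_states.items()
                if project_id in services.get('compute', {}).get(
                    'allowed_projects', [])]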
32
nova/scheduler/filters/all_hosts_filter.py
Normal file
@ -0,0 +1,32 @@
|
||||
# Copyright (c) 2011 Openstack, LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import nova.scheduler
|
||||
from nova.scheduler.filters import abstract_filter
|
||||
|
||||
|
||||
class AllHostsFilter(abstract_filter.AbstractHostFilter):
|
||||
"""NOP host filter. Returns all hosts in ZoneManager."""
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Return anything to prevent base-class from raising
|
||||
exception.
|
||||
"""
|
||||
return (self._full_name(), instance_type)
|
||||
|
||||
def filter_hosts(self, zone_manager, query):
|
||||
"""Return a list of hosts from ZoneManager list."""
|
||||
return [(host, services)
|
||||
for host, services in zone_manager.service_states.iteritems()]
|
87
nova/scheduler/filters/instance_type_filter.py
Normal file
@ -0,0 +1,87 @@
|
||||
# Copyright (c) 2011 Openstack, LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import nova.scheduler
|
||||
from nova.scheduler.filters import abstract_filter
|
||||
|
||||
|
||||
class InstanceTypeFilter(abstract_filter.AbstractHostFilter):
|
||||
"""HostFilter hard-coded to work with InstanceType records."""
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Use instance_type to filter hosts."""
|
||||
return (self._full_name(), instance_type)
|
||||
|
||||
def _satisfies_extra_specs(self, capabilities, instance_type):
|
||||
"""Check that the capabilities provided by the compute service
|
||||
satisfy the extra specs associated with the instance type"""
|
||||
if 'extra_specs' not in instance_type:
|
||||
return True
|
||||
# NOTE(lorinh): For now, we are just checking exact matching on the
|
||||
# values. Later on, we want to handle numerical
|
||||
# values so we can represent things like number of GPU cards
|
||||
try:
|
||||
for key, value in instance_type['extra_specs'].iteritems():
|
||||
if capabilities[key] != value:
|
||||
return False
|
||||
except KeyError:
|
||||
return False
|
||||
return True
|
||||
|
||||
def filter_hosts(self, zone_manager, query):
|
||||
"""Return a list of hosts that can create instance_type."""
|
||||
instance_type = query
|
||||
selected_hosts = []
|
||||
for host, services in zone_manager.service_states.iteritems():
|
||||
capabilities = services.get('compute', {})
|
||||
if not capabilities:
|
||||
continue
|
||||
host_ram_mb = capabilities['host_memory_free']
|
||||
disk_bytes = capabilities['disk_available']
|
||||
spec_ram = instance_type['memory_mb']
|
||||
spec_disk = instance_type['local_gb']
|
||||
extra_specs = instance_type['extra_specs']
|
||||
|
||||
if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and
|
||||
self._satisfies_extra_specs(capabilities, instance_type)):
|
||||
selected_hosts.append((host, capabilities))
|
||||
return selected_hosts
|
||||
|
||||
|
||||
# host entries (currently) are like:
|
||||
# {'host_name-description': 'Default install of XenServer',
|
||||
# 'host_hostname': 'xs-mini',
|
||||
# 'host_memory_total': 8244539392,
|
||||
# 'host_memory_overhead': 184225792,
|
||||
# 'host_memory_free': 3868327936,
|
||||
# 'host_memory_free_computed': 3840843776,
|
||||
# 'host_other_config': {},
|
||||
# 'host_ip_address': '192.168.1.109',
|
||||
# 'host_cpu_info': {},
|
||||
# 'disk_available': 32954957824,
|
||||
# 'disk_total': 50394562560,
|
||||
# 'disk_used': 17439604736,
|
||||
# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
|
||||
# 'host_name_label': 'xs-mini'}
|
||||
|
||||
# instance_type table has:
|
||||
# name = Column(String(255), unique=True)
|
||||
# memory_mb = Column(Integer)
|
||||
# vcpus = Column(Integer)
|
||||
# local_gb = Column(Integer)
|
||||
# flavorid = Column(Integer, unique=True)
|
||||
# swap = Column(Integer, nullable=False, default=0)
|
||||
# rxtx_quota = Column(Integer, nullable=False, default=0)
|
||||
# rxtx_cap = Column(Integer, nullable=False, default=0)
|
146
nova/scheduler/filters/json_filter.py
Normal file
@ -0,0 +1,146 @@
|
||||
# Copyright (c) 2011 Openstack, LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import json
|
||||
import operator
|
||||
|
||||
import nova.scheduler
|
||||
from nova.scheduler.filters import abstract_filter
|
||||
|
||||
|
||||
class JsonFilter(abstract_filter.AbstractHostFilter):
|
||||
"""Host Filter to allow simple JSON-based grammar for
|
||||
selecting hosts.
|
||||
"""
|
||||
def _op_compare(self, args, op):
|
||||
"""Returns True if the specified operator can successfully
|
||||
compare the first item in the args with all the rest. Will
|
||||
return False if only one item is in the list.
|
||||
"""
|
||||
if len(args) < 2:
|
||||
return False
|
||||
if op is operator.contains:
|
||||
bad = not args[0] in args[1:]
|
||||
else:
|
||||
bad = [arg for arg in args[1:]
|
||||
if not op(args[0], arg)]
|
||||
return not bool(bad)
|
||||
|
||||
def _equals(self, args):
|
||||
"""First term is == all the other terms."""
|
||||
return self._op_compare(args, operator.eq)
|
||||
|
||||
def _less_than(self, args):
|
||||
"""First term is < all the other terms."""
|
||||
return self._op_compare(args, operator.lt)
|
||||
|
||||
def _greater_than(self, args):
|
||||
"""First term is > all the other terms."""
|
||||
return self._op_compare(args, operator.gt)
|
||||
|
||||
def _in(self, args):
|
||||
"""First term is in set of remaining terms"""
|
||||
return self._op_compare(args, operator.contains)
|
||||
|
||||
def _less_than_equal(self, args):
|
||||
"""First term is <= all the other terms."""
|
||||
return self._op_compare(args, operator.le)
|
||||
|
||||
def _greater_than_equal(self, args):
|
||||
"""First term is >= all the other terms."""
|
||||
return self._op_compare(args, operator.ge)
|
||||
|
||||
def _not(self, args):
|
||||
"""Flip each of the arguments."""
|
||||
return [not arg for arg in args]
|
||||
|
||||
def _or(self, args):
|
||||
"""True if any arg is True."""
|
||||
return any(args)
|
||||
|
||||
def _and(self, args):
|
||||
"""True if all args are True."""
|
||||
return all(args)
|
||||
|
||||
commands = {
|
||||
'=': _equals,
|
||||
'<': _less_than,
|
||||
'>': _greater_than,
|
||||
'in': _in,
|
||||
'<=': _less_than_equal,
|
||||
'>=': _greater_than_equal,
|
||||
'not': _not,
|
||||
'or': _or,
|
||||
'and': _and,
|
||||
}
|
||||
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Convert instance_type into JSON filter object."""
|
||||
required_ram = instance_type['memory_mb']
|
||||
required_disk = instance_type['local_gb']
|
||||
query = ['and',
|
||||
['>=', '$compute.host_memory_free', required_ram],
|
||||
['>=', '$compute.disk_available', required_disk]]
|
||||
return (self._full_name(), json.dumps(query))
|
||||
|
||||
def _parse_string(self, string, host, services):
|
||||
"""Strings prefixed with $ are capability lookups in the
|
||||
form '$service.capability[.subcap*]'.
|
||||
"""
|
||||
if not string:
|
||||
return None
|
||||
if not string.startswith("$"):
|
||||
return string
|
||||
|
||||
path = string[1:].split(".")
|
||||
for item in path:
|
||||
services = services.get(item, None)
|
||||
if not services:
|
||||
return None
|
||||
return services
|
||||
|
||||
def _process_filter(self, zone_manager, query, host, services):
|
||||
"""Recursively parse the query structure."""
|
||||
if not query:
|
||||
return True
|
||||
cmd = query[0]
|
||||
method = self.commands[cmd]
|
||||
cooked_args = []
|
||||
for arg in query[1:]:
|
||||
if isinstance(arg, list):
|
||||
arg = self._process_filter(zone_manager, arg, host, services)
|
||||
elif isinstance(arg, basestring):
|
||||
arg = self._parse_string(arg, host, services)
|
||||
if arg is not None:
|
||||
cooked_args.append(arg)
|
||||
result = method(self, cooked_args)
|
||||
return result
|
||||
|
||||
def filter_hosts(self, zone_manager, query):
|
||||
"""Return a list of hosts that can fulfill the requirements
|
||||
specified in the query.
|
||||
"""
|
||||
expanded = json.loads(query)
|
||||
filtered_hosts = []
|
||||
for host, services in zone_manager.service_states.iteritems():
|
||||
result = self._process_filter(zone_manager, expanded, host,
|
||||
services)
|
||||
if isinstance(result, list):
|
||||
# If any succeeded, include the host
|
||||
result = any(result)
|
||||
if result:
|
||||
filtered_hosts.append((host, services))
|
||||
return filtered_hosts
|
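The grammar is nested lists whose first element names an operator from the commands table; '$service.capability' strings are resolved against each host's advertised state before the operator runs. A hand-written query might look like the following (host names and thresholds are made up):

import json

# Illustrative query: enough free ram and disk, or one of two named hosts.
query = json.dumps(
    ['or',
     ['and',
      ['>=', '$compute.host_memory_free', 1024],
      ['>=', '$compute.disk_available', 20]],
     ['in', '$compute.host_hostname', 'xs-mini', 'xs-backup']])

# hosts = JsonFilter().filter_hosts(zone_manager, query)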
@ -20,283 +20,33 @@ either incompatible or insufficient to accept a newly-requested instance
|
||||
are removed by Host Filter classes from consideration. Those that pass
|
||||
the filter are then passed on for weighting or other process for ordering.
|
||||
|
||||
Three filters are included: AllHosts, Flavor & JSON. AllHosts just
|
||||
returns the full, unfiltered list of hosts. Flavor is a hard coded
|
||||
matching mechanism based on flavor criteria and JSON is an ad-hoc
|
||||
filter grammar.
|
||||
|
||||
Why JSON? The requests for instances may come in through the
|
||||
REST interface from a user or a parent Zone.
|
||||
Currently Flavors and/or InstanceTypes are used for
specifying the type of instance desired. Specific Nova users have
noted a need for a more expressive way of specifying instances.
Since we don't want to get into building a full DSL this is a simple
form as an example of how this could be done. In reality, most
consumers will use the more rigid filters such as FlavorFilter.
|
||||
Filters are in the 'filters' directory that is off the 'scheduler'
|
||||
directory of nova. Additional filters can be created and added to that
|
||||
directory; be sure to add them to the filters/__init__.py file so that
|
||||
they are part of the nova.schedulers.filters namespace.
|
||||
"""
|
||||
|
||||
import json
|
||||
import types
|
||||
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import utils
|
||||
import nova.scheduler
|
||||
|
||||
LOG = logging.getLogger('nova.scheduler.host_filter')
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_string('default_host_filter',
|
||||
'nova.scheduler.host_filter.AllHostsFilter',
|
||||
'Which filter to use for filtering hosts.')
|
||||
|
||||
|
||||
class HostFilter(object):
|
||||
"""Base class for host filters."""
|
||||
def _get_filters():
|
||||
# Imported here to avoid circular imports
|
||||
from nova.scheduler import filters
|
||||
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Convert instance_type into a filter for most common use-case."""
|
||||
raise NotImplementedError()
|
||||
def get_itm(nm):
|
||||
return getattr(filters, nm)
|
||||
|
||||
def filter_hosts(self, zone_manager, query):
|
||||
"""Return a list of hosts that fulfill the filter."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def _full_name(self):
|
||||
"""module.classname of the filter."""
|
||||
return "%s.%s" % (self.__module__, self.__class__.__name__)
|
||||
|
||||
|
||||
class AllHostsFilter(HostFilter):
|
||||
""" NOP host filter. Returns all hosts in ZoneManager.
|
||||
This essentially does what the old Scheduler+Chance used
|
||||
to give us.
|
||||
"""
|
||||
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Return anything to prevent base-class from raising
|
||||
exception."""
|
||||
return (self._full_name(), instance_type)
|
||||
|
||||
def filter_hosts(self, zone_manager, query):
|
||||
"""Return a list of hosts from ZoneManager list."""
|
||||
return [(host, services)
|
||||
for host, services in zone_manager.service_states.iteritems()]
|
||||
|
||||
|
||||
class InstanceTypeFilter(HostFilter):
|
||||
"""HostFilter hard-coded to work with InstanceType records."""
|
||||
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Use instance_type to filter hosts."""
|
||||
return (self._full_name(), instance_type)
|
||||
|
||||
def _satisfies_extra_specs(self, capabilities, instance_type):
|
||||
"""Check that the capabilities provided by the compute service
|
||||
satisfy the extra specs associated with the instance type"""
|
||||
|
||||
if 'extra_specs' not in instance_type:
|
||||
return True
|
||||
|
||||
# Note(lorinh): For now, we are just checking exact matching on the
|
||||
# values. Later on, we want to handle numerical
|
||||
# values so we can represent things like number of GPU cards
|
||||
|
||||
try:
|
||||
for key, value in instance_type['extra_specs'].iteritems():
|
||||
if capabilities[key] != value:
|
||||
return False
|
||||
except KeyError:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def filter_hosts(self, zone_manager, query):
|
||||
"""Return a list of hosts that can create instance_type."""
|
||||
instance_type = query
|
||||
selected_hosts = []
|
||||
for host, services in zone_manager.service_states.iteritems():
|
||||
capabilities = services.get('compute', {})
|
||||
host_ram_mb = capabilities['host_memory_free']
|
||||
disk_bytes = capabilities['disk_available']
|
||||
spec_ram = instance_type['memory_mb']
|
||||
spec_disk = instance_type['local_gb']
|
||||
extra_specs = instance_type['extra_specs']
|
||||
|
||||
if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and
|
||||
self._satisfies_extra_specs(capabilities, instance_type)):
|
||||
selected_hosts.append((host, capabilities))
|
||||
return selected_hosts
|
||||
|
||||
#host entries (currently) are like:
|
||||
# {'host_name-description': 'Default install of XenServer',
|
||||
# 'host_hostname': 'xs-mini',
|
||||
# 'host_memory_total': 8244539392,
|
||||
# 'host_memory_overhead': 184225792,
|
||||
# 'host_memory_free': 3868327936,
|
||||
# 'host_memory_free_computed': 3840843776,
|
||||
# 'host_other_config': {},
|
||||
# 'host_ip_address': '192.168.1.109',
|
||||
# 'host_cpu_info': {},
|
||||
# 'disk_available': 32954957824,
|
||||
# 'disk_total': 50394562560,
|
||||
# 'disk_used': 17439604736,
|
||||
# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
|
||||
# 'host_name_label': 'xs-mini'}
|
||||
|
||||
# instance_type table has:
|
||||
#name = Column(String(255), unique=True)
|
||||
#memory_mb = Column(Integer)
|
||||
#vcpus = Column(Integer)
|
||||
#local_gb = Column(Integer)
|
||||
#flavorid = Column(Integer, unique=True)
|
||||
#swap = Column(Integer, nullable=False, default=0)
|
||||
#rxtx_quota = Column(Integer, nullable=False, default=0)
|
||||
#rxtx_cap = Column(Integer, nullable=False, default=0)
|
||||
|
||||
|
||||
class JsonFilter(HostFilter):
|
||||
"""Host Filter to allow simple JSON-based grammar for
|
||||
selecting hosts.
|
||||
"""
|
||||
|
||||
def _equals(self, args):
|
||||
"""First term is == all the other terms."""
|
||||
if len(args) < 2:
|
||||
return False
|
||||
lhs = args[0]
|
||||
for rhs in args[1:]:
|
||||
if lhs != rhs:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _less_than(self, args):
|
||||
"""First term is < all the other terms."""
|
||||
if len(args) < 2:
|
||||
return False
|
||||
lhs = args[0]
|
||||
for rhs in args[1:]:
|
||||
if lhs >= rhs:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _greater_than(self, args):
|
||||
"""First term is > all the other terms."""
|
||||
if len(args) < 2:
|
||||
return False
|
||||
lhs = args[0]
|
||||
for rhs in args[1:]:
|
||||
if lhs <= rhs:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _in(self, args):
|
||||
"""First term is in set of remaining terms"""
|
||||
if len(args) < 2:
|
||||
return False
|
||||
return args[0] in args[1:]
|
||||
|
||||
def _less_than_equal(self, args):
|
||||
"""First term is <= all the other terms."""
|
||||
if len(args) < 2:
|
||||
return False
|
||||
lhs = args[0]
|
||||
for rhs in args[1:]:
|
||||
if lhs > rhs:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _greater_than_equal(self, args):
|
||||
"""First term is >= all the other terms."""
|
||||
if len(args) < 2:
|
||||
return False
|
||||
lhs = args[0]
|
||||
for rhs in args[1:]:
|
||||
if lhs < rhs:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _not(self, args):
|
||||
"""Flip each of the arguments."""
|
||||
if len(args) == 0:
|
||||
return False
|
||||
return [not arg for arg in args]
|
||||
|
||||
def _or(self, args):
|
||||
"""True if any arg is True."""
|
||||
return True in args
|
||||
|
||||
def _and(self, args):
|
||||
"""True if all args are True."""
|
||||
return False not in args
|
||||
|
||||
commands = {
|
||||
'=': _equals,
|
||||
'<': _less_than,
|
||||
'>': _greater_than,
|
||||
'in': _in,
|
||||
'<=': _less_than_equal,
|
||||
'>=': _greater_than_equal,
|
||||
'not': _not,
|
||||
'or': _or,
|
||||
'and': _and,
|
||||
}
|
||||
|
||||
def instance_type_to_filter(self, instance_type):
|
||||
"""Convert instance_type into JSON filter object."""
|
||||
required_ram = instance_type['memory_mb']
|
||||
required_disk = instance_type['local_gb']
|
||||
query = ['and',
|
||||
['>=', '$compute.host_memory_free', required_ram],
|
||||
['>=', '$compute.disk_available', required_disk]]
|
||||
return (self._full_name(), json.dumps(query))
|
||||
|
||||
def _parse_string(self, string, host, services):
|
||||
"""Strings prefixed with $ are capability lookups in the
|
||||
form '$service.capability[.subcap*]'
|
||||
"""
|
||||
if not string:
|
||||
return None
|
||||
if string[0] != '$':
|
||||
return string
|
||||
|
||||
path = string[1:].split('.')
|
||||
for item in path:
|
||||
services = services.get(item, None)
|
||||
if not services:
|
||||
return None
|
||||
return services
|
||||
|
||||
def _process_filter(self, zone_manager, query, host, services):
|
||||
"""Recursively parse the query structure."""
|
||||
if len(query) == 0:
|
||||
return True
|
||||
cmd = query[0]
|
||||
method = self.commands[cmd] # Let exception fly.
|
||||
cooked_args = []
|
||||
for arg in query[1:]:
|
||||
if isinstance(arg, list):
|
||||
arg = self._process_filter(zone_manager, arg, host, services)
|
||||
elif isinstance(arg, basestring):
|
||||
arg = self._parse_string(arg, host, services)
|
||||
if arg != None:
|
||||
cooked_args.append(arg)
|
||||
result = method(self, cooked_args)
|
||||
return result
|
||||
|
||||
def filter_hosts(self, zone_manager, query):
|
||||
"""Return a list of hosts that can fulfill filter."""
|
||||
expanded = json.loads(query)
|
||||
hosts = []
|
||||
for host, services in zone_manager.service_states.iteritems():
|
||||
r = self._process_filter(zone_manager, expanded, host, services)
|
||||
if isinstance(r, list):
|
||||
r = True in r
|
||||
if r:
|
||||
hosts.append((host, services))
|
||||
return hosts
|
||||
|
||||
|
||||
FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter]
|
||||
return [get_itm(itm) for itm in dir(filters)
|
||||
if (type(get_itm(itm)) is types.TypeType)
|
||||
and issubclass(get_itm(itm), filters.AbstractHostFilter)
|
||||
and get_itm(itm) is not filters.AbstractHostFilter]
|
||||
|
||||
|
||||
def choose_host_filter(filter_name=None):
|
||||
@ -307,8 +57,7 @@ def choose_host_filter(filter_name=None):
|
||||
"""
|
||||
if not filter_name:
|
||||
filter_name = FLAGS.default_host_filter
|
||||
for filter_class in FILTERS:
|
||||
host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__)
|
||||
if host_match == filter_name:
|
||||
for filter_class in _get_filters():
|
||||
if filter_class.__name__ == filter_name:
|
||||
return filter_class()
|
||||
raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
|
||||
|
@ -22,14 +22,12 @@ The cost-function and weights are tabulated, and the host with the least cost
|
||||
is then selected for provisioning.
|
||||
"""
|
||||
|
||||
# TODO(dabo): This class will be removed in the next merge prop; it remains now
|
||||
# because much of the code will be refactored into different classes.
|
||||
|
||||
import collections
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova.scheduler import abstract_scheduler
|
||||
from nova.scheduler import base_scheduler
|
||||
from nova import utils
|
||||
from nova import exception
|
||||
|
||||
@ -37,14 +35,16 @@ LOG = logging.getLogger('nova.scheduler.least_cost')
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_list('least_cost_scheduler_cost_functions',
|
||||
['nova.scheduler.least_cost.noop_cost_fn'],
|
||||
'Which cost functions the LeastCostScheduler should use.')
|
||||
['nova.scheduler.least_cost.noop_cost_fn'],
|
||||
'Which cost functions the LeastCostScheduler should use.')
|
||||
|
||||
|
||||
# TODO(sirp): Once we have enough of these rules, we can break them out into a
|
||||
# cost_functions.py file (perhaps in a least_cost_scheduler directory)
|
||||
flags.DEFINE_integer('noop_cost_fn_weight', 1,
|
||||
'How much weight to give the noop cost function')
|
||||
'How much weight to give the noop cost function')
|
||||
flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
|
||||
'How much weight to give the fill-first cost function')
|
||||
|
||||
|
||||
def noop_cost_fn(host):
|
||||
@ -52,87 +52,20 @@ def noop_cost_fn(host):
|
||||
return 1
|
||||
|
||||
|
||||
flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
|
||||
'How much weight to give the fill-first cost function')
|
||||
|
||||
|
||||
def compute_fill_first_cost_fn(host):
|
||||
"""Prefer hosts that have less ram available, filter_hosts will exclude
|
||||
hosts that don't have enough ram"""
|
||||
hostname, caps = host
|
||||
free_mem = caps['host_memory_free']
|
||||
hosts that don't have enough ram.
|
||||
"""
|
||||
hostname, service = host
|
||||
caps = service.get("compute", {})
|
||||
free_mem = caps.get("host_memory_free", 0)
|
||||
return free_mem
|
||||
|
||||
|
||||
class LeastCostScheduler(abstract_scheduler.AbstractScheduler):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.cost_fns_cache = {}
|
||||
super(LeastCostScheduler, self).__init__(*args, **kwargs)
|
||||
|
||||
def get_cost_fns(self, topic):
|
||||
"""Returns a list of tuples containing weights and cost functions to
|
||||
use for weighing hosts
|
||||
"""
|
||||
|
||||
if topic in self.cost_fns_cache:
|
||||
return self.cost_fns_cache[topic]
|
||||
|
||||
cost_fns = []
|
||||
for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
|
||||
if '.' in cost_fn_str:
|
||||
short_name = cost_fn_str.split('.')[-1]
|
||||
else:
|
||||
short_name = cost_fn_str
|
||||
cost_fn_str = "%s.%s.%s" % (
|
||||
__name__, self.__class__.__name__, short_name)
|
||||
|
||||
if not (short_name.startswith('%s_' % topic) or
|
||||
short_name.startswith('noop')):
|
||||
continue
|
||||
|
||||
try:
|
||||
# NOTE(sirp): import_class is somewhat misnamed since it can
# import any callable from a module
|
||||
cost_fn = utils.import_class(cost_fn_str)
|
||||
except exception.ClassNotFound:
|
||||
raise exception.SchedulerCostFunctionNotFound(
|
||||
cost_fn_str=cost_fn_str)
|
||||
|
||||
try:
|
||||
flag_name = "%s_weight" % cost_fn.__name__
|
||||
weight = getattr(FLAGS, flag_name)
|
||||
except AttributeError:
|
||||
raise exception.SchedulerWeightFlagNotFound(
|
||||
flag_name=flag_name)
|
||||
|
||||
cost_fns.append((weight, cost_fn))
|
||||
|
||||
self.cost_fns_cache[topic] = cost_fns
|
||||
return cost_fns
|
||||
|
||||
def weigh_hosts(self, topic, request_spec, hosts):
|
||||
"""Returns a list of dictionaries of form:
|
||||
[ {weight: weight, hostname: hostname, capabilities: capabs} ]
|
||||
"""
|
||||
|
||||
cost_fns = self.get_cost_fns(topic)
|
||||
costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
|
||||
|
||||
weighted = []
|
||||
weight_log = []
|
||||
for cost, (hostname, caps) in zip(costs, hosts):
|
||||
weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
|
||||
weight_dict = dict(weight=cost, hostname=hostname,
|
||||
capabilities=caps)
|
||||
weighted.append(weight_dict)
|
||||
|
||||
LOG.debug(_("Weighted Costs => %s") % weight_log)
|
||||
return weighted
|
||||
|
||||
|
||||
def normalize_list(L):
|
||||
"""Normalize an array of numbers such that each element satisfies:
|
||||
0 <= e <= 1"""
|
||||
0 <= e <= 1
|
||||
"""
|
||||
if not L:
|
||||
return L
|
||||
max_ = max(L)
|
||||
@ -160,12 +93,10 @@ def weighted_sum(domain, weighted_fns, normalize=True):
|
||||
score_table = collections.defaultdict(list)
|
||||
for weight, fn in weighted_fns:
|
||||
scores = [fn(elem) for elem in domain]
|
||||
|
||||
if normalize:
|
||||
norm_scores = normalize_list(scores)
|
||||
else:
|
||||
norm_scores = scores
|
||||
|
||||
for idx, score in enumerate(norm_scores):
|
||||
weighted_score = score * weight
|
||||
score_table[idx].append(weighted_score)
|
||||
@ -175,5 +106,66 @@ def weighted_sum(domain, weighted_fns, normalize=True):
|
||||
for idx in sorted(score_table):
|
||||
elem_score = sum(score_table[idx])
|
||||
domain_scores.append(elem_score)
|
||||
|
||||
return domain_scores
|
||||
|
||||
|
||||
class LeastCostScheduler(base_scheduler.BaseScheduler):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.cost_fns_cache = {}
|
||||
super(LeastCostScheduler, self).__init__(*args, **kwargs)
|
||||
|
||||
def get_cost_fns(self, topic):
|
||||
"""Returns a list of tuples containing weights and cost functions to
|
||||
use for weighing hosts
|
||||
"""
|
||||
if topic in self.cost_fns_cache:
|
||||
return self.cost_fns_cache[topic]
|
||||
cost_fns = []
|
||||
for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
|
||||
if '.' in cost_fn_str:
|
||||
short_name = cost_fn_str.split('.')[-1]
|
||||
else:
|
||||
short_name = cost_fn_str
|
||||
cost_fn_str = "%s.%s.%s" % (
|
||||
__name__, self.__class__.__name__, short_name)
|
||||
if not (short_name.startswith('%s_' % topic) or
|
||||
short_name.startswith('noop')):
|
||||
continue
|
||||
|
||||
try:
|
||||
# NOTE(sirp): import_class is somewhat misnamed since it can
# import any callable from a module
|
||||
cost_fn = utils.import_class(cost_fn_str)
|
||||
except exception.ClassNotFound:
|
||||
raise exception.SchedulerCostFunctionNotFound(
|
||||
cost_fn_str=cost_fn_str)
|
||||
|
||||
try:
|
||||
flag_name = "%s_weight" % cost_fn.__name__
|
||||
weight = getattr(FLAGS, flag_name)
|
||||
except AttributeError:
|
||||
raise exception.SchedulerWeightFlagNotFound(
|
||||
flag_name=flag_name)
|
||||
cost_fns.append((weight, cost_fn))
|
||||
|
||||
self.cost_fns_cache[topic] = cost_fns
|
||||
return cost_fns
|
||||
|
||||
def weigh_hosts(self, topic, request_spec, hosts):
|
||||
"""Returns a list of dictionaries of form:
|
||||
[ {weight: weight, hostname: hostname, capabilities: capabs} ]
|
||||
"""
|
||||
cost_fns = self.get_cost_fns(topic)
|
||||
costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
|
||||
|
||||
weighted = []
|
||||
weight_log = []
|
||||
for cost, (hostname, service) in zip(costs, hosts):
|
||||
caps = service[topic]
|
||||
weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
|
||||
weight_dict = dict(weight=cost, hostname=hostname,
|
||||
capabilities=caps)
|
||||
weighted.append(weight_dict)
|
||||
|
||||
LOG.debug(_("Weighted Costs => %s") % weight_log)
|
||||
return weighted
|
||||
|
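Putting the pieces together, weigh_hosts() ultimately reduces to a weighted_sum() over the configured cost functions: each function's scores are normalized, scaled by its weight flag, and summed per host. A small sketch using the fill-first function above (capability numbers are invented):

from nova.scheduler.least_cost import compute_fill_first_cost_fn, weighted_sum

# Two candidate hosts as (hostname, services) pairs; numbers are made up.
hosts = [('host_a', {'compute': {'host_memory_free': 1 * 1024 ** 3}}),
         ('host_b', {'compute': {'host_memory_free': 4 * 1024 ** 3}})]

# The host with the least free ram ends up cheapest (fill-first behaviour).
costs = weighted_sum(domain=hosts,
                     weighted_fns=[(1, compute_fill_first_cost_fn)])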
@ -20,13 +20,11 @@
|
||||
"""Generic Node baseclass for all workers that run on hosts."""
|
||||
|
||||
import inspect
|
||||
import multiprocessing
|
||||
import os
|
||||
|
||||
import eventlet
|
||||
import greenlet
|
||||
|
||||
from eventlet import greenthread
|
||||
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova import exception
|
||||
@ -69,30 +67,25 @@ class Launcher(object):
|
||||
self._services = []
|
||||
|
||||
@staticmethod
|
||||
def run_service(service):
|
||||
"""Start and wait for a service to finish.
|
||||
def run_server(server):
|
||||
"""Start and wait for a server to finish.
|
||||
|
||||
:param service: Service to run and wait for.
|
||||
:param service: Server to run and wait for.
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
service.start()
|
||||
try:
|
||||
service.wait()
|
||||
except KeyboardInterrupt:
|
||||
service.stop()
|
||||
server.start()
|
||||
server.wait()
|
||||
|
||||
def launch_service(self, service):
|
||||
"""Load and start the given service.
|
||||
def launch_server(self, server):
|
||||
"""Load and start the given server.
|
||||
|
||||
:param service: The service you would like to start.
|
||||
:param server: The server you would like to start.
|
||||
:returns: None
|
||||
|
||||
"""
|
||||
process = multiprocessing.Process(target=self.run_service,
|
||||
args=(service,))
|
||||
process.start()
|
||||
self._services.append(process)
|
||||
gt = eventlet.spawn(self.run_server, server)
|
||||
self._services.append(gt)
|
||||
|
||||
def stop(self):
|
||||
"""Stop all services which are currently running.
|
||||
@ -101,8 +94,7 @@ class Launcher(object):
|
||||
|
||||
"""
|
||||
for service in self._services:
|
||||
if service.is_alive():
|
||||
service.terminate()
|
||||
service.kill()
|
||||
|
||||
def wait(self):
|
||||
"""Waits until all services have been stopped, and then returns.
|
||||
@ -111,11 +103,18 @@ class Launcher(object):
|
||||
|
||||
"""
|
||||
for service in self._services:
|
||||
service.join()
|
||||
try:
|
||||
service.wait()
|
||||
except greenlet.GreenletExit:
|
||||
pass
|
||||
|
||||
|
||||
class Service(object):
|
||||
"""Base class for workers that run on hosts."""
|
||||
"""Service object for binaries running on hosts.
|
||||
|
||||
A service takes a manager and enables rpc by listening to queues based
|
||||
on topic. It also periodically runs tasks on the manager and reports
its state to the database services table."""
|
||||
|
||||
def __init__(self, host, binary, topic, manager, report_interval=None,
|
||||
periodic_interval=None, *args, **kwargs):
|
||||
@ -173,7 +172,7 @@ class Service(object):
|
||||
finally:
|
||||
consumer_set.close()
|
||||
|
||||
self.consumer_set_thread = greenthread.spawn(_wait)
|
||||
self.consumer_set_thread = eventlet.spawn(_wait)
|
||||
|
||||
if self.report_interval:
|
||||
pulse = utils.LoopingCall(self.report_state)
|
||||
@ -293,9 +292,9 @@ class WSGIService(object):
|
||||
"""Provides ability to launch API from a 'paste' configuration."""
|
||||
|
||||
def __init__(self, name, loader=None):
|
||||
"""Initialize, but do not start the WSGI service.
|
||||
"""Initialize, but do not start the WSGI server.
|
||||
|
||||
:param name: The name of the WSGI service given to the loader.
|
||||
:param name: The name of the WSGI server given to the loader.
|
||||
:param loader: Loads the WSGI application using the given name.
|
||||
:returns: None
|
||||
|
||||
@ -339,32 +338,32 @@ class WSGIService(object):
|
||||
self.server.wait()
|
||||
|
||||
|
||||
def serve(*services):
|
||||
try:
|
||||
if not services:
|
||||
services = [Service.create()]
|
||||
except Exception:
|
||||
logging.exception('in Service.create()')
|
||||
raise
|
||||
finally:
|
||||
# After we've loaded up all our dynamic bits, check
|
||||
# whether we should print help
|
||||
flags.DEFINE_flag(flags.HelpFlag())
|
||||
flags.DEFINE_flag(flags.HelpshortFlag())
|
||||
flags.DEFINE_flag(flags.HelpXMLFlag())
|
||||
FLAGS.ParseNewFlags()
|
||||
# NOTE(vish): the global launcher is to maintain the existing
|
||||
# functionality of calling service.serve +
|
||||
# service.wait
|
||||
_launcher = None
|
||||
|
||||
name = '_'.join(x.binary for x in services)
|
||||
logging.debug(_('Serving %s'), name)
|
||||
|
||||
def serve(*servers):
|
||||
global _launcher
|
||||
if not _launcher:
|
||||
_launcher = Launcher()
|
||||
for server in servers:
|
||||
_launcher.launch_server(server)
|
||||
|
||||
|
||||
def wait():
|
||||
# After we've loaded up all our dynamic bits, check
|
||||
# whether we should print help
|
||||
flags.DEFINE_flag(flags.HelpFlag())
|
||||
flags.DEFINE_flag(flags.HelpshortFlag())
|
||||
flags.DEFINE_flag(flags.HelpXMLFlag())
|
||||
FLAGS.ParseNewFlags()
|
||||
logging.debug(_('Full set of FLAGS:'))
|
||||
for flag in FLAGS:
|
||||
flag_get = FLAGS.get(flag, None)
|
||||
logging.debug('%(flag)s : %(flag_get)s' % locals())
|
||||
|
||||
for x in services:
|
||||
x.start()
|
||||
|
||||
|
||||
def wait():
|
||||
while True:
|
||||
greenthread.sleep(5)
|
||||
try:
|
||||
_launcher.wait()
|
||||
except KeyboardInterrupt:
|
||||
_launcher.stop()
|
||||
|
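The module-level serve()/wait() pair above simply wraps a shared Launcher, so a binary can also drive one directly. A minimal sketch, assuming 'osapi' is a paste-configured API name:

from nova import service

launcher = service.Launcher()
launcher.launch_server(service.WSGIService('osapi'))
try:
    launcher.wait()
except KeyboardInterrupt:
    launcher.stop()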
@ -77,6 +77,9 @@ class FakeZoneManager(zone_manager.ZoneManager):
|
||||
'host3': {
|
||||
'compute': {'host_memory_free': 3221225472},
|
||||
},
|
||||
'host4': {
|
||||
'compute': {'host_memory_free': 999999999},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
|
@ -21,6 +21,7 @@ import json
|
||||
from nova import exception
|
||||
from nova import test
|
||||
from nova.scheduler import host_filter
|
||||
from nova.scheduler import filters
|
||||
|
||||
|
||||
class FakeZoneManager:
|
||||
@ -55,7 +56,7 @@ class HostFilterTestCase(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(HostFilterTestCase, self).setUp()
|
||||
default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter'
|
||||
default_host_filter = 'AllHostsFilter'
|
||||
self.flags(default_host_filter=default_host_filter)
|
||||
self.instance_type = dict(name='tiny',
|
||||
memory_mb=50,
|
||||
@ -98,13 +99,10 @@ class HostFilterTestCase(test.TestCase):
|
||||
def test_choose_filter(self):
|
||||
# Test default filter ...
|
||||
hf = host_filter.choose_host_filter()
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.AllHostsFilter')
|
||||
self.assertEquals(hf._full_name().split(".")[-1], 'AllHostsFilter')
|
||||
# Test valid filter ...
|
||||
hf = host_filter.choose_host_filter(
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
hf = host_filter.choose_host_filter('InstanceTypeFilter')
|
||||
self.assertEquals(hf._full_name().split(".")[-1], 'InstanceTypeFilter')
|
||||
# Test invalid filter ...
|
||||
try:
|
||||
host_filter.choose_host_filter('does not exist')
|
||||
@ -113,7 +111,7 @@ class HostFilterTestCase(test.TestCase):
|
||||
pass
|
||||
|
||||
def test_all_host_filter(self):
|
||||
hf = host_filter.AllHostsFilter()
|
||||
hf = filters.AllHostsFilter()
|
||||
cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(10, len(hosts))
|
||||
@ -121,11 +119,10 @@ class HostFilterTestCase(test.TestCase):
|
||||
self.assertTrue(host.startswith('host'))
|
||||
|
||||
def test_instance_type_filter(self):
|
||||
hf = host_filter.InstanceTypeFilter()
|
||||
hf = filters.InstanceTypeFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
|
||||
name)
|
||||
self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter')
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
@ -134,21 +131,20 @@ class HostFilterTestCase(test.TestCase):
|
||||
self.assertEquals('host10', just_hosts[5])
|
||||
|
||||
def test_instance_type_filter_extra_specs(self):
|
||||
hf = host_filter.InstanceTypeFilter()
|
||||
hf = filters.InstanceTypeFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = hf.instance_type_to_filter(self.gpu_instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
|
||||
name)
|
||||
self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter')
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(1, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
self.assertEquals('host07', just_hosts[0])
|
||||
|
||||
def test_json_filter(self):
|
||||
hf = host_filter.JsonFilter()
|
||||
hf = filters.JsonFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
|
||||
self.assertEquals(name.split(".")[-1], 'JsonFilter')
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
@ -192,7 +188,6 @@ class HostFilterTestCase(test.TestCase):
|
||||
raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(5, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
|
@ -15,6 +15,7 @@
|
||||
"""
|
||||
Tests For Least Cost Scheduler
|
||||
"""
|
||||
import copy
|
||||
|
||||
from nova import test
|
||||
from nova.scheduler import least_cost
|
||||
@ -81,7 +82,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
|
||||
super(LeastCostSchedulerTestCase, self).tearDown()
|
||||
|
||||
def assertWeights(self, expected, num, request_spec, hosts):
|
||||
weighted = self.sched.weigh_hosts(num, request_spec, hosts)
|
||||
weighted = self.sched.weigh_hosts("compute", request_spec, hosts)
|
||||
self.assertDictListMatch(weighted, expected, approx_equal=True)
|
||||
|
||||
def test_no_hosts(self):
|
||||
@ -122,19 +123,24 @@ class LeastCostSchedulerTestCase(test.TestCase):
|
||||
self.flags(least_cost_scheduler_cost_functions=[
|
||||
'nova.scheduler.least_cost.compute_fill_first_cost_fn'],
|
||||
compute_fill_first_cost_fn_weight=1)
|
||||
|
||||
num = 1
|
||||
instance_type = {'memory_mb': 1024}
|
||||
request_spec = {'instance_type': instance_type}
|
||||
hosts = self.sched.filter_hosts('compute', request_spec, None)
|
||||
svc_states = self.sched.zone_manager.service_states.iteritems()
|
||||
all_hosts = [(host, services["compute"])
|
||||
for host, services in svc_states
|
||||
if "compute" in services]
|
||||
hosts = self.sched.filter_hosts('compute', request_spec, all_hosts)
|
||||
|
||||
expected = []
|
||||
for idx, (hostname, caps) in enumerate(hosts):
|
||||
for idx, (hostname, services) in enumerate(hosts):
|
||||
caps = copy.deepcopy(services["compute"])
|
||||
# Costs are normalized so over 10 hosts, each host with increasing
|
||||
# free ram will cost 1/N more. Since the lowest cost host has some
|
||||
# free ram, we add in the 1/N for the base_cost
|
||||
weight = 0.1 + (0.1 * idx)
|
||||
weight_dict = dict(weight=weight, hostname=hostname)
|
||||
expected.append(weight_dict)
|
||||
wtd_dict = dict(hostname=hostname, weight=weight,
|
||||
capabilities=caps)
|
||||
expected.append(wtd_dict)
|
||||
|
||||
self.assertWeights(expected, num, request_spec, hosts)
|
||||
|
@ -32,6 +32,7 @@ from nova import context
|
||||
from nova import exception
|
||||
from nova import test
|
||||
from nova import wsgi
|
||||
from nova.api import auth
|
||||
from nova.api import ec2
|
||||
from nova.api.ec2 import apirequest
|
||||
from nova.api.ec2 import cloud
|
||||
@ -199,7 +200,7 @@ class ApiEc2TestCase(test.TestCase):
|
||||
# NOTE(vish): skipping the Authorizer
|
||||
roles = ['sysadmin', 'netadmin']
|
||||
ctxt = context.RequestContext('fake', 'fake', roles=roles)
|
||||
self.app = wsgi.InjectContext(ctxt,
|
||||
self.app = auth.InjectContext(ctxt,
|
||||
ec2.Requestify(ec2.Authorizer(ec2.Executor()),
|
||||
'nova.api.ec2.cloud.CloudController'))
|
||||
|
||||
|
@ -487,6 +487,17 @@ class CloudTestCase(test.TestCase):
|
||||
db.service_destroy(self.context, comp1['id'])
|
||||
db.service_destroy(self.context, comp2['id'])
|
||||
|
||||
def test_describe_instances_deleted(self):
|
||||
args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
|
||||
inst1 = db.instance_create(self.context, args1)
|
||||
args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'}
|
||||
inst2 = db.instance_create(self.context, args2)
|
||||
db.instance_destroy(self.context, inst1.id)
|
||||
result = self.cloud.describe_instances(self.context)
|
||||
result = result['reservationSet'][0]['instancesSet']
|
||||
self.assertEqual(result[0]['instanceId'],
|
||||
ec2utils.id_to_ec2_id(inst2.id))
|
||||
|
||||
def _block_device_mapping_create(self, instance_id, mappings):
|
||||
volumes = []
|
||||
for bdm in mappings:
|
||||
|
@ -76,3 +76,20 @@ class DbApiTestCase(test.TestCase):
|
||||
self.assertEqual(instance['id'], result['id'])
|
||||
self.assertEqual(result['fixed_ips'][0]['floating_ips'][0].address,
|
||||
'1.2.1.2')
|
||||
|
||||
def test_instance_get_all_by_filters(self):
|
||||
args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
|
||||
inst1 = db.instance_create(self.context, args)
|
||||
inst2 = db.instance_create(self.context, args)
|
||||
result = db.instance_get_all_by_filters(self.context, {})
|
||||
self.assertTrue(2, len(result))
|
||||
|
||||
def test_instance_get_all_by_filters_deleted(self):
|
||||
args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
|
||||
inst1 = db.instance_create(self.context, args1)
|
||||
args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'}
|
||||
inst2 = db.instance_create(self.context, args2)
|
||||
db.instance_destroy(self.context, inst1.id)
|
||||
result = db.instance_get_all_by_filters(self.context.elevated(), {})
|
||||
self.assertEqual(1, len(result))
|
||||
self.assertEqual(result[0].id, inst2.id)
|
||||
|
@ -1,200 +0,0 @@
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Tests For Scheduler Host Filters.
|
||||
"""
|
||||
|
||||
import json
|
||||
|
||||
from nova import exception
|
||||
from nova import test
|
||||
from nova.scheduler import host_filter
|
||||
|
||||
|
||||
class FakeZoneManager:
|
||||
pass
|
||||
|
||||
|
||||
class HostFilterTestCase(test.TestCase):
|
||||
"""Test case for host filters."""
|
||||
|
||||
def _host_caps(self, multiplier):
|
||||
# Returns host capabilities in the following way:
|
||||
# host1 = memory:free 10 (100max)
|
||||
# disk:available 100 (1000max)
|
||||
# hostN = memory:free 10 + 10N
|
||||
# disk:available 100 + 100N
|
||||
# in other words: hostN has more resources than host0
|
||||
# which means ... don't go above 10 hosts.
|
||||
return {'host_name-description': 'XenServer %s' % multiplier,
|
||||
'host_hostname': 'xs-%s' % multiplier,
|
||||
'host_memory_total': 100,
|
||||
'host_memory_overhead': 10,
|
||||
'host_memory_free': 10 + multiplier * 10,
|
||||
'host_memory_free-computed': 10 + multiplier * 10,
|
||||
'host_other-config': {},
|
||||
'host_ip_address': '192.168.1.%d' % (100 + multiplier),
|
||||
'host_cpu_info': {},
|
||||
'disk_available': 100 + multiplier * 100,
|
||||
'disk_total': 1000,
|
||||
'disk_used': 0,
|
||||
'host_uuid': 'xxx-%d' % multiplier,
|
||||
'host_name-label': 'xs-%s' % multiplier}
|
||||
|
||||
def setUp(self):
|
||||
super(HostFilterTestCase, self).setUp()
|
||||
default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter'
|
||||
self.flags(default_host_filter=default_host_filter)
|
||||
self.instance_type = dict(name='tiny',
|
||||
memory_mb=50,
|
||||
vcpus=10,
|
||||
local_gb=500,
|
||||
flavorid=1,
|
||||
swap=500,
|
||||
rxtx_quota=30000,
|
||||
rxtx_cap=200,
|
||||
extra_specs={})
|
||||
|
||||
self.zone_manager = FakeZoneManager()
|
||||
states = {}
|
||||
for x in xrange(10):
|
||||
states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
|
||||
self.zone_manager.service_states = states
|
||||
|
||||
def test_choose_filter(self):
|
||||
# Test default filter ...
|
||||
hf = host_filter.choose_host_filter()
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.AllHostsFilter')
|
||||
# Test valid filter ...
|
||||
hf = host_filter.choose_host_filter(
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
# Test invalid filter ...
|
||||
try:
|
||||
host_filter.choose_host_filter('does not exist')
|
||||
self.fail("Should not find host filter.")
|
||||
except exception.SchedulerHostFilterNotFound:
|
||||
pass
|
||||
|
||||
def test_all_host_filter(self):
|
||||
hf = host_filter.AllHostsFilter()
|
||||
cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(10, len(hosts))
|
||||
for host, capabilities in hosts:
|
||||
self.assertTrue(host.startswith('host'))
|
||||
|
||||
def test_instance_type_filter(self):
|
||||
hf = host_filter.InstanceTypeFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
|
||||
name)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
self.assertEquals('host05', just_hosts[0])
|
||||
self.assertEquals('host10', just_hosts[5])
|
||||
|
||||
def test_json_filter(self):
|
||||
hf = host_filter.JsonFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
self.assertEquals('host05', just_hosts[0])
|
||||
self.assertEquals('host10', just_hosts[5])
|
||||
|
||||
# Try some custom queries
|
||||
|
||||
raw = ['or',
|
||||
['and',
|
||||
['<', '$compute.host_memory_free', 30],
|
||||
['<', '$compute.disk_available', 300],
|
||||
],
|
||||
['and',
|
||||
['>', '$compute.host_memory_free', 70],
|
||||
['>', '$compute.disk_available', 700],
|
||||
],
|
||||
]
|
||||
|
||||
cooked = json.dumps(raw)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(5, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
for index, host in zip([1, 2, 8, 9, 10], just_hosts):
|
||||
self.assertEquals('host%02d' % index, host)
|
||||
|
||||
raw = ['not',
|
||||
['=', '$compute.host_memory_free', 30],
|
||||
]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(9, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
|
||||
self.assertEquals('host%02d' % index, host)
|
||||
|
||||
raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(5, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
for index, host in zip([2, 4, 6, 8, 10], just_hosts):
|
||||
self.assertEquals('host%02d' % index, host)
|
||||
|
||||
# Try some bogus input ...
|
||||
raw = ['unknown command', ]
|
||||
cooked = json.dumps(raw)
|
||||
try:
|
||||
hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
pass
|
||||
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
['not', True, False, True, False])))
|
||||
|
||||
try:
|
||||
hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
'not', True, False, True, False))
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
pass
|
||||
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', '$foo', 100])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', '$.....', 100])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(
|
||||
['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
|
||||
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', {}, ['>', '$missing....foo']])))
|
@ -836,6 +836,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
count = (0 <= str(e.message).find('Unexpected method call'))
|
||||
|
||||
shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name))
|
||||
shutil.rmtree(os.path.join(FLAGS.instances_path, '_base'))
|
||||
|
||||
self.assertTrue(count)
|
||||
|
||||
|
@ -108,11 +108,14 @@ floating_ip_fields = {'id': 0,
|
||||
|
||||
vifs = [{'id': 0,
|
||||
'address': 'DE:AD:BE:EF:00:00',
|
||||
'uuid': '00000000-0000-0000-0000-0000000000000000',
|
||||
'network_id': 0,
|
||||
'network': FakeModel(**networks[0]),
|
||||
'instance_id': 0},
|
||||
{'id': 1,
|
||||
'address': 'DE:AD:BE:EF:00:01',
|
||||
'uuid': '00000000-0000-0000-0000-0000000000000001',
|
||||
'network_id': 0,
|
||||
'network_id': 1,
|
||||
'network': FakeModel(**networks[1]),
|
||||
'instance_id': 0}]
|
||||
@ -163,6 +166,8 @@ class FlatNetworkTestCase(test.TestCase):
|
||||
'ips': 'DONTCARE',
|
||||
'label': 'test%s' % i,
|
||||
'mac': 'DE:AD:BE:EF:00:0%s' % i,
|
||||
'vif_uuid': ('00000000-0000-0000-0000-000000000000000%s' %
|
||||
i),
|
||||
'rxtx_cap': 'DONTCARE',
|
||||
'should_create_vlan': False,
|
||||
'should_create_bridge': False}
|
||||
|
@ -261,8 +261,9 @@ def default_flagfile(filename='nova.conf', args=None):
|
||||
filename = "./nova.conf"
|
||||
if not os.path.exists(filename):
|
||||
filename = '/etc/nova/nova.conf'
|
||||
flagfile = '--flagfile=%s' % filename
|
||||
args.insert(1, flagfile)
|
||||
if os.path.exists(filename):
|
||||
flagfile = '--flagfile=%s' % filename
|
||||
args.insert(1, flagfile)
|
||||
|
||||
|
||||
def debug(arg):
|
||||
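The net effect of the default_flagfile() tweak above is that a missing nova.conf no longer injects a dangling --flagfile argument. A short usage sketch:

import sys

from nova import utils

# With the change above, argv is only modified when a config file exists:
# './nova.conf' or '/etc/nova/nova.conf' yields an inserted '--flagfile=...';
# otherwise sys.argv is left untouched.
utils.default_flagfile(args=sys.argv)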
@ -840,42 +841,6 @@ def bool_from_str(val):
|
||||
return val.lower() == 'true'
|
||||
|
||||
|
||||
class Bootstrapper(object):
|
||||
"""Provides environment bootstrapping capabilities for entry points."""
|
||||
|
||||
@staticmethod
|
||||
def bootstrap_binary(argv):
|
||||
"""Initialize the Nova environment using command line arguments."""
|
||||
Bootstrapper.setup_flags(argv)
|
||||
Bootstrapper.setup_logging()
|
||||
Bootstrapper.log_flags()
|
||||
|
||||
@staticmethod
|
||||
def setup_logging():
|
||||
"""Initialize logging and log a message indicating the Nova version."""
|
||||
logging.setup()
|
||||
logging.audit(_("Nova Version (%s)") %
|
||||
version.version_string_with_vcs())
|
||||
|
||||
@staticmethod
|
||||
def setup_flags(input_flags):
|
||||
"""Initialize flags, load flag file, and print help if needed."""
|
||||
default_flagfile(args=input_flags)
|
||||
FLAGS(input_flags or [])
|
||||
flags.DEFINE_flag(flags.HelpFlag())
|
||||
flags.DEFINE_flag(flags.HelpshortFlag())
|
||||
flags.DEFINE_flag(flags.HelpXMLFlag())
|
||||
FLAGS.ParseNewFlags()
|
||||
|
||||
@staticmethod
|
||||
def log_flags():
|
||||
"""Log the list of all active flags being used."""
|
||||
logging.audit(_("Currently active flags:"))
|
||||
for key in FLAGS:
|
||||
value = FLAGS.get(key, None)
|
||||
logging.audit(_("%(key)s : %(value)s" % locals()))
|
||||
|
||||
|
||||
def monkey_patch():
|
||||
if not FLAGS.monkey_patch:
|
||||
return
|
||||
|
15
nova/wsgi.py
@ -39,9 +39,6 @@ from nova import log as logging
|
||||
from nova import utils
|
||||
|
||||
|
||||
eventlet.patcher.monkey_patch(socket=True, time=True)
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.wsgi')
|
||||
|
||||
@ -274,18 +271,6 @@ class Middleware(Application):
|
||||
return self.process_response(response)
|
||||
|
||||
|
||||
class InjectContext(Middleware):
|
||||
"""Add a 'nova.context' to WSGI environ."""
|
||||
def __init__(self, context, *args, **kwargs):
|
||||
self.context = context
|
||||
super(InjectContext, self).__init__(*args, **kwargs)
|
||||
|
||||
@webob.dec.wsgify(RequestClass=Request)
|
||||
def __call__(self, req):
|
||||
req.environ['nova.context'] = self.context
|
||||
return self.application
|
||||
|
||||
|
||||
class Debug(Middleware):
|
||||
"""Helper class for debugging a WSGI application.
|
||||
|
||||
|