Merged with trunk

1 Authors
@@ -101,6 +101,7 @@ Stephanie Reese <reese.sm@gmail.com>
Thierry Carrez <thierry@openstack.org>
Todd Willey <todd@ansolabs.com>
Trey Morris <trey.morris@rackspace.com>
Troy Toman <troy.toman@rackspace.com>
Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
@@ -24,7 +24,6 @@ from eventlet import greenthread
from eventlet.green import urllib2

import exceptions
import gettext
import os
import sys
import time
@@ -38,11 +37,11 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

gettext.install('nova', unicode=1)

from nova import flags
from nova import log as logging
from nova import rpc
from nova import service
from nova import utils
from nova import wsgi

@@ -141,5 +140,5 @@ if __name__ == '__main__':
    acp = AjaxConsoleProxy()
    acp.register_listeners()
    server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
    server.start()
    server.wait()
    service.serve(server)
    service.wait()
43 bin/nova-api
@@ -19,12 +19,14 @@

"""Starter script for Nova API.

Starts both the EC2 and OpenStack APIs in separate processes.
Starts both the EC2 and OpenStack APIs in separate greenthreads.

"""

import eventlet
eventlet.monkey_patch()

import os
import signal
import sys


@@ -33,32 +35,19 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
    sys.path.insert(0, possible_topdir)

import nova.service
import nova.utils

from nova import flags


FLAGS = flags.FLAGS


def main():
    """Launch EC2 and OSAPI services."""
    nova.utils.Bootstrapper.bootstrap_binary(sys.argv)
    nova.utils.monkey_patch()
    launcher = nova.service.Launcher()

    for api in FLAGS.enabled_apis:
        service = nova.service.WSGIService(api)
        launcher.launch_service(service)

    signal.signal(signal.SIGTERM, lambda *_: launcher.stop())

    try:
        launcher.wait()
    except KeyboardInterrupt:
        launcher.stop()

from nova import log as logging
from nova import service
from nova import utils

if __name__ == '__main__':
    sys.exit(main())
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    servers = []
    for api in flags.FLAGS.enabled_apis:
        servers.append(service.WSGIService(api))
    service.serve(*servers)
    service.wait()
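Both variants above boil down to the same launch pattern: eventlet.monkey_patch() runs before anything else is imported, each enabled API gets its own server, and the process blocks until every server exits. A minimal, self-contained sketch of that pattern, assuming only that eventlet is installed (serve/wait here are illustrative stand-ins, not Nova's actual service module):

import eventlet
eventlet.monkey_patch()  # must run before other modules import the stdlib


def serve(*servers):
    # one greenthread per server, mirroring service.serve(*servers) above
    return [eventlet.spawn(s) for s in servers]


def wait(threads):
    for t in threads:
        t.wait()  # block until every greenthread finishes


if __name__ == '__main__':
    def ec2_api():
        print('ec2 api running')

    def os_api():
        print('openstack api running')

    wait(serve(ec2_api, os_api))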
47 bin/nova-api-ec2 Executable file
@@ -0,0 +1,47 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Starter script for Nova EC2 API."""

import eventlet
eventlet.monkey_patch()

import os
import sys


possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
        sys.argv[0]), os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
    sys.path.insert(0, possible_topdir)


from nova import flags
from nova import log as logging
from nova import service
from nova import utils

if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    server = service.WSGIService('ec2')
    service.serve(server)
    service.wait()
47 bin/nova-api-os Executable file
@@ -0,0 +1,47 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Starter script for Nova OS API."""

import eventlet
eventlet.monkey_patch()

import os
import sys


possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
        sys.argv[0]), os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
    sys.path.insert(0, possible_topdir)


from nova import flags
from nova import log as logging
from nova import service
from nova import utils

if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    server = service.WSGIService('osapi')
    service.serve(server)
    service.wait()

@@ -22,7 +22,6 @@
import eventlet
eventlet.monkey_patch()

import gettext
import os
import sys

@@ -34,7 +33,6 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
    sys.path.insert(0, POSSIBLE_TOPDIR)

gettext.install('nova', unicode=1)

from nova import flags
from nova import log as logging
@@ -46,5 +44,6 @@ if __name__ == '__main__':
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    service.serve()
    server = service.Service.create(binary='nova-compute')
    service.serve(server)
    service.wait()

@@ -21,7 +21,6 @@
import eventlet
eventlet.monkey_patch()

import gettext
import os
import sys

@@ -33,7 +32,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

gettext.install('nova', unicode=1)

from nova import flags
from nova import log as logging
@@ -44,5 +42,6 @@ if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    service.serve()
    server = service.Service.create(binary='nova-console')
    service.serve(server)
    service.wait()

@@ -52,7 +52,7 @@ flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
LOG = logging.getLogger('nova.dhcpbridge')


def add_lease(mac, ip_address, _interface):
def add_lease(mac, ip_address):
    """Set the IP that was assigned by the DHCP server."""
    if FLAGS.fake_rabbit:
        LOG.debug(_("leasing ip"))
@@ -66,13 +66,13 @@ def add_lease(mac, ip_address, _interface):
                 "args": {"address": ip_address}})


def old_lease(mac, ip_address, interface):
def old_lease(mac, ip_address):
    """Update just as add lease."""
    LOG.debug(_("Adopted old lease or got a change of mac"))
    add_lease(mac, ip_address, interface)
    add_lease(mac, ip_address)


def del_lease(mac, ip_address, _interface):
def del_lease(mac, ip_address):
    """Called when a lease expires."""
    if FLAGS.fake_rabbit:
        LOG.debug(_("releasing ip"))
@@ -99,8 +99,6 @@ def main():
    utils.default_flagfile(flagfile)
    argv = FLAGS(sys.argv)
    logging.setup()
    # check ENV first so we don't break any older deploys
    network_id = int(os.environ.get('NETWORK_ID'))

    if int(os.environ.get('TESTING', '0')):
        from nova.tests import fake_flags
@@ -115,11 +113,19 @@ def main():
    if action in ['add', 'del', 'old']:
        mac = argv[2]
        ip = argv[3]
        msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s"
                " on interface %(interface)s") % locals()
        msg = _("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'") % \
            {"action": action,
             "mac": mac,
             "ip": ip}
        LOG.debug(msg)
        globals()[action + '_lease'](mac, ip, interface)
        globals()[action + '_lease'](mac, ip)
    else:
        try:
            network_id = int(os.environ.get('NETWORK_ID'))
        except TypeError:
            LOG.error(_("Environment variable 'NETWORK_ID' must be set."))
            sys.exit(1)

        print init_leases(network_id)


if __name__ == "__main__":
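The new dispatch in main() is plain name lookup: dnsmasq invokes the bridge script as "<script> <action> <mac> <ip>", and the matching handler is fetched from globals(). A stripped-down, runnable sketch of that mechanism (the handlers below are stand-ins for the real add_lease/old_lease/del_lease):

import sys


def add_lease(mac, ip):
    print('add lease %s %s' % (mac, ip))


def old_lease(mac, ip):
    # adopted leases are treated exactly like new ones, as above
    add_lease(mac, ip)


def del_lease(mac, ip):
    print('release lease %s %s' % (mac, ip))


if __name__ == '__main__':
    action, mac, ip = sys.argv[1:4]
    if action in ('add', 'old', 'del'):
        # same lookup trick as globals()[action + '_lease'](mac, ip)
        globals()[action + '_lease'](mac, ip)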
@@ -20,7 +20,9 @@

"""Starter script for Nova Direct API."""

import gettext
import eventlet
eventlet.monkey_patch()

import os
import sys

@@ -32,12 +34,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

gettext.install('nova', unicode=1)

from nova import compute
from nova import flags
from nova import log as logging
from nova import network
from nova import service
from nova import utils
from nova import volume
from nova import wsgi
@@ -97,5 +99,6 @@ if __name__ == '__main__':
                         with_auth,
                         host=FLAGS.direct_host,
                         port=FLAGS.direct_port)
    server.start()
    server.wait()

    service.serve(server)
    service.wait()
@@ -22,7 +22,6 @@
import eventlet
eventlet.monkey_patch()

import gettext
import os
import sys

@@ -34,7 +33,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

gettext.install('nova', unicode=1)

from nova import flags
from nova import log as logging
@@ -46,5 +44,6 @@ if __name__ == '__main__':
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    service.serve()
    server = service.Service.create(binary='nova-network')
    service.serve(server)
    service.wait()

@@ -17,11 +17,11 @@
# License for the specific language governing permissions and limitations
# under the License.

"""
Daemon for nova objectstore. Supports S3 API.
"""
"""Daemon for nova objectstore. Supports S3 API."""

import eventlet
eventlet.monkey_patch()

import gettext
import os
import sys

@@ -33,10 +33,10 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

gettext.install('nova', unicode=1)

from nova import flags
from nova import log as logging
from nova import service
from nova import utils
from nova import wsgi
from nova.objectstore import s3server
@@ -55,5 +55,5 @@ if __name__ == '__main__':
                         router,
                         port=FLAGS.s3_port,
                         host=FLAGS.s3_host)
    server.start()
    server.wait()
    service.serve(server)
    service.wait()

@@ -19,7 +19,8 @@
"""VNC Console Proxy Server."""

import eventlet
import gettext
eventlet.monkey_patch()

import os
import sys

@@ -29,7 +30,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

gettext.install('nova', unicode=1)

from nova import flags
from nova import log as logging
@@ -41,7 +41,7 @@ from nova.vnc import auth
from nova.vnc import proxy


LOG = logging.getLogger('nova.vnc-proxy')
LOG = logging.getLogger('nova.vncproxy')


FLAGS = flags.FLAGS
@@ -81,7 +81,7 @@ if __name__ == "__main__":
    FLAGS(sys.argv)
    logging.setup()

    LOG.audit(_("Starting nova-vnc-proxy node (version %s)"),
    LOG.audit(_("Starting nova-vncproxy node (version %s)"),
              version.version_string_with_vcs())

    if not (os.path.exists(FLAGS.vncproxy_wwwroot) and
@@ -107,13 +107,10 @@ if __name__ == "__main__":
    else:
        with_auth = auth.VNCNovaAuthMiddleware(with_logging)

    service.serve()

    server = wsgi.Server("VNC Proxy",
                         with_auth,
                         host=FLAGS.vncproxy_host,
                         port=FLAGS.vncproxy_port)
    server.start()
    server.start_tcp(handle_flash_socket_policy, 843, host=FLAGS.vncproxy_host)

    server.wait()
    service.serve(server)
    service.wait()

@@ -22,7 +22,6 @@
import eventlet
eventlet.monkey_patch()

import gettext
import os
import sys

@@ -34,7 +33,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

gettext.install('nova', unicode=1)

from nova import flags
from nova import log as logging
@@ -45,5 +43,7 @@ if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    service.serve()
    utils.monkey_patch()
    server = service.Service.create(binary='nova-volume')
    service.serve(server)
    service.wait()

@@ -21,5 +21,7 @@
.. automodule:: nova.scheduler
   :platform: Unix
   :synopsis: Module that picks a compute node to run a VM instance.
.. moduleauthor:: Sandy Walsh <sandy.walsh@rackspace.com>
.. moduleauthor:: Ed Leafe <ed@leafe.com>
.. moduleauthor:: Chris Behrens <cbehrens@codestud.com>
"""

@@ -14,10 +14,10 @@
# under the License.

"""
The AbstractScheduler is a base class Scheduler for creating instances
across zones. There are two expansion points to this class for:
1. Assigning Weights to hosts for requested instances
2. Filtering Hosts based on required instance capabilities
The AbstractScheduler is an abstract class Scheduler for creating instances
locally or across zones. Two methods should be overridden in order to
customize the behavior: filter_hosts() and weigh_hosts(). The default
behavior is to simply select all hosts and weight them the same.
"""

import operator
@@ -45,44 +45,44 @@ LOG = logging.getLogger('nova.scheduler.abstract_scheduler')

class InvalidBlob(exception.NovaException):
    message = _("Ill-formed or incorrectly routed 'blob' data sent "
                "to instance create request.")


class AbstractScheduler(driver.Scheduler):
    """Base class for creating Schedulers that can work across any nova
    deployment, from simple designs to multiply-nested zones.
    """

    def _call_zone_method(self, context, method, specs, zones):
        """Call novaclient zone method. Broken out for testing."""
        return api.call_zone_method(context, method, specs=specs, zones=zones)

    def _provision_resource_locally(self, context, build_plan_item,
                                    request_spec, kwargs):
        """Create the requested resource in this Zone."""
        host = build_plan_item['hostname']
        base_options = request_spec['instance_properties']
        image = request_spec['image']
        instance_type = request_spec.get('instance_type')

        # TODO(sandy): I guess someone needs to add block_device_mapping
        # support at some point? Also, OS API has no concept of security
        # groups.
        instance = compute_api.API().create_db_entry_for_new_instance(context,
            image, base_options, None, [])
            instance_type, image, base_options, None, [])

        instance_id = instance['id']
        kwargs['instance_id'] = instance_id

        rpc.cast(context,
                 db.queue_get_for(context, "compute", host),
                 {"method": "run_instance",
                  "args": kwargs})
        queue = db.queue_get_for(context, "compute", host)
        params = {"method": "run_instance", "args": kwargs}
        rpc.cast(context, queue, params)
        LOG.debug(_("Provisioning locally via compute node %(host)s")
                  % locals())

    def _decrypt_blob(self, blob):
        """Returns the decrypted blob or None if invalid. Broken out
        for testing."""
        for testing.
        """
        decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
        try:
            json_entry = decryptor(blob)
@@ -92,15 +92,15 @@ class AbstractScheduler(driver.Scheduler):
        return None

    def _ask_child_zone_to_create_instance(self, context, zone_info,
                                           request_spec, kwargs):
        """Once we have determined that the request should go to one
        of our children, we need to fabricate a new POST /servers/
        call with the same parameters that were passed into us.

        Note that we have to reverse engineer from our args to get back the
        image, flavor, ipgroup, etc. since the original call could have
        come in from EC2 (which doesn't use these things)."""

        come in from EC2 (which doesn't use these things).
        """
        instance_type = request_spec['instance_type']
        instance_properties = request_spec['instance_properties']

@@ -109,30 +109,26 @@ class AbstractScheduler(driver.Scheduler):
        meta = instance_properties['metadata']
        flavor_id = instance_type['flavorid']
        reservation_id = instance_properties['reservation_id']

        files = kwargs['injected_files']
        ipgroup = None  # Not supported in OS API ... yet

        child_zone = zone_info['child_zone']
        child_blob = zone_info['child_blob']
        zone = db.zone_get(context, child_zone)
        url = zone.api_url
        LOG.debug(_("Forwarding instance create call to child zone %(url)s"
                    ". ReservationID=%(reservation_id)s")
                  % locals())
                    ". ReservationID=%(reservation_id)s") % locals())
        nova = None
        try:
            nova = novaclient.Client(zone.username, zone.password, None, url)
            nova.authenticate()
        except novaclient_exceptions.BadRequest, e:
            raise exception.NotAuthorized(_("Bad credentials attempting "
                "to talk to zone at %(url)s.") % locals())

        nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
                            child_blob, reservation_id=reservation_id)

    def _provision_resource_from_blob(self, context, build_plan_item,
                                      instance_id, request_spec, kwargs):
        """Create the requested resource locally or in a child zone
        based on what is stored in the zone blob info.

@@ -145,8 +141,8 @@ class AbstractScheduler(driver.Scheduler):
        means we gathered the info from one of our children.
        It's possible that, when we decrypt the 'blob' field, it
        contains "child_blob" data. In which case we forward the
        request."""

        request.
        """
        host_info = None
        if "blob" in build_plan_item:
            # Request was passed in from above. Is it for us?
@@ -161,21 +157,20 @@ class AbstractScheduler(driver.Scheduler):
            # Valid data ... is it for us?
            if 'child_zone' in host_info and 'child_blob' in host_info:
                self._ask_child_zone_to_create_instance(context, host_info,
                                                        request_spec, kwargs)
            else:
                self._provision_resource_locally(context, host_info, request_spec,
                                                 kwargs)

    def _provision_resource(self, context, build_plan_item, instance_id,
                            request_spec, kwargs):
        """Create the requested resource in this Zone or a child zone."""
        if "hostname" in build_plan_item:
            self._provision_resource_locally(context, build_plan_item,
                                             request_spec, kwargs)
            return

        self._provision_resource_from_blob(context, build_plan_item,
                                           instance_id, request_spec, kwargs)

    def _adjust_child_weights(self, child_results, zones):
        """Apply the Scale and Offset values from the Zone definition
@@ -185,13 +180,11 @@ class AbstractScheduler(driver.Scheduler):
        for zone_id, result in child_results:
            if not result:
                continue

            assert isinstance(zone_id, int)

            for zone_rec in zones:
                if zone_rec['id'] != zone_id:
                    continue

                for item in result:
                    try:
                        offset = zone_rec['weight_offset']
@@ -202,10 +195,10 @@ class AbstractScheduler(driver.Scheduler):
                        item['raw_weight'] = raw_weight
                    except KeyError:
                        LOG.exception(_("Bad child zone scaling values "
                                        "for Zone: %(zone_id)s") % locals())

    def schedule_run_instance(self, context, instance_id, request_spec,
                              *args, **kwargs):
        """This method is called from nova.compute.api to provision
        an instance. However we need to look at the parameters being
        passed in to see if this is a request to:
@@ -214,13 +207,11 @@ class AbstractScheduler(driver.Scheduler):
        to simply create the instance (either in this zone or
        a child zone).
        """

        # TODO(sandy): We'll have to look for richer specs at some point.

        blob = request_spec.get('blob')
        if blob:
            self._provision_resource(context, request_spec, instance_id,
                                     request_spec, kwargs)
            return None

        num_instances = request_spec.get('num_instances', 1)
@@ -235,10 +226,9 @@ class AbstractScheduler(driver.Scheduler):
        for num in xrange(num_instances):
            if not build_plan:
                break

            build_plan_item = build_plan.pop(0)
            self._provision_resource(context, build_plan_item, instance_id,
                                     request_spec, kwargs)

        # Returning None short-circuits the routing to Compute (since
        # we've already done it here)
@@ -251,58 +241,44 @@ class AbstractScheduler(driver.Scheduler):
        anything about the children.
        """
        return self._schedule(context, "compute", request_spec,
                              *args, **kwargs)

    # TODO(sandy): We're only focused on compute instances right now,
    # so we don't implement the default "schedule()" method required
    # of Schedulers.
    def schedule(self, context, topic, request_spec, *args, **kwargs):
        """The schedule() contract requires we return the one
        best-suited host for this request.
        """
        raise driver.NoValidHost(_('No hosts were available'))
        # TODO(sandy): We're only focused on compute instances right now,
        # so we don't implement the default "schedule()" method required
        # of Schedulers.
        msg = _("No host selection for %s defined." % topic)
        raise driver.NoValidHost(msg)

    def _schedule(self, context, topic, request_spec, *args, **kwargs):
        """Returns a list of hosts that meet the required specs,
        ordered by their fitness.
        """

        if topic != "compute":
            raise NotImplementedError(_("Scheduler only understands"
                                        " Compute nodes (for now)"))
            msg = _("Scheduler only understands Compute nodes (for now)")
            raise NotImplementedError(msg)

        num_instances = request_spec.get('num_instances', 1)
        instance_type = request_spec['instance_type']
        # Get all available hosts.
        all_hosts = self.zone_manager.service_states.iteritems()
        unfiltered_hosts = [(host, services[topic])
                            for host, services in all_hosts
                            if topic in services]

        weighted = []
        host_list = None
        # Filter local hosts based on requirements ...
        filtered_hosts = self.filter_hosts(topic, request_spec,
                                           unfiltered_hosts)
        if not filtered_hosts:
            LOG.warn(_("No hosts available"))
            return []

        for i in xrange(num_instances):
            # Filter local hosts based on requirements ...
            #
            # The first pass through here will pass 'None' as the
            # host_list.. which tells the filter to build the full
            # list of hosts.
            # On a 2nd pass, the filter can modify the host_list with
            # any updates it needs to make based on resources that
            # may have been consumed from a previous build..
            host_list = self.filter_hosts(topic, request_spec, host_list)
            if not host_list:
                LOG.warn(_("Filter returned no hosts after processing "
                           "%(i)d of %(num_instances)d instances") % locals())
                break

            # then weigh the selected hosts.
            # weighted = [{weight=weight, hostname=hostname,
            #              capabilities=capabs}, ...]
            weights = self.weigh_hosts(topic, request_spec, host_list)
            weights.sort(key=operator.itemgetter('weight'))
            best_weight = weights[0]
            weighted.append(best_weight)
            self.consume_resources(topic, best_weight['capabilities'],
                                   instance_type)

        # Next, tack on the best weights from the child zones ...
        # weigh the selected hosts.
        # weighted_hosts = [{weight=weight, hostname=hostname,
        #                    capabilities=capabs}, ...]
        weighted_hosts = self.weigh_hosts(topic, request_spec, filtered_hosts)
        # Next, tack on the host weights from the child zones
        json_spec = json.dumps(request_spec)
        all_zones = db.zone_get_all(context)
        child_results = self._call_zone_method(context, "select",
@@ -314,90 +290,32 @@ class AbstractScheduler(driver.Scheduler):
            # it later if needed. This implicitly builds a zone
            # path structure.
            host_dict = {"weight": weighting["weight"],
                         "child_zone": child_zone,
                         "child_blob": weighting["blob"]}
            weighted.append(host_dict)
            weighted_hosts.append(host_dict)
        weighted_hosts.sort(key=operator.itemgetter('weight'))
        return weighted_hosts

        weighted.sort(key=operator.itemgetter('weight'))
        return weighted
    def filter_hosts(self, topic, request_spec, host_list):
        """Filter the full host list returned from the ZoneManager. By default,
        this method only applies the basic_ram_filter(), meaning all hosts
        with at least enough RAM for the requested instance are returned.

    def compute_filter(self, hostname, capabilities, request_spec):
        """Return whether or not we can schedule to this compute node.
        Derived classes should override this and return True if the host
        is acceptable for scheduling.
        Override in subclasses to provide greater selectivity.
        """
        instance_type = request_spec['instance_type']
        requested_mem = instance_type['memory_mb'] * 1024 * 1024
        return capabilities['host_memory_free'] >= requested_mem
        def basic_ram_filter(hostname, capabilities, request_spec):
            """Only return hosts with sufficient available RAM."""
            instance_type = request_spec['instance_type']
            requested_mem = instance_type['memory_mb'] * 1024 * 1024
            return capabilities['host_memory_free'] >= requested_mem

    def hold_filter_hosts(self, topic, request_spec, hosts=None):
        """Filter the full host list (from the ZoneManager)"""
        # NOTE(dabo): The logic used by the current _schedule() method
        # is incorrect. Since this task is just to refactor the classes,
        # I'm not fixing the logic now - that will be the next task.
        # So for now this method is just renamed; afterwards this will
        # become the filter_hosts() method, and the one below will
        # be removed.
        filter_name = request_spec.get('filter', None)
        # Make sure that the requested filter is legitimate.
        selected_filter = host_filter.choose_host_filter(filter_name)

        # TODO(sandy): We're only using InstanceType-based specs
        # currently. Later we'll need to snoop for more detailed
        # host filter requests.
        instance_type = request_spec['instance_type']
        name, query = selected_filter.instance_type_to_filter(instance_type)
        return selected_filter.filter_hosts(self.zone_manager, query)

    def filter_hosts(self, topic, request_spec, host_list=None):
        """Return a list of hosts which are acceptable for scheduling.
        Return value should be a list of (hostname, capability_dict)s.
        Derived classes may override this, but may find the
        '<topic>_filter' function more appropriate.
        """
        def _default_filter(self, hostname, capabilities, request_spec):
            """Default filter function if there's no <topic>_filter"""
            # NOTE(sirp): The default logic is the equivalent to
            # AllHostsFilter
            return True

        filter_func = getattr(self, '%s_filter' % topic, _default_filter)

        if host_list is None:
            first_run = True
            host_list = self.zone_manager.service_states.iteritems()
        else:
            first_run = False

        filtered_hosts = []
        for host, services in host_list:
            if first_run:
                if topic not in services:
                    continue
                services = services[topic]
            if filter_func(host, services, request_spec):
                filtered_hosts.append((host, services))
        return filtered_hosts
        return [(host, services) for host, services in host_list
                if basic_ram_filter(host, services, request_spec)]

    def weigh_hosts(self, topic, request_spec, hosts):
        """Derived classes may override this to provide more sophisticated
        scheduling objectives
        """This version assigns a weight of 1 to all hosts, making selection
        of any host basically a random event. Override this method in your
        subclass to add logic to prefer one potential host over another.
        """
        # NOTE(sirp): The default logic is the same as the NoopCostFunction
        return [dict(weight=1, hostname=hostname, capabilities=capabilities)
                for hostname, capabilities in hosts]

    def compute_consume(self, capabilities, instance_type):
        """Consume compute resources for selected host"""

        requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
        capabilities['host_memory_free'] -= requested_mem

    def consume_resources(self, topic, capabilities, instance_type):
        """Consume resources for a specific host. 'host' is a tuple
        of the hostname and the services"""

        consume_func = getattr(self, '%s_consume' % topic, None)
        if not consume_func:
            return
        consume_func(capabilities, instance_type)

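The tail of AbstractScheduler defines the default policy in two tiny pieces: basic_ram_filter keeps only hosts with enough free memory, and weigh_hosts gives every survivor the same weight. A self-contained sketch of that pair (host data below is made up; real capabilities come from the ZoneManager):

def basic_ram_filter(hostname, capabilities, request_spec):
    # keep hosts whose free RAM covers the requested instance type
    requested = request_spec['instance_type']['memory_mb'] * 1024 * 1024
    return capabilities['host_memory_free'] >= requested


def filter_hosts(request_spec, host_list):
    return [(host, caps) for host, caps in host_list
            if basic_ram_filter(host, caps, request_spec)]


def weigh_hosts(request_spec, hosts):
    # every surviving host gets weight 1, so selection is effectively random
    return [dict(weight=1, hostname=host, capabilities=caps)
            for host, caps in hosts]


hosts = [('host1', {'host_memory_free': 4 * 1024 ** 3}),
         ('host2', {'host_memory_free': 512 * 1024 ** 2})]
spec = {'instance_type': {'memory_mb': 2048}}
print(weigh_hosts(spec, filter_hosts(spec, hosts)))  # only host1 survives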
59 nova/scheduler/base_scheduler.py Normal file
@@ -0,0 +1,59 @@
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
The BaseScheduler is the base class Scheduler for creating instances
across zones. There are two expansion points to this class for:
1. Assigning Weights to hosts for requested instances
2. Filtering Hosts based on required instance capabilities
"""

from nova import flags
from nova import log as logging

from nova.scheduler import abstract_scheduler
from nova.scheduler import host_filter

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.scheduler.base_scheduler')


class BaseScheduler(abstract_scheduler.AbstractScheduler):
    """Base class for creating Schedulers that can work across any nova
    deployment, from simple designs to multiply-nested zones.
    """
    def filter_hosts(self, topic, request_spec, hosts=None):
        """Filter the full host list (from the ZoneManager)"""
        filter_name = request_spec.get('filter', None)
        # Make sure that the requested filter is legitimate.
        selected_filter = host_filter.choose_host_filter(filter_name)

        # TODO(sandy): We're only using InstanceType-based specs
        # currently. Later we'll need to snoop for more detailed
        # host filter requests.
        instance_type = request_spec.get("instance_type", None)
        if instance_type is None:
            # No way to select; return the specified hosts
            return hosts or []
        name, query = selected_filter.instance_type_to_filter(instance_type)
        return selected_filter.filter_hosts(self.zone_manager, query)

    def weigh_hosts(self, topic, request_spec, hosts):
        """Derived classes may override this to provide more sophisticated
        scheduling objectives
        """
        # NOTE(sirp): The default logic is the same as the NoopCostFunction
        return [dict(weight=1, hostname=hostname, capabilities=capabilities)
                for hostname, capabilities in hosts]
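BaseScheduler.filter_hosts ties the pieces together: pick a filter class by name (falling back to the default flag), convert the instance type into that filter's query form, then run the query. A toy version of that flow — the class and dict names here are stand-ins, not Nova's real filter registry:

class AllHostsFilter(object):
    # stand-in for the real filter classes in nova.scheduler.filters
    def instance_type_to_filter(self, instance_type):
        return (self.__class__.__name__, instance_type)

    def filter_hosts(self, service_states, query):
        return list(service_states.items())


FILTER_CLASSES = {'AllHostsFilter': AllHostsFilter}


def choose_host_filter(filter_name=None):
    return FILTER_CLASSES[filter_name or 'AllHostsFilter']()


selected = choose_host_filter()
name, query = selected.instance_type_to_filter({'memory_mb': 512})
print(selected.filter_hosts({'host1': {'compute': {}}}, query))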
@@ -20,283 +20,33 @@ either incompatible or insufficient to accept a newly-requested instance
are removed by Host Filter classes from consideration. Those that pass
the filter are then passed on for weighting or other process for ordering.

Three filters are included: AllHosts, Flavor & JSON. AllHosts just
returns the full, unfiltered list of hosts. Flavor is a hard-coded
matching mechanism based on flavor criteria and JSON is an ad-hoc
filter grammar.

Why JSON? The requests for instances may come in through the
REST interface from a user or a parent Zone.
Currently Flavors and/or InstanceTypes are used for
specifying the type of instance desired. Specific Nova users have
noted a need for a more expressive way of specifying instances.
Since we don't want to get into building a full DSL, this is a simple
form as an example of how this could be done. In reality, most
consumers will use the more rigid filters such as FlavorFilter.
Filters are in the 'filters' directory that is off the 'scheduler'
directory of nova. Additional filters can be created and added to that
directory; be sure to add them to the filters/__init__.py file so that
they are part of the nova.schedulers.filters namespace.
"""

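In code, the docstring's "more expressive way of specifying instances" boils down to one optional key: a request_spec may name the filter to apply, and choose_host_filter() falls back to the default_host_filter flag when it is absent. An illustrative request_spec (field values made up):

request_spec = {
    'instance_type': {'name': 'tiny', 'memory_mb': 50, 'local_gb': 500,
                      'extra_specs': {}},
    'filter': 'JsonFilter',  # optional; defaults to FLAGS.default_host_filter
}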
import json
import types

from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
import nova.scheduler

LOG = logging.getLogger('nova.scheduler.host_filter')

FLAGS = flags.FLAGS
flags.DEFINE_string('default_host_filter',
                    'nova.scheduler.host_filter.AllHostsFilter',
                    'Which filter to use for filtering hosts.')


class HostFilter(object):
    """Base class for host filters."""
def _get_filters():
    # Imported here to avoid circular imports
    from nova.scheduler import filters

    def instance_type_to_filter(self, instance_type):
        """Convert instance_type into a filter for most common use-case."""
        raise NotImplementedError()
    def get_itm(nm):
        return getattr(filters, nm)

    def filter_hosts(self, zone_manager, query):
        """Return a list of hosts that fulfill the filter."""
        raise NotImplementedError()

    def _full_name(self):
        """module.classname of the filter."""
        return "%s.%s" % (self.__module__, self.__class__.__name__)


class AllHostsFilter(HostFilter):
    """NOP host filter. Returns all hosts in ZoneManager.
    This essentially does what the old Scheduler+Chance used
    to give us.
    """

    def instance_type_to_filter(self, instance_type):
        """Return anything to prevent base-class from raising
        exception."""
        return (self._full_name(), instance_type)

    def filter_hosts(self, zone_manager, query):
        """Return a list of hosts from ZoneManager list."""
        return [(host, services)
                for host, services in zone_manager.service_states.iteritems()]


class InstanceTypeFilter(HostFilter):
    """HostFilter hard-coded to work with InstanceType records."""

    def instance_type_to_filter(self, instance_type):
        """Use instance_type to filter hosts."""
        return (self._full_name(), instance_type)

    def _satisfies_extra_specs(self, capabilities, instance_type):
        """Check that the capabilities provided by the compute service
        satisfy the extra specs associated with the instance type"""

        if 'extra_specs' not in instance_type:
            return True

        # Note(lorinh): For now, we are just checking exact matching on the
        # values. Later on, we want to handle numerical
        # values so we can represent things like number of GPU cards

        try:
            for key, value in instance_type['extra_specs'].iteritems():
                if capabilities[key] != value:
                    return False
        except KeyError:
            return False

        return True

    def filter_hosts(self, zone_manager, query):
        """Return a list of hosts that can create instance_type."""
        instance_type = query
        selected_hosts = []
        for host, services in zone_manager.service_states.iteritems():
            capabilities = services.get('compute', {})
            host_ram_mb = capabilities['host_memory_free']
            disk_bytes = capabilities['disk_available']
            spec_ram = instance_type['memory_mb']
            spec_disk = instance_type['local_gb']
            extra_specs = instance_type['extra_specs']

            if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and
                    self._satisfies_extra_specs(capabilities, instance_type)):
                selected_hosts.append((host, capabilities))
        return selected_hosts

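The extra-specs check above is exact-match only (as the NOTE says, numerical comparisons come later). Isolated for illustration, with made-up capability values:

def satisfies_extra_specs(capabilities, instance_type):
    # every extra_specs key must exist in capabilities with an equal value
    if 'extra_specs' not in instance_type:
        return True
    try:
        for key, value in instance_type['extra_specs'].items():
            if capabilities[key] != value:
                return False
    except KeyError:
        return False
    return True


caps = {'xpu_arch': 'fermi', 'host_memory_free': 2048}
spec = {'extra_specs': {'xpu_arch': 'fermi'}}
print(satisfies_extra_specs(caps, spec))  # True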
#host entries (currently) are like:
#    {'host_name-description': 'Default install of XenServer',
#     'host_hostname': 'xs-mini',
#     'host_memory_total': 8244539392,
#     'host_memory_overhead': 184225792,
#     'host_memory_free': 3868327936,
#     'host_memory_free_computed': 3840843776,
#     'host_other_config': {},
#     'host_ip_address': '192.168.1.109',
#     'host_cpu_info': {},
#     'disk_available': 32954957824,
#     'disk_total': 50394562560,
#     'disk_used': 17439604736,
#     'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
#     'host_name_label': 'xs-mini'}

# instance_type table has:
#    name = Column(String(255), unique=True)
#    memory_mb = Column(Integer)
#    vcpus = Column(Integer)
#    local_gb = Column(Integer)
#    flavorid = Column(Integer, unique=True)
#    swap = Column(Integer, nullable=False, default=0)
#    rxtx_quota = Column(Integer, nullable=False, default=0)
#    rxtx_cap = Column(Integer, nullable=False, default=0)


class JsonFilter(HostFilter):
    """Host Filter to allow simple JSON-based grammar for
    selecting hosts.
    """

    def _equals(self, args):
        """First term is == all the other terms."""
        if len(args) < 2:
            return False
        lhs = args[0]
        for rhs in args[1:]:
            if lhs != rhs:
                return False
        return True

    def _less_than(self, args):
        """First term is < all the other terms."""
        if len(args) < 2:
            return False
        lhs = args[0]
        for rhs in args[1:]:
            if lhs >= rhs:
                return False
        return True

    def _greater_than(self, args):
        """First term is > all the other terms."""
        if len(args) < 2:
            return False
        lhs = args[0]
        for rhs in args[1:]:
            if lhs <= rhs:
                return False
        return True

    def _in(self, args):
        """First term is in set of remaining terms"""
        if len(args) < 2:
            return False
        return args[0] in args[1:]

    def _less_than_equal(self, args):
        """First term is <= all the other terms."""
        if len(args) < 2:
            return False
        lhs = args[0]
        for rhs in args[1:]:
            if lhs > rhs:
                return False
        return True

    def _greater_than_equal(self, args):
        """First term is >= all the other terms."""
        if len(args) < 2:
            return False
        lhs = args[0]
        for rhs in args[1:]:
            if lhs < rhs:
                return False
        return True

    def _not(self, args):
        """Flip each of the arguments."""
        if len(args) == 0:
            return False
        return [not arg for arg in args]

    def _or(self, args):
        """True if any arg is True."""
        return True in args

    def _and(self, args):
        """True if all args are True."""
        return False not in args

    commands = {
        '=': _equals,
        '<': _less_than,
        '>': _greater_than,
        'in': _in,
        '<=': _less_than_equal,
        '>=': _greater_than_equal,
        'not': _not,
        'or': _or,
        'and': _and,
    }

    def instance_type_to_filter(self, instance_type):
        """Convert instance_type into JSON filter object."""
        required_ram = instance_type['memory_mb']
        required_disk = instance_type['local_gb']
        query = ['and',
                 ['>=', '$compute.host_memory_free', required_ram],
                 ['>=', '$compute.disk_available', required_disk]]
        return (self._full_name(), json.dumps(query))

    def _parse_string(self, string, host, services):
        """Strings prefixed with $ are capability lookups in the
        form '$service.capability[.subcap*]'
        """
        if not string:
            return None
        if string[0] != '$':
            return string

        path = string[1:].split('.')
        for item in path:
            services = services.get(item, None)
            if not services:
                return None
        return services

    def _process_filter(self, zone_manager, query, host, services):
        """Recursively parse the query structure."""
        if len(query) == 0:
            return True
        cmd = query[0]
        method = self.commands[cmd]  # Let exception fly.
        cooked_args = []
        for arg in query[1:]:
            if isinstance(arg, list):
                arg = self._process_filter(zone_manager, arg, host, services)
            elif isinstance(arg, basestring):
                arg = self._parse_string(arg, host, services)
            if arg != None:
                cooked_args.append(arg)
        result = method(self, cooked_args)
        return result

    def filter_hosts(self, zone_manager, query):
        """Return a list of hosts that can fulfill filter."""
        expanded = json.loads(query)
        hosts = []
        for host, services in zone_manager.service_states.iteritems():
            r = self._process_filter(zone_manager, expanded, host, services)
            if isinstance(r, list):
                r = True in r
            if r:
                hosts.append((host, services))
        return hosts


FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter]
    return [get_itm(itm) for itm in dir(filters)
            if (type(get_itm(itm)) is types.TypeType)
            and issubclass(get_itm(itm), filters.AbstractHostFilter)
            and get_itm(itm) is not filters.AbstractHostFilter]


def choose_host_filter(filter_name=None):
@@ -307,8 +57,7 @@ def choose_host_filter(filter_name=None):
    """
    if not filter_name:
        filter_name = FLAGS.default_host_filter
    for filter_class in FILTERS:
        host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__)
        if host_match == filter_name:
    for filter_class in _get_filters():
        if filter_class.__name__ == filter_name:
            return filter_class()
    raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)

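JsonFilter's grammar is just nested lists: the first element names an operator from the commands table, the rest are arguments, and '$'-prefixed strings are capability lookups. A compact, runnable re-implementation of the recursive evaluation, simplified to flat capability dicts (the real filter resolves dotted paths like '$compute.host_memory_free' through the per-service dict):

import json

COMMANDS = {
    '=': lambda args: all(a == args[0] for a in args[1:]),
    '<': lambda args: all(args[0] < a for a in args[1:]),
    '>': lambda args: all(args[0] > a for a in args[1:]),
    '<=': lambda args: all(args[0] <= a for a in args[1:]),
    '>=': lambda args: all(args[0] >= a for a in args[1:]),
    'in': lambda args: args[0] in args[1:],
    'or': lambda args: any(args),
    'and': lambda args: all(args),
}


def evaluate(query, capabilities):
    op, raw_args = query[0], query[1:]
    args = []
    for arg in raw_args:
        if isinstance(arg, list):
            arg = evaluate(arg, capabilities)   # recurse into sub-queries
        elif isinstance(arg, str) and arg.startswith('$'):
            arg = capabilities.get(arg[1:])     # capability lookup
        args.append(arg)
    return COMMANDS[op](args)


query = json.loads('["and", [">=", "$host_memory_free", 50], '
                   '[">=", "$disk_available", 500]]')
print(evaluate(query, {'host_memory_free': 100, 'disk_available': 1000}))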
@@ -22,14 +22,12 @@ The cost-function and weights are tabulated, and the host with the least cost
is then selected for provisioning.
"""

# TODO(dabo): This class will be removed in the next merge prop; it remains now
# because much of the code will be refactored into different classes.

import collections

from nova import flags
from nova import log as logging
from nova.scheduler import abstract_scheduler
from nova.scheduler import base_scheduler
from nova import utils
from nova import exception

@@ -37,14 +35,16 @@ LOG = logging.getLogger('nova.scheduler.least_cost')

FLAGS = flags.FLAGS
flags.DEFINE_list('least_cost_scheduler_cost_functions',
                  ['nova.scheduler.least_cost.noop_cost_fn'],
                  'Which cost functions the LeastCostScheduler should use.')


# TODO(sirp): Once we have enough of these rules, we can break them out into a
# cost_functions.py file (perhaps in a least_cost_scheduler directory)
flags.DEFINE_integer('noop_cost_fn_weight', 1,
                     'How much weight to give the noop cost function')
flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
                     'How much weight to give the fill-first cost function')


def noop_cost_fn(host):
@@ -52,87 +52,20 @@ def noop_cost_fn(host):
    return 1


flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
                     'How much weight to give the fill-first cost function')


def compute_fill_first_cost_fn(host):
    """Prefer hosts that have less ram available, filter_hosts will exclude
    hosts that don't have enough ram"""
    hostname, caps = host
    free_mem = caps['host_memory_free']
    hosts that don't have enough ram.
    """
    hostname, service = host
    caps = service.get("compute", {})
    free_mem = caps.get("host_memory_free", 0)
    return free_mem


class LeastCostScheduler(abstract_scheduler.AbstractScheduler):
    def __init__(self, *args, **kwargs):
        self.cost_fns_cache = {}
        super(LeastCostScheduler, self).__init__(*args, **kwargs)

    def get_cost_fns(self, topic):
        """Returns a list of tuples containing weights and cost functions to
        use for weighing hosts
        """

        if topic in self.cost_fns_cache:
            return self.cost_fns_cache[topic]

        cost_fns = []
        for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
            if '.' in cost_fn_str:
                short_name = cost_fn_str.split('.')[-1]
            else:
                short_name = cost_fn_str
                cost_fn_str = "%s.%s.%s" % (
                    __name__, self.__class__.__name__, short_name)

            if not (short_name.startswith('%s_' % topic) or
                    short_name.startswith('noop')):
                continue

            try:
                # NOTE(sirp): import_class is somewhat misnamed since it can
                # import any callable from a module
                cost_fn = utils.import_class(cost_fn_str)
            except exception.ClassNotFound:
                raise exception.SchedulerCostFunctionNotFound(
                    cost_fn_str=cost_fn_str)

            try:
                flag_name = "%s_weight" % cost_fn.__name__
                weight = getattr(FLAGS, flag_name)
            except AttributeError:
                raise exception.SchedulerWeightFlagNotFound(
                    flag_name=flag_name)

            cost_fns.append((weight, cost_fn))

        self.cost_fns_cache[topic] = cost_fns
        return cost_fns

    def weigh_hosts(self, topic, request_spec, hosts):
        """Returns a list of dictionaries of form:
        [ {weight: weight, hostname: hostname, capabilities: capabs} ]
        """

        cost_fns = self.get_cost_fns(topic)
        costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)

        weighted = []
        weight_log = []
        for cost, (hostname, caps) in zip(costs, hosts):
            weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
            weight_dict = dict(weight=cost, hostname=hostname,
                               capabilities=caps)
            weighted.append(weight_dict)

        LOG.debug(_("Weighted Costs => %s") % weight_log)
        return weighted


def normalize_list(L):
    """Normalize an array of numbers such that each element satisfies:
    0 <= e <= 1"""
    0 <= e <= 1
    """
    if not L:
        return L
    max_ = max(L)
@@ -160,12 +93,10 @@ def weighted_sum(domain, weighted_fns, normalize=True):
    score_table = collections.defaultdict(list)
    for weight, fn in weighted_fns:
        scores = [fn(elem) for elem in domain]

        if normalize:
            norm_scores = normalize_list(scores)
        else:
            norm_scores = scores

        for idx, score in enumerate(norm_scores):
            weighted_score = score * weight
            score_table[idx].append(weighted_score)
@@ -175,5 +106,66 @@ def weighted_sum(domain, weighted_fns, normalize=True):
    for idx in sorted(score_table):
        elem_score = sum(score_table[idx])
        domain_scores.append(elem_score)

    return domain_scores


class LeastCostScheduler(base_scheduler.BaseScheduler):
    def __init__(self, *args, **kwargs):
        self.cost_fns_cache = {}
        super(LeastCostScheduler, self).__init__(*args, **kwargs)

    def get_cost_fns(self, topic):
        """Returns a list of tuples containing weights and cost functions to
        use for weighing hosts
        """
        if topic in self.cost_fns_cache:
            return self.cost_fns_cache[topic]
        cost_fns = []
        for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
            if '.' in cost_fn_str:
                short_name = cost_fn_str.split('.')[-1]
            else:
                short_name = cost_fn_str
                cost_fn_str = "%s.%s.%s" % (
                    __name__, self.__class__.__name__, short_name)
            if not (short_name.startswith('%s_' % topic) or
                    short_name.startswith('noop')):
                continue

            try:
                # NOTE(sirp): import_class is somewhat misnamed since it can
                # import any callable from a module
                cost_fn = utils.import_class(cost_fn_str)
            except exception.ClassNotFound:
                raise exception.SchedulerCostFunctionNotFound(
                    cost_fn_str=cost_fn_str)

            try:
                flag_name = "%s_weight" % cost_fn.__name__
                weight = getattr(FLAGS, flag_name)
            except AttributeError:
                raise exception.SchedulerWeightFlagNotFound(
                    flag_name=flag_name)
            cost_fns.append((weight, cost_fn))

        self.cost_fns_cache[topic] = cost_fns
        return cost_fns

    def weigh_hosts(self, topic, request_spec, hosts):
        """Returns a list of dictionaries of form:
        [ {weight: weight, hostname: hostname, capabilities: capabs} ]
        """
        cost_fns = self.get_cost_fns(topic)
        costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)

        weighted = []
        weight_log = []
        for cost, (hostname, service) in zip(costs, hosts):
            caps = service[topic]
            weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
            weight_dict = dict(weight=cost, hostname=hostname,
                               capabilities=caps)
            weighted.append(weight_dict)

        LOG.debug(_("Weighted Costs => %s") % weight_log)
        return weighted

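weighted_sum is the heart of the least-cost scheme: each cost function scores every host, scores are normalized into [0, 1], scaled by the function's flag-defined weight, and summed per host; lower totals win. A condensed, runnable equivalent (host data made up):

import collections


def normalize_list(L):
    # scale so every element satisfies 0 <= e <= 1
    if not L:
        return L
    max_ = float(max(L))
    return [e / max_ for e in L] if max_ else L


def weighted_sum(domain, weighted_fns, normalize=True):
    score_table = collections.defaultdict(list)
    for weight, fn in weighted_fns:
        scores = [fn(elem) for elem in domain]
        if normalize:
            scores = normalize_list(scores)
        for idx, score in enumerate(scores):
            score_table[idx].append(score * weight)
    return [sum(score_table[idx]) for idx in sorted(score_table)]


def fill_first(host):
    # mirror compute_fill_first_cost_fn: prefer hosts with less free RAM
    return host[1]['host_memory_free']


hosts = [('host1', {'host_memory_free': 2048}),
         ('host2', {'host_memory_free': 512})]
print(weighted_sum(hosts, [(1, fill_first)]))  # host2 costs less -> chosen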
@@ -77,6 +77,9 @@ class FakeZoneManager(zone_manager.ZoneManager):
        'host3': {
            'compute': {'host_memory_free': 3221225472},
        },
        'host4': {
            'compute': {'host_memory_free': 999999999},
        },
    }

@@ -21,6 +21,7 @@ import json
from nova import exception
from nova import test
from nova.scheduler import host_filter
from nova.scheduler import filters


class FakeZoneManager:
@@ -55,7 +56,7 @@ class HostFilterTestCase(test.TestCase):

    def setUp(self):
        super(HostFilterTestCase, self).setUp()
        default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter'
        default_host_filter = 'AllHostsFilter'
        self.flags(default_host_filter=default_host_filter)
        self.instance_type = dict(name='tiny',
                                  memory_mb=50,
@@ -98,13 +99,10 @@ class HostFilterTestCase(test.TestCase):
    def test_choose_filter(self):
        # Test default filter ...
        hf = host_filter.choose_host_filter()
        self.assertEquals(hf._full_name(),
                          'nova.scheduler.host_filter.AllHostsFilter')
        self.assertEquals(hf._full_name().split(".")[-1], 'AllHostsFilter')
        # Test valid filter ...
        hf = host_filter.choose_host_filter(
            'nova.scheduler.host_filter.InstanceTypeFilter')
        self.assertEquals(hf._full_name(),
                          'nova.scheduler.host_filter.InstanceTypeFilter')
        hf = host_filter.choose_host_filter('InstanceTypeFilter')
        self.assertEquals(hf._full_name().split(".")[-1], 'InstanceTypeFilter')
        # Test invalid filter ...
        try:
            host_filter.choose_host_filter('does not exist')
@@ -113,7 +111,7 @@ class HostFilterTestCase(test.TestCase):
            pass

    def test_all_host_filter(self):
        hf = host_filter.AllHostsFilter()
        hf = filters.AllHostsFilter()
        cooked = hf.instance_type_to_filter(self.instance_type)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(10, len(hosts))
@@ -121,11 +119,10 @@ class HostFilterTestCase(test.TestCase):
            self.assertTrue(host.startswith('host'))

    def test_instance_type_filter(self):
        hf = host_filter.InstanceTypeFilter()
        hf = filters.InstanceTypeFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = hf.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
                          name)
        self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter')
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
@@ -134,21 +131,20 @@ class HostFilterTestCase(test.TestCase):
        self.assertEquals('host10', just_hosts[5])

    def test_instance_type_filter_extra_specs(self):
        hf = host_filter.InstanceTypeFilter()
        hf = filters.InstanceTypeFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = hf.instance_type_to_filter(self.gpu_instance_type)
        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
                          name)
        self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter')
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(1, len(hosts))
        just_hosts = [host for host, caps in hosts]
        self.assertEquals('host07', just_hosts[0])

    def test_json_filter(self):
        hf = host_filter.JsonFilter()
        hf = filters.JsonFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = hf.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
        self.assertEquals(name.split(".")[-1], 'JsonFilter')
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
@@ -192,7 +188,6 @@ class HostFilterTestCase(test.TestCase):
        raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(5, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()

@@ -15,6 +15,7 @@
|
||||
"""
|
||||
Tests For Least Cost Scheduler
|
||||
"""
|
||||
import copy
|
||||
|
||||
from nova import test
|
||||
from nova.scheduler import least_cost
|
||||
@@ -81,7 +82,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
|
||||
super(LeastCostSchedulerTestCase, self).tearDown()
|
||||
|
||||
def assertWeights(self, expected, num, request_spec, hosts):
|
||||
weighted = self.sched.weigh_hosts(num, request_spec, hosts)
|
||||
weighted = self.sched.weigh_hosts("compute", request_spec, hosts)
|
||||
self.assertDictListMatch(weighted, expected, approx_equal=True)
|
||||
|
||||
def test_no_hosts(self):
|
||||
@@ -122,19 +123,24 @@ class LeastCostSchedulerTestCase(test.TestCase):
        self.flags(least_cost_scheduler_cost_functions=[
                'nova.scheduler.least_cost.compute_fill_first_cost_fn'],
                compute_fill_first_cost_fn_weight=1)

        num = 1
        instance_type = {'memory_mb': 1024}
        request_spec = {'instance_type': instance_type}
        hosts = self.sched.filter_hosts('compute', request_spec, None)
        svc_states = self.sched.zone_manager.service_states.iteritems()
        all_hosts = [(host, services["compute"])
                     for host, services in svc_states
                     if "compute" in services]
        hosts = self.sched.filter_hosts('compute', request_spec, all_hosts)

        expected = []
        for idx, (hostname, caps) in enumerate(hosts):
        for idx, (hostname, services) in enumerate(hosts):
            caps = copy.deepcopy(services["compute"])
            # Costs are normalized so over 10 hosts, each host with increasing
            # free ram will cost 1/N more. Since the lowest cost host has some
            # free ram, we add in the 1/N for the base_cost
            weight = 0.1 + (0.1 * idx)
            weight_dict = dict(weight=weight, hostname=hostname)
            expected.append(weight_dict)
            wtd_dict = dict(hostname=hostname, weight=weight,
                            capabilities=caps)
            expected.append(wtd_dict)

        self.assertWeights(expected, num, request_spec, hosts)
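(Reviewer note: to make the expected weights above concrete, with N = 10 hosts whose free RAM grows linearly, the normalized fill-first cost steps by 1/N = 0.1 per host, plus the 0.1 base for the cheapest host:)

    N = 10
    expected_weights = [0.1 + 0.1 * idx for idx in range(N)]
    # least free RAM -> cheapest (0.1); most free RAM -> costliest (1.0)
    assert expected_weights[0] == 0.1
    assert abs(expected_weights[-1] - 1.0) < 1e-9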
@@ -32,6 +32,7 @@ from nova import context
from nova import exception
from nova import test
from nova import wsgi
from nova.api import auth
from nova.api import ec2
from nova.api.ec2 import apirequest
from nova.api.ec2 import cloud

@@ -199,7 +200,7 @@ class ApiEc2TestCase(test.TestCase):
        # NOTE(vish): skipping the Authorizer
        roles = ['sysadmin', 'netadmin']
        ctxt = context.RequestContext('fake', 'fake', roles=roles)
        self.app = wsgi.InjectContext(ctxt,
        self.app = auth.InjectContext(ctxt,
            ec2.Requestify(ec2.Authorizer(ec2.Executor()),
                           'nova.api.ec2.cloud.CloudController'))


@@ -487,6 +487,17 @@ class CloudTestCase(test.TestCase):
        db.service_destroy(self.context, comp1['id'])
        db.service_destroy(self.context, comp2['id'])

    def test_describe_instances_deleted(self):
        args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
        inst1 = db.instance_create(self.context, args1)
        args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'}
        inst2 = db.instance_create(self.context, args2)
        db.instance_destroy(self.context, inst1.id)
        result = self.cloud.describe_instances(self.context)
        result = result['reservationSet'][0]['instancesSet']
        self.assertEqual(result[0]['instanceId'],
                         ec2utils.id_to_ec2_id(inst2.id))

    def _block_device_mapping_create(self, instance_id, mappings):
        volumes = []
        for bdm in mappings:

@@ -76,3 +76,20 @@ class DbApiTestCase(test.TestCase):
        self.assertEqual(instance['id'], result['id'])
        self.assertEqual(result['fixed_ips'][0]['floating_ips'][0].address,
                         '1.2.1.2')

    def test_instance_get_all_by_filters(self):
        args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
        inst1 = db.instance_create(self.context, args)
        inst2 = db.instance_create(self.context, args)
        result = db.instance_get_all_by_filters(self.context, {})
        self.assertEqual(2, len(result))

    def test_instance_get_all_by_filters_deleted(self):
        args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
        inst1 = db.instance_create(self.context, args1)
        args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'}
        inst2 = db.instance_create(self.context, args2)
        db.instance_destroy(self.context, inst1.id)
        result = db.instance_get_all_by_filters(self.context.elevated(), {})
        self.assertEqual(1, len(result))
        self.assertEqual(result[0].id, inst2.id)
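(Reviewer note: the two tests above pin down the soft-delete semantics these changes rely on: a destroyed instance drops out of default listings. A toy sketch of that filtering rule, independent of Nova's real db layer; names here are illustrative:)

    instances = [dict(id=1, deleted=True), dict(id=2, deleted=False)]

    def get_all_by_filters(rows, read_deleted=False):
        # destroyed rows are excluded unless the caller asks for them
        return [r for r in rows if read_deleted or not r['deleted']]

    assert [r['id'] for r in get_all_by_filters(instances)] == [2]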
@@ -1,200 +0,0 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""

import json

from nova import exception
from nova import test
from nova.scheduler import host_filter


class FakeZoneManager:
    pass


class HostFilterTestCase(test.TestCase):
    """Test case for host filters."""

    def _host_caps(self, multiplier):
        # Returns host capabilities in the following way:
        # host1 = memory:free 10 (100max)
        #         disk:available 100 (1000max)
        # hostN = memory:free 10 + 10N
        #         disk:available 100 + 100N
        # in other words: hostN has more resources than host0
        # which means ... don't go above 10 hosts.
        return {'host_name-description': 'XenServer %s' % multiplier,
                'host_hostname': 'xs-%s' % multiplier,
                'host_memory_total': 100,
                'host_memory_overhead': 10,
                'host_memory_free': 10 + multiplier * 10,
                'host_memory_free-computed': 10 + multiplier * 10,
                'host_other-config': {},
                'host_ip_address': '192.168.1.%d' % (100 + multiplier),
                'host_cpu_info': {},
                'disk_available': 100 + multiplier * 100,
                'disk_total': 1000,
                'disk_used': 0,
                'host_uuid': 'xxx-%d' % multiplier,
                'host_name-label': 'xs-%s' % multiplier}

    def setUp(self):
        super(HostFilterTestCase, self).setUp()
        default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter'
        self.flags(default_host_filter=default_host_filter)
        self.instance_type = dict(name='tiny',
                                  memory_mb=50,
                                  vcpus=10,
                                  local_gb=500,
                                  flavorid=1,
                                  swap=500,
                                  rxtx_quota=30000,
                                  rxtx_cap=200,
                                  extra_specs={})

        self.zone_manager = FakeZoneManager()
        states = {}
        for x in xrange(10):
            states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
        self.zone_manager.service_states = states

    def test_choose_filter(self):
        # Test default filter ...
        hf = host_filter.choose_host_filter()
        self.assertEquals(hf._full_name(),
                          'nova.scheduler.host_filter.AllHostsFilter')
        # Test valid filter ...
        hf = host_filter.choose_host_filter(
            'nova.scheduler.host_filter.InstanceTypeFilter')
        self.assertEquals(hf._full_name(),
                          'nova.scheduler.host_filter.InstanceTypeFilter')
        # Test invalid filter ...
        try:
            host_filter.choose_host_filter('does not exist')
            self.fail("Should not find host filter.")
        except exception.SchedulerHostFilterNotFound:
            pass

    def test_all_host_filter(self):
        hf = host_filter.AllHostsFilter()
        cooked = hf.instance_type_to_filter(self.instance_type)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(10, len(hosts))
        for host, capabilities in hosts:
            self.assertTrue(host.startswith('host'))

    def test_instance_type_filter(self):
        hf = host_filter.InstanceTypeFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = hf.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
                          name)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        self.assertEquals('host05', just_hosts[0])
        self.assertEquals('host10', just_hosts[5])

    def test_json_filter(self):
        hf = host_filter.JsonFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = hf.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        self.assertEquals('host05', just_hosts[0])
        self.assertEquals('host10', just_hosts[5])

        # Try some custom queries

        raw = ['or',
               ['and',
                ['<', '$compute.host_memory_free', 30],
                ['<', '$compute.disk_available', 300],
               ],
               ['and',
                ['>', '$compute.host_memory_free', 70],
                ['>', '$compute.disk_available', 700],
               ],
              ]

        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(5, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([1, 2, 8, 9, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        raw = ['not',
               ['=', '$compute.host_memory_free', 30],
              ]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(9, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(5, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([2, 4, 6, 8, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        # Try some bogus input ...
        raw = ['unknown command', ]
        cooked = json.dumps(raw)
        try:
            hf.filter_hosts(self.zone_manager, cooked)
            self.fail("Should give KeyError")
        except KeyError, e:
            pass

        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
            ['not', True, False, True, False])))

        try:
            hf.filter_hosts(self.zone_manager, json.dumps(
                'not', True, False, True, False))
            self.fail("Should give KeyError")
        except KeyError, e:
            pass

        self.assertFalse(hf.filter_hosts(self.zone_manager,
                                         json.dumps(['=', '$foo', 100])))
        self.assertFalse(hf.filter_hosts(self.zone_manager,
                                         json.dumps(['=', '$.....', 100])))
        self.assertFalse(hf.filter_hosts(self.zone_manager,
            json.dumps(
                ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))

        self.assertFalse(hf.filter_hosts(self.zone_manager,
            json.dumps(['=', {}, ['>', '$missing....foo']])))
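(Reviewer note: the deleted tests above document JsonFilter's query grammar: prefix-notation JSON lists whose first element is an operator and whose '$compute.xxx' strings dereference host capabilities. A hedged evaluator sketch consistent with those tests; the real filter now lives in nova.scheduler.filters, and edge cases such as empty queries or multi-argument 'not' may differ:)

    def eval_query(query, services):
        # services: one host's capability dict, e.g. {'compute': {...}}
        op = query[0]
        args = []
        for term in query[1:]:
            if isinstance(term, list):
                args.append(eval_query(term, services))
            elif isinstance(term, basestring) and term.startswith('$'):
                node = services
                for key in term[1:].split('.'):
                    node = node.get(key, {}) if isinstance(node, dict) else {}
                if node == {}:
                    return False  # unknown '$' path fails the host
                args.append(node)
            else:
                args.append(term)
        if op == 'or':
            return any(args)
        if op == 'and':
            return all(args)
        if op == 'not':
            return not all(args)
        if op == 'in':
            return args[0] in args[1:]
        ops = {'=': lambda x, y: x == y,
               '<': lambda x, y: x < y,
               '>': lambda x, y: x > y,
               '<=': lambda x, y: x <= y,
               '>=': lambda x, y: x >= y}
        return all(ops[op](args[0], y) for y in args[1:])  # unknown op -> KeyError

    # e.g. the 'in' query from the tests, against a host with 20 MB free:
    # eval_query(['in', '$compute.host_memory_free', 20, 40],
    #            {'compute': {'host_memory_free': 20}})  ->  True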
@@ -836,6 +836,7 @@ class LibvirtConnTestCase(test.TestCase):
            count = (0 <= str(e.message).find('Unexpected method call'))

        shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name))
        shutil.rmtree(os.path.join(FLAGS.instances_path, '_base'))

        self.assertTrue(count)

@@ -108,11 +108,14 @@ floating_ip_fields = {'id': 0,

vifs = [{'id': 0,
         'address': 'DE:AD:BE:EF:00:00',
         'uuid': '00000000-0000-0000-0000-0000000000000000',
         'network_id': 0,
         'network': FakeModel(**networks[0]),
         'instance_id': 0},
        {'id': 1,
         'address': 'DE:AD:BE:EF:00:01',
         'uuid': '00000000-0000-0000-0000-0000000000000001',
         'network_id': 0,
         'network_id': 1,
         'network': FakeModel(**networks[1]),
         'instance_id': 0}]

@@ -163,6 +166,8 @@ class FlatNetworkTestCase(test.TestCase):
                    'ips': 'DONTCARE',
                    'label': 'test%s' % i,
                    'mac': 'DE:AD:BE:EF:00:0%s' % i,
                    'vif_uuid': ('00000000-0000-0000-0000-000000000000000%s' %
                                 i),
                    'rxtx_cap': 'DONTCARE',
                    'should_create_vlan': False,
                    'should_create_bridge': False}