merge with trey
Authors
@@ -30,6 +30,7 @@ Gabe Westmaas <gabe.westmaas@rackspace.com>
 Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
 Hisaki Ohara <hisaki.ohara@intel.com>
 Ilya Alekseyev <ialekseev@griddynamics.com>
+Isaku Yamahata <yamahata@valinux.co.jp>
 Jason Koelker <jason@koelker.net>
 Jay Pipes <jaypipes@gmail.com>
 Jesse Andrews <anotherjesse@gmail.com>
@@ -58,6 +59,7 @@ Mark Washenberger <mark.washenberger@rackspace.com>
 Masanori Itoh <itoumsn@nttdata.co.jp>
 Matt Dietz <matt.dietz@rackspace.com>
 Michael Gundlach <michael.gundlach@rackspace.com>
+Mike Scherbakov <mihgen@gmail.com>
 Monsyne Dragon <mdragon@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
 MORITA Kazutaka <morita.kazutaka@gmail.com>
@@ -83,6 +85,7 @@ Trey Morris <trey.morris@rackspace.com>
 Tushar Patil <tushar.vitthal.patil@gmail.com>
 Vasiliy Shlykov <vash@vasiliyshlykov.org>
 Vishvananda Ishaya <vishvananda@gmail.com>
+Vivek Y S <vivek.ys@gmail.com>
 William Wolf <throughnothing@gmail.com>
 Yoshiaki Tamura <yoshi@midokura.jp>
 Youcef Laribi <Youcef.Laribi@eu.citrix.com>
bin/nova-manage
@@ -53,7 +53,6 @@
 CLI interface for nova management.
 """

-import datetime
 import gettext
 import glob
 import json
@@ -78,6 +77,7 @@ from nova import crypto
 from nova import db
 from nova import exception
 from nova import flags
+from nova import image
 from nova import log as logging
 from nova import quota
 from nova import rpc
@@ -97,7 +97,7 @@ flags.DECLARE('vlan_start', 'nova.network.manager')
 flags.DECLARE('vpn_start', 'nova.network.manager')
 flags.DECLARE('fixed_range_v6', 'nova.network.manager')
 flags.DECLARE('images_path', 'nova.image.local')
-flags.DECLARE('libvirt_type', 'nova.virt.libvirt_conn')
+flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
 flags.DEFINE_flag(flags.HelpFlag())
 flags.DEFINE_flag(flags.HelpshortFlag())
 flags.DEFINE_flag(flags.HelpXMLFlag())
@@ -423,12 +423,16 @@ class ProjectCommands(object):
         arguments: project_id [key] [value]"""
         ctxt = context.get_admin_context()
         if key:
+            if value.lower() == 'unlimited':
+                value = None
             try:
                 db.quota_update(ctxt, project_id, key, value)
             except exception.ProjectQuotaNotFound:
                 db.quota_create(ctxt, project_id, key, value)
-        project_quota = quota.get_quota(ctxt, project_id)
+        project_quota = quota.get_project_quotas(ctxt, project_id)
         for key, value in project_quota.iteritems():
+            if value is None:
+                value = 'unlimited'
             print '%s: %s' % (key, value)

     def remove(self, project_id, user_id):
@@ -539,7 +543,7 @@ class FloatingIpCommands(object):
         for floating_ip in floating_ips:
             instance = None
             if floating_ip['fixed_ip']:
-                instance = floating_ip['fixed_ip']['instance']['ec2_id']
+                instance = floating_ip['fixed_ip']['instance']['hostname']
             print "%s\t%s\t%s" % (floating_ip['host'],
                                   floating_ip['address'],
                                   instance)
@@ -702,7 +706,7 @@ class ServiceCommands(object):
         """Show a list of all running services. Filter by host & service name.
         args: [host] [service]"""
         ctxt = context.get_admin_context()
-        now = datetime.datetime.utcnow()
+        now = utils.utcnow()
         services = db.service_get_all(ctxt)
         if host:
             services = [s for s in services if s['host'] == host]
@@ -949,7 +953,7 @@ class ImageCommands(object):
     """Methods for dealing with a cloud in an odd state"""

     def __init__(self, *args, **kwargs):
-        self.image_service = utils.import_object(FLAGS.image_service)
+        self.image_service = image.get_default_image_service()

     def _register(self, container_format, disk_format,
                   path, owner, name=None, is_public='T',
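Note on the quota subcommand above: the CLI accepts the literal string 'unlimited', stores it as None, and renders None back as 'unlimited' when printing. A minimal standalone sketch of that round-trip, with a plain dict standing in for the real db.quota_* calls (the dict and helper names are illustrative, not nova's):

quotas = {}  # stands in for the nova quota table


def quota_set(project_id, key, value):
    # 'unlimited' on the command line is stored as None.
    if value.lower() == 'unlimited':
        value = None
    else:
        value = int(value)
    quotas[(project_id, key)] = value


def quota_display(project_id, key):
    # None is rendered back as 'unlimited' for the operator.
    value = quotas.get((project_id, key))
    return 'unlimited' if value is None else str(value)


quota_set('proj1', 'instances', 'unlimited')
assert quota_display('proj1', 'instances') == 'unlimited'
quota_set('proj1', 'cores', '20')
assert quota_display('proj1', 'cores') == '20'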
nova/auth/ldapdriver.py
@@ -24,6 +24,7 @@ other backends by creating another class that exposes the same
 public methods.
 """

+import functools
 import sys

 from nova import exception
@@ -68,6 +69,12 @@ flags.DEFINE_string('ldap_developer',
 LOG = logging.getLogger("nova.ldapdriver")


+if FLAGS.memcached_servers:
+    import memcache
+else:
+    from nova import fakememcache as memcache
+
+
 # TODO(vish): make an abstract base class with the same public methods
 #             to define a set interface for AuthDrivers. I'm delaying
 #             creating this now because I'm expecting an auth refactor
@@ -85,6 +92,7 @@ def _clean(attr):

 def sanitize(fn):
     """Decorator to sanitize all args"""
+    @functools.wraps(fn)
     def _wrapped(self, *args, **kwargs):
         args = [_clean(x) for x in args]
         kwargs = dict((k, _clean(v)) for (k, v) in kwargs)
@@ -103,29 +111,56 @@ class LdapDriver(object):
     isadmin_attribute = 'isNovaAdmin'
     project_attribute = 'owner'
     project_objectclass = 'groupOfNames'
+    conn = None
+    mc = None

     def __init__(self):
         """Imports the LDAP module"""
         self.ldap = __import__('ldap')
-        self.conn = None
         if FLAGS.ldap_schema_version == 1:
             LdapDriver.project_pattern = '(objectclass=novaProject)'
             LdapDriver.isadmin_attribute = 'isAdmin'
             LdapDriver.project_attribute = 'projectManager'
             LdapDriver.project_objectclass = 'novaProject'
+        self.__cache = None
+        if LdapDriver.conn is None:
+            LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
+            LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
+                                          FLAGS.ldap_password)
+        if LdapDriver.mc is None:
+            LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)

     def __enter__(self):
-        """Creates the connection to LDAP"""
-        self.conn = self.ldap.initialize(FLAGS.ldap_url)
-        self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password)
+        # TODO(yorik-sar): Should be per-request cache, not per-driver-request
+        self.__cache = {}
         return self

     def __exit__(self, exc_type, exc_value, traceback):
-        """Destroys the connection to LDAP"""
-        self.conn.unbind_s()
+        self.__cache = None
         return False

+    def __local_cache(key_fmt):
+        """Wrap function to cache its result in self.__cache.
+
+        Works only with functions with one fixed argument.
+        """
+        def do_wrap(fn):
+            @functools.wraps(fn)
+            def inner(self, arg, **kwargs):
+                cache_key = key_fmt % (arg,)
+                try:
+                    res = self.__cache[cache_key]
+                    LOG.debug('Local cache hit for %s by key %s' %
+                              (fn.__name__, cache_key))
+                    return res
+                except KeyError:
+                    res = fn(self, arg, **kwargs)
+                    self.__cache[cache_key] = res
+                    return res
+            return inner
+        return do_wrap
+
     @sanitize
+    @__local_cache('uid_user-%s')
     def get_user(self, uid):
         """Retrieve user by id"""
         attr = self.__get_ldap_user(uid)
@@ -134,15 +169,31 @@ class LdapDriver(object):
     @sanitize
     def get_user_from_access_key(self, access):
         """Retrieve user by access key"""
+        cache_key = 'uak_dn_%s' % (access,)
+        user_dn = self.mc.get(cache_key)
+        if user_dn:
+            user = self.__to_user(
+                self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE))
+            if user:
+                if user['access'] == access:
+                    return user
+                else:
+                    self.mc.set(cache_key, None)
         query = '(accessKey=%s)' % access
         dn = FLAGS.ldap_user_subtree
-        return self.__to_user(self.__find_object(dn, query))
+        user_obj = self.__find_object(dn, query)
+        user = self.__to_user(user_obj)
+        if user:
+            self.mc.set(cache_key, user_obj['dn'][0])
+        return user

     @sanitize
+    @__local_cache('pid_project-%s')
     def get_project(self, pid):
         """Retrieve project by id"""
-        dn = self.__project_to_dn(pid)
-        attr = self.__find_object(dn, LdapDriver.project_pattern)
+        dn = self.__project_to_dn(pid, search=False)
+        attr = self.__find_object(dn, LdapDriver.project_pattern,
+                                  scope=self.ldap.SCOPE_BASE)
         return self.__to_project(attr)

     @sanitize
@@ -395,6 +446,7 @@ class LdapDriver(object):
         """Check if project exists"""
         return self.get_project(project_id) is not None

+    @__local_cache('uid_attrs-%s')
     def __get_ldap_user(self, uid):
         """Retrieve LDAP user entry by id"""
         dn = FLAGS.ldap_user_subtree
@@ -426,12 +478,20 @@ class LdapDriver(object):
         if scope is None:
             # One of the flags is 0!
             scope = self.ldap.SCOPE_SUBTREE
+        if query is None:
+            query = "(objectClass=*)"
         try:
             res = self.conn.search_s(dn, scope, query)
         except self.ldap.NO_SUCH_OBJECT:
             return []
         # Just return the attributes
-        return [attributes for dn, attributes in res]
+        # FIXME(yorik-sar): Whole driver should be refactored to
+        #                   prevent this hack
+        res1 = []
+        for dn, attrs in res:
+            attrs['dn'] = [dn]
+            res1.append(attrs)
+        return res1

     def __find_role_dns(self, tree):
         """Find dns of role objects in given tree"""
@@ -564,6 +624,7 @@ class LdapDriver(object):
             'description': attr.get('description', [None])[0],
             'member_ids': [self.__dn_to_uid(x) for x in member_dns]}

+    @__local_cache('uid_dn-%s')
     def __uid_to_dn(self, uid, search=True):
         """Convert uid to dn"""
         # By default return a generated DN
@@ -576,6 +637,7 @@ class LdapDriver(object):
             userdn = user[0]
         return userdn

+    @__local_cache('pid_dn-%s')
     def __project_to_dn(self, pid, search=True):
         """Convert pid to dn"""
         # By default return a generated DN
@@ -603,16 +665,18 @@ class LdapDriver(object):
         else:
             return None

+    @__local_cache('dn_uid-%s')
     def __dn_to_uid(self, dn):
         """Convert user dn to uid"""
         query = '(objectclass=novaUser)'
-        user = self.__find_object(dn, query)
+        user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE)
         return user[FLAGS.ldap_user_id_attribute][0]


 class FakeLdapDriver(LdapDriver):
     """Fake Ldap Auth driver"""

-    def __init__(self):  # pylint: disable=W0231
-        __import__('nova.auth.fakeldap')
-        self.ldap = sys.modules['nova.auth.fakeldap']
+    def __init__(self):
+        import nova.auth.fakeldap
+        sys.modules['ldap'] = nova.auth.fakeldap
+        super(FakeLdapDriver, self).__init__()
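The __local_cache decorator above memoizes single-argument lookups for the lifetime of one "with driver:" block, while memcache carries the access-key-to-DN mapping across requests. A stripped-down, standalone version of the same per-request pattern (names are illustrative, not nova's):

import functools


def local_cache(key_fmt):
    """Memoize a one-argument method in the instance's _cache dict."""
    def do_wrap(fn):
        @functools.wraps(fn)
        def inner(self, arg):
            key = key_fmt % (arg,)
            if self._cache is not None and key in self._cache:
                return self._cache[key]      # hit: skip the backend call
            res = fn(self, arg)
            if self._cache is not None:
                self._cache[key] = res       # miss: store for this request
            return res
        return inner
    return do_wrap


class Driver(object):
    def __enter__(self):
        self._cache = {}                     # cache lives for one request
        return self

    def __exit__(self, *exc):
        self._cache = None                   # dropped when the request ends
        return False

    @local_cache('user-%s')
    def get_user(self, uid):
        return {'id': uid}                   # stands in for an LDAP search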
nova/auth/manager.py
@@ -73,6 +73,12 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
 LOG = logging.getLogger('nova.auth.manager')


+if FLAGS.memcached_servers:
+    import memcache
+else:
+    from nova import fakememcache as memcache
+
+
 class AuthBase(object):
     """Base class for objects relating to auth

@@ -206,6 +212,7 @@ class AuthManager(object):
     """

     _instance = None
+    mc = None

     def __new__(cls, *args, **kwargs):
         """Returns the AuthManager singleton"""
@@ -222,13 +229,8 @@ class AuthManager(object):
         self.network_manager = utils.import_object(FLAGS.network_manager)
         if driver or not getattr(self, 'driver', None):
             self.driver = utils.import_class(driver or FLAGS.auth_driver)
-        if FLAGS.memcached_servers:
-            import memcache
-        else:
-            from nova import fakememcache as memcache
-        self.mc = memcache.Client(FLAGS.memcached_servers,
-                                  debug=0)
+        if AuthManager.mc is None:
+            AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0)

     def authenticate(self, access, signature, params, verb='GET',
                      server_string='127.0.0.1:8773', path='/',
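Both auth files now pick the memcache implementation once at import time and share a single client per process through a class attribute, instead of rebuilding a client in every __init__. A minimal sketch of that fallback-plus-singleton shape (the fake client body here is illustrative; nova's fakememcache exposes the same get/set interface):

SERVERS = None  # e.g. ['127.0.0.1:11211'] when memcached is configured

if SERVERS:
    import memcache

    def make_client():
        return memcache.Client(SERVERS, debug=0)
else:
    class _FakeClient(object):
        """In-process stand-in with the same get/set interface."""
        def __init__(self):
            self._data = {}

        def get(self, key):
            return self._data.get(key)

        def set(self, key, value, time=0):
            self._data[key] = value
            return True

    def make_client():
        return _FakeClient()


class Manager(object):
    mc = None  # shared client, created once per process

    def __init__(self):
        if Manager.mc is None:
            Manager.mc = make_client()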
nova/auth/novarc.template
@@ -1,4 +1,6 @@
-NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null)
+NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) ||
+    NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}")
+NOVA_KEY_DIR=${NOVARC%%/*}
 export EC2_ACCESS_KEY="%(access)s:%(project)s"
 export EC2_SECRET_KEY="%(secret)s"
 export EC2_URL="%(ec2)s"
nova/exception.py
@@ -65,7 +65,7 @@ class BuildInProgress(Error):

 class DBError(Error):
     """Wraps an implementation specific exception."""
-    def __init__(self, inner_exception):
+    def __init__(self, inner_exception=None):
         self.inner_exception = inner_exception
         super(DBError, self).__init__(str(inner_exception))

@@ -126,7 +126,7 @@ class NotAuthorized(NovaException):
     message = _("Not authorized.")

     def __init__(self, *args, **kwargs):
-        super(NotFound, self).__init__(**kwargs)
+        super(NotAuthorized, self).__init__(**kwargs)


 class AdminRequired(NotAuthorized):
@@ -275,6 +275,14 @@ class VolumeNotFoundForInstance(VolumeNotFound):
     message = _("Volume not found for instance %(instance_id)s.")


+class SnapshotNotFound(NotFound):
+    message = _("Snapshot %(snapshot_id)s could not be found.")
+
+
+class VolumeIsBusy(Error):
+    message = _("deleting volume %(volume_name)s that has snapshot")
+
+
 class ExportDeviceNotFoundForVolume(NotFound):
     message = _("No export device found for volume %(volume_id)s.")

@@ -287,6 +295,15 @@ class DiskNotFound(NotFound):
     message = _("No disk at %(location)s")


+class InvalidImageRef(Invalid):
+    message = _("Invalid image href %(image_href)s.")
+
+
+class ListingImageRefsNotSupported(Invalid):
+    message = _("Some images have been stored via hrefs."
+        + " This version of the api does not support displaying image hrefs.")
+
+
 class ImageNotFound(NotFound):
     message = _("Image %(image_id)s could not be found.")

@@ -485,11 +502,19 @@ class ZoneNotFound(NotFound):
     message = _("Zone %(zone_id)s could not be found.")


-class SchedulerHostFilterDriverNotFound(NotFound):
-    message = _("Scheduler Host Filter Driver %(driver_name)s could"
+class SchedulerHostFilterNotFound(NotFound):
+    message = _("Scheduler Host Filter %(filter_name)s could not be found.")
+
+
+class SchedulerCostFunctionNotFound(NotFound):
+    message = _("Scheduler cost function %(cost_fn_str)s could"
                 " not be found.")
+
+
+class SchedulerWeightFlagNotFound(NotFound):
+    message = _("Scheduler weight flag not found: %(flag_name)s")


 class InstanceMetadataNotFound(NotFound):
     message = _("Instance %(instance_id)s has no metadata with "
                 "key %(metadata_key)s.")
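These exception classes follow nova's pattern of a class-level message template interpolated with the keyword arguments passed at raise time, which is why each message uses %(name)s placeholders. A reduced sketch of that mechanism (not nova's exact base class):

class NovaStyleException(Exception):
    """Sketch: class-level template filled from kwargs at raise time."""
    message = "An unknown exception occurred."

    def __init__(self, **kwargs):
        super(NovaStyleException, self).__init__(self.message % kwargs)


class SnapshotNotFound(NovaStyleException):
    message = "Snapshot %(snapshot_id)s could not be found."


try:
    raise SnapshotNotFound(snapshot_id=42)
except SnapshotNotFound as e:
    assert str(e) == "Snapshot 42 could not be found."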
nova/fakerabbit.py
@@ -31,6 +31,7 @@ LOG = logging.getLogger("nova.fakerabbit")

 EXCHANGES = {}
 QUEUES = {}
+CONSUMERS = {}


 class Message(base.BaseMessage):
@@ -96,17 +97,29 @@ class Backend(base.BaseBackend):
                   ' key %(routing_key)s') % locals())
         EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)

-    def declare_consumer(self, queue, callback, *args, **kwargs):
-        self.current_queue = queue
-        self.current_callback = callback
+    def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs):
+        global CONSUMERS
+        LOG.debug("Adding consumer %s", consumer_tag)
+        CONSUMERS[consumer_tag] = (queue, callback)
+
+    def cancel(self, consumer_tag):
+        global CONSUMERS
+        LOG.debug("Removing consumer %s", consumer_tag)
+        del CONSUMERS[consumer_tag]

     def consume(self, limit=None):
+        global CONSUMERS
+        num = 0
         while True:
-            item = self.get(self.current_queue)
-            if item:
-                self.current_callback(item)
-                raise StopIteration()
-            greenthread.sleep(0)
+            for (queue, callback) in CONSUMERS.itervalues():
+                item = self.get(queue)
+                if item:
+                    callback(item)
+                    num += 1
+                    yield
+                    if limit and num == limit:
+                        raise StopIteration()
+            greenthread.sleep(0.1)

     def get(self, queue, no_ack=False):
         global QUEUES
@@ -134,5 +147,7 @@ class Backend(base.BaseBackend):
 def reset_all():
     global EXCHANGES
     global QUEUES
+    global CONSUMERS
     EXCHANGES = {}
     QUEUES = {}
+    CONSUMERS = {}
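The fake backend now tracks every consumer by tag and round-robins across all of their queues in one generator loop, instead of remembering only the most recently declared consumer. The same registry-plus-generator shape, reduced to plain Python (queue names and message contents are illustrative):

import collections

CONSUMERS = {}  # consumer_tag -> (queue_name, callback)
QUEUES = collections.defaultdict(collections.deque)


def declare_consumer(queue, callback, consumer_tag):
    CONSUMERS[consumer_tag] = (queue, callback)


def consume(limit=None):
    """Generator: deliver one message per iteration across all consumers."""
    num = 0
    while True:
        for queue, callback in list(CONSUMERS.values()):
            if QUEUES[queue]:
                callback(QUEUES[queue].popleft())
                num += 1
                yield
                if limit and num == limit:
                    return


QUEUES['a'].append('m1')
QUEUES['b'].append('m2')
received = []
declare_consumer('a', received.append, 'tag1')
declare_consumer('b', received.append, 'tag2')
for _ in consume(limit=2):
    pass
assert sorted(received) == ['m1', 'm2']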
nova/flags.py
@@ -296,6 +296,7 @@ DEFINE_bool('fake_network', False,
             'should we use fake network devices and addresses')
 DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
 DEFINE_integer('rabbit_port', 5672, 'rabbit port')
+DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL')
 DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
 DEFINE_string('rabbit_password', 'guest', 'rabbit password')
 DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
nova/log.py
@@ -35,6 +35,7 @@ import os
 import sys
 import traceback

+import nova
 from nova import flags
 from nova import version

@@ -63,6 +64,7 @@ flags.DEFINE_list('default_log_levels',
                    'eventlet.wsgi.server=WARN'],
                   'list of logger=LEVEL pairs')
 flags.DEFINE_bool('use_syslog', False, 'output to syslog')
+flags.DEFINE_bool('publish_errors', False, 'publish error events')
 flags.DEFINE_string('logfile', None, 'output to named file')


@@ -258,12 +260,20 @@ class NovaRootLogger(NovaLogger):
         else:
             self.removeHandler(self.filelog)
         self.addHandler(self.streamlog)
+        if FLAGS.publish_errors:
+            self.addHandler(PublishErrorsHandler(ERROR))
         if FLAGS.verbose:
             self.setLevel(DEBUG)
         else:
             self.setLevel(INFO)


+class PublishErrorsHandler(logging.Handler):
+    def emit(self, record):
+        nova.notifier.api.notify('nova.error.publisher', 'error_notification',
+                                 nova.notifier.api.ERROR, dict(error=record.msg))
+
+
 def handle_exception(type, value, tb):
     extra = {}
     if FLAGS.verbose:
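Publishing errors through a logging handler means any LOG.error() call anywhere in the process fans out to the notification system with no changes at the call sites. The shape of that hook, reduced to the standard library (the notify function here is a stand-in for nova.notifier.api.notify):

import logging

published = []


def notify(publisher_id, event_type, priority, payload):
    # Stand-in for nova.notifier.api.notify.
    published.append((publisher_id, event_type, priority, payload))


class PublishErrorsHandler(logging.Handler):
    def emit(self, record):
        notify('error.publisher', 'error_notification', 'ERROR',
               dict(error=record.getMessage()))


log = logging.getLogger('demo')
log.addHandler(PublishErrorsHandler(logging.ERROR))
log.error('disk full on %s', 'host1')
assert published[0][3] == {'error': 'disk full on host1'}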
nova/notifier/api.py
@@ -11,9 +11,8 @@
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
-# under the License.import datetime
+# under the License.

-import datetime
 import uuid

 from nova import flags
@@ -64,7 +63,7 @@ def notify(publisher_id, event_type, priority, payload):

     {'message_id': str(uuid.uuid4()),
      'publisher_id': 'compute.host1',
-     'timestamp': datetime.datetime.utcnow(),
+     'timestamp': utils.utcnow(),
      'priority': 'WARN',
      'event_type': 'compute.create_instance',
      'payload': {'instance_id': 12, ... }}
@@ -79,5 +78,5 @@ def notify(publisher_id, event_type, priority, payload):
             event_type=event_type,
             priority=priority,
             payload=payload,
-            timestamp=str(datetime.datetime.utcnow()))
+            timestamp=str(utils.utcnow()))
     driver.notify(msg)
nova/quota.py
@@ -28,6 +28,8 @@ flags.DEFINE_integer('quota_instances', 10,
                      'number of instances allowed per project')
 flags.DEFINE_integer('quota_cores', 20,
                      'number of instance cores allowed per project')
+flags.DEFINE_integer('quota_ram', 50 * 1024,
+                     'megabytes of instance ram allowed per project')
 flags.DEFINE_integer('quota_volumes', 10,
                      'number of volumes allowed per project')
 flags.DEFINE_integer('quota_gigabytes', 1000,
@@ -44,14 +46,28 @@ flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255,
                      'number of bytes allowed per injected file path')


-def get_quota(context, project_id):
-    rval = {'instances': FLAGS.quota_instances,
-            'cores': FLAGS.quota_cores,
-            'volumes': FLAGS.quota_volumes,
-            'gigabytes': FLAGS.quota_gigabytes,
-            'floating_ips': FLAGS.quota_floating_ips,
-            'metadata_items': FLAGS.quota_metadata_items}
+def _get_default_quotas():
+    defaults = {
+        'instances': FLAGS.quota_instances,
+        'cores': FLAGS.quota_cores,
+        'ram': FLAGS.quota_ram,
+        'volumes': FLAGS.quota_volumes,
+        'gigabytes': FLAGS.quota_gigabytes,
+        'floating_ips': FLAGS.quota_floating_ips,
+        'metadata_items': FLAGS.quota_metadata_items,
+        'injected_files': FLAGS.quota_max_injected_files,
+        'injected_file_content_bytes':
+            FLAGS.quota_max_injected_file_content_bytes,
+    }
+    # -1 in the quota flags means unlimited
+    for key in defaults.keys():
+        if defaults[key] == -1:
+            defaults[key] = None
+    return defaults
+
+
+def get_project_quotas(context, project_id):
+    rval = _get_default_quotas()
     quota = db.quota_get_all_by_project(context, project_id)
     for key in rval.keys():
         if key in quota:
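Two conventions meet in _get_default_quotas: operators set -1 in flags to mean unlimited, while the rest of the quota code uses None for the same thing, so the defaults are normalized once at this boundary. A quick sketch of the normalization (flag values are illustrative):

# -1 from configuration becomes None, the in-code marker for "unlimited".
flag_values = {'instances': 10, 'cores': 20, 'ram': -1}

defaults = dict(flag_values)
for key in list(defaults):
    if defaults[key] == -1:
        defaults[key] = None

assert defaults['ram'] is None       # unlimited
assert defaults['instances'] == 10   # still capped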
@@ -65,71 +81,81 @@ def _get_request_allotment(requested, used, quota):
     return quota - used


-def allowed_instances(context, num_instances, instance_type):
-    """Check quota and return min(num_instances, allowed_instances)."""
+def allowed_instances(context, requested_instances, instance_type):
+    """Check quota and return min(requested_instances, allowed_instances)."""
     project_id = context.project_id
     context = context.elevated()
-    num_cores = num_instances * instance_type['vcpus']
-    used_instances, used_cores = db.instance_data_get_for_project(context,
-                                                                  project_id)
-    quota = get_quota(context, project_id)
-    allowed_instances = _get_request_allotment(num_instances, used_instances,
-                                               quota['instances'])
-    allowed_cores = _get_request_allotment(num_cores, used_cores,
-                                           quota['cores'])
+    requested_cores = requested_instances * instance_type['vcpus']
+    requested_ram = requested_instances * instance_type['memory_mb']
+    usage = db.instance_data_get_for_project(context, project_id)
+    used_instances, used_cores, used_ram = usage
+    quota = get_project_quotas(context, project_id)
+    allowed_instances = _get_request_allotment(requested_instances,
+                                               used_instances,
+                                               quota['instances'])
+    allowed_cores = _get_request_allotment(requested_cores, used_cores,
+                                           quota['cores'])
+    allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram'])
     allowed_instances = min(allowed_instances,
-                            int(allowed_cores // instance_type['vcpus']))
-    return min(num_instances, allowed_instances)
+                            allowed_cores // instance_type['vcpus'],
+                            allowed_ram // instance_type['memory_mb'])
+    return min(requested_instances, allowed_instances)


-def allowed_volumes(context, num_volumes, size):
-    """Check quota and return min(num_volumes, allowed_volumes)."""
+def allowed_volumes(context, requested_volumes, size):
+    """Check quota and return min(requested_volumes, allowed_volumes)."""
     project_id = context.project_id
     context = context.elevated()
     size = int(size)
-    num_gigabytes = num_volumes * size
+    requested_gigabytes = requested_volumes * size
     used_volumes, used_gigabytes = db.volume_data_get_for_project(context,
                                                                   project_id)
-    quota = get_quota(context, project_id)
-    allowed_volumes = _get_request_allotment(num_volumes, used_volumes,
-                                             quota['volumes'])
-    allowed_gigabytes = _get_request_allotment(num_gigabytes, used_gigabytes,
-                                               quota['gigabytes'])
+    quota = get_project_quotas(context, project_id)
+    allowed_volumes = _get_request_allotment(requested_volumes, used_volumes,
+                                             quota['volumes'])
+    allowed_gigabytes = _get_request_allotment(requested_gigabytes,
+                                               used_gigabytes,
+                                               quota['gigabytes'])
     allowed_volumes = min(allowed_volumes,
                           int(allowed_gigabytes // size))
-    return min(num_volumes, allowed_volumes)
+    return min(requested_volumes, allowed_volumes)


-def allowed_floating_ips(context, num_floating_ips):
-    """Check quota and return min(num_floating_ips, allowed_floating_ips)."""
+def allowed_floating_ips(context, requested_floating_ips):
+    """Check quota and return min(requested, allowed) floating ips."""
     project_id = context.project_id
     context = context.elevated()
     used_floating_ips = db.floating_ip_count_by_project(context, project_id)
-    quota = get_quota(context, project_id)
-    allowed_floating_ips = _get_request_allotment(num_floating_ips,
+    quota = get_project_quotas(context, project_id)
+    allowed_floating_ips = _get_request_allotment(requested_floating_ips,
                                                   used_floating_ips,
                                                   quota['floating_ips'])
-    return min(num_floating_ips, allowed_floating_ips)
+    return min(requested_floating_ips, allowed_floating_ips)


-def allowed_metadata_items(context, num_metadata_items):
-    """Check quota; return min(num_metadata_items,allowed_metadata_items)."""
-    project_id = context.project_id
-    context = context.elevated()
-    quota = get_quota(context, project_id)
-    allowed_metadata_items = _get_request_allotment(num_metadata_items, 0,
-                                                    quota['metadata_items'])
-    return min(num_metadata_items, allowed_metadata_items)
+def _calculate_simple_quota(context, resource, requested):
+    """Check quota for resource; return min(requested, allowed)."""
+    quota = get_project_quotas(context, context.project_id)
+    allowed = _get_request_allotment(requested, 0, quota[resource])
+    return min(requested, allowed)


-def allowed_injected_files(context):
+def allowed_metadata_items(context, requested_metadata_items):
+    """Return the number of metadata items allowed."""
+    return _calculate_simple_quota(context, 'metadata_items',
+                                   requested_metadata_items)
+
+
+def allowed_injected_files(context, requested_injected_files):
     """Return the number of injected files allowed."""
-    return FLAGS.quota_max_injected_files
+    return _calculate_simple_quota(context, 'injected_files',
+                                   requested_injected_files)


-def allowed_injected_file_content_bytes(context):
+def allowed_injected_file_content_bytes(context, requested_bytes):
     """Return the number of bytes allowed per injected file content."""
-    return FLAGS.quota_max_injected_file_content_bytes
+    resource = 'injected_file_content_bytes'
+    return _calculate_simple_quota(context, resource, requested_bytes)


 def allowed_injected_file_path_bytes(context):
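allowed_instances now takes the tightest of three constraints: the instance-count quota, the core quota divided by vcpus per instance, and the new RAM quota divided by memory per instance. A self-contained sketch of that three-way clamp with worked (hypothetical) numbers:

def request_allotment(requested, used, quota):
    # None means unlimited.
    if quota is None:
        return requested
    return quota - used


def allowed_instances(requested, vcpus, ram_mb, quota, usage):
    """Sketch of the three-way clamp used above (numbers are illustrative)."""
    used_instances, used_cores, used_ram = usage
    allowed = min(
        request_allotment(requested, used_instances, quota['instances']),
        request_allotment(requested * vcpus, used_cores,
                          quota['cores']) // vcpus,
        request_allotment(requested * ram_mb, used_ram,
                          quota['ram']) // ram_mb)
    return min(requested, allowed)


quota = {'instances': 10, 'cores': 20, 'ram': 50 * 1024}
usage = (2, 4, 8192)
# 2-vcpu, 4096 MB flavor: instance and core quotas both bind at 8,
# so a request for 20 instances is clipped to 8.
assert allowed_instances(20, 2, 4096, quota, usage) == 8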
nova/rpc.py
@@ -28,12 +28,15 @@ import json
 import sys
 import time
 import traceback
+import types
 import uuid

 from carrot import connection as carrot_connection
 from carrot import messaging
 from eventlet import greenpool
-from eventlet import greenthread
+from eventlet import pools
+from eventlet import queue
+import greenlet

 from nova import context
 from nova import exception
@@ -47,7 +50,10 @@ LOG = logging.getLogger('nova.rpc')


 FLAGS = flags.FLAGS
-flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool')
+flags.DEFINE_integer('rpc_thread_pool_size', 1024,
+                     'Size of RPC thread pool')
+flags.DEFINE_integer('rpc_conn_pool_size', 30,
+                     'Size of RPC connection pool')


 class Connection(carrot_connection.BrokerConnection):
@@ -59,6 +65,7 @@ class Connection(carrot_connection.BrokerConnection):
         if new or not hasattr(cls, '_instance'):
             params = dict(hostname=FLAGS.rabbit_host,
                           port=FLAGS.rabbit_port,
+                          ssl=FLAGS.rabbit_use_ssl,
                           userid=FLAGS.rabbit_userid,
                           password=FLAGS.rabbit_password,
                           virtual_host=FLAGS.rabbit_virtual_host)
@@ -90,6 +97,22 @@ class Connection(carrot_connection.BrokerConnection):
         return cls.instance()


+class Pool(pools.Pool):
+    """Class that implements a Pool of Connections."""
+
+    # TODO(comstud): Timeout connections not used in a while
+    def create(self):
+        LOG.debug('Creating new connection')
+        return Connection.instance(new=True)
+
+# Create a ConnectionPool to use for RPC calls.  We'll order the
+# pool as a stack (LIFO), so that we can potentially loop through and
+# timeout old unused connections at some point
+ConnectionPool = Pool(
+    max_size=FLAGS.rpc_conn_pool_size,
+    order_as_stack=True)
+
+
 class Consumer(messaging.Consumer):
     """Consumer base class.

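eventlet.pools.Pool hands out lazily-created resources and blocks a greenthread when the pool is exhausted; subclasses only override create(). A self-contained sketch of the same pattern (FakeConnection is a stand-in for the broker connection, not nova's class):

from eventlet import pools


class FakeConnection(object):
    """Stand-in for a broker connection (illustrative only)."""
    count = 0

    def __init__(self):
        FakeConnection.count += 1
        self.name = 'conn-%d' % FakeConnection.count


class ConnectionPool(pools.Pool):
    def create(self):
        # Called only when the pool is empty and still under max_size.
        return FakeConnection()


pool = ConnectionPool(max_size=2, order_as_stack=True)

with pool.item() as conn:   # borrow and auto-return
    print(conn.name)        # conn-1

c = pool.get()              # explicit borrow...
pool.put(c)                 # ...and return, as multicall does below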
@@ -131,7 +154,9 @@
         self.connection = Connection.recreate()
         self.backend = self.connection.create_backend()
         self.declare()
-        super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
+        return super(Consumer, self).fetch(no_ack,
+                                           auto_ack,
+                                           enable_callbacks)
         if self.failed_connection:
             LOG.error(_('Reconnected to queue'))
             self.failed_connection = False
@@ -159,13 +184,13 @@ class AdapterConsumer(Consumer):
         self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
         super(AdapterConsumer, self).__init__(connection=connection,
                                               topic=topic)
+        self.register_callback(self.process_data)

-    def receive(self, *args, **kwargs):
-        self.pool.spawn_n(self._receive, *args, **kwargs)
-
-    @exception.wrap_exception
-    def _receive(self, message_data, message):
-        """Magically looks for a method on the proxy object and calls it.
+    def process_data(self, message_data, message):
+        """Consumer callback to call a method on a proxy object.
+
+        Parses the message for validity and fires off a thread to call the
+        proxy object method.

         Message data should be a dictionary with two keys:
             method: string representing the method to call
@@ -175,8 +200,8 @@ class AdapterConsumer(Consumer):

         """
         LOG.debug(_('received %s') % message_data)
-        msg_id = message_data.pop('_msg_id', None)
+        # This will be popped off in _unpack_context
+        msg_id = message_data.get('_msg_id', None)
         ctxt = _unpack_context(message_data)

         method = message_data.get('method')
@@ -188,8 +213,17 @@ class AdapterConsumer(Consumer):
             # we just log the message and send an error string
             # back to the caller
             LOG.warn(_('no method for message: %s') % message_data)
-            msg_reply(msg_id, _('No method for message: %s') % message_data)
+            if msg_id:
+                msg_reply(msg_id,
+                          _('No method for message: %s') % message_data)
             return
+        self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args)
+
+    @exception.wrap_exception
+    def _process_data(self, msg_id, ctxt, method, args):
+        """Thread that magically looks for a method on the proxy
+        object and calls it.
+        """
+
         node_func = getattr(self.proxy, str(method))
         node_args = dict((str(k), v) for k, v in args.iteritems())
@@ -197,7 +231,18 @@ class AdapterConsumer(Consumer):
         try:
             rval = node_func(context=ctxt, **node_args)
             if msg_id:
-                msg_reply(msg_id, rval, None)
+                # Check if the result was a generator
+                if isinstance(rval, types.GeneratorType):
+                    for x in rval:
+                        msg_reply(msg_id, x, None)
+                else:
+                    msg_reply(msg_id, rval, None)
+
+                # This final None tells multicall that it is done.
+                msg_reply(msg_id, None, None)
+            elif isinstance(rval, types.GeneratorType):
+                # NOTE(vish): this iterates through the generator
+                list(rval)
         except Exception as e:
             logging.exception('Exception during message handling')
             if msg_id:
@@ -205,11 +250,6 @@ class AdapterConsumer(Consumer):
             return

-
-class Publisher(messaging.Publisher):
-    """Publisher base class."""
-    pass

 class TopicAdapterConsumer(AdapterConsumer):
     """Consumes messages on a specific topic."""
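The reply protocol introduced here is worth spelling out: for each call the service sends zero or more results followed by a None sentinel, which is what lets multicall on the far side yield intermediate values and know when to stop. The wire-level sequence, simulated with a plain queue (a sketch, not the carrot transport):

import types
try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2

replies = queue.Queue()     # stands in for the per-call direct queue


def serve(result):
    """Server side: stream generator results, then the None sentinel."""
    if isinstance(result, types.GeneratorType):
        for item in result:
            replies.put(item)
    else:
        replies.put(result)
    replies.put(None)       # "done" marker consumed by multicall


def consume():
    """Client side: yield until the sentinel arrives."""
    while True:
        item = replies.get()
        if item is None:
            return
        yield item


serve(x * x for x in range(3))
assert list(consume()) == [0, 1, 4]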
@@ -242,6 +282,58 @@ class FanoutAdapterConsumer(AdapterConsumer):
                                                   topic=topic, proxy=proxy)


+class ConsumerSet(object):
+    """Groups consumers to listen on together on a single connection."""
+
+    def __init__(self, connection, consumer_list):
+        self.consumer_list = set(consumer_list)
+        self.consumer_set = None
+        self.enabled = True
+        self.init(connection)
+
+    def init(self, conn):
+        if not conn:
+            conn = Connection.instance(new=True)
+        if self.consumer_set:
+            self.consumer_set.close()
+        self.consumer_set = messaging.ConsumerSet(conn)
+        for consumer in self.consumer_list:
+            consumer.connection = conn
+            # consumer.backend is set for us
+            self.consumer_set.add_consumer(consumer)
+
+    def reconnect(self):
+        self.init(None)
+
+    def wait(self, limit=None):
+        running = True
+        while running:
+            it = self.consumer_set.iterconsume(limit=limit)
+            if not it:
+                break
+            while True:
+                try:
+                    it.next()
+                except StopIteration:
+                    return
+                except greenlet.GreenletExit:
+                    running = False
+                    break
+                except Exception as e:
+                    LOG.exception(_("Exception while processing consumer"))
+                    self.reconnect()
+                    # Break to outer loop
+                    break
+
+    def close(self):
+        self.consumer_set.close()
+
+
+class Publisher(messaging.Publisher):
+    """Publisher base class."""
+    pass
+
+
 class TopicPublisher(Publisher):
     """Publishes messages on a specific topic."""
@@ -306,7 +398,8 @@ def msg_reply(msg_id, reply=None, failure=None):
         LOG.error(_("Returning exception %s to caller"), message)
         LOG.error(tb)
         failure = (failure[0].__name__, str(failure[1]), tb)
-    conn = Connection.instance()
+    with ConnectionPool.item() as conn:
         publisher = DirectPublisher(connection=conn, msg_id=msg_id)
         try:
             publisher.send({'result': reply, 'failure': failure})
@@ -315,6 +408,7 @@ def msg_reply(msg_id, reply=None, failure=None):
                 {'result': dict((k, repr(v))
                                 for k, v in reply.__dict__.iteritems()),
                  'failure': failure})
+
         publisher.close()
@@ -347,8 +441,9 @@ def _unpack_context(msg):
         if key.startswith('_context_'):
             value = msg.pop(key)
             context_dict[key[9:]] = value
+    context_dict['msg_id'] = msg.pop('_msg_id', None)
     LOG.debug(_('unpacked context: %s'), context_dict)
-    return context.RequestContext.from_dict(context_dict)
+    return RpcContext.from_dict(context_dict)


 def _pack_context(msg, context):
@@ -360,57 +455,99 @@ def _pack_context(msg, context):
     for args at some point.

     """
-    context = dict([('_context_%s' % key, value)
-                    for (key, value) in context.to_dict().iteritems()])
-    msg.update(context)
+    context_d = dict([('_context_%s' % key, value)
+                      for (key, value) in context.to_dict().iteritems()])
+    msg.update(context_d)


-def call(context, topic, msg):
-    """Sends a message on a topic and wait for a response."""
+class RpcContext(context.RequestContext):
+    def __init__(self, *args, **kwargs):
+        msg_id = kwargs.pop('msg_id', None)
+        self.msg_id = msg_id
+        super(RpcContext, self).__init__(*args, **kwargs)
+
+    def reply(self, *args, **kwargs):
+        msg_reply(self.msg_id, *args, **kwargs)
+
+
+def multicall(context, topic, msg):
+    """Make a call that returns multiple times."""
     LOG.debug(_('Making asynchronous call on %s ...'), topic)
     msg_id = uuid.uuid4().hex
     msg.update({'_msg_id': msg_id})
     LOG.debug(_('MSG_ID is %s') % (msg_id))
     _pack_context(msg, context)

-    class WaitMessage(object):
-        def __call__(self, data, message):
-            """Acks message and sets result."""
-            message.ack()
-            if data['failure']:
-                self.result = RemoteError(*data['failure'])
-            else:
-                self.result = data['result']
-
-    wait_msg = WaitMessage()
-    conn = Connection.instance()
-    consumer = DirectConsumer(connection=conn, msg_id=msg_id)
-    consumer.register_callback(wait_msg)
-
-    conn = Connection.instance()
-    publisher = TopicPublisher(connection=conn, topic=topic)
-    publisher.send(msg)
-    publisher.close()
-
-    try:
-        consumer.wait(limit=1)
-    except StopIteration:
-        pass
-    consumer.close()
-    # NOTE(termie): this is a little bit of a change from the original
-    #               non-eventlet code where returning a Failure
-    #               instance from a deferred call is very similar to
-    #               raising an exception
-    if isinstance(wait_msg.result, Exception):
-        raise wait_msg.result
-    return wait_msg.result
+    con_conn = ConnectionPool.get()
+    consumer = DirectConsumer(connection=con_conn, msg_id=msg_id)
+    wait_msg = MulticallWaiter(consumer)
+    consumer.register_callback(wait_msg)
+
+    publisher = TopicPublisher(connection=con_conn, topic=topic)
+    publisher.send(msg)
+    publisher.close()
+
+    return wait_msg
+
+
+class MulticallWaiter(object):
+    def __init__(self, consumer):
+        self._consumer = consumer
+        self._results = queue.Queue()
+        self._closed = False
+
+    def close(self):
+        self._closed = True
+        self._consumer.close()
+        ConnectionPool.put(self._consumer.connection)
+
+    def __call__(self, data, message):
+        """Acks message and sets result."""
+        message.ack()
+        if data['failure']:
+            self._results.put(RemoteError(*data['failure']))
+        else:
+            self._results.put(data['result'])
+
+    def __iter__(self):
+        return self.wait()
+
+    def wait(self):
+        while True:
+            rv = None
+            while rv is None and not self._closed:
+                try:
+                    rv = self._consumer.fetch(enable_callbacks=True)
+                except Exception:
+                    self.close()
+                    raise
+                time.sleep(0.01)
+
+            result = self._results.get()
+            if isinstance(result, Exception):
+                self.close()
+                raise result
+            if result == None:
+                self.close()
+                raise StopIteration
+            yield result
+
+
+def call(context, topic, msg):
+    """Sends a message on a topic and wait for a response."""
+    rv = multicall(context, topic, msg)
+    # NOTE(vish): return the last result from the multicall
+    rv = list(rv)
+    if not rv:
+        return
+    return rv[-1]


 def cast(context, topic, msg):
     """Sends a message on a topic without waiting for a response."""
     LOG.debug(_('Making asynchronous cast on %s...'), topic)
     _pack_context(msg, context)
-    conn = Connection.instance()
+    with ConnectionPool.item() as conn:
         publisher = TopicPublisher(connection=conn, topic=topic)
         publisher.send(msg)
         publisher.close()
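From the caller's side, multicall returns an iterator of partial results while call keeps only the last one; a service method that yields produces one reply message per item. A hypothetical usage sketch (the topic, method, and argument names are made up for illustration):

# Hypothetical service-side method: each yield becomes one reply message.
class ComputeProxy(object):
    def get_progress(self, context, steps):
        for i in range(steps):
            yield {'step': i}

# Caller side (illustrative only):
#   for update in rpc.multicall(ctxt, 'compute.host1',
#                               {'method': 'get_progress',
#                                'args': {'steps': 3}}):
#       print(update)        # {'step': 0}, {'step': 1}, {'step': 2}
#
#   rpc.call(...) on the same message would return only {'step': 2}.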
@@ -420,7 +557,7 @@ def fanout_cast(context, topic, msg):
     """Sends a message on a fanout exchange without waiting for a response."""
     LOG.debug(_('Making asynchronous fanout cast...'))
     _pack_context(msg, context)
-    conn = Connection.instance()
+    with ConnectionPool.item() as conn:
         publisher = FanoutPublisher(topic, connection=conn)
         publisher.send(msg)
         publisher.close()
@@ -459,6 +596,7 @@ def send_message(topic, message, wait=True):

     if wait:
         consumer.wait()
+    consumer.close()


 if __name__ == '__main__':
nova/scheduler/driver.py
@@ -28,6 +28,7 @@ from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import rpc
+from nova import utils
 from nova.compute import power_state

 FLAGS = flags.FLAGS
@@ -61,7 +62,7 @@ class Scheduler(object):
         """Check whether a service is up based on last heartbeat."""
         last_heartbeat = service['updated_at'] or service['created_at']
         # Timestamps in DB are UTC.
-        elapsed = datetime.datetime.utcnow() - last_heartbeat
+        elapsed = utils.utcnow() - last_heartbeat
         return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time)

     def hosts_up(self, context, topic):
|
|||||||
@@ -14,8 +14,8 @@
 # under the License.
 
 """
-Host Filter is a driver mechanism for requesting instance resources.
-Three drivers are included: AllHosts, Flavor & JSON. AllHosts just
+Host Filter is a mechanism for requesting instance resources.
+Three filters are included: AllHosts, Flavor & JSON. AllHosts just
 returns the full, unfiltered list of hosts. Flavor is a hard coded
 matching mechanism based on flavor criteria and JSON is an ad-hoc
 filter grammar.
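To make the ad-hoc grammar concrete: a JSON query is a nested list whose head is an operator and whose tail is operands, with '$service.capability' strings resolving against host capabilities. A small sketch, with capability names taken from the tests added later in this commit:

    import json

    # Hosts with either plenty of free memory or plenty of free disk.
    query = json.dumps(['or',
                        ['>=', '$compute.host_memory_free', 1024],
                        ['>=', '$compute.disk_available', 200]])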
@@ -41,18 +41,20 @@ import json
 from nova import exception
 from nova import flags
 from nova import log as logging
+from nova.scheduler import zone_aware_scheduler
 from nova import utils
 
 
 LOG = logging.getLogger('nova.scheduler.host_filter')
 
 FLAGS = flags.FLAGS
-flags.DEFINE_string('default_host_filter_driver',
+flags.DEFINE_string('default_host_filter',
                     'nova.scheduler.host_filter.AllHostsFilter',
-                    'Which driver to use for filtering hosts.')
+                    'Which filter to use for filtering hosts.')
 
 
 class HostFilter(object):
-    """Base class for host filter drivers."""
+    """Base class for host filters."""
 
     def instance_type_to_filter(self, instance_type):
         """Convert instance_type into a filter for most common use-case."""
@@ -63,14 +65,15 @@ class HostFilter(object):
         raise NotImplementedError()
 
     def _full_name(self):
-        """module.classname of the filter driver"""
+        """module.classname of the filter."""
         return "%s.%s" % (self.__module__, self.__class__.__name__)
 
 
 class AllHostsFilter(HostFilter):
-    """NOP host filter driver. Returns all hosts in ZoneManager.
+    """NOP host filter. Returns all hosts in ZoneManager.
     This essentially does what the old Scheduler+Chance used
-    to give us."""
+    to give us.
+    """
 
     def instance_type_to_filter(self, instance_type):
         """Return anything to prevent base-class from raising
@@ -83,8 +86,8 @@ class AllHostsFilter(HostFilter):
         for host, services in zone_manager.service_states.iteritems()]
 
 
-class FlavorFilter(HostFilter):
-    """HostFilter driver hard-coded to work with flavors."""
+class InstanceTypeFilter(HostFilter):
+    """HostFilter hard-coded to work with InstanceType records."""
 
     def instance_type_to_filter(self, instance_type):
         """Use instance_type to filter hosts."""
@@ -98,8 +101,9 @@ class FlavorFilter(HostFilter):
             capabilities = services.get('compute', {})
             host_ram_mb = capabilities['host_memory_free']
             disk_bytes = capabilities['disk_available']
-            if host_ram_mb >= instance_type['memory_mb'] and \
-               disk_bytes >= instance_type['local_gb']:
+            spec_ram = instance_type['memory_mb']
+            spec_disk = instance_type['local_gb']
+            if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
                 selected_hosts.append((host, capabilities))
         return selected_hosts
 
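The rewritten predicate now names its two comparisons. With illustrative (hypothetical) capability values, a host is selected only when both hold; note that, as in the original, disk_available is in bytes while local_gb is in gigabytes, and that comparison is carried over unchanged:

    capabilities = {'host_memory_free': 2048, 'disk_available': 1024}
    instance_type = {'memory_mb': 512, 'local_gb': 500}

    spec_ram = instance_type['memory_mb']
    spec_disk = instance_type['local_gb']
    assert capabilities['host_memory_free'] >= spec_ram
    assert capabilities['disk_available'] >= spec_disk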
@@ -109,15 +113,15 @@ class FlavorFilter(HostFilter):
     # 'host_memory_total': 8244539392,
     # 'host_memory_overhead': 184225792,
     # 'host_memory_free': 3868327936,
-    # 'host_memory_free_computed': 3840843776},
-    # 'host_other-config': {},
+    # 'host_memory_free_computed': 3840843776,
+    # 'host_other_config': {},
     # 'host_ip_address': '192.168.1.109',
     # 'host_cpu_info': {},
     # 'disk_available': 32954957824,
     # 'disk_total': 50394562560,
-    # 'disk_used': 17439604736},
+    # 'disk_used': 17439604736,
     # 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
-    # 'host_name-label': 'xs-mini'}
+    # 'host_name_label': 'xs-mini'}
 
     # instance_type table has:
     #name = Column(String(255), unique=True)
@@ -131,8 +135,9 @@ class FlavorFilter(HostFilter):
 
 
 class JsonFilter(HostFilter):
-    """Host Filter driver to allow simple JSON-based grammar for
-    selecting hosts."""
+    """Host Filter to allow simple JSON-based grammar for
+    selecting hosts.
+    """
 
     def _equals(self, args):
         """First term is == all the other terms."""
@@ -227,7 +232,8 @@ class JsonFilter(HostFilter):
 
     def _parse_string(self, string, host, services):
         """Strings prefixed with $ are capability lookups in the
-        form '$service.capability[.subcap*]'"""
+        form '$service.capability[.subcap*]'
+        """
         if not string:
             return None
         if string[0] != '$':
@@ -270,18 +276,48 @@ class JsonFilter(HostFilter):
         return hosts
 
 
-DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter]
+FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter]
 
 
-def choose_driver(driver_name=None):
-    """Since the caller may specify which driver to use we need
+def choose_host_filter(filter_name=None):
+    """Since the caller may specify which filter to use we need
     to have an authoritative list of what is permissible. This
-    function checks the driver name against a predefined set
-    of acceptable drivers."""
+    function checks the filter name against a predefined set
+    of acceptable filters.
+    """
 
-    if not driver_name:
-        driver_name = FLAGS.default_host_filter_driver
-    for driver in DRIVERS:
-        if "%s.%s" % (driver.__module__, driver.__name__) == driver_name:
-            return driver()
-    raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name)
+    if not filter_name:
+        filter_name = FLAGS.default_host_filter
+    for filter_class in FILTERS:
+        host_match = "%s.%s" % (filter_class.__module__,
+                                filter_class.__name__)
+        if host_match == filter_name:
+            return filter_class()
+    raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
+
+
+class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+    """The HostFilterScheduler uses the HostFilter to filter
+    hosts for weighing. The particular filter used may be passed in
+    as an argument or the default will be used.
+
+    request_spec = {'filter': <Filter name>,
+                    'instance_type': <InstanceType dict>}
+    """
+
+    def filter_hosts(self, num, request_spec):
+        """Filter the full host list (from the ZoneManager)"""
+        filter_name = request_spec.get('filter', None)
+        host_filter = choose_host_filter(filter_name)
+
+        # TODO(sandy): We're only using InstanceType-based specs
+        # currently. Later we'll need to snoop for more detailed
+        # host filter requests.
+        instance_type = request_spec['instance_type']
+        name, query = host_filter.instance_type_to_filter(instance_type)
+        return host_filter.filter_hosts(self.zone_manager, query)
+
+    def weigh_hosts(self, num, request_spec, hosts):
+        """Derived classes must override this method and return
+        a list of hosts in [{weight, hostname}] format.
+        """
+        return [dict(weight=1, hostname=host) for host, caps in hosts]
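A hedged usage sketch of the selection API above; it assumes a populated ZoneManager such as the fake one built in the new tests:

    from nova.scheduler import host_filter

    # With no argument, choose_host_filter falls back to
    # FLAGS.default_host_filter.
    hf = host_filter.choose_host_filter(
            'nova.scheduler.host_filter.InstanceTypeFilter')
    name, query = hf.instance_type_to_filter(
            {'memory_mb': 512, 'local_gb': 20})
    # hosts = hf.filter_hosts(zone_manager, query)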
nova/scheduler/least_cost.py (new file, 156 lines)
@@ -0,0 +1,156 @@
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Least Cost Scheduler is a mechanism for choosing which host machines to
provision a set of resources to. The input of the least-cost-scheduler is a
set of objective-functions, called the 'cost-functions', a weight for each
cost-function, and a list of candidate hosts (gathered via FilterHosts).

The cost-functions and weights are tabulated, and the host with the least
cost is then selected for provisioning.
"""

import collections

from nova import exception
from nova import flags
from nova import log as logging
from nova.scheduler import zone_aware_scheduler
from nova import utils

LOG = logging.getLogger('nova.scheduler.least_cost')

FLAGS = flags.FLAGS
flags.DEFINE_list('least_cost_scheduler_cost_functions',
                  ['nova.scheduler.least_cost.noop_cost_fn'],
                  'Which cost functions the LeastCostScheduler should use.')


# TODO(sirp): Once we have enough of these rules, we can break them out into a
# cost_functions.py file (perhaps in a least_cost_scheduler directory)
flags.DEFINE_integer('noop_cost_fn_weight', 1,
                     'How much weight to give the noop cost function')


def noop_cost_fn(host):
    """Return a pre-weight cost of 1 for each host"""
    return 1


flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
                     'How much weight to give the fill-first cost function')


def fill_first_cost_fn(host):
    """Prefer hosts that have less ram available; filter_hosts will exclude
    hosts that don't have enough ram"""
    hostname, caps = host
    free_mem = caps['compute']['host_memory_free']
    return free_mem


class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
    def get_cost_fns(self):
        """Returns a list of tuples containing weights and cost functions to
        use for weighing hosts
        """
        cost_fns = []
        for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
            try:
                # NOTE(sirp): import_class is somewhat misnamed since it can
                # handle any callable from a module
                cost_fn = utils.import_class(cost_fn_str)
            except exception.ClassNotFound:
                raise exception.SchedulerCostFunctionNotFound(
                    cost_fn_str=cost_fn_str)

            flag_name = "%s_weight" % cost_fn.__name__
            try:
                weight = getattr(FLAGS, flag_name)
            except AttributeError:
                raise exception.SchedulerWeightFlagNotFound(
                    flag_name=flag_name)

            cost_fns.append((weight, cost_fn))

        return cost_fns

    def weigh_hosts(self, num, request_spec, hosts):
        """Returns a list of dictionaries of form:
        [ {weight: weight, hostname: hostname} ]"""

        # FIXME(sirp): weigh_hosts should handle more than just instances
        hostnames = [hostname for hostname, caps in hosts]

        cost_fns = self.get_cost_fns()
        costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)

        weighted = []
        weight_log = []
        for cost, hostname in zip(costs, hostnames):
            weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
            weight_dict = dict(weight=cost, hostname=hostname)
            weighted.append(weight_dict)

        LOG.debug(_("Weighted Costs => %s") % weight_log)
        return weighted


def normalize_list(L):
    """Normalize an array of numbers such that each element satisfies:
    0 <= e <= 1"""
    if not L:
        return L
    max_ = max(L)
    if max_ > 0:
        return [(float(e) / max_) for e in L]
    return L


def weighted_sum(domain, weighted_fns, normalize=True):
    """Use the weighted-sum method to compute a score for an array of objects.
    Normalize the results of the objective-functions so that the weights are
    meaningful regardless of the objective-function's range.

    domain - input to be scored
    weighted_fns - list of weights and functions like:
        [(weight, objective-function)]

    Returns an unsorted list of scores. To pair with hosts do:
        zip(scores, hosts)
    """
    # Table of form:
    # { domain1: [score1, score2, ..., scoreM]
    #   ...
    #   domainN: [score1, score2, ..., scoreM] }
    score_table = collections.defaultdict(list)
    for weight, fn in weighted_fns:
        scores = [fn(elem) for elem in domain]

        if normalize:
            norm_scores = normalize_list(scores)
        else:
            norm_scores = scores

        for idx, score in enumerate(norm_scores):
            weighted_score = score * weight
            score_table[idx].append(weighted_score)

    # Sum rows in the table to compute the score for each element in domain.
    domain_scores = []
    for idx in sorted(score_table):
        elem_score = sum(score_table[idx])
        domain_scores.append(elem_score)

    return domain_scores
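A worked example of the scoring pipeline above, using hypothetical raw scores; it reproduces the arithmetic asserted by the new unit tests further down:

    scores_ram = [512, 256, 512]    # raw output of a fill-first cost fn
    scores_io = [100, 400, 100]     # raw output of an I/O cost fn

    # normalize_list scales each list by its maximum ...
    norm_ram = [s / 512.0 for s in scores_ram]    # [1.0, 0.5, 1.0]
    norm_io = [s / 400.0 for s in scores_io]      # [0.25, 1.0, 0.25]

    # ... and weighted_sum applies the weights (1 and 2) and sums the rows.
    costs = [1 * r + 2 * i for r, i in zip(norm_ram, norm_io)]
    assert costs == [1.5, 2.5, 1.5]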
@@ -83,11 +83,16 @@ class SchedulerManager(manager.Manager):
         except AttributeError:
             host = self.driver.schedule(elevated, topic, *args, **kwargs)
 
+        if not host:
+            LOG.debug(_("%(topic)s %(method)s handled in Scheduler")
+                      % locals())
+            return
+
         rpc.cast(context,
                  db.queue_get_for(context, topic, host),
                  {"method": method,
                   "args": kwargs})
-        LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
+        LOG.debug(_("Casted to %(topic)s %(host)s for %(method)s") % locals())
 
     # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
     #                    Based on bexar design summit discussion,
@@ -21,10 +21,9 @@
 Simple Scheduler
 """
 
-import datetime
-
 from nova import db
 from nova import flags
+from nova import utils
 from nova.scheduler import driver
 from nova.scheduler import chance
 
@@ -54,7 +53,7 @@ class SimpleScheduler(chance.ChanceScheduler):
 
             # TODO(vish): this probably belongs in the manager, if we
             #             can generalize this somehow
-            now = datetime.datetime.utcnow()
+            now = utils.utcnow()
            db.instance_update(context, instance_id, {'host': host,
                                                      'scheduled_at': now})
            return host
@@ -66,7 +65,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             if self.service_is_up(service):
                 # NOTE(vish): this probably belongs in the manager, if we
                 #             can generalize this somehow
-                now = datetime.datetime.utcnow()
+                now = utils.utcnow()
                db.instance_update(context,
                                   instance_id,
                                   {'host': service['host'],
@@ -90,7 +89,7 @@ class SimpleScheduler(chance.ChanceScheduler):
 
             # TODO(vish): this probably belongs in the manager, if we
             #             can generalize this somehow
-            now = datetime.datetime.utcnow()
+            now = utils.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            return host
@@ -103,7 +102,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             if self.service_is_up(service):
                 # NOTE(vish): this probably belongs in the manager, if we
                 #             can generalize this somehow
-                now = datetime.datetime.utcnow()
+                now = utils.utcnow()
                db.volume_update(context,
                                 volume_id,
                                 {'host': service['host'],
@@ -22,7 +22,9 @@ across zones. There are two expansion points to this class for:
 
 import operator
 
+from nova import db
 from nova import log as logging
+from nova import rpc
 from nova.scheduler import api
 from nova.scheduler import driver
 
@@ -36,7 +38,7 @@ class ZoneAwareScheduler(driver.Scheduler):
         """Call novaclient zone method. Broken out for testing."""
         return api.call_zone_method(context, method, specs=specs)
 
-    def schedule_run_instance(self, context, topic='compute', specs={},
+    def schedule_run_instance(self, context, instance_id, request_spec,
                               *args, **kwargs):
         """This method is called from nova.compute.api to provision
         an instance. However we need to look at the parameters being
@@ -44,56 +46,86 @@ class ZoneAwareScheduler(driver.Scheduler):
         1. Create a Build Plan and then provision, or
         2. Use the Build Plan information in the request parameters
            to simply create the instance (either in this zone or
-           a child zone)."""
+           a child zone).
+        """
 
-        if 'blob' in specs:
-            return self.provision_instance(context, topic, specs)
+        # TODO(sandy): We'll have to look for richer specs at some point.
+
+        if 'blob' in request_spec:
+            self.provision_resource(context, request_spec, instance_id,
+                                    kwargs)
+            return None
 
         # Create build plan and provision ...
-        build_plan = self.select(context, specs)
-        for item in build_plan:
-            self.provision_instance(context, topic, item)
+        build_plan = self.select(context, request_spec)
+        if not build_plan:
+            raise driver.NoValidHost(_('No hosts were available'))
 
-    def provision_instance(context, topic, item):
-        """Create the requested instance in this Zone or a child zone."""
-        pass
+        for item in build_plan:
+            self.provision_resource(context, item, instance_id, kwargs)
+
+        # Returning None short-circuits the routing to Compute (since
+        # we've already done it here)
+        return None
+
+    def provision_resource(self, context, item, instance_id, kwargs):
+        """Create the requested resource in this Zone or a child zone."""
+        if "hostname" in item:
+            host = item['hostname']
+            kwargs['instance_id'] = instance_id
+            rpc.cast(context,
+                     db.queue_get_for(context, "compute", host),
+                     {"method": "run_instance",
+                      "args": kwargs})
+            LOG.debug(_("Casted to compute %(host)s for run_instance")
+                      % locals())
+        else:
+            # TODO(sandy) Provision in child zone ...
+            LOG.warning(_("Provision to Child Zone not supported (yet)"))
+            pass
 
-    def select(self, context, *args, **kwargs):
+    def select(self, context, request_spec, *args, **kwargs):
         """Select returns a list of weights and zone/host information
         corresponding to the best hosts to service the request. Any
         child zone information has been encrypted so as not to reveal
-        anything about the children."""
-        return self._schedule(context, "compute", *args, **kwargs)
+        anything about the children.
+        """
+        return self._schedule(context, "compute", request_spec,
+                              *args, **kwargs)
 
-    def schedule(self, context, topic, *args, **kwargs):
+    # TODO(sandy): We're only focused on compute instances right now,
+    # so we don't implement the default "schedule()" method required
+    # of Schedulers.
+    def schedule(self, context, topic, request_spec, *args, **kwargs):
         """The schedule() contract requires we return the one
         best-suited host for this request.
         """
-        res = self._schedule(context, topic, *args, **kwargs)
-        # TODO(sirp): should this be a host object rather than a weight-dict?
-        if not res:
-            raise driver.NoValidHost(_('No hosts were available'))
-        return res[0]
+        raise driver.NoValidHost(_('No hosts were available'))
 
-    def _schedule(self, context, topic, *args, **kwargs):
+    def _schedule(self, context, topic, request_spec, *args, **kwargs):
         """Returns a list of hosts that meet the required specs,
         ordered by their fitness.
         """
 
-        #TODO(sandy): extract these from args.
+        if topic != "compute":
+            raise NotImplementedError(_("Zone Aware Scheduler only "
+                                        "understands Compute nodes "
+                                        "(for now)"))
+
+        #TODO(sandy): how to infer this from OS API params?
         num_instances = 1
-        specs = {}
 
         # Filter local hosts based on requirements ...
-        host_list = self.filter_hosts(num_instances, specs)
+        host_list = self.filter_hosts(num_instances, request_spec)
+
+        # TODO(sirp): weigh_hosts should also be a function of 'topic' or
+        # resources, so that we can apply different objective functions to it
 
         # then weigh the selected hosts.
         # weighted = [{weight=weight, name=hostname}, ...]
-        weighted = self.weigh_hosts(num_instances, specs, host_list)
+        weighted = self.weigh_hosts(num_instances, request_spec, host_list)
 
         # Next, tack on the best weights from the child zones ...
         child_results = self._call_zone_method(context, "select",
-                                               specs=specs)
+                                               specs=request_spec)
         for child_zone, result in child_results:
             for weighting in result:
                 # Remember the child_zone so we can get back to
@@ -108,12 +140,18 @@ class ZoneAwareScheduler(driver.Scheduler):
         weighted.sort(key=operator.itemgetter('weight'))
         return weighted
 
-    def filter_hosts(self, num, specs):
+    def filter_hosts(self, num, request_spec):
         """Derived classes must override this method and return
-        a list of hosts in [(hostname, capability_dict)] format."""
-        raise NotImplemented()
+        a list of hosts in [(hostname, capability_dict)] format.
+        """
+        # NOTE(sirp): The default logic is the equivalent to AllHostsFilter
+        service_states = self.zone_manager.service_states
+        return [(host, services)
+                for host, services in service_states.iteritems()]
 
-    def weigh_hosts(self, num, specs, hosts):
-        """Derived classes must override this method and return
-        a lists of hosts in [{weight, hostname}] format."""
-        raise NotImplemented()
+    def weigh_hosts(self, num, request_spec, hosts):
+        """Derived classes may override this to provide more sophisticated
+        scheduling objectives
+        """
+        # NOTE(sirp): The default logic is the same as the NoopCostFunction
+        return [dict(weight=1, hostname=host) for host, caps in hosts]
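provision_resource above routes the cast through db.queue_get_for(), which derives the per-host queue name. Conceptually (a sketch, not the actual DB-layer code):

    def queue_get_for(context, topic, host):
        # e.g. ('compute', 'host01') -> 'compute.host01'
        return '%s.%s' % (topic, host)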
@@ -17,16 +17,17 @@
 ZoneManager oversees all communications with child Zones.
 """
 
+import datetime
 import novaclient
 import thread
 import traceback
 
-from datetime import datetime
 from eventlet import greenpool
 
 from nova import db
 from nova import flags
 from nova import log as logging
+from nova import utils
 
 FLAGS = flags.FLAGS
 flags.DEFINE_integer('zone_db_check_interval', 60,
@@ -42,7 +43,7 @@ class ZoneState(object):
         self.name = None
         self.capabilities = None
         self.attempt = 0
-        self.last_seen = datetime.min
+        self.last_seen = datetime.datetime.min
         self.last_exception = None
         self.last_exception_time = None
 
@@ -56,7 +57,7 @@ class ZoneState(object):
     def update_metadata(self, zone_metadata):
         """Update zone metadata after successful communications with
            child zone."""
-        self.last_seen = datetime.now()
+        self.last_seen = utils.utcnow()
         self.attempt = 0
         self.name = zone_metadata.get("name", "n/a")
         self.capabilities = ", ".join(["%s=%s" % (k, v)
@@ -72,7 +73,7 @@ class ZoneState(object):
         """Something went wrong. Check to see if zone should be
            marked as offline."""
         self.last_exception = exception
-        self.last_exception_time = datetime.now()
+        self.last_exception_time = utils.utcnow()
         api_url = self.api_url
         logging.warning(_("'%(exception)s' error talking to "
                           "zone %(api_url)s") % locals())
@@ -104,7 +105,7 @@ def _poll_zone(zone):
 class ZoneManager(object):
     """Keeps the zone states updated."""
     def __init__(self):
-        self.last_zone_db_check = datetime.min
+        self.last_zone_db_check = datetime.datetime.min
         self.zone_states = {}  # { <zone_id> : ZoneState }
         self.service_states = {}  # { <host> : { <service> : { cap k : v }}}
         self.green_pool = greenpool.GreenPool()
@@ -158,10 +159,10 @@ class ZoneManager(object):
 
     def ping(self, context=None):
         """Ping should be called periodically to update zone status."""
-        diff = datetime.now() - self.last_zone_db_check
+        diff = utils.utcnow() - self.last_zone_db_check
         if diff.seconds >= FLAGS.zone_db_check_interval:
             logging.debug(_("Updating zone cache from db."))
-            self.last_zone_db_check = datetime.now()
+            self.last_zone_db_check = utils.utcnow()
             self._refresh_from_db(context)
         self._poll_zones(context)
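The datetime.now() to utils.utcnow() substitutions in this file do two things: they stop mixing local time into UTC comparisons, and they funnel every clock read through one function that tests can stub. A minimal sketch of such a wrapper (the override hook is an assumption for illustration, not necessarily nova's exact mechanism):

    import datetime

    utcnow_override = None  # tests may assign a fixed datetime here

    def utcnow():
        if utcnow_override:
            return utcnow_override
        return datetime.datetime.utcnow()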
@@ -19,14 +19,11 @@
 
 """Generic Node baseclass for all workers that run on hosts."""
 
+import greenlet
 import inspect
 import os
-import sys
-import time
 
-from eventlet import event
 from eventlet import greenthread
-from eventlet import greenpool
 
 from nova import context
 from nova import db
@@ -91,27 +88,37 @@ class Service(object):
         if 'nova-compute' == self.binary:
             self.manager.update_available_resource(ctxt)
 
-        conn1 = rpc.Connection.instance(new=True)
-        conn2 = rpc.Connection.instance(new=True)
-        conn3 = rpc.Connection.instance(new=True)
-        if self.report_interval:
-            consumer_all = rpc.TopicAdapterConsumer(
-                    connection=conn1,
-                    topic=self.topic,
-                    proxy=self)
-            consumer_node = rpc.TopicAdapterConsumer(
-                    connection=conn2,
-                    topic='%s.%s' % (self.topic, self.host),
-                    proxy=self)
-            fanout = rpc.FanoutAdapterConsumer(
-                    connection=conn3,
-                    topic=self.topic,
-                    proxy=self)
-
-            self.timers.append(consumer_all.attach_to_eventlet())
-            self.timers.append(consumer_node.attach_to_eventlet())
-            self.timers.append(fanout.attach_to_eventlet())
+        self.conn = rpc.Connection.instance(new=True)
+        logging.debug("Creating Consumer connection for Service %s" %
+                      self.topic)
+
+        # Share this same connection for these Consumers
+        consumer_all = rpc.TopicAdapterConsumer(
+                connection=self.conn,
+                topic=self.topic,
+                proxy=self)
+        consumer_node = rpc.TopicAdapterConsumer(
+                connection=self.conn,
+                topic='%s.%s' % (self.topic, self.host),
+                proxy=self)
+        fanout = rpc.FanoutAdapterConsumer(
+                connection=self.conn,
+                topic=self.topic,
+                proxy=self)
+        consumer_set = rpc.ConsumerSet(
+                connection=self.conn,
+                consumer_list=[consumer_all, consumer_node, fanout])
+
+        # Wait forever, processing these consumers
+        def _wait():
+            try:
+                consumer_set.wait()
+            finally:
+                consumer_set.close()
+
+        self.consumer_set_thread = greenthread.spawn(_wait)
+
+        if self.report_interval:
             pulse = utils.LoopingCall(self.report_state)
             pulse.start(interval=self.report_interval, now=False)
             self.timers.append(pulse)
@@ -174,6 +181,11 @@ class Service(object):
             logging.warn(_('Service killed that has no database entry'))
 
     def stop(self):
+        self.consumer_set_thread.kill()
+        try:
+            self.consumer_set_thread.wait()
+        except greenlet.GreenletExit:
+            pass
         for x in self.timers:
             try:
                 x.stop()
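The stop() additions follow the standard eventlet shutdown idiom: kill() raises GreenletExit inside the green thread and wait() re-raises it in the caller, which is why it is caught and discarded. The same pattern in isolation:

    import greenlet
    from eventlet import greenthread

    def _serve_forever():
        while True:
            greenthread.sleep(1)

    gt = greenthread.spawn(_serve_forever)
    gt.kill()
    try:
        gt.wait()
    except greenlet.GreenletExit:
        pass  # expected on a clean shutdown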
nova/test.py (30 lines changed)
@@ -23,7 +23,6 @@ inline callbacks.
 
 """
 
-import datetime
 import functools
 import os
 import shutil
@@ -36,14 +35,14 @@ import shutil
 import stubout
 from eventlet import greenthread
 
-from nova import context
-from nova import db
 from nova import fakerabbit
 from nova import flags
 from nova import log
 from nova import rpc
+from nova import utils
 from nova import service
 from nova import wsgi
+from nova.virt import fake
 
 
 FLAGS = flags.FLAGS
@@ -89,7 +88,7 @@ class TestCase(unittest.TestCase):
         # NOTE(vish): We need a better method for creating fixtures for tests
         #             now that we have some required db setup for the system
         #             to work properly.
-        self.start = datetime.datetime.utcnow()
+        self.start = utils.utcnow()
         shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db),
                         os.path.join(FLAGS.state_path, FLAGS.sqlite_db))
 
@@ -103,6 +102,7 @@ class TestCase(unittest.TestCase):
         self._monkey_patch_attach()
         self._monkey_patch_wsgi()
         self._original_flags = FLAGS.FlagValuesDict()
+        rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size)
 
     def tearDown(self):
         """Runs after each test method to tear down test environment."""
@@ -117,6 +117,10 @@ class TestCase(unittest.TestCase):
         if FLAGS.fake_rabbit:
             fakerabbit.reset_all()
 
+        if FLAGS.connection_type == 'fake':
+            if hasattr(fake.FakeConnection, '_instance'):
+                del fake.FakeConnection._instance
+
         # Reset any overriden flags
         self.reset_flags()
 
@@ -199,7 +203,7 @@ class TestCase(unittest.TestCase):
         wsgi.Server.start = _wrapped_start
 
     # Useful assertions
-    def assertDictMatch(self, d1, d2):
+    def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
         """Assert two dicts are equivalent.
 
         This is a 'deep' match in the sense that it handles nested
@@ -230,15 +234,26 @@ class TestCase(unittest.TestCase):
         for key in d1keys:
             d1value = d1[key]
             d2value = d2[key]
+            try:
+                error = abs(float(d1value) - float(d2value))
+                within_tolerance = error <= tolerance
+            except (ValueError, TypeError):
+                # If both values aren't convertible to float, just ignore
+                # ValueError if arg is a str, TypeError if it's something else
+                # (like None)
+                within_tolerance = False
+
             if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
                 self.assertDictMatch(d1value, d2value)
             elif 'DONTCARE' in (d1value, d2value):
                 continue
+            elif approx_equal and within_tolerance:
+                continue
             elif d1value != d2value:
                 raise_assertion("d1['%(key)s']=%(d1value)s != "
                                 "d2['%(key)s']=%(d2value)s" % locals())
 
-    def assertDictListMatch(self, L1, L2):
+    def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
         """Assert a list of dicts are equivalent."""
         def raise_assertion(msg):
             L1str = str(L1)
@@ -254,4 +269,5 @@ class TestCase(unittest.TestCase):
             'len(L2)=%(L2count)d' % locals())
 
         for d1, d2 in zip(L1, L2):
-            self.assertDictMatch(d1, d2)
+            self.assertDictMatch(d1, d2, approx_equal=approx_equal,
+                                 tolerance=tolerance)
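Usage of the new approx_equal mode, which the least-cost scheduler tests below lean on when comparing floating-point weights; a hypothetical test case:

    from nova import test

    class ApproxMatchTestCase(test.TestCase):
        def test_close_weights_match(self):
            # Passes: |0.3001 - 0.3| = 0.0001 <= 0.001, the default
            # tolerance; without approx_equal the strict comparison
            # would raise.
            self.assertDictMatch({'weight': 0.3001}, {'weight': 0.3},
                                 approx_equal=True)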
nova/tests/scheduler/__init__.py (new file, 0 lines)

nova/tests/scheduler/test_host_filter.py (new file, 206 lines)
@@ -0,0 +1,206 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""

import json

from nova import exception
from nova import flags
from nova import test
from nova.scheduler import host_filter

FLAGS = flags.FLAGS


class FakeZoneManager:
    pass


class HostFilterTestCase(test.TestCase):
    """Test case for host filters."""

    def _host_caps(self, multiplier):
        # Returns host capabilities in the following way:
        #   host1 = memory:free 10 (100max)
        #           disk:available 100 (1000max)
        #   hostN = memory:free 10 + 10N
        #           disk:available 100 + 100N
        # in other words: hostN has more resources than host0
        # which means ... don't go above 10 hosts.
        return {'host_name-description': 'XenServer %s' % multiplier,
                'host_hostname': 'xs-%s' % multiplier,
                'host_memory_total': 100,
                'host_memory_overhead': 10,
                'host_memory_free': 10 + multiplier * 10,
                'host_memory_free-computed': 10 + multiplier * 10,
                'host_other-config': {},
                'host_ip_address': '192.168.1.%d' % (100 + multiplier),
                'host_cpu_info': {},
                'disk_available': 100 + multiplier * 100,
                'disk_total': 1000,
                'disk_used': 0,
                'host_uuid': 'xxx-%d' % multiplier,
                'host_name-label': 'xs-%s' % multiplier}

    def setUp(self):
        self.old_flag = FLAGS.default_host_filter
        FLAGS.default_host_filter = \
                'nova.scheduler.host_filter.AllHostsFilter'
        self.instance_type = dict(name='tiny',
                                  memory_mb=50,
                                  vcpus=10,
                                  local_gb=500,
                                  flavorid=1,
                                  swap=500,
                                  rxtx_quota=30000,
                                  rxtx_cap=200)

        self.zone_manager = FakeZoneManager()
        states = {}
        for x in xrange(10):
            states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
        self.zone_manager.service_states = states

    def tearDown(self):
        FLAGS.default_host_filter = self.old_flag

    def test_choose_filter(self):
        # Test default filter ...
        hf = host_filter.choose_host_filter()
        self.assertEquals(hf._full_name(),
                          'nova.scheduler.host_filter.AllHostsFilter')
        # Test valid filter ...
        hf = host_filter.choose_host_filter(
                'nova.scheduler.host_filter.InstanceTypeFilter')
        self.assertEquals(hf._full_name(),
                          'nova.scheduler.host_filter.InstanceTypeFilter')
        # Test invalid filter ...
        try:
            host_filter.choose_host_filter('does not exist')
            self.fail("Should not find host filter.")
        except exception.SchedulerHostFilterNotFound:
            pass

    def test_all_host_filter(self):
        hf = host_filter.AllHostsFilter()
        cooked = hf.instance_type_to_filter(self.instance_type)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(10, len(hosts))
        for host, capabilities in hosts:
            self.assertTrue(host.startswith('host'))

    def test_instance_type_filter(self):
        hf = host_filter.InstanceTypeFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = hf.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
                          name)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        self.assertEquals('host05', just_hosts[0])
        self.assertEquals('host10', just_hosts[5])

    def test_json_filter(self):
        hf = host_filter.JsonFilter()
        # filter all hosts that can support 50 ram and 500 disk
        name, cooked = hf.instance_type_to_filter(self.instance_type)
        self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
        hosts = hf.filter_hosts(self.zone_manager, cooked)
        self.assertEquals(6, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        self.assertEquals('host05', just_hosts[0])
        self.assertEquals('host10', just_hosts[5])

        # Try some custom queries

        raw = ['or',
                   ['and',
                       ['<', '$compute.host_memory_free', 30],
                       ['<', '$compute.disk_available', 300],
                   ],
                   ['and',
                       ['>', '$compute.host_memory_free', 70],
                       ['>', '$compute.disk_available', 700],
                   ],
              ]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(5, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([1, 2, 8, 9, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        raw = ['not',
                  ['=', '$compute.host_memory_free', 30],
              ]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(9, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
        cooked = json.dumps(raw)
        hosts = hf.filter_hosts(self.zone_manager, cooked)

        self.assertEquals(5, len(hosts))
        just_hosts = [host for host, caps in hosts]
        just_hosts.sort()
        for index, host in zip([2, 4, 6, 8, 10], just_hosts):
            self.assertEquals('host%02d' % index, host)

        # Try some bogus input ...
        raw = ['unknown command', ]
        cooked = json.dumps(raw)
        try:
            hf.filter_hosts(self.zone_manager, cooked)
            self.fail("Should give KeyError")
        except KeyError:
            pass

        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
                ['not', True, False, True, False])))

        try:
            hf.filter_hosts(self.zone_manager, json.dumps(
                    'not', True, False, True, False))
            self.fail("Should give KeyError")
        except KeyError:
            pass

        self.assertFalse(hf.filter_hosts(self.zone_manager,
                json.dumps(['=', '$foo', 100])))
        self.assertFalse(hf.filter_hosts(self.zone_manager,
                json.dumps(['=', '$.....', 100])))
        self.assertFalse(hf.filter_hosts(self.zone_manager,
                json.dumps(
                    ['>', ['and', ['or', ['not', ['<', ['>=',
                        ['<=', ['in', ]]]]]]]])))

        self.assertFalse(hf.filter_hosts(self.zone_manager,
                json.dumps(['=', {}, ['>', '$missing....foo']])))
nova/tests/scheduler/test_least_cost_scheduler.py (new file, 144 lines)
@@ -0,0 +1,144 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Least Cost Scheduler
"""

from nova import flags
from nova import test
from nova.scheduler import least_cost
from nova.tests.scheduler import test_zone_aware_scheduler

MB = 1024 * 1024
FLAGS = flags.FLAGS


class FakeHost(object):
    def __init__(self, host_id, free_ram, io):
        self.id = host_id
        self.free_ram = free_ram
        self.io = io


class WeightedSumTestCase(test.TestCase):
    def test_empty_domain(self):
        domain = []
        weighted_fns = []
        result = least_cost.weighted_sum(domain, weighted_fns)
        expected = []
        self.assertEqual(expected, result)

    def test_basic_costing(self):
        hosts = [
            FakeHost(1, 512 * MB, 100),
            FakeHost(2, 256 * MB, 400),
            FakeHost(3, 512 * MB, 100),
        ]

        weighted_fns = [
            (1, lambda h: h.free_ram),  # Fill-first, free_ram is a *cost*
            (2, lambda h: h.io),  # Avoid high I/O
        ]

        costs = least_cost.weighted_sum(
                domain=hosts, weighted_fns=weighted_fns)

        # Each 256 MB unit of free-ram contributes 0.5 points by way of:
        #   cost = weight * (score / max_score) = 1 * (256 / 512) = 0.5
        # Each 100 iops of IO adds 0.5 points by way of:
        #   cost = 2 * (100 / 400) = 2 * 0.25 = 0.5
        expected = [1.5, 2.5, 1.5]
        self.assertEqual(expected, costs)


class LeastCostSchedulerTestCase(test.TestCase):
    def setUp(self):
        super(LeastCostSchedulerTestCase, self).setUp()

        class FakeZoneManager:
            pass

        zone_manager = FakeZoneManager()

        states = test_zone_aware_scheduler.fake_zone_manager_service_states(
                num_hosts=10)
        zone_manager.service_states = states

        self.sched = least_cost.LeastCostScheduler()
        self.sched.zone_manager = zone_manager

    def tearDown(self):
        super(LeastCostSchedulerTestCase, self).tearDown()

    def assertWeights(self, expected, num, request_spec, hosts):
        weighted = self.sched.weigh_hosts(num, request_spec, hosts)
        self.assertDictListMatch(weighted, expected, approx_equal=True)

    def test_no_hosts(self):
        num = 1
        request_spec = {}
        hosts = []

        expected = []
        self.assertWeights(expected, num, request_spec, hosts)

    def test_noop_cost_fn(self):
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.noop_cost_fn',
        ]
        FLAGS.noop_cost_fn_weight = 1

        num = 1
        request_spec = {}
        hosts = self.sched.filter_hosts(num, request_spec)

        expected = [dict(weight=1, hostname=hostname)
                    for hostname, caps in hosts]
        self.assertWeights(expected, num, request_spec, hosts)

    def test_cost_fn_weights(self):
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.noop_cost_fn',
        ]
        FLAGS.noop_cost_fn_weight = 2

        num = 1
        request_spec = {}
        hosts = self.sched.filter_hosts(num, request_spec)

        expected = [dict(weight=2, hostname=hostname)
                    for hostname, caps in hosts]
        self.assertWeights(expected, num, request_spec, hosts)

    def test_fill_first_cost_fn(self):
        FLAGS.least_cost_scheduler_cost_functions = [
            'nova.scheduler.least_cost.fill_first_cost_fn',
        ]
        FLAGS.fill_first_cost_fn_weight = 1

        num = 1
        request_spec = {}
        hosts = self.sched.filter_hosts(num, request_spec)

        expected = []
        for idx, (hostname, caps) in enumerate(hosts):
            # Costs are normalized so over 10 hosts, each host with increasing
            # free ram will cost 1/N more. Since the lowest cost host has some
            # free ram, we add in the 1/N for the base_cost
            weight = 0.1 + (0.1 * idx)
            weight_dict = dict(weight=weight, hostname=hostname)
            expected.append(weight_dict)

        self.assertWeights(expected, num, request_spec, hosts)
@@ -61,7 +61,8 @@ class SchedulerTestCase(test.TestCase):
|
|||||||
"""Test case for scheduler"""
|
"""Test case for scheduler"""
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(SchedulerTestCase, self).setUp()
|
super(SchedulerTestCase, self).setUp()
|
||||||
self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')
|
driver = 'nova.tests.scheduler.test_scheduler.TestDriver'
|
||||||
|
self.flags(scheduler_driver=driver)
|
||||||
|
|
||||||
def _create_compute_service(self):
|
def _create_compute_service(self):
|
||||||
"""Create compute-manager(ComputeNode and Service record)."""
|
"""Create compute-manager(ComputeNode and Service record)."""
|
||||||
@@ -196,7 +197,7 @@ class ZoneSchedulerTestCase(test.TestCase):
         service.topic = 'compute'
         service.id = kwargs['id']
         service.availability_zone = kwargs['zone']
-        service.created_at = datetime.datetime.utcnow()
+        service.created_at = utils.utcnow()
         return service
 
     def test_with_two_zones(self):
@@ -289,7 +290,7 @@ class SimpleDriverTestCase(test.TestCase):
         dic['host'] = kwargs.get('host', 'dummy')
         s_ref = db.service_create(self.context, dic)
         if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():
-            t = datetime.datetime.utcnow() - datetime.timedelta(0)
+            t = utils.utcnow() - datetime.timedelta(0)
             dic['created_at'] = kwargs.get('created_at', t)
             dic['updated_at'] = kwargs.get('updated_at', t)
             db.service_update(self.context, s_ref['id'], dic)
@@ -400,7 +401,7 @@ class SimpleDriverTestCase(test.TestCase):
                                    FLAGS.compute_manager)
         compute1.start()
         s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
-        now = datetime.datetime.utcnow()
+        now = utils.utcnow()
         delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
         past = now - delta
         db.service_update(self.context, s1['id'], {'updated_at': past})
@@ -541,7 +542,7 @@ class SimpleDriverTestCase(test.TestCase):
     def test_wont_sechedule_if_specified_host_is_down(self):
         compute1 = self.start_service('compute', host='host1')
         s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
-        now = datetime.datetime.utcnow()
+        now = utils.utcnow()
         delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
         past = now - delta
         db.service_update(self.context, s1['id'], {'updated_at': past})
@@ -691,7 +692,7 @@ class SimpleDriverTestCase(test.TestCase):
         dic = {'instance_id': instance_id, 'size': 1}
         v_ref = db.volume_create(self.context, {'instance_id': instance_id,
                                                 'size': 1})
-        t1 = datetime.datetime.utcnow() - datetime.timedelta(1)
+        t1 = utils.utcnow() - datetime.timedelta(1)
         dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
                'topic': 'volume', 'report_count': 0}
         s_ref = db.service_create(self.context, dic)
@@ -708,7 +709,7 @@ class SimpleDriverTestCase(test.TestCase):
         """Confirms src-compute node is alive."""
         instance_id = self._create_instance()
         i_ref = db.instance_get(self.context, instance_id)
-        t = datetime.datetime.utcnow() - datetime.timedelta(10)
+        t = utils.utcnow() - datetime.timedelta(10)
         s_ref = self._create_compute_service(created_at=t, updated_at=t,
                                              host=i_ref['host'])
 
@@ -736,7 +737,7 @@ class SimpleDriverTestCase(test.TestCase):
         """Confirms exception raises in case dest host does not exist."""
         instance_id = self._create_instance()
         i_ref = db.instance_get(self.context, instance_id)
-        t = datetime.datetime.utcnow() - datetime.timedelta(10)
+        t = utils.utcnow() - datetime.timedelta(10)
         s_ref = self._create_compute_service(created_at=t, updated_at=t,
                                              host=i_ref['host'])
 
@@ -795,7 +796,7 @@ class SimpleDriverTestCase(test.TestCase):
         # mocks for live_migration_common_check()
         instance_id = self._create_instance()
         i_ref = db.instance_get(self.context, instance_id)
-        t1 = datetime.datetime.utcnow() - datetime.timedelta(10)
+        t1 = utils.utcnow() - datetime.timedelta(10)
         s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
                                              host=dest)
 
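Every hunk above swaps a bare datetime.datetime.utcnow() for utils.utcnow(). Funnelling timestamps through one helper is what lets tests pin "now" to a fixed value instead of racing the wall clock; a minimal sketch of such an overridable wrapper (assumed shape, not necessarily nova's exact implementation):

    import datetime

    utcnow_override = None  # a test may assign a fixed datetime here


    def utcnow():
        # wall-clock time by default, overridable from tests
        if utcnow_override is not None:
            return utcnow_override
        return datetime.datetime.utcnow()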
@@ -22,6 +22,37 @@ from nova.scheduler import zone_aware_scheduler
 from nova.scheduler import zone_manager
 
 
+def _host_caps(multiplier):
+    # Returns host capabilities in the following way:
+    # host1 = memory:free 10 (100max)
+    #         disk:available 100 (1000max)
+    # hostN = memory:free 10 + 10N
+    #         disk:available 100 + 100N
+    # in other words: hostN has more resources than host0
+    # which means ... don't go above 10 hosts.
+    return {'host_name-description': 'XenServer %s' % multiplier,
+            'host_hostname': 'xs-%s' % multiplier,
+            'host_memory_total': 100,
+            'host_memory_overhead': 10,
+            'host_memory_free': 10 + multiplier * 10,
+            'host_memory_free-computed': 10 + multiplier * 10,
+            'host_other-config': {},
+            'host_ip_address': '192.168.1.%d' % (100 + multiplier),
+            'host_cpu_info': {},
+            'disk_available': 100 + multiplier * 100,
+            'disk_total': 1000,
+            'disk_used': 0,
+            'host_uuid': 'xxx-%d' % multiplier,
+            'host_name-label': 'xs-%s' % multiplier}
+
+
+def fake_zone_manager_service_states(num_hosts):
+    states = {}
+    for x in xrange(num_hosts):
+        states['host%02d' % (x + 1)] = {'compute': _host_caps(x)}
+    return states
+
+
 class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
     def filter_hosts(self, num, specs):
         # NOTE(sirp): this is returning [(hostname, services)]
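Given the helpers above, fake_zone_manager_service_states(2) produces a service_states mapping whose hosts scale linearly, e.g.:

    states = fake_zone_manager_service_states(2)
    # {'host01': {'compute': {'host_memory_free': 10, 'disk_available': 100, ...}},
    #  'host02': {'compute': {'host_memory_free': 20, 'disk_available': 200, ...}}}

which is why host N+1 always looks roomier than host N in the weighting tests.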
@@ -112,4 +143,6 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         sched.set_zone_manager(zm)
 
         fake_context = {}
-        self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {})
+        self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
+                          fake_context, 1,
+                          dict(host_filter=None, instance_type={}))
@@ -86,6 +86,7 @@ class _AuthManagerBaseTestCase(test.TestCase):
         super(_AuthManagerBaseTestCase, self).setUp()
         self.flags(connection_type='fake')
         self.manager = manager.AuthManager(new=True)
+        self.manager.mc.cache = {}
 
     def test_create_and_find_user(self):
         with user_generator(self.manager):
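The new setUp line empties the auth manager's cache between tests so a user or project looked up in one test cannot satisfy a lookup in the next. The assignment only makes sense if manager.mc is a memcache-style client whose fake backend keeps entries in a plain dict, roughly:

    class FakeMemcache(object):
        # dict-backed stand-in for a memcache client (illustrative only)
        def __init__(self):
            self.cache = {}

        def get(self, key):
            return self.cache.get(key)

        def set(self, key, value, time=0):
            self.cache[key] = value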
@@ -17,13 +17,9 @@
 # under the License.
 
 from base64 import b64decode
-import json
 from M2Crypto import BIO
 from M2Crypto import RSA
 import os
-import shutil
-import tempfile
-import time
 
 from eventlet import greenthread
 
@@ -33,12 +29,10 @@ from nova import db
 from nova import flags
 from nova import log as logging
 from nova import rpc
-from nova import service
 from nova import test
 from nova import utils
 from nova import exception
 from nova.auth import manager
-from nova.compute import power_state
 from nova.api.ec2 import cloud
 from nova.api.ec2 import ec2utils
 from nova.image import local
@@ -79,6 +73,15 @@ class CloudTestCase(test.TestCase):
         self.stubs.Set(local.LocalImageService, 'show', fake_show)
         self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
+
+        # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
+        rpc_cast = rpc.cast
+
+        def finish_cast(*args, **kwargs):
+            rpc_cast(*args, **kwargs)
+            greenthread.sleep(0.2)
+
+        self.stubs.Set(rpc, 'cast', finish_cast)
 
     def tearDown(self):
         networks = db.project_get_networks(self.context, self.project.id,
                                            associate=False)
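Wrapping rpc.cast in finish_cast is what makes the scattered greenthread.sleep(0.3) calls deleted below unnecessary: every cast now yields to its consumer immediately after sending, so tests never assert before the message is handled. The same wrap-then-stub pattern in isolation (sketch):

    import functools


    def with_post_hook(fn, hook):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            result = fn(*args, **kwargs)
            hook()  # e.g. lambda: greenthread.sleep(0.2)
            return result
        return wrapper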
@@ -86,8 +89,6 @@ class CloudTestCase(test.TestCase):
             db.network_disassociate(self.context, network['id'])
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
-        self.compute.kill()
-        self.network.kill()
         super(CloudTestCase, self).tearDown()
 
     def _create_key(self, name):
@@ -114,7 +115,6 @@ class CloudTestCase(test.TestCase):
         self.cloud.describe_addresses(self.context)
         self.cloud.release_address(self.context,
                                    public_ip=address)
-        greenthread.sleep(0.3)
         db.floating_ip_destroy(self.context, address)
 
     @test.skip_test("Skipping this pending future merge")
@@ -150,12 +150,10 @@ class CloudTestCase(test.TestCase):
         self.cloud.associate_address(self.context,
                                      instance_id=ec2_id,
                                      public_ip=address)
-        greenthread.sleep(0.3)
         self.cloud.disassociate_address(self.context,
                                         public_ip=address)
         self.cloud.release_address(self.context,
                                    public_ip=address)
-        greenthread.sleep(0.3)
         self.network.deallocate_fixed_ip(self.context, fixed)
         db.instance_destroy(self.context, inst['id'])
         db.floating_ip_destroy(self.context, address)
@@ -192,6 +190,25 @@ class CloudTestCase(test.TestCase):
         db.volume_destroy(self.context, vol1['id'])
         db.volume_destroy(self.context, vol2['id'])
 
+    def test_create_volume_from_snapshot(self):
+        """Makes sure create_volume works when we specify a snapshot."""
+        vol = db.volume_create(self.context, {'size': 1})
+        snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
+                                                 'volume_size': vol['size'],
+                                                 'status': "available"})
+        snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
+
+        result = self.cloud.create_volume(self.context,
+                                          snapshot_id=snapshot_id)
+        volume_id = result['volumeId']
+        result = self.cloud.describe_volumes(self.context)
+        self.assertEqual(len(result['volumeSet']), 2)
+        self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id)
+
+        db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id))
+        db.snapshot_destroy(self.context, snap['id'])
+        db.volume_destroy(self.context, vol['id'])
+
     def test_describe_availability_zones(self):
         """Makes sure describe_availability_zones works and filters results."""
         service1 = db.service_create(self.context, {'host': 'host1_zones',
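ec2utils.id_to_ec2_id, as used above, just renders the internal integer id through the given format string, producing the zero-padded hex identifiers EC2 clients expect:

    'vol-%08x' % 1      # -> 'vol-00000001'
    'snap-%08x' % 255   # -> 'snap-000000ff'

and ec2utils.ec2_id_to_id reverses the mapping for the teardown calls.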
@@ -211,13 +228,59 @@ class CloudTestCase(test.TestCase):
 
     # NOTE(jkoelker): this test relies on fixed_ip being in instances
     @test.skip_test("EC2 stuff needs fixed_ip in instance_ref")
+    def test_describe_snapshots(self):
+        """Makes sure describe_snapshots works and filters results."""
+        vol = db.volume_create(self.context, {})
+        snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']})
+        snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']})
+        result = self.cloud.describe_snapshots(self.context)
+        self.assertEqual(len(result['snapshotSet']), 2)
+        snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x')
+        result = self.cloud.describe_snapshots(self.context,
+                                               snapshot_id=[snapshot_id])
+        self.assertEqual(len(result['snapshotSet']), 1)
+        self.assertEqual(
+            ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']),
+            snap2['id'])
+        db.snapshot_destroy(self.context, snap1['id'])
+        db.snapshot_destroy(self.context, snap2['id'])
+        db.volume_destroy(self.context, vol['id'])
+
+    def test_create_snapshot(self):
+        """Makes sure create_snapshot works."""
+        vol = db.volume_create(self.context, {'status': "available"})
+        volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
+
+        result = self.cloud.create_snapshot(self.context,
+                                            volume_id=volume_id)
+        snapshot_id = result['snapshotId']
+        result = self.cloud.describe_snapshots(self.context)
+        self.assertEqual(len(result['snapshotSet']), 1)
+        self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
+
+        db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id))
+        db.volume_destroy(self.context, vol['id'])
+
+    def test_delete_snapshot(self):
+        """Makes sure delete_snapshot works."""
+        vol = db.volume_create(self.context, {'status': "available"})
+        snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
+                                                 'status': "available"})
+        snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
+
+        result = self.cloud.delete_snapshot(self.context,
+                                            snapshot_id=snapshot_id)
+        self.assertTrue(result)
+
+        db.volume_destroy(self.context, vol['id'])
+
     def test_describe_instances(self):
         """Makes sure describe_instances works and filters results."""
         inst1 = db.instance_create(self.context, {'reservation_id': 'a',
-                                                  'image_id': 1,
+                                                  'image_ref': 1,
                                                   'host': 'host1'})
         inst2 = db.instance_create(self.context, {'reservation_id': 'a',
-                                                  'image_id': 1,
+                                                  'image_ref': 1,
                                                   'host': 'host2'})
         comp1 = db.service_create(self.context, {'host': 'host1',
                                                  'availability_zone': 'zone1',
@@ -329,31 +392,25 @@ class CloudTestCase(test.TestCase):
                   'instance_type': instance_type,
                   'max_count': max_count}
         rv = self.cloud.run_instances(self.context, **kwargs)
-        greenthread.sleep(0.3)
         instance_id = rv['instancesSet'][0]['instanceId']
         output = self.cloud.get_console_output(context=self.context,
                                                instance_id=[instance_id])
         self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT')
         # TODO(soren): We need this until we can stop polling in the rpc code
         # for unit tests.
-        greenthread.sleep(0.3)
         rv = self.cloud.terminate_instances(self.context, [instance_id])
-        greenthread.sleep(0.3)
 
     def test_ajax_console(self):
         kwargs = {'image_id': 'ami-1'}
         rv = self.cloud.run_instances(self.context, **kwargs)
         instance_id = rv['instancesSet'][0]['instanceId']
-        greenthread.sleep(0.3)
         output = self.cloud.get_ajax_console(context=self.context,
                                              instance_id=[instance_id])
         self.assertEquals(output['url'],
                           '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url)
         # TODO(soren): We need this until we can stop polling in the rpc code
         # for unit tests.
-        greenthread.sleep(0.3)
         rv = self.cloud.terminate_instances(self.context, [instance_id])
-        greenthread.sleep(0.3)
 
     def test_key_generation(self):
         result = self._create_key('test')
@@ -413,7 +470,7 @@ class CloudTestCase(test.TestCase):
 
     def test_terminate_instances(self):
         inst1 = db.instance_create(self.context, {'reservation_id': 'a',
-                                                  'image_id': 1,
+                                                  'image_ref': 1,
                                                   'host': 'host1'})
         terminate_instances = self.cloud.terminate_instances
         # valid instance_id
@@ -19,7 +19,6 @@
 Tests For Compute
 """
 
-import datetime
 import mox
 import stubout
 
@@ -84,7 +83,7 @@ class ComputeTestCase(test.TestCase):
     def _create_instance(self, params={}):
        """Create a test instance"""
        inst = {}
-        inst['image_id'] = 1
+        inst['image_ref'] = 1
        inst['reservation_id'] = 'r-fakeres'
        inst['launch_time'] = '10'
        inst['user_id'] = self.user.id
@@ -149,7 +148,7 @@ class ComputeTestCase(test.TestCase):
         ref = self.compute_api.create(
             self.context,
             instance_type=instance_types.get_default_instance_type(),
-            image_id=None,
+            image_href=None,
             security_group=['testgroup'])
         try:
             self.assertEqual(len(db.security_group_get_by_instance(
@@ -167,7 +166,7 @@ class ComputeTestCase(test.TestCase):
         ref = self.compute_api.create(
             self.context,
             instance_type=instance_types.get_default_instance_type(),
-            image_id=None,
+            image_href=None,
             security_group=['testgroup'])
         try:
             db.instance_destroy(self.context, ref[0]['id'])
@@ -183,7 +182,7 @@ class ComputeTestCase(test.TestCase):
         ref = self.compute_api.create(
             self.context,
             instance_type=instance_types.get_default_instance_type(),
-            image_id=None,
+            image_href=None,
             security_group=['testgroup'])
 
         try:
@@ -216,12 +215,12 @@ class ComputeTestCase(test.TestCase):
         instance_ref = db.instance_get(self.context, instance_id)
         self.assertEqual(instance_ref['launched_at'], None)
         self.assertEqual(instance_ref['deleted_at'], None)
-        launch = datetime.datetime.utcnow()
+        launch = utils.utcnow()
         self.compute.run_instance(self.context, instance_id)
         instance_ref = db.instance_get(self.context, instance_id)
         self.assert_(instance_ref['launched_at'] > launch)
         self.assertEqual(instance_ref['deleted_at'], None)
-        terminate = datetime.datetime.utcnow()
+        terminate = utils.utcnow()
         self.compute.terminate_instance(self.context, instance_id)
         self.context = self.context.elevated(True)
         instance_ref = db.instance_get(self.context, instance_id)
@@ -20,8 +20,6 @@
 Tests For Console proxy.
 """
 
-import datetime
-
 from nova import context
 from nova import db
 from nova import exception
@@ -18,6 +18,7 @@ import eventlet
 import mox
 import os
 import re
+import shutil
 import sys
 
 from xml.etree.ElementTree import fromstring as xml_to_tree
@@ -32,7 +33,8 @@ from nova import utils
 from nova.api.ec2 import cloud
 from nova.auth import manager
 from nova.compute import power_state
-from nova.virt import libvirt_conn
+from nova.virt.libvirt import connection
+from nova.virt.libvirt import firewall
 
 libvirt = None
 FLAGS = flags.FLAGS
@@ -101,7 +103,7 @@ class CacheConcurrencyTestCase(test.TestCase):
 
     def test_same_fname_concurrency(self):
         """Ensures that the same fname cache runs at a sequentially"""
-        conn = libvirt_conn.LibvirtConnection
+        conn = connection.LibvirtConnection
         wait1 = eventlet.event.Event()
         done1 = eventlet.event.Event()
         eventlet.spawn(conn._cache_image, _concurrency,
@@ -122,7 +124,7 @@ class CacheConcurrencyTestCase(test.TestCase):
 
     def test_different_fname_concurrency(self):
         """Ensures that two different fname caches are concurrent"""
-        conn = libvirt_conn.LibvirtConnection
+        conn = connection.LibvirtConnection
         wait1 = eventlet.event.Event()
         done1 = eventlet.event.Event()
         eventlet.spawn(conn._cache_image, _concurrency,
@@ -143,7 +145,7 @@ class CacheConcurrencyTestCase(test.TestCase):
 class LibvirtConnTestCase(test.TestCase):
     def setUp(self):
         super(LibvirtConnTestCase, self).setUp()
-        libvirt_conn._late_load_cheetah()
+        connection._late_load_cheetah()
         self.flags(fake_call=True)
         self.manager = manager.AuthManager()
 
@@ -182,6 +184,7 @@ class LibvirtConnTestCase(test.TestCase):
                 'vcpus': 2,
                 'project_id': 'fake',
                 'bridge': 'br101',
+                'image_ref': '123456',
                 'instance_type_id': '5'}  # m1.small
 
     def lazy_load_library_exists(self):
@@ -194,8 +197,8 @@ class LibvirtConnTestCase(test.TestCase):
             return False
         global libvirt
         libvirt = __import__('libvirt')
-        libvirt_conn.libvirt = __import__('libvirt')
-        libvirt_conn.libxml2 = __import__('libxml2')
+        connection.libvirt = __import__('libvirt')
+        connection.libxml2 = __import__('libxml2')
         return True
 
     def create_fake_libvirt_mock(self, **kwargs):
@@ -205,7 +208,7 @@ class LibvirtConnTestCase(test.TestCase):
         class FakeLibvirtConnection(object):
             pass
 
-        # A fake libvirt_conn.IptablesFirewallDriver
+        # A fake connection.IptablesFirewallDriver
         class FakeIptablesFirewallDriver(object):
 
             def __init__(self, **kwargs):
@@ -221,11 +224,11 @@ class LibvirtConnTestCase(test.TestCase):
         for key, val in kwargs.items():
             fake.__setattr__(key, val)
 
-        # Inevitable mocks for libvirt_conn.LibvirtConnection
-        self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
-        libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
-        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
-        libvirt_conn.LibvirtConnection._conn = fake
+        # Inevitable mocks for connection.LibvirtConnection
+        self.mox.StubOutWithMock(connection.utils, 'import_class')
+        connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
+        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+        connection.LibvirtConnection._conn = fake
 
     def create_service(self, **kwargs):
         service_ref = {'host': kwargs.get('host', 'dummy'),
@@ -238,7 +241,7 @@ class LibvirtConnTestCase(test.TestCase):
 
     @test.skip_test("Please review this test to ensure intent")
     def test_preparing_xml_info(self):
-        conn = libvirt_conn.LibvirtConnection(True)
+        conn = connection.LibvirtConnection(True)
         instance_ref = db.instance_create(self.context, self.test_instance)
 
         result = conn._prepare_xml_info(instance_ref, False)
@@ -253,7 +256,7 @@ class LibvirtConnTestCase(test.TestCase):
         self.assertTrue(len(result['nics']) == 2)
 
     def test_get_nic_for_xml_v4(self):
-        conn = libvirt_conn.LibvirtConnection(True)
+        conn = connection.LibvirtConnection(True)
         network, mapping = _create_network_info()[0]
         self.flags(use_ipv6=False)
         params = conn._get_nic_for_xml(network, mapping)['extra_params']
@@ -261,7 +264,7 @@ class LibvirtConnTestCase(test.TestCase):
         self.assertTrue(params.find('PROJMASKV6') == -1)
 
     def test_get_nic_for_xml_v6(self):
-        conn = libvirt_conn.LibvirtConnection(True)
+        conn = connection.LibvirtConnection(True)
         network, mapping = _create_network_info()[0]
         self.flags(use_ipv6=True)
         params = conn._get_nic_for_xml(network, mapping)['extra_params']
@@ -303,10 +306,72 @@ class LibvirtConnTestCase(test.TestCase):
         instance_data = dict(self.test_instance)
         self._check_xml_and_container(instance_data)
 
+    def test_snapshot(self):
+        FLAGS.image_service = 'nova.image.fake.FakeImageService'
+
+        # Only file-based instance storages are supported at the moment
+        test_xml = """
+        <domain type='kvm'>
+            <devices>
+                <disk type='file'>
+                    <source file='filename'/>
+                </disk>
+            </devices>
+        </domain>
+        """
+
+        class FakeVirtDomain(object):
+
+            def __init__(self):
+                pass
+
+            def snapshotCreateXML(self, *args):
+                return None
+
+            def XMLDesc(self, *args):
+                return test_xml
+
+        def fake_lookup(instance_name):
+            if instance_name == instance_ref.name:
+                return FakeVirtDomain()
+
+        def fake_execute(*args):
+            # Touch filename to pass 'with open(out_path)'
+            open(args[-1], "a").close()
+
+        # Start test
+        image_service = utils.import_object(FLAGS.image_service)
+
+        # Assuming that base image already exists in image_service
+        instance_ref = db.instance_create(self.context, self.test_instance)
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        # Create new image. It will be updated in snapshot method
+        # To work with it from snapshot, the single image_service is needed
+        recv_meta = image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+        connection.LibvirtConnection._conn.lookupByName = fake_lookup
+        self.mox.StubOutWithMock(connection.utils, 'execute')
+        connection.utils.execute = fake_execute
+
+        self.mox.ReplayAll()
+
+        conn = connection.LibvirtConnection(False)
+        conn.snapshot(instance_ref, recv_meta['id'])
+
+        snapshot = image_service.show(context, recv_meta['id'])
+        self.assertEquals(snapshot['properties']['image_state'], 'available')
+        self.assertEquals(snapshot['status'], 'active')
+        self.assertEquals(snapshot['name'], snapshot_name)
+
     def test_multi_nic(self):
         instance_data = dict(self.test_instance)
         network_info = _create_network_info(2)
-        conn = libvirt_conn.LibvirtConnection(True)
+        conn = connection.LibvirtConnection(True)
         instance_ref = db.instance_create(self.context, instance_data)
         xml = conn.to_xml(instance_ref, False, network_info)
         tree = xml_to_tree(xml)
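The added test_snapshot drives the whole snapshot path: register a placeholder image in 'creating' state, stub out the libvirt domain lookup and shell execution, run conn.snapshot, then confirm the image service record was promoted. Condensed, the flow under test is:

    image_service = utils.import_object(FLAGS.image_service)
    recv_meta = image_service.create(context, {'name': 'test-snap',
                                               'status': 'creating'})
    conn = connection.LibvirtConnection(False)
    conn.snapshot(instance_ref, recv_meta['id'])
    snapshot = image_service.show(context, recv_meta['id'])
    # snapshot['status'] == 'active'; properties carry image_state 'available'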
@@ -343,7 +408,7 @@ class LibvirtConnTestCase(test.TestCase):
                                     'instance_id': instance_ref['id']})
 
         self.flags(libvirt_type='lxc')
-        conn = libvirt_conn.LibvirtConnection(True)
+        conn = connection.LibvirtConnection(True)
 
         uri = conn.get_uri()
         self.assertEquals(uri, 'lxc:///')
@@ -441,7 +506,7 @@ class LibvirtConnTestCase(test.TestCase):
 
         for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
             FLAGS.libvirt_type = libvirt_type
-            conn = libvirt_conn.LibvirtConnection(True)
+            conn = connection.LibvirtConnection(True)
 
             uri = conn.get_uri()
             self.assertEquals(uri, expected_uri)
@@ -468,7 +533,7 @@ class LibvirtConnTestCase(test.TestCase):
         FLAGS.libvirt_uri = testuri
         for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
             FLAGS.libvirt_type = libvirt_type
-            conn = libvirt_conn.LibvirtConnection(True)
+            conn = connection.LibvirtConnection(True)
             uri = conn.get_uri()
             self.assertEquals(uri, testuri)
         db.instance_destroy(user_context, instance_ref['id'])
@@ -492,13 +557,13 @@ class LibvirtConnTestCase(test.TestCase):
         self.create_fake_libvirt_mock(getVersion=getVersion,
                                       getType=getType,
                                       listDomainsID=listDomainsID)
-        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+        self.mox.StubOutWithMock(connection.LibvirtConnection,
                                  'get_cpu_info')
-        libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
+        connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
 
         # Start test
         self.mox.ReplayAll()
-        conn = libvirt_conn.LibvirtConnection(False)
+        conn = connection.LibvirtConnection(False)
         conn.update_available_resource(self.context, 'dummy')
         service_ref = db.service_get(self.context, service_ref['id'])
         compute_node = service_ref['compute_node'][0]
@@ -532,7 +597,7 @@ class LibvirtConnTestCase(test.TestCase):
         self.create_fake_libvirt_mock()
 
         self.mox.ReplayAll()
-        conn = libvirt_conn.LibvirtConnection(False)
+        conn = connection.LibvirtConnection(False)
         self.assertRaises(exception.ComputeServiceUnavailable,
                           conn.update_available_resource,
                           self.context, 'dummy')
@@ -567,7 +632,7 @@ class LibvirtConnTestCase(test.TestCase):
         # Start test
         self.mox.ReplayAll()
         try:
-            conn = libvirt_conn.LibvirtConnection(False)
+            conn = connection.LibvirtConnection(False)
             conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
             conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
             conn.firewall_driver.setattr('instance_filter_exists', fake_none)
@@ -616,7 +681,7 @@ class LibvirtConnTestCase(test.TestCase):
 
         # Start test
         self.mox.ReplayAll()
-        conn = libvirt_conn.LibvirtConnection(False)
+        conn = connection.LibvirtConnection(False)
         self.assertRaises(libvirt.libvirtError,
                           conn._live_migration,
                           self.context, instance_ref, 'dest', '',
@@ -645,7 +710,7 @@ class LibvirtConnTestCase(test.TestCase):
 
         # Start test
         self.mox.ReplayAll()
-        conn = libvirt_conn.LibvirtConnection(False)
+        conn = connection.LibvirtConnection(False)
         conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
         conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
 
@@ -666,10 +731,12 @@ class LibvirtConnTestCase(test.TestCase):
         except Exception, e:
             count = (0 <= str(e.message).find('Unexpected method call'))
 
+        shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name))
+
         self.assertTrue(count)
 
     def test_get_host_ip_addr(self):
-        conn = libvirt_conn.LibvirtConnection(False)
+        conn = connection.LibvirtConnection(False)
         ip = conn.get_host_ip_addr()
         self.assertEquals(ip, FLAGS.my_ip)
 
@@ -688,7 +755,7 @@ class IptablesFirewallTestCase(test.TestCase):
         class FakeLibvirtConnection(object):
             pass
         self.fake_libvirt_connection = FakeLibvirtConnection()
-        self.fw = libvirt_conn.IptablesFirewallDriver(
+        self.fw = firewall.IptablesFirewallDriver(
             get_connection=lambda: self.fake_libvirt_connection)
 
     def tearDown(self):
@@ -918,7 +985,7 @@ class NWFilterTestCase(test.TestCase):
 
         self.fake_libvirt_connection = Mock()
 
-        self.fw = libvirt_conn.NWFilterFirewall(
+        self.fw = firewall.NWFilterFirewall(
             lambda: self.fake_libvirt_connection)
 
     def tearDown(self):
@@ -16,7 +16,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import datetime
 import webob
 import webob.dec
 import webob.exc
@@ -21,11 +21,24 @@ import select
 from eventlet import greenpool
 from eventlet import greenthread
 
+from nova import exception
 from nova import test
 from nova import utils
 from nova.utils import parse_mailmap, str_dict_replace
 
 
+class ExceptionTestCase(test.TestCase):
+    @staticmethod
+    def _raise_exc(exc):
+        raise exc()
+
+    def test_exceptions_raise(self):
+        for name in dir(exception):
+            exc = getattr(exception, name)
+            if isinstance(exc, type):
+                self.assertRaises(exc, self._raise_exc, exc)
+
+
 class ProjectTestCase(test.TestCase):
     def test_authors_up_to_date(self):
         topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
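test_exceptions_raise sweeps every attribute the exception module exports and, for each class it finds, proves the class can be constructed and raised. That guards against exception subclasses whose __init__ signatures have drifted, e.g.:

    class BrokenError(Exception):
        def __init__(self):
            # a bad constructor makes the class impossible to raise
            raise ValueError('constructor is broken')

    # raising BrokenError() blows up with ValueError instead, which the
    # sweep above would surface as a test failure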
@@ -13,10 +13,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import nova
+import stubout
+
+import nova
 from nova import context
 from nova import flags
+from nova import log
 from nova import rpc
 import nova.notifier.api
 from nova.notifier.api import notify
@@ -24,8 +26,6 @@ from nova.notifier import no_op_notifier
 from nova.notifier import rabbit_notifier
 from nova import test
 
-import stubout
-
 
 class NotifierTestCase(test.TestCase):
     """Test case for notifications"""
@@ -115,3 +115,22 @@ class NotifierTestCase(test.TestCase):
         notify('publisher_id',
                'event_type', 'DEBUG', dict(a=3))
         self.assertEqual(self.test_topic, 'testnotify.debug')
+
+    def test_error_notification(self):
+        self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
+                       'nova.notifier.rabbit_notifier')
+        self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True)
+        LOG = log.getLogger('nova')
+        LOG.setup_from_flags()
+        msgs = []
+
+        def mock_cast(context, topic, data):
+            msgs.append(data)
+
+        self.stubs.Set(nova.rpc, 'cast', mock_cast)
+        LOG.error('foo')
+        self.assertEqual(1, len(msgs))
+        msg = msgs[0]
+        self.assertEqual(msg['event_type'], 'error_notification')
+        self.assertEqual(msg['priority'], 'ERROR')
+        self.assertEqual(msg['payload']['error'], 'foo')
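test_error_notification asserts the publish_errors wiring: once the flag is set and logging is reconfigured, a single LOG.error('foo') must emit exactly one notification through rpc, shaped like:

    {'event_type': 'error_notification',
     'priority': 'ERROR',
     'payload': {'error': 'foo'}}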
@@ -31,7 +31,6 @@ LOG = logging.getLogger('nova.tests.rpc')
 
 
 class RpcTestCase(test.TestCase):
-    """Test cases for rpc"""
     def setUp(self):
         super(RpcTestCase, self).setUp()
         self.conn = rpc.Connection.instance(True)
@@ -43,14 +42,55 @@ class RpcTestCase(test.TestCase):
         self.context = context.get_admin_context()
 
     def test_call_succeed(self):
-        """Get a value through rpc call"""
         value = 42
         result = rpc.call(self.context, 'test', {"method": "echo",
                                                  "args": {"value": value}})
         self.assertEqual(value, result)
 
+    def test_call_succeed_despite_multiple_returns(self):
+        value = 42
+        result = rpc.call(self.context, 'test', {"method": "echo_three_times",
+                                                 "args": {"value": value}})
+        self.assertEqual(value + 2, result)
+
+    def test_call_succeed_despite_multiple_returns_yield(self):
+        value = 42
+        result = rpc.call(self.context, 'test',
+                          {"method": "echo_three_times_yield",
+                           "args": {"value": value}})
+        self.assertEqual(value + 2, result)
+
+    def test_multicall_succeed_once(self):
+        value = 42
+        result = rpc.multicall(self.context,
+                               'test',
+                               {"method": "echo",
+                                "args": {"value": value}})
+        for i, x in enumerate(result):
+            if i > 0:
+                self.fail('should only receive one response')
+            self.assertEqual(value + i, x)
+
+    def test_multicall_succeed_three_times(self):
+        value = 42
+        result = rpc.multicall(self.context,
+                               'test',
+                               {"method": "echo_three_times",
+                                "args": {"value": value}})
+        for i, x in enumerate(result):
+            self.assertEqual(value + i, x)
+
+    def test_multicall_succeed_three_times_yield(self):
+        value = 42
+        result = rpc.multicall(self.context,
+                               'test',
+                               {"method": "echo_three_times_yield",
+                                "args": {"value": value}})
+        for i, x in enumerate(result):
+            self.assertEqual(value + i, x)
+
     def test_context_passed(self):
-        """Makes sure a context is passed through rpc call"""
+        """Makes sure a context is passed through rpc call."""
         value = 42
         result = rpc.call(self.context,
                           'test', {"method": "context",
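Together the new tests fix the call/multicall contract: rpc.call collapses a multi-reply method to its final value (hence value + 2 after three replies), while rpc.multicall hands back an iterable yielding each reply in order. Consuming it looks like:

    replies = rpc.multicall(self.context, 'test',
                            {"method": "echo_three_times",
                             "args": {"value": 42}})
    for i, reply in enumerate(replies):
        assert reply == 42 + i   # 42, 43, 44 -- one per context.reply()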
@@ -58,11 +98,12 @@ class RpcTestCase(test.TestCase):
         self.assertEqual(self.context.to_dict(), result)
 
     def test_call_exception(self):
-        """Test that exception gets passed back properly
+        """Test that exception gets passed back properly.
 
         rpc.call returns a RemoteError object. The value of the
         exception is converted to a string, so we convert it back
         to an int in the test.
+
         """
         value = 42
         self.assertRaises(rpc.RemoteError,
@@ -81,7 +122,7 @@ class RpcTestCase(test.TestCase):
         self.assertEqual(int(exc.value), value)
 
     def test_nested_calls(self):
-        """Test that we can do an rpc.call inside another call"""
+        """Test that we can do an rpc.call inside another call."""
         class Nested(object):
             @staticmethod
             def echo(context, queue, value):
@@ -108,25 +149,80 @@ class RpcTestCase(test.TestCase):
                                           "value": value}})
         self.assertEqual(value, result)
 
+    def test_connectionpool_single(self):
+        """Test that ConnectionPool recycles a single connection."""
+        conn1 = rpc.ConnectionPool.get()
+        rpc.ConnectionPool.put(conn1)
+        conn2 = rpc.ConnectionPool.get()
+        rpc.ConnectionPool.put(conn2)
+        self.assertEqual(conn1, conn2)
+
+    def test_connectionpool_double(self):
+        """Test that ConnectionPool returns and reuses separate connections.
+
+        When called consecutively we should get separate connections and upon
+        returning them those connections should be reused for future calls
+        before generating a new connection.
+
+        """
+        conn1 = rpc.ConnectionPool.get()
+        conn2 = rpc.ConnectionPool.get()
+
+        self.assertNotEqual(conn1, conn2)
+        rpc.ConnectionPool.put(conn1)
+        rpc.ConnectionPool.put(conn2)
+
+        conn3 = rpc.ConnectionPool.get()
+        conn4 = rpc.ConnectionPool.get()
+        self.assertEqual(conn1, conn3)
+        self.assertEqual(conn2, conn4)
+
+    def test_connectionpool_limit(self):
+        """Test connection pool limit and connection uniqueness."""
+        max_size = FLAGS.rpc_conn_pool_size
+        conns = []
+
+        for i in xrange(max_size):
+            conns.append(rpc.ConnectionPool.get())
+
+        self.assertFalse(rpc.ConnectionPool.free_items)
+        self.assertEqual(rpc.ConnectionPool.current_size,
+                         rpc.ConnectionPool.max_size)
+        self.assertEqual(len(set(conns)), max_size)
+
 
 class TestReceiver(object):
-    """Simple Proxy class so the consumer has methods to call
+    """Simple Proxy class so the consumer has methods to call.
 
-    Uses static methods because we aren't actually storing any state"""
+    Uses static methods because we aren't actually storing any state.
+
+    """
+
     @staticmethod
     def echo(context, value):
-        """Simply returns whatever value is sent in"""
+        """Simply returns whatever value is sent in."""
         LOG.debug(_("Received %s"), value)
         return value
 
     @staticmethod
     def context(context, value):
-        """Returns dictionary version of context"""
+        """Returns dictionary version of context."""
         LOG.debug(_("Received %s"), context)
         return context.to_dict()
 
+    @staticmethod
+    def echo_three_times(context, value):
+        context.reply(value)
+        context.reply(value + 1)
+        context.reply(value + 2)
+
+    @staticmethod
+    def echo_three_times_yield(context, value):
+        yield value
+        yield value + 1
+        yield value + 2
+
     @staticmethod
     def fail(context, value):
-        """Raises an exception with the value sent in"""
+        """Raises an exception with the value sent in."""
         raise Exception(value)
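The ConnectionPool tests encode recycle-first semantics: get() must return a previously put() connection before it creates a fresh one, and current_size is capped at FLAGS.rpc_conn_pool_size. A minimal structure with those observable properties (illustrative sketch, not nova's implementation, which builds on eventlet's pools):

    class RecycleFirstPool(object):
        def __init__(self, create, max_size):
            self.create = create
            self.max_size = max_size
            self.current_size = 0
            self.free_items = []

        def get(self):
            if self.free_items:
                return self.free_items.pop(0)
            self.current_size += 1  # a real pool would block at max_size
            return self.create()

        def put(self, item):
            self.free_items.append(item)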
@@ -45,10 +45,11 @@ class VolumeTestCase(test.TestCase):
         self.context = context.get_admin_context()
 
     @staticmethod
-    def _create_volume(size='0'):
+    def _create_volume(size='0', snapshot_id=None):
         """Create a volume object."""
         vol = {}
         vol['size'] = size
+        vol['snapshot_id'] = snapshot_id
         vol['user_id'] = 'fake'
         vol['project_id'] = 'fake'
         vol['availability_zone'] = FLAGS.storage_availability_zone
@@ -69,6 +70,25 @@ class VolumeTestCase(test.TestCase):
                           self.context,
                           volume_id)
 
+    def test_create_volume_from_snapshot(self):
+        """Test volume can be created from a snapshot."""
+        volume_src_id = self._create_volume()
+        self.volume.create_volume(self.context, volume_src_id)
+        snapshot_id = self._create_snapshot(volume_src_id)
+        self.volume.create_snapshot(self.context, volume_src_id, snapshot_id)
+        volume_dst_id = self._create_volume(0, snapshot_id)
+        self.volume.create_volume(self.context, volume_dst_id, snapshot_id)
+        self.assertEqual(volume_dst_id, db.volume_get(
+                context.get_admin_context(),
+                volume_dst_id).id)
+        self.assertEqual(snapshot_id, db.volume_get(
+                context.get_admin_context(),
+                volume_dst_id).snapshot_id)
+
+        self.volume.delete_volume(self.context, volume_dst_id)
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        self.volume.delete_volume(self.context, volume_src_id)
+
     def test_too_big_volume(self):
         """Ensure failure if a too large of a volume is requested."""
         # FIXME(vish): validation needs to move into the data layer in
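test_create_volume_from_snapshot threads the snapshot id through both the model layer (the new snapshot_id column set by _create_volume) and the manager call, then tears everything down newest-first. Condensed, with _create_snapshot being the helper added in the next hunk:

    src = self._create_volume()
    self.volume.create_volume(self.context, src)
    snap = self._create_snapshot(src)
    self.volume.create_snapshot(self.context, src, snap)
    dst = self._create_volume(0, snap)            # records snapshot_id
    self.volume.create_volume(self.context, dst, snap)
    # ... assertions ...
    self.volume.delete_volume(self.context, dst)  # newest first
    self.volume.delete_snapshot(self.context, snap)
    self.volume.delete_volume(self.context, src)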
@@ -175,6 +195,34 @@ class VolumeTestCase(test.TestCase):
         # This will allow us to test cross-node interactions
         pass
 
+    @staticmethod
+    def _create_snapshot(volume_id, size='0'):
+        """Create a snapshot object."""
+        snap = {}
+        snap['volume_size'] = size
+        snap['user_id'] = 'fake'
+        snap['project_id'] = 'fake'
+        snap['volume_id'] = volume_id
+        snap['status'] = "creating"
+        return db.snapshot_create(context.get_admin_context(), snap)['id']
+
+    def test_create_delete_snapshot(self):
+        """Test snapshot can be created and deleted."""
+        volume_id = self._create_volume()
+        self.volume.create_volume(self.context, volume_id)
+        snapshot_id = self._create_snapshot(volume_id)
+        self.volume.create_snapshot(self.context, volume_id, snapshot_id)
+        self.assertEqual(snapshot_id,
+                         db.snapshot_get(context.get_admin_context(),
+                                         snapshot_id).id)
+
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        self.assertRaises(exception.NotFound,
+                          db.snapshot_get,
+                          self.context,
+                          snapshot_id)
+        self.volume.delete_volume(self.context, volume_id)
+
+
 class DriverTestCase(test.TestCase):
     """Base Test class for Drivers."""
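Aside: the snapshot tests above drive a manager method whose second positional argument is now a snapshot id. A minimal sketch of that dispatch pattern, assuming a hypothetical driver with a create_volume_from_snapshot hook (illustrative only, not nova's actual VolumeManager):

    # Sketch of the create-volume dispatch the tests above exercise.
    # `db` and `driver` are hypothetical collaborators, not nova's real ones.
    class SketchVolumeManager(object):
        def __init__(self, db, driver):
            self.db = db
            self.driver = driver

        def create_volume(self, context, volume_id, snapshot_id=None):
            volume = self.db.volume_get(context, volume_id)
            if snapshot_id is None:
                # Plain volume: allocate blank storage of the requested size.
                self.driver.create_volume(volume)
            else:
                # Volume from snapshot: clone the snapshot's contents instead.
                snapshot = self.db.snapshot_get(context, snapshot_id)
                self.driver.create_volume_from_snapshot(volume, snapshot)
            self.db.volume_update(context, volume_id,
                                  {'status': 'available'})
            return volume_id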
@@ -79,7 +79,7 @@ class XenAPIVolumeTestCase(test.TestCase):
         self.values = {'id': 1,
                        'project_id': 'fake',
                        'user_id': 'fake',
-                       'image_id': 1,
+                       'image_ref': 1,
                        'kernel_id': 2,
                        'ramdisk_id': 3,
                        'instance_type_id': '3',  # m1.large
@@ -193,8 +193,7 @@ class XenAPIVMTestCase(test.TestCase):
         stubs.stubout_is_vdi_pv(self.stubs)
         self.stubs.Set(VMOps, 'reset_network', reset_network)
         stubs.stub_out_vm_methods(self.stubs)
-        glance_stubs.stubout_glance_client(self.stubs,
-                                           glance_stubs.FakeGlance)
+        glance_stubs.stubout_glance_client(self.stubs)
         fake_utils.stub_out_utils_execute(self.stubs)
         self.context = context.RequestContext('fake', 'fake', False)
         self.conn = xenapi_conn.get_connection(False)
@@ -207,7 +206,7 @@ class XenAPIVMTestCase(test.TestCase):
                   'id': id,
                   'project_id': proj,
                   'user_id': user,
-                  'image_id': 1,
+                  'image_ref': 1,
                   'kernel_id': 2,
                   'ramdisk_id': 3,
                   'instance_type_id': '3',  # m1.large
@@ -351,14 +350,14 @@ class XenAPIVMTestCase(test.TestCase):
         self.assertEquals(self.vm['HVM_boot_params'], {})
         self.assertEquals(self.vm['HVM_boot_policy'], '')
 
-    def _test_spawn(self, image_id, kernel_id, ramdisk_id,
+    def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
                     instance_type_id="3", os_type="linux",
                     instance_id=1, check_injection=False, create_record=True):
         stubs.stubout_loopingcall_start(self.stubs)
         values = {'id': instance_id,
                   'project_id': self.project.id,
                   'user_id': self.user.id,
-                  'image_id': image_id,
+                  'image_ref': image_ref,
                   'kernel_id': kernel_id,
                   'ramdisk_id': ramdisk_id,
                   'instance_type_id': instance_type_id,
@@ -398,6 +397,29 @@ class XenAPIVMTestCase(test.TestCase):
                          os_type="linux")
         self.check_vm_params_for_linux()
 
+    def test_spawn_vhd_glance_swapdisk(self):
+        # Change the default host_call_plugin to one that'll return
+        # a swap disk
+        orig_func = stubs.FakeSessionForVMTests.host_call_plugin
+
+        stubs.FakeSessionForVMTests.host_call_plugin = \
+                stubs.FakeSessionForVMTests.host_call_plugin_swap
+
+        try:
+            # We'll steal the above glance linux test
+            self.test_spawn_vhd_glance_linux()
+        finally:
+            # Make sure to put this back
+            stubs.FakeSessionForVMTests.host_call_plugin = orig_func
+
+        # We should have 2 VBDs.
+        self.assertEqual(len(self.vm['VBDs']), 2)
+        # Now test that we have 1.
+        self.tearDown()
+        self.setUp()
+        self.test_spawn_vhd_glance_linux()
+        self.assertEqual(len(self.vm['VBDs']), 1)
+
     def test_spawn_vhd_glance_windows(self):
         FLAGS.xenapi_image_service = 'glance'
         self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
@@ -561,7 +583,7 @@ class XenAPIVMTestCase(test.TestCase):
             'id': instance_id,
             'project_id': self.project.id,
             'user_id': self.user.id,
-            'image_id': 1,
+            'image_ref': 1,
             'kernel_id': 2,
             'ramdisk_id': 3,
             'instance_type_id': '3',  # m1.large
@@ -586,11 +608,29 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
         bob_shared = self.bob.compute_shared(alice_pub)
         self.assertEquals(alice_shared, bob_shared)
 
-    def test_encryption(self):
-        msg = "This is a top-secret message"
-        enc = self.alice.encrypt(msg)
-        dec = self.bob.decrypt(enc)
-        self.assertEquals(dec, msg)
+    def _test_encryption(self, message):
+        enc = self.alice.encrypt(message)
+        self.assertFalse(enc.endswith('\n'))
+        dec = self.bob.decrypt(enc)
+        self.assertEquals(dec, message)
+
+    def test_encrypt_simple_message(self):
+        self._test_encryption('This is a simple message.')
+
+    def test_encrypt_message_with_newlines_at_end(self):
+        self._test_encryption('This message has a newline at the end.\n')
+
+    def test_encrypt_many_newlines_at_end(self):
+        self._test_encryption('Message with lotsa newlines.\n\n\n')
+
+    def test_encrypt_newlines_inside_message(self):
+        self._test_encryption('Message\nwith\ninterior\nnewlines.')
+
+    def test_encrypt_with_leading_newlines(self):
+        self._test_encryption('\n\nMessage with leading newlines.')
+
+    def test_encrypt_really_long_message(self):
+        self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
+
     def tearDown(self):
         super(XenAPIDiffieHellmanTestCase, self).tearDown()
@@ -617,7 +657,7 @@ class XenAPIMigrateInstance(test.TestCase):
         self.values = {'id': 1,
                        'project_id': self.project.id,
                        'user_id': self.user.id,
-                       'image_id': 1,
+                       'image_ref': 1,
                        'kernel_id': None,
                        'ramdisk_id': None,
                        'local_gb': 5,
@@ -628,8 +668,7 @@ class XenAPIMigrateInstance(test.TestCase):
         fake_utils.stub_out_utils_execute(self.stubs)
         stubs.stub_out_migration_methods(self.stubs)
         stubs.stubout_get_this_vm_uuid(self.stubs)
-        glance_stubs.stubout_glance_client(self.stubs,
-                                           glance_stubs.FakeGlance)
+        glance_stubs.stubout_glance_client(self.stubs)
 
     def tearDown(self):
         super(XenAPIMigrateInstance, self).tearDown()
@@ -655,8 +694,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
     """Unit tests for code that detects the ImageType."""
     def setUp(self):
         super(XenAPIDetermineDiskImageTestCase, self).setUp()
-        glance_stubs.stubout_glance_client(self.stubs,
-                                           glance_stubs.FakeGlance)
+        glance_stubs.stubout_glance_client(self.stubs)
 
 
 class FakeInstance(object):
     pass
@@ -673,7 +711,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
     def test_instance_disk(self):
         """If a kernel is specified, the image type is DISK (aka machine)."""
         FLAGS.xenapi_image_service = 'objectstore'
-        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
+        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE
         self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
         self.assert_disk_type(vm_utils.ImageType.DISK)
 
@@ -683,7 +721,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
         DISK_RAW is assumed.
         """
         FLAGS.xenapi_image_service = 'objectstore'
-        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
         self.fake_instance.kernel_id = None
         self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
 
@@ -693,7 +731,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
         this case will be 'raw'.
         """
         FLAGS.xenapi_image_service = 'glance'
-        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
         self.fake_instance.kernel_id = None
         self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
 
@@ -703,7 +741,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
         this case will be 'vhd'.
         """
         FLAGS.xenapi_image_service = 'glance'
-        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
+        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD
         self.fake_instance.kernel_id = None
         self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
 
@@ -61,7 +61,7 @@ def stub_out_db_instance_api(stubs):
         'name': values['name'],
         'id': values['id'],
         'reservation_id': utils.generate_uid('r'),
-        'image_id': values['image_id'],
+        'image_ref': values['image_ref'],
         'kernel_id': values['kernel_id'],
         'ramdisk_id': values['ramdisk_id'],
         'state_description': 'scheduling',
@@ -17,6 +17,7 @@
 """Stubouts, mocks and fixtures for the test suite"""
 
 import eventlet
+import json
 from nova.virt import xenapi_conn
 from nova.virt.xenapi import fake
 from nova.virt.xenapi import volume_utils
@@ -37,7 +38,7 @@ def stubout_instance_snapshot(stubs):
                                  sr_ref=sr_ref, sharable=False)
         vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
         vdi_uuid = vdi_rec['uuid']
-        return vdi_uuid
+        return [dict(vdi_type='os', vdi_uuid=vdi_uuid)]
 
     stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
@@ -132,11 +133,30 @@ class FakeSessionForVMTests(fake.SessionBase):
     def __init__(self, uri):
         super(FakeSessionForVMTests, self).__init__(uri)
 
-    def host_call_plugin(self, _1, _2, _3, _4, _5):
+    def host_call_plugin(self, _1, _2, plugin, method, _5):
         sr_ref = fake.get_all('SR')[0]
         vdi_ref = fake.create_vdi('', False, sr_ref, False)
         vdi_rec = fake.get_record('VDI', vdi_ref)
-        return '<string>%s</string>' % vdi_rec['uuid']
+        if plugin == "glance" and method == "download_vhd":
+            ret_str = json.dumps([dict(vdi_type='os',
+                                       vdi_uuid=vdi_rec['uuid'])])
+        else:
+            ret_str = vdi_rec['uuid']
+        return '<string>%s</string>' % ret_str
+
+    def host_call_plugin_swap(self, _1, _2, plugin, method, _5):
+        sr_ref = fake.get_all('SR')[0]
+        vdi_ref = fake.create_vdi('', False, sr_ref, False)
+        vdi_rec = fake.get_record('VDI', vdi_ref)
+        if plugin == "glance" and method == "download_vhd":
+            swap_vdi_ref = fake.create_vdi('', False, sr_ref, False)
+            swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref)
+            ret_str = json.dumps(
+                [dict(vdi_type='os', vdi_uuid=vdi_rec['uuid']),
+                 dict(vdi_type='swap', vdi_uuid=swap_vdi_rec['uuid'])])
+        else:
+            ret_str = vdi_rec['uuid']
+        return '<string>%s</string>' % ret_str
 
     def VM_start(self, _1, ref, _2, _3):
         vm = fake.get_record('VM', ref)
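Aside: both stubs now wrap a JSON list of VDI descriptors, one dict per disk with a vdi_type ('os' or 'swap') and a vdi_uuid, inside the usual <string>...</string> element. A minimal sketch of unpacking such a response (the helper name and the regex-based unwrapping are assumptions for illustration, not nova's actual parsing code):

    import json
    import re

    def parse_download_vhd_response(xml_str):
        # Strip the <string>...</string> wrapper, then decode the JSON
        # payload, e.g. [{'vdi_type': 'os', 'vdi_uuid': '...'},
        #                {'vdi_type': 'swap', 'vdi_uuid': '...'}].
        payload = re.match(r'<string>(.*)</string>', xml_str,
                           re.DOTALL).group(1)
        vdis = json.loads(payload)
        return dict((vdi['vdi_type'], vdi['vdi_uuid']) for vdi in vdis)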
@@ -299,7 +299,7 @@ def get_my_linklocal(interface):
 
 
 def utcnow():
-    """Overridable version of datetime.datetime.utcnow."""
+    """Overridable version of utils.utcnow."""
     if utcnow.override_time:
         return utcnow.override_time
     return datetime.datetime.utcnow()
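Aside: the utcnow hunk above is the standard overridable-clock pattern: production code calls utcnow(), and tests pin utcnow.override_time to a fixed datetime so time-dependent logic is deterministic. A minimal self-contained sketch of the same pattern (the usage lines are assumptions for illustration):

    import datetime

    def utcnow():
        # Return the frozen test time if one is set, else the real clock.
        if utcnow.override_time:
            return utcnow.override_time
        return datetime.datetime.utcnow()
    utcnow.override_time = None

    # In a test: freeze the clock, assert, then restore it.
    utcnow.override_time = datetime.datetime(2011, 6, 1, 12, 0, 0)
    assert utcnow() == datetime.datetime(2011, 6, 1, 12, 0, 0)
    utcnow.override_time = None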
252 nova/wsgi.py
@@ -85,36 +85,7 @@ class Server(object):
 
 
 class Request(webob.Request):
-
-    def best_match_content_type(self):
-        """Determine the most acceptable content-type.
-
-        Based on the query extension then the Accept header.
-
-        """
-        parts = self.path.rsplit('.', 1)
-
-        if len(parts) > 1:
-            format = parts[1]
-            if format in ['json', 'xml']:
-                return 'application/{0}'.format(parts[1])
-
-        ctypes = ['application/json', 'application/xml']
-        bm = self.accept.best_match(ctypes)
-
-        return bm or 'application/json'
-
-    def get_content_type(self):
-        allowed_types = ("application/xml", "application/json")
-        if not "Content-Type" in self.headers:
-            msg = _("Missing Content-Type")
-            LOG.debug(msg)
-            raise webob.exc.HTTPBadRequest(msg)
-        type = self.content_type
-        if type in allowed_types:
-            return type
-        LOG.debug(_("Wrong Content-Type: %s") % type)
-        raise webob.exc.HTTPBadRequest("Invalid content type")
+    pass
 
 
 class Application(object):
@@ -289,8 +260,8 @@ class Router(object):
 
         Each route in `mapper` must specify a 'controller', which is a
         WSGI app to call.  You'll probably want to specify an 'action' as
-        well and have your controller be a wsgi.Controller, who will route
-        the request to the action method.
+        well and have your controller be an object that can route
+        the request to the action-specific method.
 
         Examples:
           mapper = routes.Mapper()
@@ -338,223 +309,6 @@ class Router(object):
         return app
 
 
-class Controller(object):
-    """WSGI app that dispatches to methods.
-
-    WSGI app that reads routing information supplied by RoutesMiddleware
-    and calls the requested action method upon itself.  All action methods
-    must, in addition to their normal parameters, accept a 'req' argument
-    which is the incoming wsgi.Request.  They raise a webob.exc exception,
-    or return a dict which will be serialized by requested content type.
-
-    """
-
-    @webob.dec.wsgify(RequestClass=Request)
-    def __call__(self, req):
-        """Call the method specified in req.environ by RoutesMiddleware."""
-        arg_dict = req.environ['wsgiorg.routing_args'][1]
-        action = arg_dict['action']
-        method = getattr(self, action)
-        LOG.debug("%s %s" % (req.method, req.url))
-        del arg_dict['controller']
-        del arg_dict['action']
-        if 'format' in arg_dict:
-            del arg_dict['format']
-        arg_dict['req'] = req
-        result = method(**arg_dict)
-
-        if type(result) is dict:
-            content_type = req.best_match_content_type()
-            default_xmlns = self.get_default_xmlns(req)
-            body = self._serialize(result, content_type, default_xmlns)
-
-            response = webob.Response()
-            response.headers['Content-Type'] = content_type
-            response.body = body
-            msg_dict = dict(url=req.url, status=response.status_int)
-            msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
-            LOG.debug(msg)
-            return response
-        else:
-            return result
-
-    def _serialize(self, data, content_type, default_xmlns):
-        """Serialize the given dict to the provided content_type.
-
-        Uses self._serialization_metadata if it exists, which is a dict mapping
-        MIME types to information needed to serialize to that type.
-
-        """
-        _metadata = getattr(type(self), '_serialization_metadata', {})
-
-        serializer = Serializer(_metadata, default_xmlns)
-        try:
-            return serializer.serialize(data, content_type)
-        except exception.InvalidContentType:
-            raise webob.exc.HTTPNotAcceptable()
-
-    def _deserialize(self, data, content_type):
-        """Deserialize the request body to the specified content type.
-
-        Uses self._serialization_metadata if it exists, which is a dict mapping
-        MIME types to information needed to serialize to that type.
-
-        """
-        _metadata = getattr(type(self), '_serialization_metadata', {})
-        serializer = Serializer(_metadata)
-        return serializer.deserialize(data, content_type)
-
-    def get_default_xmlns(self, req):
-        """Provide the XML namespace to use if none is otherwise specified."""
-        return None
-
-
-class Serializer(object):
-    """Serializes and deserializes dictionaries to certain MIME types."""
-
-    def __init__(self, metadata=None, default_xmlns=None):
-        """Create a serializer based on the given WSGI environment.
-
-        'metadata' is an optional dict mapping MIME types to information
-        needed to serialize a dictionary to that type.
-
-        """
-        self.metadata = metadata or {}
-        self.default_xmlns = default_xmlns
-
-    def _get_serialize_handler(self, content_type):
-        handlers = {
-            'application/json': self._to_json,
-            'application/xml': self._to_xml,
-        }
-
-        try:
-            return handlers[content_type]
-        except Exception:
-            raise exception.InvalidContentType(content_type=content_type)
-
-    def serialize(self, data, content_type):
-        """Serialize a dictionary into the specified content type."""
-        return self._get_serialize_handler(content_type)(data)
-
-    def deserialize(self, datastring, content_type):
-        """Deserialize a string to a dictionary.
-
-        The string must be in the format of a supported MIME type.
-
-        """
-        return self.get_deserialize_handler(content_type)(datastring)
-
-    def get_deserialize_handler(self, content_type):
-        handlers = {
-            'application/json': self._from_json,
-            'application/xml': self._from_xml,
-        }
-
-        try:
-            return handlers[content_type]
-        except Exception:
-            raise exception.InvalidContentType(content_type=content_type)
-
-    def _from_json(self, datastring):
-        return utils.loads(datastring)
-
-    def _from_xml(self, datastring):
-        xmldata = self.metadata.get('application/xml', {})
-        plurals = set(xmldata.get('plurals', {}))
-        node = minidom.parseString(datastring).childNodes[0]
-        return {node.nodeName: self._from_xml_node(node, plurals)}
-
-    def _from_xml_node(self, node, listnames):
-        """Convert a minidom node to a simple Python type.
-
-        listnames is a collection of names of XML nodes whose subnodes should
-        be considered list items.
-
-        """
-        if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
-            return node.childNodes[0].nodeValue
-        elif node.nodeName in listnames:
-            return [self._from_xml_node(n, listnames) for n in node.childNodes]
-        else:
-            result = dict()
-            for attr in node.attributes.keys():
-                result[attr] = node.attributes[attr].nodeValue
-            for child in node.childNodes:
-                if child.nodeType != node.TEXT_NODE:
-                    result[child.nodeName] = self._from_xml_node(child,
-                                                                 listnames)
-            return result
-
-    def _to_json(self, data):
-        return utils.dumps(data)
-
-    def _to_xml(self, data):
-        metadata = self.metadata.get('application/xml', {})
-        # We expect data to contain a single key which is the XML root.
-        root_key = data.keys()[0]
-        doc = minidom.Document()
-        node = self._to_xml_node(doc, metadata, root_key, data[root_key])
-
-        xmlns = node.getAttribute('xmlns')
-        if not xmlns and self.default_xmlns:
-            node.setAttribute('xmlns', self.default_xmlns)
-
-        return node.toprettyxml(indent='    ')
-
-    def _to_xml_node(self, doc, metadata, nodename, data):
-        """Recursive method to convert data members to XML nodes."""
-        result = doc.createElement(nodename)
-
-        # Set the xml namespace if one is specified
-        # TODO(justinsb): We could also use prefixes on the keys
-        xmlns = metadata.get('xmlns', None)
-        if xmlns:
-            result.setAttribute('xmlns', xmlns)
-
-        if type(data) is list:
-            collections = metadata.get('list_collections', {})
-            if nodename in collections:
-                metadata = collections[nodename]
-                for item in data:
-                    node = doc.createElement(metadata['item_name'])
-                    node.setAttribute(metadata['item_key'], str(item))
-                    result.appendChild(node)
-                return result
-            singular = metadata.get('plurals', {}).get(nodename, None)
-            if singular is None:
-                if nodename.endswith('s'):
-                    singular = nodename[:-1]
-                else:
-                    singular = 'item'
-            for item in data:
-                node = self._to_xml_node(doc, metadata, singular, item)
-                result.appendChild(node)
-        elif type(data) is dict:
-            collections = metadata.get('dict_collections', {})
-            if nodename in collections:
-                metadata = collections[nodename]
-                for k, v in data.items():
-                    node = doc.createElement(metadata['item_name'])
-                    node.setAttribute(metadata['item_key'], str(k))
-                    text = doc.createTextNode(str(v))
-                    node.appendChild(text)
-                    result.appendChild(node)
-                return result
-            attrs = metadata.get('attributes', {}).get(nodename, {})
-            for k, v in data.items():
-                if k in attrs:
-                    result.setAttribute(k, str(v))
-                else:
-                    node = self._to_xml_node(doc, metadata, k, v)
-                    result.appendChild(node)
-        else:
-            # Type is atom
-            node = doc.createTextNode(str(data))
-            result.appendChild(node)
-        return result
-
-
 def paste_config_file(basename):
     """Find the best location in the system for a paste config file.
 