Merge trunk
Adapting code for removing dangling vdis and kernel/ramdisk files to latest changes
--- a/Authors
+++ b/Authors
@@ -31,6 +31,7 @@ Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
 Hisaki Ohara <hisaki.ohara@intel.com>
 Ilya Alekseyev <ialekseev@griddynamics.com>
 Isaku Yamahata <yamahata@valinux.co.jp>
+Jason Cannavale <jason.cannavale@rackspace.com>
 Jason Koelker <jason@koelker.net>
 Jay Pipes <jaypipes@gmail.com>
 Jesse Andrews <anotherjesse@gmail.com>
@@ -59,6 +60,7 @@ Mark Washenberger <mark.washenberger@rackspace.com>
 Masanori Itoh <itoumsn@nttdata.co.jp>
 Matt Dietz <matt.dietz@rackspace.com>
 Michael Gundlach <michael.gundlach@rackspace.com>
+Mike Scherbakov <mihgen@gmail.com>
 Monsyne Dragon <mdragon@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
 MORITA Kazutaka <morita.kazutaka@gmail.com>

@@ -53,7 +53,6 @@
 CLI interface for nova management.
 """

-import datetime
 import gettext
 import glob
 import json
@@ -78,6 +77,7 @@ from nova import crypto
 from nova import db
 from nova import exception
 from nova import flags
+from nova import image
 from nova import log as logging
 from nova import quota
 from nova import rpc
@@ -96,6 +96,7 @@ flags.DECLARE('network_size', 'nova.network.manager')
 flags.DECLARE('vlan_start', 'nova.network.manager')
 flags.DECLARE('vpn_start', 'nova.network.manager')
 flags.DECLARE('fixed_range_v6', 'nova.network.manager')
+flags.DECLARE('gateway_v6', 'nova.network.manager')
 flags.DECLARE('images_path', 'nova.image.local')
 flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
 flags.DEFINE_flag(flags.HelpFlag())
@@ -536,7 +537,7 @@ class FloatingIpCommands(object):
         for floating_ip in floating_ips:
             instance = None
             if floating_ip['fixed_ip']:
-                instance = floating_ip['fixed_ip']['instance']['ec2_id']
+                instance = floating_ip['fixed_ip']['instance']['hostname']
             print "%s\t%s\t%s" % (floating_ip['host'],
                                   floating_ip['address'],
                                   instance)
@@ -545,13 +546,10 @@ class FloatingIpCommands(object):
 class NetworkCommands(object):
     """Class for managing networks."""

-    def create(self, fixed_range=None, num_networks=None,
-               network_size=None, vlan_start=None,
-               vpn_start=None, fixed_range_v6=None, label='public'):
-        """Creates fixed ips for host by range
-        arguments: fixed_range=FLAG, [num_networks=FLAG],
-                   [network_size=FLAG], [vlan_start=FLAG],
-                   [vpn_start=FLAG], [fixed_range_v6=FLAG]"""
+    def create(self, fixed_range=None, num_networks=None, network_size=None,
+               vlan_start=None, vpn_start=None, fixed_range_v6=None,
+               gateway_v6=None, label='public'):
+        """Creates fixed ips for host by range"""
         if not fixed_range:
             msg = _('Fixed range in the form of 10.0.0.0/8 is '
                     'required to create networks.')
@@ -567,6 +565,8 @@ class NetworkCommands(object):
             vpn_start = FLAGS.vpn_start
         if not fixed_range_v6:
             fixed_range_v6 = FLAGS.fixed_range_v6
+        if not gateway_v6:
+            gateway_v6 = FLAGS.gateway_v6
         net_manager = utils.import_object(FLAGS.network_manager)
         try:
             net_manager.create_networks(context.get_admin_context(),
@@ -576,6 +576,7 @@ class NetworkCommands(object):
                                         vlan_start=int(vlan_start),
                                         vpn_start=int(vpn_start),
                                         cidr_v6=fixed_range_v6,
+                                        gateway_v6=gateway_v6,
                                         label=label)
         except ValueError, e:
             print e
@@ -689,7 +690,7 @@ class ServiceCommands(object):
         """Show a list of all running services. Filter by host & service name.
         args: [host] [service]"""
         ctxt = context.get_admin_context()
-        now = datetime.datetime.utcnow()
+        now = utils.utcnow()
         services = db.service_get_all(ctxt)
         if host:
             services = [s for s in services if s['host'] == host]
@@ -936,7 +937,7 @@ class ImageCommands(object):
     """Methods for dealing with a cloud in an odd state"""

     def __init__(self, *args, **kwargs):
-        self.image_service = utils.import_object(FLAGS.image_service)
+        self.image_service = image.get_default_image_service()

     def _register(self, container_format, disk_format,
                   path, owner, name=None, is_public='T',
@@ -1081,24 +1082,35 @@ class ImageCommands(object):
         self._convert_images(machine_images)


+class ConfigCommands(object):
+    """Class for exposing the flags defined by flag_file(s)."""
+
+    def __init__(self):
+        pass
+
+    def list(self):
+        print FLAGS.FlagsIntoString()
+
+
 CATEGORIES = [
-    ('user', UserCommands),
     ('account', AccountCommands),
-    ('project', ProjectCommands),
-    ('role', RoleCommands),
-    ('shell', ShellCommands),
-    ('vpn', VpnCommands),
-    ('fixed', FixedIpCommands),
-    ('floating', FloatingIpCommands),
-    ('network', NetworkCommands),
-    ('vm', VmCommands),
-    ('service', ServiceCommands),
+    ('config', ConfigCommands),
     ('db', DbCommands),
-    ('volume', VolumeCommands),
+    ('fixed', FixedIpCommands),
+    ('flavor', InstanceTypeCommands),
+    ('floating', FloatingIpCommands),
     ('instance_type', InstanceTypeCommands),
     ('image', ImageCommands),
-    ('flavor', InstanceTypeCommands),
-    ('version', VersionCommands)]
+    ('network', NetworkCommands),
+    ('project', ProjectCommands),
+    ('role', RoleCommands),
+    ('service', ServiceCommands),
+    ('shell', ShellCommands),
+    ('user', UserCommands),
+    ('version', VersionCommands),
+    ('vm', VmCommands),
+    ('volume', VolumeCommands),
+    ('vpn', VpnCommands)]


 def lazy_match(name, key_value_tuples):
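
The CATEGORIES table above is consumed by a prefix matcher (lazy_match), so a command like "nova-manage net ..." resolves to NetworkCommands whenever the prefix is unambiguous. A minimal, hypothetical sketch of that dispatch pattern (simplified names, not nova's exact implementation):

    def lazy_match(name, key_value_tuples):
        """Return the single entry whose key starts with name, else bail out."""
        matches = [(k, v) for k, v in key_value_tuples if k.startswith(name)]
        if not matches:
            raise SystemExit('%r matches no category' % name)
        if len(matches) > 1:
            raise SystemExit('%r is ambiguous: %r'
                             % (name, [k for k, _ in matches]))
        return matches[0]

    CATEGORIES = [('network', object), ('config', object), ('vm', object)]
    print(lazy_match('net', CATEGORIES)[0])   # -> network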

@@ -24,6 +24,7 @@ other backends by creating another class that exposes the same
 public methods.
 """

+import functools
 import sys

 from nova import exception
@@ -68,6 +69,12 @@ flags.DEFINE_string('ldap_developer',
 LOG = logging.getLogger("nova.ldapdriver")


+if FLAGS.memcached_servers:
+    import memcache
+else:
+    from nova import fakememcache as memcache
+
+
 # TODO(vish): make an abstract base class with the same public methods
 #             to define a set interface for AuthDrivers. I'm delaying
 #             creating this now because I'm expecting an auth refactor
@@ -85,6 +92,7 @@ def _clean(attr):

 def sanitize(fn):
     """Decorator to sanitize all args"""
+    @functools.wraps(fn)
     def _wrapped(self, *args, **kwargs):
         args = [_clean(x) for x in args]
         kwargs = dict((k, _clean(v)) for (k, v) in kwargs)
@@ -103,29 +111,56 @@ class LdapDriver(object):
     isadmin_attribute = 'isNovaAdmin'
     project_attribute = 'owner'
     project_objectclass = 'groupOfNames'
+    conn = None
+    mc = None

     def __init__(self):
         """Imports the LDAP module"""
         self.ldap = __import__('ldap')
-        self.conn = None
         if FLAGS.ldap_schema_version == 1:
             LdapDriver.project_pattern = '(objectclass=novaProject)'
             LdapDriver.isadmin_attribute = 'isAdmin'
             LdapDriver.project_attribute = 'projectManager'
             LdapDriver.project_objectclass = 'novaProject'
+        self.__cache = None
+        if LdapDriver.conn is None:
+            LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
+            LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
+                                          FLAGS.ldap_password)
+        if LdapDriver.mc is None:
+            LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)

     def __enter__(self):
-        """Creates the connection to LDAP"""
-        self.conn = self.ldap.initialize(FLAGS.ldap_url)
-        self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password)
+        # TODO(yorik-sar): Should be per-request cache, not per-driver-request
+        self.__cache = {}
         return self

     def __exit__(self, exc_type, exc_value, traceback):
-        """Destroys the connection to LDAP"""
-        self.conn.unbind_s()
+        self.__cache = None
         return False

+    def __local_cache(key_fmt):  # pylint: disable=E0213
+        """Wrap function to cache it's result in self.__cache.
+        Works only with functions with one fixed argument.
+        """
+        def do_wrap(fn):
+            @functools.wraps(fn)
+            def inner(self, arg, **kwargs):
+                cache_key = key_fmt % (arg,)
+                try:
+                    res = self.__cache[cache_key]
+                    LOG.debug('Local cache hit for %s by key %s' %
+                              (fn.__name__, cache_key))
+                    return res
+                except KeyError:
+                    res = fn(self, arg, **kwargs)
+                    self.__cache[cache_key] = res
+                    return res
+            return inner
+        return do_wrap
+
     @sanitize
+    @__local_cache('uid_user-%s')
     def get_user(self, uid):
         """Retrieve user by id"""
         attr = self.__get_ldap_user(uid)
@@ -134,15 +169,31 @@ class LdapDriver(object):
     @sanitize
     def get_user_from_access_key(self, access):
         """Retrieve user by access key"""
+        cache_key = 'uak_dn_%s' % (access,)
+        user_dn = self.mc.get(cache_key)
+        if user_dn:
+            user = self.__to_user(
+                self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE))
+            if user:
+                if user['access'] == access:
+                    return user
+                else:
+                    self.mc.set(cache_key, None)
         query = '(accessKey=%s)' % access
         dn = FLAGS.ldap_user_subtree
-        return self.__to_user(self.__find_object(dn, query))
+        user_obj = self.__find_object(dn, query)
+        user = self.__to_user(user_obj)
+        if user:
+            self.mc.set(cache_key, user_obj['dn'][0])
+        return user

     @sanitize
+    @__local_cache('pid_project-%s')
     def get_project(self, pid):
         """Retrieve project by id"""
-        dn = self.__project_to_dn(pid)
-        attr = self.__find_object(dn, LdapDriver.project_pattern)
+        dn = self.__project_to_dn(pid, search=False)
+        attr = self.__find_object(dn, LdapDriver.project_pattern,
+                                  scope=self.ldap.SCOPE_BASE)
         return self.__to_project(attr)

     @sanitize
@@ -395,6 +446,7 @@
         """Check if project exists"""
         return self.get_project(project_id) is not None

+    @__local_cache('uid_attrs-%s')
     def __get_ldap_user(self, uid):
         """Retrieve LDAP user entry by id"""
         dn = FLAGS.ldap_user_subtree
@@ -426,12 +478,20 @@
         if scope is None:
             # One of the flags is 0!
             scope = self.ldap.SCOPE_SUBTREE
+        if query is None:
+            query = "(objectClass=*)"
         try:
             res = self.conn.search_s(dn, scope, query)
         except self.ldap.NO_SUCH_OBJECT:
             return []
         # Just return the attributes
-        return [attributes for dn, attributes in res]
+        # FIXME(yorik-sar): Whole driver should be refactored to
+        #                   prevent this hack
+        res1 = []
+        for dn, attrs in res:
+            attrs['dn'] = [dn]
+            res1.append(attrs)
+        return res1

     def __find_role_dns(self, tree):
         """Find dns of role objects in given tree"""
@@ -564,6 +624,7 @@
                 'description': attr.get('description', [None])[0],
                 'member_ids': [self.__dn_to_uid(x) for x in member_dns]}

+    @__local_cache('uid_dn-%s')
     def __uid_to_dn(self, uid, search=True):
         """Convert uid to dn"""
         # By default return a generated DN
@@ -576,6 +637,7 @@
             userdn = user[0]
         return userdn

+    @__local_cache('pid_dn-%s')
     def __project_to_dn(self, pid, search=True):
         """Convert pid to dn"""
         # By default return a generated DN
@@ -603,16 +665,18 @@
         else:
             return None

+    @__local_cache('dn_uid-%s')
     def __dn_to_uid(self, dn):
         """Convert user dn to uid"""
         query = '(objectclass=novaUser)'
-        user = self.__find_object(dn, query)
+        user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE)
         return user[FLAGS.ldap_user_id_attribute][0]


 class FakeLdapDriver(LdapDriver):
     """Fake Ldap Auth driver"""

-    def __init__(self):  # pylint: disable=W0231
-        __import__('nova.auth.fakeldap')
-        self.ldap = sys.modules['nova.auth.fakeldap']
+    def __init__(self):
+        import nova.auth.fakeldap
+        sys.modules['ldap'] = nova.auth.fakeldap
+        super(FakeLdapDriver, self).__init__()
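
The __local_cache decorator added above amounts to per-request memoization keyed on a single argument. A standalone sketch of the same pattern (simplified; the real driver also formats keys per call site and logs cache hits):

    import functools

    def local_cache(key_fmt):
        def do_wrap(fn):
            @functools.wraps(fn)
            def inner(self, arg):
                key = key_fmt % (arg,)
                if key not in self._cache:
                    self._cache[key] = fn(self, arg)
                return self._cache[key]
            return inner
        return do_wrap

    class Driver(object):
        def __init__(self):
            self._cache = {}          # reset at the start of each request

        @local_cache('uid_user-%s')
        def get_user(self, uid):
            print('expensive LDAP lookup for %s' % uid)
            return {'id': uid}

    d = Driver()
    d.get_user('alice')               # performs the lookup
    d.get_user('alice')               # served from the per-request cache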

@@ -73,6 +73,12 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
 LOG = logging.getLogger('nova.auth.manager')


+if FLAGS.memcached_servers:
+    import memcache
+else:
+    from nova import fakememcache as memcache
+
+
 class AuthBase(object):
     """Base class for objects relating to auth

@@ -206,6 +212,7 @@ class AuthManager(object):
     """

     _instance = None
+    mc = None

     def __new__(cls, *args, **kwargs):
         """Returns the AuthManager singleton"""
@@ -222,13 +229,8 @@ class AuthManager(object):
         self.network_manager = utils.import_object(FLAGS.network_manager)
         if driver or not getattr(self, 'driver', None):
             self.driver = utils.import_class(driver or FLAGS.auth_driver)
-        if FLAGS.memcached_servers:
-            import memcache
-        else:
-            from nova import fakememcache as memcache
-        self.mc = memcache.Client(FLAGS.memcached_servers,
-                                  debug=0)
+        if AuthManager.mc is None:
+            AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0)

     def authenticate(self, access, signature, params, verb='GET',
                      server_string='127.0.0.1:8773', path='/',
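
Both ldapdriver.py and manager.py now pick a cache backend at import time: real memcached when FLAGS.memcached_servers is set, otherwise nova's in-process fake exposing the same get/set interface. A hedged sketch of that fallback (FakeMemcache here is a stand-in for nova.fakememcache, not its actual code):

    memcached_servers = None              # imagine FLAGS.memcached_servers

    class FakeMemcache(object):
        """In-process stand-in for the subset of memcache.Client used."""
        def __init__(self):
            self._store = {}
        def get(self, key):
            return self._store.get(key)
        def set(self, key, value, time=0):
            self._store[key] = value
            return True

    if memcached_servers:
        import memcache                   # python-memcached
        client = memcache.Client(memcached_servers, debug=0)
    else:
        client = FakeMemcache()

    client.set('uak_dn_access123', 'uid=alice,ou=Users')
    print(client.get('uak_dn_access123'))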

@@ -1,4 +1,6 @@
-NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE}))
+NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) ||
+    NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}")
+NOVA_KEY_DIR=${NOVARC%%/*}
 export EC2_ACCESS_KEY="%(access)s:%(project)s"
 export EC2_SECRET_KEY="%(secret)s"
 export EC2_URL="%(ec2)s"
@@ -12,4 +14,5 @@ alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_P
 alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
 export NOVA_API_KEY="%(access)s"
 export NOVA_USERNAME="%(user)s"
+export NOVA_PROJECT_ID="%(project)s"
 export NOVA_URL="%(os)s"

@@ -270,8 +270,10 @@ DEFINE_list('region_list',
 DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
 DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
 DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
-DEFINE_integer('glance_port', 9292, 'glance port')
-DEFINE_string('glance_host', '$my_ip', 'glance host')
+# NOTE(sirp): my_ip interpolation doesn't work within nested structures
+DEFINE_list('glance_api_servers',
+            ['127.0.0.1:9292'],
+            'list of glance api servers available to nova (host:port)')
 DEFINE_integer('s3_port', 3333, 's3 port')
 DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
 DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)')
@@ -296,6 +298,7 @@ DEFINE_bool('fake_network', False,
             'should we use fake network devices and addresses')
 DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
 DEFINE_integer('rabbit_port', 5672, 'rabbit port')
+DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL')
 DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
 DEFINE_string('rabbit_password', 'guest', 'rabbit password')
 DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
@@ -380,3 +383,5 @@ DEFINE_string('zone_name', 'nova', 'name of this zone')
 DEFINE_list('zone_capabilities',
             ['hypervisor=xenserver;kvm', 'os=linux;windows'],
             'Key/Multi-value list representng capabilities of this zone')
+DEFINE_string('build_plan_encryption_key', None,
+              '128bit (hex) encryption key for scheduler build plans.')

--- a/nova/log.py
+++ b/nova/log.py
@@ -35,6 +35,7 @@ import os
 import sys
 import traceback

+import nova
 from nova import flags
 from nova import version

@@ -63,6 +64,7 @@ flags.DEFINE_list('default_log_levels',
                    'eventlet.wsgi.server=WARN'],
                   'list of logger=LEVEL pairs')
 flags.DEFINE_bool('use_syslog', False, 'output to syslog')
+flags.DEFINE_bool('publish_errors', False, 'publish error events')
 flags.DEFINE_string('logfile', None, 'output to named file')


@@ -258,12 +260,20 @@ class NovaRootLogger(NovaLogger):
         else:
             self.removeHandler(self.filelog)
         self.addHandler(self.streamlog)
+        if FLAGS.publish_errors:
+            self.addHandler(PublishErrorsHandler(ERROR))
         if FLAGS.verbose:
             self.setLevel(DEBUG)
         else:
             self.setLevel(INFO)


+class PublishErrorsHandler(logging.Handler):
+    def emit(self, record):
+        nova.notifier.api.notify('nova.error.publisher', 'error_notification',
+                                 nova.notifier.api.ERROR, dict(error=record.msg))
+
+
 def handle_exception(type, value, tb):
     extra = {}
     if FLAGS.verbose:

@@ -11,9 +11,8 @@
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
-# under the License.import datetime
+# under the License.

-import datetime
 import uuid

 from nova import flags
@@ -64,7 +63,7 @@ def notify(publisher_id, event_type, priority, payload):

     {'message_id': str(uuid.uuid4()),
      'publisher_id': 'compute.host1',
-     'timestamp': datetime.datetime.utcnow(),
+     'timestamp': utils.utcnow(),
      'priority': 'WARN',
      'event_type': 'compute.create_instance',
      'payload': {'instance_id': 12, ... }}
@@ -79,5 +78,5 @@ def notify(publisher_id, event_type, priority, payload):
                event_type=event_type,
                priority=priority,
                payload=payload,
-               timestamp=str(datetime.datetime.utcnow()))
+               timestamp=str(utils.utcnow()))
     driver.notify(msg)

@@ -65,6 +65,7 @@ class Connection(carrot_connection.BrokerConnection):
         if new or not hasattr(cls, '_instance'):
             params = dict(hostname=FLAGS.rabbit_host,
                           port=FLAGS.rabbit_port,
+                          ssl=FLAGS.rabbit_use_ssl,
                           userid=FLAGS.rabbit_userid,
                           password=FLAGS.rabbit_password,
                           virtual_host=FLAGS.rabbit_virtual_host)

@@ -84,7 +84,7 @@ def get_zone_capabilities(context):
 def select(context, specs=None):
     """Returns a list of hosts."""
     return _call_scheduler('select', context=context,
-                           params={"specs": specs})
+                           params={"request_spec": specs})


 def update_service_capabilities(context, service_name, host, capabilities):

@@ -41,6 +41,7 @@ import json
 from nova import exception
 from nova import flags
 from nova import log as logging
+from nova.scheduler import zone_aware_scheduler
 from nova import utils
 from nova.scheduler import zone_aware_scheduler

@@ -226,7 +227,7 @@ class JsonFilter(HostFilter):
         required_disk = instance_type['local_gb']
         query = ['and',
                  ['>=', '$compute.host_memory_free', required_ram],
-                 ['>=', '$compute.disk_available', required_disk]
+                 ['>=', '$compute.disk_available', required_disk],
                 ]
         return (self._full_name(), json.dumps(query))

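
The JsonFilter change above only adds a trailing comma, but the query it builds is worth seeing end to end: a prefix-notation boolean expression, serialized with json and later evaluated against each host's capability dictionary. Illustrative values only:

    import json

    required_ram = 1024    # MB, assumed for the example
    required_disk = 200    # GB, assumed for the example

    query = ['and',
             ['>=', '$compute.host_memory_free', required_ram],
             ['>=', '$compute.disk_available', required_disk],
            ]
    print(json.dumps(query))
    # ["and", [">=", "$compute.host_memory_free", 1024],
    #         [">=", "$compute.disk_available", 200]]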

--- /dev/null
+++ b/nova/scheduler/least_cost.py
@@ -0,0 +1,156 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Least Cost Scheduler is a mechanism for choosing which host machines to
+provision a set of resources to. The input of the least-cost-scheduler is a
+set of objective-functions, called the 'cost-functions', a weight for each
+cost-function, and a list of candidate hosts (gathered via FilterHosts).
+
+The cost-function and weights are tabulated, and the host with the least cost
+is then selected for provisioning.
+"""
+
+import collections
+
+from nova import flags
+from nova import log as logging
+from nova.scheduler import zone_aware_scheduler
+from nova import utils
+
+LOG = logging.getLogger('nova.scheduler.least_cost')
+
+FLAGS = flags.FLAGS
+flags.DEFINE_list('least_cost_scheduler_cost_functions',
+                  ['nova.scheduler.least_cost.noop_cost_fn'],
+                  'Which cost functions the LeastCostScheduler should use.')
+
+
+# TODO(sirp): Once we have enough of these rules, we can break them out into a
+# cost_functions.py file (perhaps in a least_cost_scheduler directory)
+flags.DEFINE_integer('noop_cost_fn_weight', 1,
+                     'How much weight to give the noop cost function')
+
+
+def noop_cost_fn(host):
+    """Return a pre-weight cost of 1 for each host"""
+    return 1
+
+
+flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
+                     'How much weight to give the fill-first cost function')
+
+
+def fill_first_cost_fn(host):
+    """Prefer hosts that have less ram available, filter_hosts will exclude
+    hosts that don't have enough ram"""
+    hostname, caps = host
+    free_mem = caps['compute']['host_memory_free']
+    return free_mem
+
+
+class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+    def get_cost_fns(self):
+        """Returns a list of tuples containing weights and cost functions to
+        use for weighing hosts
+        """
+        cost_fns = []
+        for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
+
+            try:
+                # NOTE(sirp): import_class is somewhat misnamed since it can
+                # any callable from a module
+                cost_fn = utils.import_class(cost_fn_str)
+            except exception.ClassNotFound:
+                raise exception.SchedulerCostFunctionNotFound(
+                    cost_fn_str=cost_fn_str)
+
+            try:
+                weight = getattr(FLAGS, "%s_weight" % cost_fn.__name__)
+            except AttributeError:
+                raise exception.SchedulerWeightFlagNotFound(
+                    flag_name=flag_name)
+
+            cost_fns.append((weight, cost_fn))
+
+        return cost_fns
+
+    def weigh_hosts(self, num, request_spec, hosts):
+        """Returns a list of dictionaries of form:
+        [ {weight: weight, hostname: hostname} ]"""
+
+        # FIXME(sirp): weigh_hosts should handle more than just instances
+        hostnames = [hostname for hostname, caps in hosts]
+
+        cost_fns = self.get_cost_fns()
+        costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
+
+        weighted = []
+        weight_log = []
+        for cost, hostname in zip(costs, hostnames):
+            weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
+            weight_dict = dict(weight=cost, hostname=hostname)
+            weighted.append(weight_dict)
+
+        LOG.debug(_("Weighted Costs => %s") % weight_log)
+        return weighted
+
+
+def normalize_list(L):
+    """Normalize an array of numbers such that each element satisfies:
+    0 <= e <= 1"""
+    if not L:
+        return L
+    max_ = max(L)
+    if max_ > 0:
+        return [(float(e) / max_) for e in L]
+    return L
+
+
+def weighted_sum(domain, weighted_fns, normalize=True):
+    """Use the weighted-sum method to compute a score for an array of objects.
+    Normalize the results of the objective-functions so that the weights are
+    meaningful regardless of objective-function's range.
+
+    domain - input to be scored
+    weighted_fns - list of weights and functions like:
+        [(weight, objective-functions)]
+
+    Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts)
+    """
+    # Table of form:
+    # { domain1: [score1, score2, ..., scoreM]
+    #   ...
+    #   domainN: [score1, score2, ..., scoreM] }
+    score_table = collections.defaultdict(list)
+    for weight, fn in weighted_fns:
+        scores = [fn(elem) for elem in domain]
+
+        if normalize:
+            norm_scores = normalize_list(scores)
+        else:
+            norm_scores = scores
+
+        for idx, score in enumerate(norm_scores):
+            weighted_score = score * weight
+            score_table[idx].append(weighted_score)
+
+    # Sum rows in table to compute score for each element in domain
+    domain_scores = []
+    for idx in sorted(score_table):
+        elem_score = sum(score_table[idx])
+        elem = domain[idx]
+        domain_scores.append(elem_score)
+
+    return domain_scores
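
A worked example of the scoring pipeline defined in least_cost.py, with normalize_list and weighted_sum inlined so it runs standalone (host data and weights are made up for illustration):

    import collections

    def normalize_list(L):
        max_ = max(L) if L else 0
        return [float(e) / max_ for e in L] if max_ > 0 else L

    def weighted_sum(domain, weighted_fns):
        score_table = collections.defaultdict(list)
        for weight, fn in weighted_fns:
            scores = normalize_list([fn(elem) for elem in domain])
            for idx, score in enumerate(scores):
                score_table[idx].append(score * weight)
        return [sum(score_table[idx]) for idx in sorted(score_table)]

    hosts = [('host01', 10), ('host02', 40), ('host03', 20)]  # (name, free MB)
    noop_cost_fn = lambda host: 1
    fill_first_cost_fn = lambda host: host[1]   # less free RAM = cheaper
    costs = weighted_sum(hosts, [(1, noop_cost_fn), (1, fill_first_cost_fn)])
    print(sorted(zip(costs, [name for name, _ in hosts])))
    # [(1.25, 'host01'), (1.5, 'host03'), (2.0, 'host02')] -> host01 fills first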
@@ -21,10 +21,9 @@
 Simple Scheduler
 """

-import datetime

 from nova import db
 from nova import flags
+from nova import utils
 from nova.scheduler import driver
 from nova.scheduler import chance

@@ -54,7 +53,7 @@ class SimpleScheduler(chance.ChanceScheduler):

             # TODO(vish): this probably belongs in the manager, if we
             #             can generalize this somehow
-            now = datetime.datetime.utcnow()
+            now = utils.utcnow()
             db.instance_update(context, instance_id, {'host': host,
                                                       'scheduled_at': now})
             return host
@@ -66,7 +65,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             if self.service_is_up(service):
                 # NOTE(vish): this probably belongs in the manager, if we
                 #             can generalize this somehow
-                now = datetime.datetime.utcnow()
+                now = utils.utcnow()
                 db.instance_update(context,
                                    instance_id,
                                    {'host': service['host'],
@@ -90,7 +89,7 @@ class SimpleScheduler(chance.ChanceScheduler):

             # TODO(vish): this probably belongs in the manager, if we
             #             can generalize this somehow
-            now = datetime.datetime.utcnow()
+            now = utils.utcnow()
             db.volume_update(context, volume_id, {'host': host,
                                                   'scheduled_at': now})
             return host
@@ -103,7 +102,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             if self.service_is_up(service):
                 # NOTE(vish): this probably belongs in the manager, if we
                 #             can generalize this somehow
-                now = datetime.datetime.utcnow()
+                now = utils.utcnow()
                 db.volume_update(context,
                                  volume_id,
                                  {'host': service['host'],

@@ -21,16 +21,30 @@ across zones. There are two expansion points to this class for:
 """

 import operator
+import json

+import M2Crypto
+import novaclient
+
+from nova import crypto
 from nova import db
+from nova import exception
+from nova import flags
 from nova import log as logging
 from nova import rpc

 from nova.scheduler import api
 from nova.scheduler import driver

+FLAGS = flags.FLAGS
 LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler')


+class InvalidBlob(exception.NovaException):
+    message = _("Ill-formed or incorrectly routed 'blob' data sent "
+                "to instance create request.")
+
+
 class ZoneAwareScheduler(driver.Scheduler):
     """Base class for creating Zone Aware Schedulers."""

@@ -38,8 +52,114 @@ class ZoneAwareScheduler(driver.Scheduler):
         """Call novaclient zone method. Broken out for testing."""
         return api.call_zone_method(context, method, specs=specs)

+    def _provision_resource_locally(self, context, item, instance_id, kwargs):
+        """Create the requested resource in this Zone."""
+        host = item['hostname']
+        kwargs['instance_id'] = instance_id
+        rpc.cast(context,
+                 db.queue_get_for(context, "compute", host),
+                 {"method": "run_instance",
+                  "args": kwargs})
+        LOG.debug(_("Provisioning locally via compute node %(host)s")
+                  % locals())
+
+    def _decrypt_blob(self, blob):
+        """Returns the decrypted blob or None if invalid. Broken out
+        for testing."""
+        decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
+        try:
+            json_entry = decryptor(blob)
+            return json.dumps(entry)
+        except M2Crypto.EVP.EVPError:
+            pass
+        return None
+
+    def _ask_child_zone_to_create_instance(self, context, zone_info,
+                                           request_spec, kwargs):
+        """Once we have determined that the request should go to one
+        of our children, we need to fabricate a new POST /servers/
+        call with the same parameters that were passed into us.
+
+        Note that we have to reverse engineer from our args to get back the
+        image, flavor, ipgroup, etc. since the original call could have
+        come in from EC2 (which doesn't use these things)."""
+
+        instance_type = request_spec['instance_type']
+        instance_properties = request_spec['instance_properties']
+
+        name = instance_properties['display_name']
+        image_id = instance_properties['image_id']
+        meta = instance_properties['metadata']
+        flavor_id = instance_type['flavorid']
+
+        files = kwargs['injected_files']
+        ipgroup = None  # Not supported in OS API ... yet
+
+        child_zone = zone_info['child_zone']
+        child_blob = zone_info['child_blob']
+        zone = db.zone_get(context, child_zone)
+        url = zone.api_url
+        LOG.debug(_("Forwarding instance create call to child zone %(url)s")
+                  % locals())
+        nova = None
+        try:
+            nova = novaclient.OpenStack(zone.username, zone.password, url)
+            nova.authenticate()
+        except novaclient.exceptions.BadRequest, e:
+            raise exception.NotAuthorized(_("Bad credentials attempting "
+                                            "to talk to zone at %(url)s.") % locals())
+
+        nova.servers.create(name, image_id, flavor_id, ipgroup, meta, files,
+                            child_blob)
+
+    def _provision_resource_from_blob(self, context, item, instance_id,
+                                      request_spec, kwargs):
+        """Create the requested resource locally or in a child zone
+        based on what is stored in the zone blob info.
+
+        Attempt to decrypt the blob to see if this request is:
+        1. valid, and
+        2. intended for this zone or a child zone.
+
+        Note: If we have "blob" that means the request was passed
+        into us from a parent zone. If we have "child_blob" that
+        means we gathered the info from one of our children.
+        It's possible that, when we decrypt the 'blob' field, it
+        contains "child_blob" data. In which case we forward the
+        request."""
+
+        host_info = None
+        if "blob" in item:
+            # Request was passed in from above. Is it for us?
+            host_info = self._decrypt_blob(item['blob'])
+        elif "child_blob" in item:
+            # Our immediate child zone provided this info ...
+            host_info = item
+
+        if not host_info:
+            raise InvalidBlob()
+
+        # Valid data ... is it for us?
+        if 'child_zone' in host_info and 'child_blob' in host_info:
+            self._ask_child_zone_to_create_instance(context, host_info,
+                                                    request_spec, kwargs)
+        else:
+            self._provision_resource_locally(context, host_info,
+                                             instance_id, kwargs)
+
+    def _provision_resource(self, context, item, instance_id, request_spec,
+                            kwargs):
+        """Create the requested resource in this Zone or a child zone."""
+        if "hostname" in item:
+            self._provision_resource_locally(context, item, instance_id,
+                                             kwargs)
+            return
+
+        self._provision_resource_from_blob(context, item, instance_id,
+                                           request_spec, kwargs)
+
     def schedule_run_instance(self, context, instance_id, request_spec,
                               *args, **kwargs):
         """This method is called from nova.compute.api to provision
         an instance. However we need to look at the parameters being
         passed in to see if this is a request to:
@@ -51,8 +171,10 @@ class ZoneAwareScheduler(driver.Scheduler):

         # TODO(sandy): We'll have to look for richer specs at some point.

-        if 'blob' in request_spec:
-            self.provision_resource(context, request_spec, instance_id, kwargs)
+        blob = request_spec.get('blob')
+        if blob:
+            self._provision_resource(context, request_spec, instance_id,
+                                     request_spec, kwargs)
             return None

         # Create build plan and provision ...
@@ -61,28 +183,13 @@ class ZoneAwareScheduler(driver.Scheduler):
             raise driver.NoValidHost(_('No hosts were available'))

         for item in build_plan:
-            self.provision_resource(context, item, instance_id, kwargs)
+            self._provision_resource(context, item, instance_id, request_spec,
+                                     kwargs)

         # Returning None short-circuits the routing to Compute (since
         # we've already done it here)
         return None

-    def provision_resource(self, context, item, instance_id, kwargs):
-        """Create the requested resource in this Zone or a child zone."""
-        if "hostname" in item:
-            host = item['hostname']
-            kwargs['instance_id'] = instance_id
-            rpc.cast(context,
-                     db.queue_get_for(context, "compute", host),
-                     {"method": "run_instance",
-                      "args": kwargs})
-            LOG.debug(_("Casted to compute %(host)s for run_instance")
-                      % locals())
-        else:
-            # TODO(sandy) Provision in child zone ...
-            LOG.warning(_("Provision to Child Zone not supported (yet)"))
-            pass
-
     def select(self, context, request_spec, *args, **kwargs):
         """Select returns a list of weights and zone/host information
         corresponding to the best hosts to service the request. Any
@@ -116,22 +223,25 @@ class ZoneAwareScheduler(driver.Scheduler):
         # Filter local hosts based on requirements ...
         host_list = self.filter_hosts(num_instances, request_spec)

+        # TODO(sirp): weigh_hosts should also be a function of 'topic' or
+        # resources, so that we can apply different objective functions to it
+
         # then weigh the selected hosts.
         # weighted = [{weight=weight, name=hostname}, ...]
         weighted = self.weigh_hosts(num_instances, request_spec, host_list)

         # Next, tack on the best weights from the child zones ...
+        json_spec = json.dumps(request_spec)
         child_results = self._call_zone_method(context, "select",
-                                               specs=request_spec)
+                                               specs=json_spec)
         for child_zone, result in child_results:
             for weighting in result:
                 # Remember the child_zone so we can get back to
                 # it later if needed. This implicitly builds a zone
                 # path structure.
-                host_dict = {
-                    "weight": weighting["weight"],
-                    "child_zone": child_zone,
-                    "child_blob": weighting["blob"]}
+                host_dict = {"weight": weighting["weight"],
+                             "child_zone": child_zone,
+                             "child_blob": weighting["blob"]}
                 weighted.append(host_dict)

         weighted.sort(key=operator.itemgetter('weight'))
@@ -139,12 +249,16 @@ class ZoneAwareScheduler(driver.Scheduler):

     def filter_hosts(self, num, request_spec):
         """Derived classes must override this method and return
         a list of hosts in [(hostname, capability_dict)] format.
         """
-        raise NotImplemented()
+        # NOTE(sirp): The default logic is the equivalent to AllHostsFilter
+        service_states = self.zone_manager.service_states
+        return [(host, services)
+                for host, services in service_states.iteritems()]

     def weigh_hosts(self, num, request_spec, hosts):
-        """Derived classes must override this method and return
-        a lists of hosts in [{weight, hostname}] format.
+        """Derived classes may override this to provide more sophisticated
+        scheduling objectives
         """
-        raise NotImplemented()
+        # NOTE(sirp): The default logic is the same as the NoopCostFunction
+        return [dict(weight=1, hostname=host) for host, caps in hosts]
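
The end of select() above merges local weighings with child-zone weighings into one list of dicts and sorts ascending, so the cheapest candidate leads the build plan. A tiny illustration of that final step with made-up entries:

    import operator

    weighted = [{'weight': 2.0, 'hostname': 'host02'},
                {'weight': 1.25, 'child_zone': 7, 'child_blob': '...'},
                {'weight': 1.5, 'hostname': 'host03'}]
    weighted.sort(key=operator.itemgetter('weight'))
    print([w['weight'] for w in weighted])   # [1.25, 1.5, 2.0]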

@@ -17,16 +17,17 @@
 ZoneManager oversees all communications with child Zones.
 """

+import datetime
 import novaclient
 import thread
 import traceback

-from datetime import datetime
 from eventlet import greenpool

 from nova import db
 from nova import flags
 from nova import log as logging
+from nova import utils

 FLAGS = flags.FLAGS
 flags.DEFINE_integer('zone_db_check_interval', 60,
@@ -42,7 +43,7 @@ class ZoneState(object):
         self.name = None
         self.capabilities = None
         self.attempt = 0
-        self.last_seen = datetime.min
+        self.last_seen = datetime.datetime.min
         self.last_exception = None
         self.last_exception_time = None

@@ -56,7 +57,7 @@ class ZoneState(object):
     def update_metadata(self, zone_metadata):
         """Update zone metadata after successful communications with
           child zone."""
-        self.last_seen = datetime.now()
+        self.last_seen = utils.utcnow()
         self.attempt = 0
         self.name = zone_metadata.get("name", "n/a")
         self.capabilities = ", ".join(["%s=%s" % (k, v)
@@ -72,7 +73,7 @@ class ZoneState(object):
         """Something went wrong. Check to see if zone should be
           marked as offline."""
         self.last_exception = exception
-        self.last_exception_time = datetime.now()
+        self.last_exception_time = utils.utcnow()
         api_url = self.api_url
         logging.warning(_("'%(exception)s' error talking to "
                           "zone %(api_url)s") % locals())
@@ -104,7 +105,7 @@ def _poll_zone(zone):
 class ZoneManager(object):
     """Keeps the zone states updated."""
     def __init__(self):
-        self.last_zone_db_check = datetime.min
+        self.last_zone_db_check = datetime.datetime.min
         self.zone_states = {}  # { <zone_id> : ZoneState }
         self.service_states = {}  # { <host> : { <service> : { cap k : v }}}
         self.green_pool = greenpool.GreenPool()
@@ -158,10 +159,10 @@ class ZoneManager(object):

     def ping(self, context=None):
         """Ping should be called periodically to update zone status."""
-        diff = datetime.now() - self.last_zone_db_check
+        diff = utils.utcnow() - self.last_zone_db_check
         if diff.seconds >= FLAGS.zone_db_check_interval:
             logging.debug(_("Updating zone cache from db."))
-            self.last_zone_db_check = datetime.now()
+            self.last_zone_db_check = utils.utcnow()
             self._refresh_from_db(context)
         self._poll_zones(context)

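
The zone_manager.py changes route every wall-clock read through utils.utcnow() instead of calling datetime directly, which gives tests one seam to override. A minimal sketch of why that indirection helps (nova's actual utils module may differ in detail):

    import datetime

    _time_override = None

    def utcnow():
        return _time_override or datetime.datetime.utcnow()

    def set_time_override(when):
        global _time_override
        _time_override = when

    set_time_override(datetime.datetime(2011, 6, 1))
    assert utcnow() == datetime.datetime(2011, 6, 1)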

--- /dev/null
+++ b/nova/tests/scheduler/__init__.py

--- /dev/null
+++ b/nova/tests/scheduler/test_host_filter.py
@@ -0,0 +1,206 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filters.
+"""
+
+import json
+
+from nova import exception
+from nova import flags
+from nova import test
+from nova.scheduler import host_filter
+
+FLAGS = flags.FLAGS
+
+
+class FakeZoneManager:
+    pass
+
+
+class HostFilterTestCase(test.TestCase):
+    """Test case for host filters."""
+
+    def _host_caps(self, multiplier):
+        # Returns host capabilities in the following way:
+        # host1 = memory:free 10 (100max)
+        #         disk:available 100 (1000max)
+        # hostN = memory:free 10 + 10N
+        #         disk:available 100 + 100N
+        # in other words: hostN has more resources than host0
+        # which means ... don't go above 10 hosts.
+        return {'host_name-description': 'XenServer %s' % multiplier,
+                'host_hostname': 'xs-%s' % multiplier,
+                'host_memory_total': 100,
+                'host_memory_overhead': 10,
+                'host_memory_free': 10 + multiplier * 10,
+                'host_memory_free-computed': 10 + multiplier * 10,
+                'host_other-config': {},
+                'host_ip_address': '192.168.1.%d' % (100 + multiplier),
+                'host_cpu_info': {},
+                'disk_available': 100 + multiplier * 100,
+                'disk_total': 1000,
+                'disk_used': 0,
+                'host_uuid': 'xxx-%d' % multiplier,
+                'host_name-label': 'xs-%s' % multiplier}
+
+    def setUp(self):
+        self.old_flag = FLAGS.default_host_filter
+        FLAGS.default_host_filter = \
+            'nova.scheduler.host_filter.AllHostsFilter'
+        self.instance_type = dict(name='tiny',
+                                  memory_mb=50,
+                                  vcpus=10,
+                                  local_gb=500,
+                                  flavorid=1,
+                                  swap=500,
+                                  rxtx_quota=30000,
+                                  rxtx_cap=200)
+
+        self.zone_manager = FakeZoneManager()
+        states = {}
+        for x in xrange(10):
+            states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
+        self.zone_manager.service_states = states
+
+    def tearDown(self):
+        FLAGS.default_host_filter = self.old_flag
+
+    def test_choose_filter(self):
+        # Test default filter ...
+        hf = host_filter.choose_host_filter()
+        self.assertEquals(hf._full_name(),
+                          'nova.scheduler.host_filter.AllHostsFilter')
+        # Test valid filter ...
+        hf = host_filter.choose_host_filter(
+            'nova.scheduler.host_filter.InstanceTypeFilter')
+        self.assertEquals(hf._full_name(),
+                          'nova.scheduler.host_filter.InstanceTypeFilter')
+        # Test invalid filter ...
+        try:
+            host_filter.choose_host_filter('does not exist')
+            self.fail("Should not find host filter.")
+        except exception.SchedulerHostFilterNotFound:
+            pass
+
+    def test_all_host_filter(self):
+        hf = host_filter.AllHostsFilter()
+        cooked = hf.instance_type_to_filter(self.instance_type)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)
+        self.assertEquals(10, len(hosts))
+        for host, capabilities in hosts:
|
||||||
|
self.assertTrue(host.startswith('host'))
|
||||||
|
|
||||||
|
def test_instance_type_filter(self):
|
||||||
|
hf = host_filter.InstanceTypeFilter()
|
||||||
|
# filter all hosts that can support 50 ram and 500 disk
|
||||||
|
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||||
|
self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
|
||||||
|
name)
|
||||||
|
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||||
|
self.assertEquals(6, len(hosts))
|
||||||
|
just_hosts = [host for host, caps in hosts]
|
||||||
|
just_hosts.sort()
|
||||||
|
self.assertEquals('host05', just_hosts[0])
|
||||||
|
self.assertEquals('host10', just_hosts[5])
|
||||||
|
|
||||||
|
def test_json_filter(self):
|
||||||
|
hf = host_filter.JsonFilter()
|
||||||
|
# filter all hosts that can support 50 ram and 500 disk
|
||||||
|
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||||
|
self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
|
||||||
|
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||||
|
self.assertEquals(6, len(hosts))
|
||||||
|
just_hosts = [host for host, caps in hosts]
|
||||||
|
just_hosts.sort()
|
||||||
|
self.assertEquals('host05', just_hosts[0])
|
||||||
|
self.assertEquals('host10', just_hosts[5])
|
||||||
|
|
||||||
|
# Try some custom queries
|
||||||
|
|
||||||
|
raw = ['or',
|
||||||
|
['and',
|
||||||
|
['<', '$compute.host_memory_free', 30],
|
||||||
|
['<', '$compute.disk_available', 300]
|
||||||
|
],
|
||||||
|
['and',
|
||||||
|
['>', '$compute.host_memory_free', 70],
|
||||||
|
['>', '$compute.disk_available', 700]
|
||||||
|
]
|
||||||
|
]
|
||||||
|
cooked = json.dumps(raw)
|
||||||
|
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||||
|
|
||||||
|
self.assertEquals(5, len(hosts))
|
||||||
|
just_hosts = [host for host, caps in hosts]
|
||||||
|
just_hosts.sort()
|
||||||
|
for index, host in zip([1, 2, 8, 9, 10], just_hosts):
|
||||||
|
self.assertEquals('host%02d' % index, host)
|
||||||
|
|
||||||
|
raw = ['not',
|
||||||
|
['=', '$compute.host_memory_free', 30],
|
||||||
|
]
|
||||||
|
cooked = json.dumps(raw)
|
||||||
|
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||||
|
|
||||||
|
self.assertEquals(9, len(hosts))
|
||||||
|
just_hosts = [host for host, caps in hosts]
|
||||||
|
just_hosts.sort()
|
||||||
|
for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
|
||||||
|
self.assertEquals('host%02d' % index, host)
|
||||||
|
|
||||||
|
raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
|
||||||
|
cooked = json.dumps(raw)
|
||||||
|
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||||
|
|
||||||
|
self.assertEquals(5, len(hosts))
|
||||||
|
just_hosts = [host for host, caps in hosts]
|
||||||
|
just_hosts.sort()
|
||||||
|
for index, host in zip([2, 4, 6, 8, 10], just_hosts):
|
||||||
|
self.assertEquals('host%02d' % index, host)
|
||||||
|
|
||||||
|
# Try some bogus input ...
|
||||||
|
raw = ['unknown command', ]
|
||||||
|
cooked = json.dumps(raw)
|
||||||
|
try:
|
||||||
|
hf.filter_hosts(self.zone_manager, cooked)
|
||||||
|
self.fail("Should give KeyError")
|
||||||
|
except KeyError, e:
|
||||||
|
pass
|
||||||
|
|
||||||
|
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
|
||||||
|
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
|
||||||
|
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
|
||||||
|
['not', True, False, True, False]
|
||||||
|
)))
|
||||||
|
|
||||||
|
try:
|
||||||
|
hf.filter_hosts(self.zone_manager, json.dumps(
|
||||||
|
'not', True, False, True, False
|
||||||
|
))
|
||||||
|
self.fail("Should give KeyError")
|
||||||
|
except KeyError, e:
|
||||||
|
pass
|
||||||
|
|
||||||
|
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||||
|
json.dumps(['=', '$foo', 100])))
|
||||||
|
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||||
|
json.dumps(['=', '$.....', 100])))
|
||||||
|
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||||
|
json.dumps(
|
||||||
|
['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
|
||||||
|
|
||||||
|
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||||
|
json.dumps(['=', {}, ['>', '$missing....foo']])))
|
||||||
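Note: the JSON queries exercised above are nested prefix-notation expressions:
the first element of each list names an operator, strings starting with '$' are
resolved against the host's capability dictionaries, and the remaining elements
are operands. The following self-contained evaluator (Python 2 idiom, to match
the codebase) sketches that grammar; it is not the JsonFilter implementation,
and the real filter differs in how it treats degenerate inputs such as bare
booleans under 'not':

    import json
    import operator

    OPS = {'<': operator.lt, '>': operator.gt, '=': operator.eq,
           '<=': operator.le, '>=': operator.ge}


    def resolve(token, services):
        # '$compute.host_memory_free' -> services['compute']['host_memory_free']
        if isinstance(token, basestring) and token.startswith('$'):
            obj = services
            for part in token[1:].split('.'):
                obj = obj.get(part, {})
            return obj
        return token


    def evaluate(expr, services):
        if not isinstance(expr, list):
            return resolve(expr, services)
        op, args = expr[0], [evaluate(a, services) for a in expr[1:]]
        if op == 'and':
            return all(args)
        if op == 'or':
            return any(args)
        if op == 'not':
            return not any(args)
        if op == 'in':
            return args[0] in args[1:]
        return OPS[op](args[0], args[1])  # unknown op raises KeyError, as tested

    caps = {'compute': {'host_memory_free': 60, 'disk_available': 800}}
    query = json.loads(json.dumps(['and',
                                   ['>=', '$compute.host_memory_free', 50],
                                   ['>=', '$compute.disk_available', 500]]))
    assert evaluate(query, caps)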
144	nova/tests/scheduler/test_least_cost_scheduler.py	Normal file
@@ -0,0 +1,144 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Tests For Least Cost Scheduler
+"""
+
+from nova import flags
+from nova import test
+from nova.scheduler import least_cost
+from nova.tests.scheduler import test_zone_aware_scheduler
+
+MB = 1024 * 1024
+FLAGS = flags.FLAGS
+
+
+class FakeHost(object):
+    def __init__(self, host_id, free_ram, io):
+        self.id = host_id
+        self.free_ram = free_ram
+        self.io = io
+
+
+class WeightedSumTestCase(test.TestCase):
+    def test_empty_domain(self):
+        domain = []
+        weighted_fns = []
+        result = least_cost.weighted_sum(domain, weighted_fns)
+        expected = []
+        self.assertEqual(expected, result)
+
+    def test_basic_costing(self):
+        hosts = [
+            FakeHost(1, 512 * MB, 100),
+            FakeHost(2, 256 * MB, 400),
+            FakeHost(3, 512 * MB, 100)
+        ]
+
+        weighted_fns = [
+            (1, lambda h: h.free_ram),  # Fill-first, free_ram is a *cost*
+            (2, lambda h: h.io),  # Avoid high I/O
+        ]
+
+        costs = least_cost.weighted_sum(
+            domain=hosts, weighted_fns=weighted_fns)
+
+        # Each 256 MB unit of free-ram contributes 0.5 points by way of:
+        #   cost = weight * (score/max_score) = 1 * (256/512) = 0.5
+        # Each 100 iops of IO adds 0.5 points by way of:
+        #   cost = 2 * (100/400) = 2 * 0.25 = 0.5
+        expected = [1.5, 2.5, 1.5]
+        self.assertEqual(expected, costs)
+
+
+class LeastCostSchedulerTestCase(test.TestCase):
+    def setUp(self):
+        super(LeastCostSchedulerTestCase, self).setUp()
+
+        class FakeZoneManager:
+            pass
+
+        zone_manager = FakeZoneManager()
+
+        states = test_zone_aware_scheduler.fake_zone_manager_service_states(
+            num_hosts=10)
+        zone_manager.service_states = states
+
+        self.sched = least_cost.LeastCostScheduler()
+        self.sched.zone_manager = zone_manager
+
+    def tearDown(self):
+        super(LeastCostSchedulerTestCase, self).tearDown()
+
+    def assertWeights(self, expected, num, request_spec, hosts):
+        weighted = self.sched.weigh_hosts(num, request_spec, hosts)
+        self.assertDictListMatch(weighted, expected, approx_equal=True)
+
+    def test_no_hosts(self):
+        num = 1
+        request_spec = {}
+        hosts = []
+
+        expected = []
+        self.assertWeights(expected, num, request_spec, hosts)
+
+    def test_noop_cost_fn(self):
+        FLAGS.least_cost_scheduler_cost_functions = [
+            'nova.scheduler.least_cost.noop_cost_fn'
+        ]
+        FLAGS.noop_cost_fn_weight = 1
+
+        num = 1
+        request_spec = {}
+        hosts = self.sched.filter_hosts(num, request_spec)
+
+        expected = [dict(weight=1, hostname=hostname)
+                    for hostname, caps in hosts]
+        self.assertWeights(expected, num, request_spec, hosts)
+
+    def test_cost_fn_weights(self):
+        FLAGS.least_cost_scheduler_cost_functions = [
+            'nova.scheduler.least_cost.noop_cost_fn'
+        ]
+        FLAGS.noop_cost_fn_weight = 2
+
+        num = 1
+        request_spec = {}
+        hosts = self.sched.filter_hosts(num, request_spec)
+
+        expected = [dict(weight=2, hostname=hostname)
+                    for hostname, caps in hosts]
+        self.assertWeights(expected, num, request_spec, hosts)
+
+    def test_fill_first_cost_fn(self):
+        FLAGS.least_cost_scheduler_cost_functions = [
+            'nova.scheduler.least_cost.fill_first_cost_fn'
+        ]
+        FLAGS.fill_first_cost_fn_weight = 1
+
+        num = 1
+        request_spec = {}
+        hosts = self.sched.filter_hosts(num, request_spec)
+
+        expected = []
+        for idx, (hostname, caps) in enumerate(hosts):
+            # Costs are normalized so over 10 hosts, each host with increasing
+            # free ram will cost 1/N more. Since the lowest cost host has some
+            # free ram, we add in the 1/N for the base_cost
+            weight = 0.1 + (0.1 * idx)
+            weight_dict = dict(weight=weight, hostname=hostname)
+            expected.append(weight_dict)
+
+        self.assertWeights(expected, num, request_spec, hosts)
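Note: the expected costs [1.5, 2.5, 1.5] in test_basic_costing follow from
normalizing each cost function by its maximum over the candidate hosts and
scaling by the function's weight. A standalone version of that arithmetic,
assuming weighted_sum normalizes by the per-function maximum as the in-test
comments describe:

    MB = 1024 * 1024
    hosts = [(512 * MB, 100), (256 * MB, 400), (512 * MB, 100)]

    weighted_fns = [
        (1, lambda h: h[0]),  # free RAM as a cost: prefer fuller hosts
        (2, lambda h: h[1]),  # I/O load as a cost: avoid busy hosts
    ]

    costs = [0.0] * len(hosts)
    for weight, fn in weighted_fns:
        scores = [fn(h) for h in hosts]
        max_score = float(max(scores))
        for i, score in enumerate(scores):
            costs[i] += weight * (score / max_score)

    # host1: 1 * (512/512) + 2 * (100/400) = 1.0 + 0.5 = 1.5
    # host2: 1 * (256/512) + 2 * (400/400) = 0.5 + 2.0 = 2.5
    assert costs == [1.5, 2.5, 1.5]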
296	nova/tests/scheduler/test_zone_aware_scheduler.py	Normal file
@@ -0,0 +1,296 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Tests For Zone Aware Scheduler.
+"""
+
+from nova import exception
+from nova import test
+from nova.scheduler import driver
+from nova.scheduler import zone_aware_scheduler
+from nova.scheduler import zone_manager
+
+
+def _host_caps(multiplier):
+    # Returns host capabilities in the following way:
+    # host1 = memory:free 10 (100max)
+    #         disk:available 100 (1000max)
+    # hostN = memory:free 10 + 10N
+    #         disk:available 100 + 100N
+    # in other words: hostN has more resources than host0
+    # which means ... don't go above 10 hosts.
+    return {'host_name-description': 'XenServer %s' % multiplier,
+            'host_hostname': 'xs-%s' % multiplier,
+            'host_memory_total': 100,
+            'host_memory_overhead': 10,
+            'host_memory_free': 10 + multiplier * 10,
+            'host_memory_free-computed': 10 + multiplier * 10,
+            'host_other-config': {},
+            'host_ip_address': '192.168.1.%d' % (100 + multiplier),
+            'host_cpu_info': {},
+            'disk_available': 100 + multiplier * 100,
+            'disk_total': 1000,
+            'disk_used': 0,
+            'host_uuid': 'xxx-%d' % multiplier,
+            'host_name-label': 'xs-%s' % multiplier}
+
+
+def fake_zone_manager_service_states(num_hosts):
+    states = {}
+    for x in xrange(num_hosts):
+        states['host%02d' % (x + 1)] = {'compute': _host_caps(x)}
+    return states
+
+
+class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+    def filter_hosts(self, num, specs):
+        # NOTE(sirp): this is returning [(hostname, services)]
+        return self.zone_manager.service_states.items()
+
+    def weigh_hosts(self, num, specs, hosts):
+        fake_weight = 99
+        weighted = []
+        for hostname, caps in hosts:
+            weighted.append(dict(weight=fake_weight, name=hostname))
+        return weighted
+
+
+class FakeZoneManager(zone_manager.ZoneManager):
+    def __init__(self):
+        self.service_states = {
+            'host1': {
+                'compute': {'ram': 1000},
+            },
+            'host2': {
+                'compute': {'ram': 2000},
+            },
+            'host3': {
+                'compute': {'ram': 3000},
+            },
+        }
+
+
+class FakeEmptyZoneManager(zone_manager.ZoneManager):
+    def __init__(self):
+        self.service_states = {}
+
+
+def fake_empty_call_zone_method(context, method, specs):
+    return []
+
+
+# Hmm, I should probably be using mox for this.
+was_called = False
+
+
+def fake_provision_resource(context, item, instance_id, request_spec, kwargs):
+    global was_called
+    was_called = True
+
+
+def fake_ask_child_zone_to_create_instance(context, zone_info,
+                                           request_spec, kwargs):
+    global was_called
+    was_called = True
+
+
+def fake_provision_resource_locally(context, item, instance_id, kwargs):
+    global was_called
+    was_called = True
+
+
+def fake_provision_resource_from_blob(context, item, instance_id,
+                                      request_spec, kwargs):
+    global was_called
+    was_called = True
+
+
+def fake_decrypt_blob_returns_local_info(blob):
+    return {'foo': True}  # values aren't important.
+
+
+def fake_decrypt_blob_returns_child_info(blob):
+    return {'child_zone': True,
+            'child_blob': True}  # values aren't important. Keys are.
+
+
+def fake_call_zone_method(context, method, specs):
+    return [
+        ('zone1', [
+            dict(weight=1, blob='AAAAAAA'),
+            dict(weight=111, blob='BBBBBBB'),
+            dict(weight=112, blob='CCCCCCC'),
+            dict(weight=113, blob='DDDDDDD'),
+        ]),
+        ('zone2', [
+            dict(weight=120, blob='EEEEEEE'),
+            dict(weight=2, blob='FFFFFFF'),
+            dict(weight=122, blob='GGGGGGG'),
+            dict(weight=123, blob='HHHHHHH'),
+        ]),
+        ('zone3', [
+            dict(weight=130, blob='IIIIIII'),
+            dict(weight=131, blob='JJJJJJJ'),
+            dict(weight=132, blob='KKKKKKK'),
+            dict(weight=3, blob='LLLLLLL'),
+        ]),
+    ]
+
+
+class ZoneAwareSchedulerTestCase(test.TestCase):
+    """Test case for Zone Aware Scheduler."""
+
+    def test_zone_aware_scheduler(self):
+        """
+        Create a nested set of FakeZones, ensure that a select call returns the
+        appropriate build plan.
+        """
+        sched = FakeZoneAwareScheduler()
+        self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
+
+        zm = FakeZoneManager()
+        sched.set_zone_manager(zm)
+
+        fake_context = {}
+        build_plan = sched.select(fake_context, {})
+
+        self.assertEqual(15, len(build_plan))
+
+        hostnames = [plan_item['name']
+                     for plan_item in build_plan if 'name' in plan_item]
+        self.assertEqual(3, len(hostnames))
+
+    def test_empty_zone_aware_scheduler(self):
+        """
+        Ensure empty hosts & child_zones result in NoValidHosts exception.
+        """
+        sched = FakeZoneAwareScheduler()
+        self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
+
+        zm = FakeEmptyZoneManager()
+        sched.set_zone_manager(zm)
+
+        fake_context = {}
+        self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
+                          fake_context, 1,
+                          dict(host_filter=None,
+                               request_spec={'instance_type': {}}))
+
+    def test_schedule_do_not_schedule_with_hint(self):
+        """
+        Check the local/child zone routing in the run_instance() call.
+        If the zone_blob hint was passed in, don't re-schedule.
+        """
+        global was_called
+        sched = FakeZoneAwareScheduler()
+        was_called = False
+        self.stubs.Set(sched, '_provision_resource', fake_provision_resource)
+        request_spec = {
+            'instance_properties': {},
+            'instance_type': {},
+            'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
+            'blob': "Non-None blob data"
+        }
+
+        result = sched.schedule_run_instance(None, 1, request_spec)
+        self.assertEquals(None, result)
+        self.assertTrue(was_called)
+
+    def test_provision_resource_local(self):
+        """Provision a resource locally or remotely."""
+        global was_called
+        sched = FakeZoneAwareScheduler()
+        was_called = False
+        self.stubs.Set(sched, '_provision_resource_locally',
+                       fake_provision_resource_locally)
+
+        request_spec = {'hostname': "foo"}
+        sched._provision_resource(None, request_spec, 1, request_spec, {})
+        self.assertTrue(was_called)
+
+    def test_provision_resource_remote(self):
+        """Provision a resource locally or remotely."""
+        global was_called
+        sched = FakeZoneAwareScheduler()
+        was_called = False
+        self.stubs.Set(sched, '_provision_resource_from_blob',
+                       fake_provision_resource_from_blob)
+
+        request_spec = {}
+        sched._provision_resource(None, request_spec, 1, request_spec, {})
+        self.assertTrue(was_called)
+
+    def test_provision_resource_from_blob_empty(self):
+        """Provision a resource locally or remotely given no hints."""
+        global was_called
+        sched = FakeZoneAwareScheduler()
+        request_spec = {}
+        self.assertRaises(zone_aware_scheduler.InvalidBlob,
+                          sched._provision_resource_from_blob,
+                          None, {}, 1, {}, {})
+
+    def test_provision_resource_from_blob_with_local_blob(self):
+        """
+        Provision a resource locally or remotely when blob hint passed in.
+        """
+        global was_called
+        sched = FakeZoneAwareScheduler()
+        was_called = False
+        self.stubs.Set(sched, '_decrypt_blob',
+                       fake_decrypt_blob_returns_local_info)
+        self.stubs.Set(sched, '_provision_resource_locally',
+                       fake_provision_resource_locally)
+
+        request_spec = {'blob': "Non-None blob data"}
+
+        sched._provision_resource_from_blob(None, request_spec, 1,
+                                            request_spec, {})
+        self.assertTrue(was_called)
+
+    def test_provision_resource_from_blob_with_child_blob(self):
+        """
+        Provision a resource locally or remotely when child blob hint
+        passed in.
+        """
+        global was_called
+        sched = FakeZoneAwareScheduler()
+        self.stubs.Set(sched, '_decrypt_blob',
+                       fake_decrypt_blob_returns_child_info)
+        was_called = False
+        self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
+                       fake_ask_child_zone_to_create_instance)
+
+        request_spec = {'blob': "Non-None blob data"}
+
+        sched._provision_resource_from_blob(None, request_spec, 1,
+                                            request_spec, {})
+        self.assertTrue(was_called)
+
+    def test_provision_resource_from_blob_with_immediate_child_blob(self):
+        """
+        Provision a resource locally or remotely when blob hint passed in
+        from an immediate child.
+        """
+        global was_called
+        sched = FakeZoneAwareScheduler()
+        was_called = False
+        self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
+                       fake_ask_child_zone_to_create_instance)
+
+        request_spec = {'child_blob': True, 'child_zone': True}
+
+        sched._provision_resource_from_blob(None, request_spec, 1,
+                                            request_spec, {})
+        self.assertTrue(was_called)
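Note: the module-global was_called flag above works, but as the in-file comment
("I should probably be using mox") admits, it is fragile: every test must
remember to reset it. A small callable recorder avoids the shared global while
keeping the same stubs.Set() usage; this is an alternative sketch, not code
from the commit:

    class CallRecorder(object):
        """Stand-in function that records whether (and how) it was called."""

        def __init__(self):
            self.calls = []

        def __call__(self, *args, **kwargs):
            self.calls.append((args, kwargs))

        @property
        def was_called(self):
            return len(self.calls) > 0

    # Usage with the stub helper used throughout these tests:
    #     recorder = CallRecorder()
    #     self.stubs.Set(sched, '_provision_resource', recorder)
    #     sched.schedule_run_instance(None, 1, request_spec)
    #     self.assertTrue(recorder.was_called)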
@@ -86,6 +86,7 @@ class _AuthManagerBaseTestCase(test.TestCase):
         super(_AuthManagerBaseTestCase, self).setUp()
         self.flags(connection_type='fake')
         self.manager = manager.AuthManager(new=True)
+        self.manager.mc.cache = {}

     def test_create_and_find_user(self):
         with user_generator(self.manager):
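Note: clearing self.manager.mc.cache in setUp keeps cached auth lookups from
leaking between test cases. The cache only needs dict-like get/set behavior,
so a memcache-style fake can be as small as the sketch below (illustrative,
not the project's actual fake):

    class FakeMemcache(object):
        """Dict-backed stand-in for a memcache client."""

        def __init__(self):
            self.cache = {}

        def get(self, key):
            return self.cache.get(key)

        def set(self, key, value, time=0):
            self.cache[key] = value
            return True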
@@ -26,17 +26,16 @@ from eventlet import greenthread
 from nova import context
 from nova import crypto
 from nova import db
+from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import rpc
 from nova import test
 from nova import utils
-from nova import exception
 from nova.auth import manager
 from nova.api.ec2 import cloud
 from nova.api.ec2 import ec2utils
 from nova.image import local
-from nova.exception import NotFound


 FLAGS = flags.FLAGS
@@ -68,7 +67,7 @@ class CloudTestCase(test.TestCase):

         def fake_show(meh, context, id):
             return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
-                    'type': 'machine'}}
+                    'type': 'machine', 'image_state': 'available'}}

         self.stubs.Set(local.LocalImageService, 'show', fake_show)
         self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
@@ -116,6 +115,18 @@ class CloudTestCase(test.TestCase):
                                  public_ip=address)
         db.floating_ip_destroy(self.context, address)

+    def test_allocate_address(self):
+        address = "10.10.10.10"
+        allocate = self.cloud.allocate_address
+        db.floating_ip_create(self.context,
+                              {'address': address,
+                               'host': self.network.host})
+        self.assertEqual(allocate(self.context)['publicIp'], address)
+        db.floating_ip_destroy(self.context, address)
+        self.assertRaises(exception.NoMoreFloatingIps,
+                          allocate,
+                          self.context)
+
     def test_associate_disassociate_address(self):
         """Verifies associate runs cleanly without raising an exception"""
         address = "10.10.10.10"
@@ -254,10 +265,10 @@ class CloudTestCase(test.TestCase):
     def test_describe_instances(self):
         """Makes sure describe_instances works and filters results."""
         inst1 = db.instance_create(self.context, {'reservation_id': 'a',
-                                                  'image_id': 1,
+                                                  'image_ref': 1,
                                                   'host': 'host1'})
         inst2 = db.instance_create(self.context, {'reservation_id': 'a',
-                                                  'image_id': 1,
+                                                  'image_ref': 1,
                                                   'host': 'host2'})
         comp1 = db.service_create(self.context, {'host': 'host1',
                                                  'availability_zone': 'zone1',
@@ -290,7 +301,7 @@ class CloudTestCase(test.TestCase):
                 'type': 'machine'}}]

         def fake_show_none(meh, context, id):
-            raise NotFound
+            raise exception.ImageNotFound(image_id='bad_image_id')

         self.stubs.Set(local.LocalImageService, 'detail', fake_detail)
         # list all
@@ -308,7 +319,7 @@ class CloudTestCase(test.TestCase):
         self.stubs.UnsetAll()
         self.stubs.Set(local.LocalImageService, 'show', fake_show_none)
         self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none)
-        self.assertRaises(NotFound, describe_images,
+        self.assertRaises(exception.ImageNotFound, describe_images,
                           self.context, ['ami-fake'])

     def test_describe_image_attribute(self):
@@ -445,9 +456,67 @@ class CloudTestCase(test.TestCase):
         self._create_key('test')
         self.cloud.delete_key_pair(self.context, 'test')

+    def test_run_instances(self):
+        kwargs = {'image_id': FLAGS.default_image,
+                  'instance_type': FLAGS.default_instance_type,
+                  'max_count': 1}
+        run_instances = self.cloud.run_instances
+        result = run_instances(self.context, **kwargs)
+        instance = result['instancesSet'][0]
+        self.assertEqual(instance['imageId'], 'ami-00000001')
+        self.assertEqual(instance['displayName'], 'Server 1')
+        self.assertEqual(instance['instanceId'], 'i-00000001')
+        self.assertEqual(instance['instanceState']['name'], 'networking')
+        self.assertEqual(instance['instanceType'], 'm1.small')
+
+    def test_run_instances_image_state_none(self):
+        kwargs = {'image_id': FLAGS.default_image,
+                  'instance_type': FLAGS.default_instance_type,
+                  'max_count': 1}
+        run_instances = self.cloud.run_instances
+
+        def fake_show_no_state(self, context, id):
+            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+                    'type': 'machine'}}
+
+        self.stubs.UnsetAll()
+        self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state)
+        self.assertRaises(exception.ApiError, run_instances,
+                          self.context, **kwargs)
+
+    def test_run_instances_image_state_invalid(self):
+        kwargs = {'image_id': FLAGS.default_image,
+                  'instance_type': FLAGS.default_instance_type,
+                  'max_count': 1}
+        run_instances = self.cloud.run_instances
+
+        def fake_show_decrypt(self, context, id):
+            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+                    'type': 'machine', 'image_state': 'decrypting'}}
+
+        self.stubs.UnsetAll()
+        self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt)
+        self.assertRaises(exception.ApiError, run_instances,
+                          self.context, **kwargs)
+
+    def test_run_instances_image_status_active(self):
+        kwargs = {'image_id': FLAGS.default_image,
+                  'instance_type': FLAGS.default_instance_type,
+                  'max_count': 1}
+        run_instances = self.cloud.run_instances
+
+        def fake_show_stat_active(self, context, id):
+            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+                    'type': 'machine'}, 'status': 'active'}
+
+        self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active)
+
+        result = run_instances(self.context, **kwargs)
+        self.assertEqual(len(result['instancesSet']), 1)
+
     def test_terminate_instances(self):
         inst1 = db.instance_create(self.context, {'reservation_id': 'a',
-                                                  'image_id': 1,
+                                                  'image_ref': 1,
                                                   'host': 'host1'})
         terminate_instances = self.cloud.terminate_instances
         # valid instance_id
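Note: the three run_instances image-state tests above differ only in which
state keys the stubbed image carries; run_instances is expected to refuse to
boot unless the image is marked available (or carries an 'active' status).
The duplication could be factored into a small stub factory; a hypothetical
sketch, not code from the commit:

    def make_fake_show(extra_properties=None, **extra_fields):
        """Build a LocalImageService.show stand-in returning one canned image."""
        def fake_show(self, context, id):
            image = {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
                                             'type': 'machine'}}
            image['properties'].update(extra_properties or {})
            image.update(extra_fields)
            return image
        return fake_show

    # e.g. make_fake_show({'image_state': 'decrypting'}) for the invalid-state
    # case, or make_fake_show(status='active') for the active-status case.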
@@ -19,7 +19,6 @@
 Tests For Compute
 """

-import datetime
 import mox
 import stubout

@@ -84,7 +83,7 @@ class ComputeTestCase(test.TestCase):
     def _create_instance(self, params={}):
         """Create a test instance"""
         inst = {}
-        inst['image_id'] = 1
+        inst['image_ref'] = 1
         inst['reservation_id'] = 'r-fakeres'
         inst['launch_time'] = '10'
         inst['user_id'] = self.user.id
@@ -150,7 +149,7 @@ class ComputeTestCase(test.TestCase):
         ref = self.compute_api.create(
             self.context,
             instance_type=instance_types.get_default_instance_type(),
-            image_id=None,
+            image_href=None,
             security_group=['testgroup'])
         try:
             self.assertEqual(len(db.security_group_get_by_instance(
@@ -168,7 +167,7 @@ class ComputeTestCase(test.TestCase):
         ref = self.compute_api.create(
             self.context,
             instance_type=instance_types.get_default_instance_type(),
-            image_id=None,
+            image_href=None,
             security_group=['testgroup'])
         try:
             db.instance_destroy(self.context, ref[0]['id'])
@@ -184,7 +183,7 @@ class ComputeTestCase(test.TestCase):
         ref = self.compute_api.create(
             self.context,
             instance_type=instance_types.get_default_instance_type(),
-            image_id=None,
+            image_href=None,
             security_group=['testgroup'])

         try:
@@ -217,12 +216,12 @@ class ComputeTestCase(test.TestCase):
         instance_ref = db.instance_get(self.context, instance_id)
         self.assertEqual(instance_ref['launched_at'], None)
         self.assertEqual(instance_ref['deleted_at'], None)
-        launch = datetime.datetime.utcnow()
+        launch = utils.utcnow()
         self.compute.run_instance(self.context, instance_id)
         instance_ref = db.instance_get(self.context, instance_id)
         self.assert_(instance_ref['launched_at'] > launch)
         self.assertEqual(instance_ref['deleted_at'], None)
-        terminate = datetime.datetime.utcnow()
+        terminate = utils.utcnow()
         self.compute.terminate_instance(self.context, instance_id)
         self.context = self.context.elevated(True)
         instance_ref = db.instance_get(self.context, instance_id)
@@ -20,8 +20,6 @@
 Tests For Console proxy.
 """

-import datetime
-
 from nova import context
 from nova import db
 from nova import exception
@@ -133,13 +133,14 @@ class HostFilterTestCase(test.TestCase):
         raw = ['or',
                    ['and',
                        ['<', '$compute.host_memory_free', 30],
-                       ['<', '$compute.disk_available', 300]
+                       ['<', '$compute.disk_available', 300],
                    ],
                    ['and',
                        ['>', '$compute.host_memory_free', 70],
-                       ['>', '$compute.disk_available', 700]
-                   ]
+                       ['>', '$compute.disk_available', 700],
+                   ],
               ]

         cooked = json.dumps(raw)
         hosts = hf.filter_hosts(self.zone_manager, cooked)

@@ -183,13 +184,11 @@ class HostFilterTestCase(test.TestCase):
         self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
         self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
         self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
-            ['not', True, False, True, False]
-        )))
+            ['not', True, False, True, False])))

         try:
             hf.filter_hosts(self.zone_manager, json.dumps(
-                'not', True, False, True, False
-            ))
+                'not', True, False, True, False))
             self.fail("Should give KeyError")
         except KeyError, e:
             pass
@@ -14,10 +14,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import copy
 import eventlet
 import mox
 import os
 import re
+import shutil
 import sys

 from xml.etree.ElementTree import fromstring as xml_to_tree
@@ -124,6 +126,7 @@ class CacheConcurrencyTestCase(test.TestCase):


 class LibvirtConnTestCase(test.TestCase):
+
     def setUp(self):
         super(LibvirtConnTestCase, self).setUp()
         connection._late_load_cheetah()
@@ -160,6 +163,7 @@ class LibvirtConnTestCase(test.TestCase):
                      'vcpus': 2,
                      'project_id': 'fake',
                      'bridge': 'br101',
+                     'image_ref': '123456',
                      'instance_type_id': '5'}  # m1.small

     def lazy_load_library_exists(self):
@@ -205,6 +209,29 @@ class LibvirtConnTestCase(test.TestCase):
         self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
         connection.LibvirtConnection._conn = fake

+    def fake_lookup(self, instance_name):
+
+        class FakeVirtDomain(object):
+
+            def snapshotCreateXML(self, *args):
+                return None
+
+            def XMLDesc(self, *args):
+                return """
+                    <domain type='kvm'>
+                        <devices>
+                            <disk type='file'>
+                                <source file='filename'/>
+                            </disk>
+                        </devices>
+                    </domain>
+                """
+
+        return FakeVirtDomain()
+
+    def fake_execute(self, *args):
+        open(args[-1], "a").close()
+
     def create_service(self, **kwargs):
         service_ref = {'host': kwargs.get('host', 'dummy'),
                        'binary': 'nova-compute',
@@ -280,6 +307,81 @@ class LibvirtConnTestCase(test.TestCase):
         instance_data = dict(self.test_instance)
         self._check_xml_and_container(instance_data)

+    def test_snapshot(self):
+        if not self.lazy_load_library_exists():
+            return
+
+        FLAGS.image_service = 'nova.image.fake.FakeImageService'
+
+        # Start test
+        image_service = utils.import_object(FLAGS.image_service)
+
+        # Assuming that base image already exists in image_service
+        instance_ref = db.instance_create(self.context, self.test_instance)
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        # Create new image. It will be updated in snapshot method
+        # To work with it from snapshot, the single image_service is needed
+        recv_meta = image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+        connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(connection.utils, 'execute')
+        connection.utils.execute = self.fake_execute
+
+        self.mox.ReplayAll()
+
+        conn = connection.LibvirtConnection(False)
+        conn.snapshot(instance_ref, recv_meta['id'])
+
+        snapshot = image_service.show(context, recv_meta['id'])
+        self.assertEquals(snapshot['properties']['image_state'], 'available')
+        self.assertEquals(snapshot['status'], 'active')
+        self.assertEquals(snapshot['name'], snapshot_name)
+
+    def test_snapshot_no_image_architecture(self):
+        if not self.lazy_load_library_exists():
+            return
+
+        FLAGS.image_service = 'nova.image.fake.FakeImageService'
+
+        # Start test
+        image_service = utils.import_object(FLAGS.image_service)
+
+        # Assign image_ref = 2 from nova/images/fakes for testing different
+        # base image
+        test_instance = copy.deepcopy(self.test_instance)
+        test_instance["image_ref"] = "2"
+
+        # Assuming that base image already exists in image_service
+        instance_ref = db.instance_create(self.context, test_instance)
+        properties = {'instance_id': instance_ref['id'],
+                      'user_id': str(self.context.user_id)}
+        snapshot_name = 'test-snap'
+        sent_meta = {'name': snapshot_name, 'is_public': False,
+                     'status': 'creating', 'properties': properties}
+        # Create new image. It will be updated in snapshot method
+        # To work with it from snapshot, the single image_service is needed
+        recv_meta = image_service.create(context, sent_meta)
+
+        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+        connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+        self.mox.StubOutWithMock(connection.utils, 'execute')
+        connection.utils.execute = self.fake_execute
+
+        self.mox.ReplayAll()
+
+        conn = connection.LibvirtConnection(False)
+        conn.snapshot(instance_ref, recv_meta['id'])
+
+        snapshot = image_service.show(context, recv_meta['id'])
+        self.assertEquals(snapshot['properties']['image_state'], 'available')
+        self.assertEquals(snapshot['status'], 'active')
+        self.assertEquals(snapshot['name'], snapshot_name)
+
     def test_multi_nic(self):
         instance_data = dict(self.test_instance)
         network_info = _create_network_info(2)
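Note: both snapshot tests above drive the same flow: pre-create a placeholder
image, stub the libvirt domain lookup and command execution, call
conn.snapshot(), then assert the placeholder now reports an available, active
image. The image-service contract they rely on is small; a dict-backed
stand-in in the spirit of nova.image.fake.FakeImageService could look roughly
like this (names illustrative, not the actual fake):

    class InMemoryImageService(object):
        """Minimal image service double: create, update, show."""

        def __init__(self):
            self._images = {}
            self._next_id = 1

        def create(self, context, metadata):
            image = dict(metadata)
            image['id'] = self._next_id
            self._next_id += 1
            self._images[image['id']] = image
            return image

        def update(self, context, image_id, metadata):
            self._images[image_id].update(metadata)
            return self._images[image_id]

        def show(self, context, image_id):
            return self._images[image_id]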
@@ -645,6 +747,8 @@ class LibvirtConnTestCase(test.TestCase):
         except Exception, e:
             count = (0 <= str(e.message).find('Unexpected method call'))

+        shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name))
+
         self.assertTrue(count)

     def test_get_host_ip_addr(self):
@@ -658,6 +762,31 @@ class LibvirtConnTestCase(test.TestCase):
         super(LibvirtConnTestCase, self).tearDown()


+class NWFilterFakes:
+    def __init__(self):
+        self.filters = {}
+
+    def nwfilterLookupByName(self, name):
+        if name in self.filters:
+            return self.filters[name]
+        raise libvirt.libvirtError('Filter Not Found')
+
+    def filterDefineXMLMock(self, xml):
+        class FakeNWFilterInternal:
+            def __init__(self, parent, name):
+                self.name = name
+                self.parent = parent
+
+            def undefine(self):
+                del self.parent.filters[self.name]
+                pass
+        tree = xml_to_tree(xml)
+        name = tree.get('name')
+        if name not in self.filters:
+            self.filters[name] = FakeNWFilterInternal(self, name)
+        return True
+
+
 class IptablesFirewallTestCase(test.TestCase):
     def setUp(self):
         super(IptablesFirewallTestCase, self).setUp()
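Note: NWFilterFakes gives the firewall tests a countable stand-in for
libvirt's nwfilter registry: defining a filter parses the XML for its name
and stores an object whose undefine() removes it again, so tests can assert
on len(fakefilter.filters) before and after unfilter_instance(). Exercised in
isolation (assuming the class and the xml_to_tree import above):

    fakefilter = NWFilterFakes()
    fakefilter.filterDefineXMLMock("<filter name='nova-instance-1'/>")
    assert 'nova-instance-1' in fakefilter.filters
    fakefilter.filters['nova-instance-1'].undefine()
    assert 'nova-instance-1' not in fakefilter.filters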
@@ -675,6 +804,20 @@ class IptablesFirewallTestCase(test.TestCase):
         self.fw = firewall.IptablesFirewallDriver(
             get_connection=lambda: self.fake_libvirt_connection)

+    def lazy_load_library_exists(self):
+        """check if libvirt is available."""
+        # try to connect libvirt. if fail, skip test.
+        try:
+            import libvirt
+            import libxml2
+        except ImportError:
+            return False
+        global libvirt
+        libvirt = __import__('libvirt')
+        connection.libvirt = __import__('libvirt')
+        connection.libxml2 = __import__('libxml2')
+        return True
+
     def tearDown(self):
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
@@ -880,6 +1023,40 @@ class IptablesFirewallTestCase(test.TestCase):
         self.mox.ReplayAll()
         self.fw.do_refresh_security_group_rules("fake")

+    def test_unfilter_instance_undefines_nwfilter(self):
+        # Skip if non-libvirt environment
+        if not self.lazy_load_library_exists():
+            return
+
+        admin_ctxt = context.get_admin_context()
+
+        fakefilter = NWFilterFakes()
+        self.fw.nwfilter._conn.nwfilterDefineXML = \
+            fakefilter.filterDefineXMLMock
+        self.fw.nwfilter._conn.nwfilterLookupByName = \
+            fakefilter.nwfilterLookupByName
+
+        instance_ref = self._create_instance_ref()
+        inst_id = instance_ref['id']
+        instance = db.instance_get(self.context, inst_id)
+
+        ip = '10.11.12.13'
+        network_ref = db.project_get_network(self.context, 'fake')
+        fixed_ip = {'address': ip, 'network_id': network_ref['id']}
+        db.fixed_ip_create(admin_ctxt, fixed_ip)
+        db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+                                            'instance_id': inst_id})
+        self.fw.setup_basic_filtering(instance)
+        self.fw.prepare_instance_filter(instance)
+        self.fw.apply_instance_filter(instance)
+        original_filter_count = len(fakefilter.filters)
+        self.fw.unfilter_instance(instance)
+
+        # should undefine just the instance filter
+        self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
+
+        db.instance_destroy(admin_ctxt, instance_ref['id'])
+
+
 class NWFilterTestCase(test.TestCase):
     def setUp(self):
@@ -1056,3 +1233,37 @@ class NWFilterTestCase(test.TestCase):
                                                       network_info,
                                                       "fake")
         self.assertEquals(len(result), 3)
+
+    def test_unfilter_instance_undefines_nwfilters(self):
+        admin_ctxt = context.get_admin_context()
+
+        fakefilter = NWFilterFakes()
+        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
+        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
+
+        instance_ref = self._create_instance()
+        inst_id = instance_ref['id']
+
+        self.security_group = self.setup_and_return_security_group()
+
+        db.instance_add_security_group(self.context, inst_id,
+                                       self.security_group.id)
+
+        instance = db.instance_get(self.context, inst_id)
+
+        ip = '10.11.12.13'
+        network_ref = db.project_get_network(self.context, 'fake')
+        fixed_ip = {'address': ip, 'network_id': network_ref['id']}
+        db.fixed_ip_create(admin_ctxt, fixed_ip)
+        db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+                                            'instance_id': inst_id})
+        self.fw.setup_basic_filtering(instance)
+        self.fw.prepare_instance_filter(instance)
+        self.fw.apply_instance_filter(instance)
+        original_filter_count = len(fakefilter.filters)
+        self.fw.unfilter_instance(instance)
+
+        # should undefine 2 filters: instance and instance-secgroup
+        self.assertEqual(original_filter_count - len(fakefilter.filters), 2)
+
+        db.instance_destroy(admin_ctxt, instance_ref['id'])
@@ -16,7 +16,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import datetime
 import webob
 import webob.dec
 import webob.exc
@@ -21,11 +21,24 @@ import select
 from eventlet import greenpool
 from eventlet import greenthread

+from nova import exception
 from nova import test
 from nova import utils
 from nova.utils import parse_mailmap, str_dict_replace


+class ExceptionTestCase(test.TestCase):
+    @staticmethod
+    def _raise_exc(exc):
+        raise exc()
+
+    def test_exceptions_raise(self):
+        for name in dir(exception):
+            exc = getattr(exception, name)
+            if isinstance(exc, type):
+                self.assertRaises(exc, self._raise_exc, exc)
+
+
 class ProjectTestCase(test.TestCase):
     def test_authors_up_to_date(self):
         topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
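Note: ExceptionTestCase above is a smoke test: every class the exception
module exports must be constructible with no arguments and raisable, which
catches signature regressions when exceptions are reworked (several were in
this merge, e.g. NotFound becoming ImageNotFound). The pattern in miniature,
against a stand-in namespace:

    class FakeModule(object):
        class AlreadyExists(Exception):
            pass

        class NotFound(Exception):
            pass

        SOME_CONSTANT = 42  # non-classes are skipped by the isinstance check

    for name in dir(FakeModule):
        obj = getattr(FakeModule, name)
        if isinstance(obj, type) and issubclass(obj, Exception):
            try:
                raise obj()
            except obj:
                pass  # constructible and raisable, as the test requires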
@@ -13,10 +13,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import nova
+import stubout
+
+import nova
 from nova import context
 from nova import flags
+from nova import log
 from nova import rpc
 import nova.notifier.api
 from nova.notifier.api import notify
@@ -24,8 +26,6 @@ from nova.notifier import no_op_notifier
 from nova.notifier import rabbit_notifier
 from nova import test

-import stubout
-

 class NotifierTestCase(test.TestCase):
     """Test case for notifications"""
@@ -115,3 +115,22 @@ class NotifierTestCase(test.TestCase):
         notify('publisher_id',
                'event_type', 'DEBUG', dict(a=3))
         self.assertEqual(self.test_topic, 'testnotify.debug')
+
+    def test_error_notification(self):
+        self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
+                'nova.notifier.rabbit_notifier')
+        self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True)
+        LOG = log.getLogger('nova')
+        LOG.setup_from_flags()
+        msgs = []
+
+        def mock_cast(context, topic, data):
+            msgs.append(data)
+
+        self.stubs.Set(nova.rpc, 'cast', mock_cast)
+        LOG.error('foo')
+        self.assertEqual(1, len(msgs))
+        msg = msgs[0]
+        self.assertEqual(msg['event_type'], 'error_notification')
+        self.assertEqual(msg['priority'], 'ERROR')
+        self.assertEqual(msg['payload']['error'], 'foo')
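Note: test_error_notification above asserts that with publish_errors enabled,
an ERROR-level log record is republished as an 'error_notification' message
on the bus. The mechanism is a logging handler that forwards records to the
notifier; a self-contained sketch of that idea (notify here stands in for
nova.notifier.api.notify, and the payload shape mirrors the assertions above):

    import logging


    class PublishErrorsHandler(logging.Handler):
        """Forward ERROR records to a notify callable."""

        def __init__(self, notify, level=logging.ERROR):
            logging.Handler.__init__(self, level)
            self.notify = notify

        def emit(self, record):
            self.notify('error.publisher', 'error_notification', 'ERROR',
                        dict(error=record.getMessage()))

    msgs = []
    logger = logging.getLogger('demo')
    logger.addHandler(PublishErrorsHandler(
        lambda publisher, event_type, priority, payload: msgs.append(
            dict(event_type=event_type, priority=priority, payload=payload))))
    logger.error('foo')
    assert msgs[0]['payload']['error'] == 'foo'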
File diff suppressed because it is too large
@@ -55,8 +55,7 @@ class VMWareAPIVMTestCase(test.TestCase):
         vmwareapi_fake.reset()
         db_fakes.stub_out_db_instance_api(self.stubs)
         stubs.set_stubs(self.stubs)
-        glance_stubs.stubout_glance_client(self.stubs,
-                                           glance_stubs.FakeGlance)
+        glance_stubs.stubout_glance_client(self.stubs)
         self.conn = vmwareapi_conn.get_connection(False)

     def _create_instance_in_the_db(self):
@@ -64,13 +63,13 @@ class VMWareAPIVMTestCase(test.TestCase):
                   'id': 1,
                   'project_id': self.project.id,
                   'user_id': self.user.id,
-                  'image_id': "1",
+                  'image_ref': "1",
                   'kernel_id': "1",
                   'ramdisk_id': "1",
                   'instance_type': 'm1.large',
                   'mac_address': 'aa:bb:cc:dd:ee:ff',
                   }
-        self.instance = db.instance_create(values)
+        self.instance = db.instance_create(None, values)

     def _create_vm(self):
         """Create and spawn the VM."""
@@ -79,7 +79,7 @@ class XenAPIVolumeTestCase(test.TestCase):
         self.values = {'id': 1,
                        'project_id': 'fake',
                        'user_id': 'fake',
-                       'image_id': 1,
+                       'image_ref': 1,
                        'kernel_id': 2,
                        'ramdisk_id': 3,
                        'instance_type_id': '3',  # m1.large
@@ -193,8 +193,7 @@ class XenAPIVMTestCase(test.TestCase):
         stubs.stubout_determine_is_pv_objectstore(self.stubs)
         self.stubs.Set(VMOps, 'reset_network', reset_network)
         stubs.stub_out_vm_methods(self.stubs)
-        glance_stubs.stubout_glance_client(self.stubs,
-                                           glance_stubs.FakeGlance)
+        glance_stubs.stubout_glance_client(self.stubs)
         fake_utils.stub_out_utils_execute(self.stubs)
         self.context = context.RequestContext('fake', 'fake', False)
         self.conn = xenapi_conn.get_connection(False)
@@ -207,7 +206,7 @@ class XenAPIVMTestCase(test.TestCase):
             'id': id,
             'project_id': proj,
             'user_id': user,
-            'image_id': 1,
+            'image_ref': 1,
             'kernel_id': 2,
             'ramdisk_id': 3,
             'instance_type_id': '3',  # m1.large
@@ -351,26 +350,14 @@ class XenAPIVMTestCase(test.TestCase):
         self.assertEquals(self.vm['HVM_boot_params'], {})
         self.assertEquals(self.vm['HVM_boot_policy'], '')
 
-    def _list_vdis(self):
-        url = FLAGS.xenapi_connection_url
-        username = FLAGS.xenapi_connection_username
-        password = FLAGS.xenapi_connection_password
-        session = xenapi_conn.XenAPISession(url, username, password)
-        return session.call_xenapi('VDI.get_all')
-
-    def _check_vdis(self, start_list, end_list):
-        for vdi_ref in end_list:
-            if not vdi_ref in start_list:
-                self.fail('Found unexpected VDI:%s' % vdi_ref)
-
-    def _test_spawn(self, image_id, kernel_id, ramdisk_id,
+    def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
                     instance_type_id="3", os_type="linux",
                     instance_id=1, check_injection=False):
         stubs.stubout_loopingcall_start(self.stubs)
         values = {'id': instance_id,
                   'project_id': self.project.id,
                   'user_id': self.user.id,
-                  'image_id': image_id,
+                  'image_ref': image_ref,
                   'kernel_id': kernel_id,
                   'ramdisk_id': ramdisk_id,
                   'instance_type_id': instance_type_id,
@@ -607,7 +594,7 @@ class XenAPIVMTestCase(test.TestCase):
             'id': 1,
             'project_id': self.project.id,
             'user_id': self.user.id,
-            'image_id': 1,
+            'image_ref': 1,
             'kernel_id': 2,
             'ramdisk_id': 3,
             'instance_type_id': '3',  # m1.large
@@ -681,7 +668,7 @@ class XenAPIMigrateInstance(test.TestCase):
         self.values = {'id': 1,
                        'project_id': self.project.id,
                        'user_id': self.user.id,
-                       'image_id': 1,
+                       'image_ref': 1,
                        'kernel_id': None,
                        'ramdisk_id': None,
                        'local_gb': 5,
@@ -692,8 +679,7 @@ class XenAPIMigrateInstance(test.TestCase):
         fake_utils.stub_out_utils_execute(self.stubs)
         stubs.stub_out_migration_methods(self.stubs)
         stubs.stubout_get_this_vm_uuid(self.stubs)
-        glance_stubs.stubout_glance_client(self.stubs,
-                                           glance_stubs.FakeGlance)
+        glance_stubs.stubout_glance_client(self.stubs)
 
     def tearDown(self):
         super(XenAPIMigrateInstance, self).tearDown()
@@ -719,8 +705,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
     """Unit tests for code that detects the ImageType."""
     def setUp(self):
         super(XenAPIDetermineDiskImageTestCase, self).setUp()
-        glance_stubs.stubout_glance_client(self.stubs,
-                                           glance_stubs.FakeGlance)
+        glance_stubs.stubout_glance_client(self.stubs)
 
 class FakeInstance(object):
     pass
@@ -737,7 +722,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
     def test_instance_disk(self):
         """If a kernel is specified, the image type is DISK (aka machine)."""
         FLAGS.xenapi_image_service = 'objectstore'
-        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
+        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE
         self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
         self.assert_disk_type(vm_utils.ImageType.DISK)
 
@@ -747,7 +732,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
         DISK_RAW is assumed.
         """
         FLAGS.xenapi_image_service = 'objectstore'
-        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
         self.fake_instance.kernel_id = None
         self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
 
@@ -757,7 +742,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
         this case will be 'raw'.
         """
         FLAGS.xenapi_image_service = 'glance'
-        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
         self.fake_instance.kernel_id = None
         self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
 
@@ -767,7 +752,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
         this case will be 'vhd'.
         """
         FLAGS.xenapi_image_service = 'glance'
-        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
+        self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD
         self.fake_instance.kernel_id = None
         self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
 
@@ -1,121 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-"""
-Tests For Zone Aware Scheduler.
-"""
-
-from nova import test
-from nova.scheduler import driver
-from nova.scheduler import zone_aware_scheduler
-from nova.scheduler import zone_manager
-
-
-class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
-    def filter_hosts(self, num, specs):
-        # NOTE(sirp): this is returning [(hostname, services)]
-        return self.zone_manager.service_states.items()
-
-    def weigh_hosts(self, num, specs, hosts):
-        fake_weight = 99
-        weighted = []
-        for hostname, caps in hosts:
-            weighted.append(dict(weight=fake_weight, name=hostname))
-        return weighted
-
-
-class FakeZoneManager(zone_manager.ZoneManager):
-    def __init__(self):
-        self.service_states = {
-            'host1': {
-                'compute': {'ram': 1000}
-            },
-            'host2': {
-                'compute': {'ram': 2000}
-            },
-            'host3': {
-                'compute': {'ram': 3000}
-            }
-        }
-
-
-class FakeEmptyZoneManager(zone_manager.ZoneManager):
-    def __init__(self):
-        self.service_states = {}
-
-
-def fake_empty_call_zone_method(context, method, specs):
-    return []
-
-
-def fake_call_zone_method(context, method, specs):
-    return [
-        ('zone1', [
-            dict(weight=1, blob='AAAAAAA'),
-            dict(weight=111, blob='BBBBBBB'),
-            dict(weight=112, blob='CCCCCCC'),
-            dict(weight=113, blob='DDDDDDD'),
-        ]),
-        ('zone2', [
-            dict(weight=120, blob='EEEEEEE'),
-            dict(weight=2, blob='FFFFFFF'),
-            dict(weight=122, blob='GGGGGGG'),
-            dict(weight=123, blob='HHHHHHH'),
-        ]),
-        ('zone3', [
-            dict(weight=130, blob='IIIIIII'),
-            dict(weight=131, blob='JJJJJJJ'),
-            dict(weight=132, blob='KKKKKKK'),
-            dict(weight=3, blob='LLLLLLL'),
-        ]),
-    ]
-
-
-class ZoneAwareSchedulerTestCase(test.TestCase):
-    """Test case for Zone Aware Scheduler."""
-
-    def test_zone_aware_scheduler(self):
-        """
-        Create a nested set of FakeZones, ensure that a select call returns the
-        appropriate build plan.
-        """
-        sched = FakeZoneAwareScheduler()
-        self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
-
-        zm = FakeZoneManager()
-        sched.set_zone_manager(zm)
-
-        fake_context = {}
-        build_plan = sched.select(fake_context, {})
-
-        self.assertEqual(15, len(build_plan))
-
-        hostnames = [plan_item['name']
-                     for plan_item in build_plan if 'name' in plan_item]
-        self.assertEqual(3, len(hostnames))
-
-    def test_empty_zone_aware_scheduler(self):
-        """
-        Ensure empty hosts & child_zones result in NoValidHosts exception.
-        """
-        sched = FakeZoneAwareScheduler()
-        self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
-
-        zm = FakeEmptyZoneManager()
-        sched.set_zone_manager(zm)
-
-        fake_context = {}
-        self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
-                          fake_context, 1,
-                          dict(host_filter=None, instance_type={}))
@@ -52,7 +52,7 @@ def stub_out_db_instance_api(stubs):
         else:
             raise NotImplementedError()
 
-    def fake_instance_create(values):
+    def fake_instance_create(context, values):
         """Stubs out the db.instance_create method."""
 
         type_data = INSTANCE_TYPES[values['instance_type']]
@@ -61,7 +61,7 @@ def stub_out_db_instance_api(stubs):
             'name': values['name'],
             'id': values['id'],
             'reservation_id': utils.generate_uid('r'),
-            'image_id': values['image_id'],
+            'image_ref': values['image_ref'],
             'kernel_id': values['kernel_id'],
             'ramdisk_id': values['ramdisk_id'],
             'state_description': 'scheduling',
@@ -42,20 +42,6 @@ def stubout_instance_snapshot(stubs):
 
     stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
 
-    def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
-                                   original_parent_uuid):
-        from nova.virt.xenapi.fake import create_vdi
-        name_label = "instance-%s" % instance_id
-        #TODO: create fake SR record
-        sr_ref = "fakesr"
-        vdi_ref = create_vdi(name_label=name_label, read_only=False,
-                             sr_ref=sr_ref, sharable=False)
-        vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
-        vdi_uuid = vdi_rec['uuid']
-        return vdi_uuid
-
-    stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
-
 def fake_parse_xmlrpc_value(val):
     return val
 
@@ -284,10 +270,10 @@ class FakeSessionForMigrationTests(fake.SessionBase):
     def __init__(self, uri):
         super(FakeSessionForMigrationTests, self).__init__(uri)
 
-    def VDI_get_by_uuid(*args):
+    def VDI_get_by_uuid(self, *args):
         return 'hurr'
 
-    def VDI_resize_online(*args):
+    def VDI_resize_online(self, *args):
         pass
 
     def VM_start(self, _1, ref, _2, _3):
@@ -78,7 +78,7 @@ def WrapTwistedOptions(wrapped):
             self._absorbParameters()
             self._absorbHandlers()
 
-            super(TwistedOptionsToFlags, self).__init__()
+            wrapped.__init__(self)
 
         def _absorbFlags(self):
             twistd_flags = []
@@ -163,12 +163,12 @@ def WrapTwistedOptions(wrapped):
         def parseArgs(self, *args):
            # TODO(termie): figure out a decent way of dealing with args
            #return
-            super(TwistedOptionsToFlags, self).parseArgs(*args)
+            wrapped.parseArgs(self, *args)
 
         def postOptions(self):
             self._doHandlers()
 
-            super(TwistedOptionsToFlags, self).postOptions()
+            wrapped.postOptions(self)
 
         def __getitem__(self, key):
             key = key.replace('-', '_')