merge with trey
.mailmap (4 changed lines)
@@ -47,3 +47,7 @@
 <vishvananda@gmail.com> <root@mirror.nasanebula.net>
 <vishvananda@gmail.com> <root@ubuntu>
 <vishvananda@gmail.com> <vishvananda@yahoo.com>
+<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
+<ilyaalekseyev@acm.org> <ilya@oscloud.ru>
+<reldan@oscloud.ru> <enugaev@griddynamics.com>
+<kshileev@gmail.com> <kshileev@griddynamics.com>
Authors (5 changed lines)
@@ -22,14 +22,14 @@ David Pravec <David.Pravec@danix.org>
 Dean Troyer <dtroyer@gmail.com>
 Devin Carlen <devin.carlen@gmail.com>
 Ed Leafe <ed@leafe.com>
-Eldar Nugaev <enugaev@griddynamics.com>
+Eldar Nugaev <reldan@oscloud.ru>
 Eric Day <eday@oddments.org>
 Eric Windisch <eric@cloudscaling.com>
 Ewan Mellor <ewan.mellor@citrix.com>
 Gabe Westmaas <gabe.westmaas@rackspace.com>
 Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
 Hisaki Ohara <hisaki.ohara@intel.com>
-Ilya Alekseyev <ialekseev@griddynamics.com>
+Ilya Alekseyev <ilyaalekseyev@acm.org>
 Isaku Yamahata <yamahata@valinux.co.jp>
 Jason Cannavale <jason.cannavale@rackspace.com>
 Jason Koelker <jason@koelker.net>
@@ -53,6 +53,7 @@ Kei Masumoto <masumotok@nttdata.co.jp>
 Ken Pepple <ken.pepple@gmail.com>
 Kevin Bringard <kbringard@attinteractive.com>
 Kevin L. Mitchell <kevin.mitchell@rackspace.com>
+Kirill Shileev <kshileev@gmail.com>
 Koji Iida <iida.koji@lab.ntt.co.jp>
 Lorin Hochstein <lorin@isi.edu>
 Lvov Maxim <usrleon@gmail.com>
@@ -137,8 +137,9 @@ if __name__ == '__main__':
     utils.default_flagfile()
     FLAGS(sys.argv)
     logging.setup()
-    server = wsgi.Server()
+    acp_port = FLAGS.ajax_console_proxy_port
     acp = AjaxConsoleProxy()
     acp.register_listeners()
-    server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0')
+    server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
+    server.start()
     server.wait()
bin/nova-api (57 changed lines)
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# pylint: disable=C0103
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

 # Copyright 2010 United States Government as represented by the
@@ -18,44 +17,34 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""Starter script for Nova API."""
+"""Starter script for Nova API.
+
+Starts both the EC2 and OpenStack APIs in separate processes.
+
+"""

-import gettext
-import os
 import sys

-# If ../nova/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
-                                   os.pardir,
-                                   os.pardir))
-if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
-    sys.path.insert(0, possible_topdir)
-
-gettext.install('nova', unicode=1)
-
-from nova import flags
-from nova import log as logging
-from nova import service
-from nova import utils
-from nova import version
-from nova import wsgi
-
-LOG = logging.getLogger('nova.api')
-FLAGS = flags.FLAGS
+import nova.service
+import nova.utils
+
+
+def main():
+    """Launch EC2 and OSAPI services."""
+    nova.utils.Bootstrapper.bootstrap_binary(sys.argv)
+
+    ec2 = nova.service.WSGIService("ec2")
+    osapi = nova.service.WSGIService("osapi")
+
+    launcher = nova.service.Launcher()
+    launcher.launch_service(ec2)
+    launcher.launch_service(osapi)
+
+    try:
+        launcher.wait()
+    except KeyboardInterrupt:
+        launcher.stop()


 if __name__ == '__main__':
-    utils.default_flagfile()
-    FLAGS(sys.argv)
-    logging.setup()
-    LOG.audit(_("Starting nova-api node (version %s)"),
-              version.version_string_with_vcs())
-    LOG.debug(_("Full set of FLAGS:"))
-    for flag in FLAGS:
-        flag_get = FLAGS.get(flag, None)
-        LOG.debug("%(flag)s : %(flag_get)s" % locals())
-
-    service = service.serve_wsgi(service.ApiService)
-    service.wait()
+    sys.exit(main())
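The rewritten bin/nova-api hands bootstrapping and process management to nova.service instead of parsing flags and setting up logging inline. The sketch below mirrors only the launch/wait/stop control flow; the Service and Launcher classes here are toy stand-ins written for this illustration, not Nova's nova.service implementation.

    import sys


    class Service(object):
        """Toy stand-in for a named, startable API service."""

        def __init__(self, name):
            self.name = name

        def start(self):
            sys.stdout.write("starting %s\n" % self.name)

        def wait(self):
            # A real service would block here until it is stopped.
            sys.stdout.write("%s finished\n" % self.name)

        def stop(self):
            sys.stdout.write("stopping %s\n" % self.name)


    class Launcher(object):
        """Toy stand-in for a launcher: start many services, wait on all."""

        def __init__(self):
            self.services = []

        def launch_service(self, service):
            service.start()
            self.services.append(service)

        def wait(self):
            for service in self.services:
                service.wait()

        def stop(self):
            for service in self.services:
                service.stop()


    def main():
        launcher = Launcher()
        launcher.launch_service(Service("ec2"))
        launcher.launch_service(Service("osapi"))
        try:
            launcher.wait()
        except KeyboardInterrupt:
            launcher.stop()
        return 0


    if __name__ == '__main__':
        sys.exit(main())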
@@ -93,6 +93,9 @@ if __name__ == '__main__':
     with_req = direct.PostParamsMiddleware(with_json)
     with_auth = direct.DelegatedAuthMiddleware(with_req)

-    server = wsgi.Server()
-    server.start(with_auth, FLAGS.direct_port, host=FLAGS.direct_host)
+    server = wsgi.Server("Direct API",
+                         with_auth,
+                         host=FLAGS.direct_host,
+                         port=FLAGS.direct_port)
+    server.start()
     server.wait()
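Every API binary in this commit switches from server.start(app, port, host=...) to configuring wsgi.Server up front and calling start() with no arguments. A toy sketch of that calling convention is below; the Server class is a stand-in written for illustration, and only the call shape (name, app, host, port, then start/wait) comes from the hunks above.

    class Server(object):
        """Stand-in mirroring the new call shape: name, app, host and port
        are supplied at construction time; start() and wait() take nothing."""

        def __init__(self, name, app, host='0.0.0.0', port=0):
            self.name = name
            self.app = app
            self.host = host
            self.port = port

        def start(self):
            print("%s listening on %s:%s" % (self.name, self.host, self.port))

        def wait(self):
            pass  # a real server blocks here until shutdown


    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello\n']


    server = Server("Direct API", app, host='127.0.0.1', port=8001)
    server.start()
    server.wait()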
@@ -50,6 +50,9 @@ if __name__ == '__main__':
     FLAGS(sys.argv)
     logging.setup()
     router = s3server.S3Application(FLAGS.buckets_path)
-    server = wsgi.Server()
-    server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
+    server = wsgi.Server("S3 Objectstore",
+                         router,
+                         port=FLAGS.s3_port,
+                         host=FLAGS.s3_host)
+    server.start()
     server.wait()
@@ -96,6 +96,9 @@ if __name__ == "__main__":

     service.serve()

-    server = wsgi.Server()
-    server.start(with_auth, FLAGS.vncproxy_port, host=FLAGS.vncproxy_host)
+    server = wsgi.Server("VNC Proxy",
+                         with_auth,
+                         host=FLAGS.vncproxy_host,
+                         port=FLAGS.vncproxy_port)
+    server.start()
     server.wait()
@@ -100,6 +100,11 @@ class OBJECT_CLASS_VIOLATION(Exception):  # pylint: disable=C0103
     pass


+class SERVER_DOWN(Exception):  # pylint: disable=C0103
+    """Duplicate exception class from real LDAP module."""
+    pass
+
+
 def initialize(_uri):
     """Opens a fake connection with an LDAP server."""
     return FakeLDAP()
@@ -202,25 +207,38 @@ def _to_json(unencoded):
     return json.dumps(list(unencoded))


+server_fail = False
+
+
 class FakeLDAP(object):
     """Fake LDAP connection."""

     def simple_bind_s(self, dn, password):
         """This method is ignored, but provided for compatibility."""
+        if server_fail:
+            raise SERVER_DOWN
         pass

     def unbind_s(self):
         """This method is ignored, but provided for compatibility."""
+        if server_fail:
+            raise SERVER_DOWN
         pass

     def add_s(self, dn, attr):
         """Add an object with the specified attributes at dn."""
+        if server_fail:
+            raise SERVER_DOWN
+
         key = "%s%s" % (self.__prefix, dn)
         value_dict = dict([(k, _to_json(v)) for k, v in attr])
         Store.instance().hmset(key, value_dict)

     def delete_s(self, dn):
         """Remove the ldap object at specified dn."""
+        if server_fail:
+            raise SERVER_DOWN
+
         Store.instance().delete("%s%s" % (self.__prefix, dn))

     def modify_s(self, dn, attrs):
@@ -232,6 +250,9 @@ class FakeLDAP(object):
         ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)

         """
+        if server_fail:
+            raise SERVER_DOWN
+
         store = Store.instance()
         key = "%s%s" % (self.__prefix, dn)

@@ -255,6 +276,9 @@ class FakeLDAP(object):
         fields -- fields to return. Returns all fields if not specified

         """
+        if server_fail:
+            raise SERVER_DOWN
+
         if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
             raise NotImplementedError(str(scope))
         store = Store.instance()
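The module-level server_fail flag lets tests simulate an LDAP outage: every FakeLDAP method checks it and raises the fake SERVER_DOWN before doing any work. A minimal self-contained sketch of the same pattern follows; the names mirror the diff, the rest is illustrative.

    server_fail = False


    class SERVER_DOWN(Exception):
        """Stands in for the real LDAP module's SERVER_DOWN."""
        pass


    class FakeConn(object):
        def search_s(self, dn):
            if server_fail:          # toggled by tests to simulate an outage
                raise SERVER_DOWN
            return [(dn, {})]


    conn = FakeConn()
    assert conn.search_s("ou=users") == [("ou=users", {})]

    server_fail = True               # flip the switch: the next call must fail
    try:
        conn.search_s("ou=users")
    except SERVER_DOWN:
        pass
    finally:
        server_fail = False          # always restore, as the new unit test does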
@@ -101,6 +101,41 @@ def sanitize(fn):
     return _wrapped


+class LDAPWrapper(object):
+    def __init__(self, ldap, url, user, password):
+        self.ldap = ldap
+        self.url = url
+        self.user = user
+        self.password = password
+        self.conn = None
+
+    def __wrap_reconnect(f):
+        def inner(self, *args, **kwargs):
+            if self.conn is None:
+                self.connect()
+                return f(self.conn)(*args, **kwargs)
+            else:
+                try:
+                    return f(self.conn)(*args, **kwargs)
+                except self.ldap.SERVER_DOWN:
+                    self.connect()
+                    return f(self.conn)(*args, **kwargs)
+        return inner
+
+    def connect(self):
+        try:
+            self.conn = self.ldap.initialize(self.url)
+            self.conn.simple_bind_s(self.user, self.password)
+        except self.ldap.SERVER_DOWN:
+            self.conn = None
+            raise
+
+    search_s = __wrap_reconnect(lambda conn: conn.search_s)
+    add_s = __wrap_reconnect(lambda conn: conn.add_s)
+    delete_s = __wrap_reconnect(lambda conn: conn.delete_s)
+    modify_s = __wrap_reconnect(lambda conn: conn.modify_s)
+
+
 class LdapDriver(object):
     """Ldap Auth driver

@@ -124,8 +159,8 @@ class LdapDriver(object):
             LdapDriver.project_objectclass = 'novaProject'
         self.__cache = None
         if LdapDriver.conn is None:
-            LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
-            LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
-                                          FLAGS.ldap_password)
+            LdapDriver.conn = LDAPWrapper(self.ldap, FLAGS.ldap_url,
+                                          FLAGS.ldap_user_dn,
+                                          FLAGS.ldap_password)
         if LdapDriver.mc is None:
             LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
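LDAPWrapper builds each proxied LDAP call lazily: connect on first use, and if the server reports SERVER_DOWN, reconnect once and retry the call. The standalone sketch below shows that retry shape with a fake connection instead of a real LDAP module (written for illustration; the wrapper here uses a single-underscore name where the diff uses __wrap_reconnect).

    class ServerDown(Exception):
        """Stand-in for ldap.SERVER_DOWN."""
        pass


    class FakeConn(object):
        """Pretend LDAP connection whose next call can be made to fail."""

        def __init__(self):
            self.fail_next = False

        def search_s(self, base):
            if self.fail_next:
                raise ServerDown()
            return [base]


    class Wrapper(object):
        """Connect lazily; on a dropped connection, reconnect once and retry."""

        def __init__(self):
            self.conn = None

        def connect(self):
            self.conn = FakeConn()

        def _wrap_reconnect(f):
            def inner(self, *args, **kwargs):
                if self.conn is None:
                    self.connect()
                    return f(self.conn)(*args, **kwargs)
                try:
                    return f(self.conn)(*args, **kwargs)
                except ServerDown:
                    self.connect()
                    return f(self.conn)(*args, **kwargs)
            return inner

        search_s = _wrap_reconnect(lambda conn: conn.search_s)


    w = Wrapper()
    assert w.search_s("ou=users") == ["ou=users"]   # first use connects lazily

    w.conn.fail_next = True                          # simulate the server dropping
    assert w.search_s("ou=users") == ["ou=users"]    # wrapper reconnects and retries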
nova/log.py (11 changed lines)
@@ -314,3 +314,14 @@ logging.setLoggerClass(NovaLogger)
 def audit(msg, *args, **kwargs):
     """Shortcut for logging to root log with sevrity 'AUDIT'."""
     logging.root.log(AUDIT, msg, *args, **kwargs)
+
+
+class WritableLogger(object):
+    """A thin wrapper that responds to `write` and logs."""
+
+    def __init__(self, logger, level=logging.INFO):
+        self.logger = logger
+        self.level = level
+
+    def write(self, msg):
+        self.logger.log(self.level, msg)
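WritableLogger only needs to expose write(), so anything that expects a file-like output stream can be pointed at a logger instead. A small runnable sketch follows; the class body is taken from the hunk above, the surrounding usage is illustrative and not part of the commit.

    import logging


    class WritableLogger(object):
        """A thin wrapper that responds to `write` and logs."""

        def __init__(self, logger, level=logging.INFO):
            self.logger = logger
            self.level = level

        def write(self, msg):
            self.logger.log(self.level, msg)


    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger('demo')

    stream = WritableLogger(log, level=logging.DEBUG)
    stream.write("anything written to this 'file' ends up in the log")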
@@ -93,6 +93,26 @@ class InstanceTypeFilter(HostFilter):
         """Use instance_type to filter hosts."""
         return (self._full_name(), instance_type)

+    def _satisfies_extra_specs(self, capabilities, instance_type):
+        """Check that the capabilities provided by the compute service
+        satisfy the extra specs associated with the instance type"""
+
+        if 'extra_specs' not in instance_type:
+            return True
+
+        # Note(lorinh): For now, we are just checking exact matching on the
+        # values. Later on, we want to handle numerical
+        # values so we can represent things like number of GPU cards
+
+        try:
+            for key, value in instance_type['extra_specs'].iteritems():
+                if capabilities[key] != value:
+                    return False
+        except KeyError:
+            return False
+
+        return True
+
     def filter_hosts(self, zone_manager, query):
         """Return a list of hosts that can create instance_type."""
         instance_type = query
@@ -103,7 +123,11 @@ class InstanceTypeFilter(HostFilter):
             disk_bytes = capabilities['disk_available']
             spec_ram = instance_type['memory_mb']
             spec_disk = instance_type['local_gb']
-            if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
+            extra_specs = instance_type['extra_specs']
+
+            if host_ram_mb >= spec_ram and \
+               disk_bytes >= spec_disk and \
+               self._satisfies_extra_specs(capabilities, instance_type):
                 selected_hosts.append((host, capabilities))
         return selected_hosts

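The new check is a straight equality test: every key in the flavor's extra_specs must be present in the host's capabilities with an identical value, and a missing key fails via KeyError. A standalone sketch of the same logic follows; the host data mirrors the test setup below, and the function name is illustrative.

    def satisfies_extra_specs(capabilities, instance_type):
        """Exact-match check, as in InstanceTypeFilter._satisfies_extra_specs."""
        if 'extra_specs' not in instance_type:
            return True
        try:
            for key, value in instance_type['extra_specs'].items():
                if capabilities[key] != value:
                    return False
        except KeyError:
            return False
        return True


    gpu_flavor = {'memory_mb': 50, 'local_gb': 500,
                  'extra_specs': {'xpu_arch': 'fermi', 'xpu_info': 'Tesla 2050'}}

    host07 = {'xpu_arch': 'fermi', 'xpu_info': 'Tesla 2050'}   # matches
    host08 = {'xpu_arch': 'radeon'}                            # wrong arch, missing info
    host09 = {'xpu_arch': 'fermi', 'xpu_info': 'Tesla 2150'}   # wrong card

    assert satisfies_extra_specs(host07, gpu_flavor)
    assert not satisfies_extra_specs(host08, gpu_flavor)
    assert not satisfies_extra_specs(host09, gpu_flavor)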
@@ -67,7 +67,18 @@ class HostFilterTestCase(test.TestCase):
                                       flavorid=1,
                                       swap=500,
                                       rxtx_quota=30000,
-                                      rxtx_cap=200)
+                                      rxtx_cap=200,
+                                      extra_specs={})
+        self.gpu_instance_type = dict(name='tiny.gpu',
+                                      memory_mb=50,
+                                      vcpus=10,
+                                      local_gb=500,
+                                      flavorid=2,
+                                      swap=500,
+                                      rxtx_quota=30000,
+                                      rxtx_cap=200,
+                                      extra_specs={'xpu_arch': 'fermi',
+                                                   'xpu_info': 'Tesla 2050'})

         self.zone_manager = FakeZoneManager()
         states = {}
@@ -75,6 +86,18 @@ class HostFilterTestCase(test.TestCase):
             states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
         self.zone_manager.service_states = states

+        # Add some extra capabilities to some hosts
+        host07 = self.zone_manager.service_states['host07']['compute']
+        host07['xpu_arch'] = 'fermi'
+        host07['xpu_info'] = 'Tesla 2050'
+
+        host08 = self.zone_manager.service_states['host08']['compute']
+        host08['xpu_arch'] = 'radeon'
+
+        host09 = self.zone_manager.service_states['host09']['compute']
+        host09['xpu_arch'] = 'fermi'
+        host09['xpu_info'] = 'Tesla 2150'
+
     def tearDown(self):
         FLAGS.default_host_filter = self.old_flag

@@ -116,6 +139,17 @@ class HostFilterTestCase(test.TestCase):
         self.assertEquals('host05', just_hosts[0])
         self.assertEquals('host10', just_hosts[5])

+    def test_instance_type_filter_extra_specs(self):
+        hf = host_filter.InstanceTypeFilter()
+        # filter all hosts that can support 50 ram and 500 disk
+        name, cooked = hf.instance_type_to_filter(self.gpu_instance_type)
+        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
+                          name)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)
+        self.assertEquals(1, len(hosts))
+        just_hosts = [host for host, caps in hosts]
+        self.assertEquals('host07', just_hosts[0])
+
     def test_json_filter(self):
         hf = host_filter.JsonFilter()
         # filter all hosts that can support 50 ram and 500 disk
@@ -25,6 +25,7 @@ from nova import log as logging
 from nova import test
 from nova.auth import manager
 from nova.api.ec2 import cloud
+from nova.auth import fakeldap

 FLAGS = flags.FLAGS
 LOG = logging.getLogger('nova.tests.auth_unittest')
@@ -369,6 +370,15 @@ class _AuthManagerBaseTestCase(test.TestCase):
 class AuthManagerLdapTestCase(_AuthManagerBaseTestCase):
     auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'

+    def test_reconnect_on_server_failure(self):
+        self.manager.get_users()
+        fakeldap.server_fail = True
+        try:
+            self.assertRaises(fakeldap.SERVER_DOWN, self.manager.get_users)
+        finally:
+            fakeldap.server_fail = False
+        self.manager.get_users()
+

 class AuthManagerDbTestCase(_AuthManagerBaseTestCase):
     auth_driver = 'nova.auth.dbdriver.DbDriver'
@@ -67,7 +67,8 @@ class HostFilterTestCase(test.TestCase):
                                       flavorid=1,
                                       swap=500,
                                       rxtx_quota=30000,
-                                      rxtx_cap=200)
+                                      rxtx_cap=200,
+                                      extra_specs={})

         self.zone_manager = FakeZoneManager()
         states = {}
@@ -69,7 +69,6 @@ from nose import core
 from nose import result

 from nova import log as logging
-from nova.tests import fake_flags


 class _AnsiColorizer(object):