Merged with trunk

Justin Santa Barbara
2011-03-28 08:05:28 -07:00
42 changed files with 1837 additions and 871 deletions

View File

@@ -1,4 +1,5 @@
Andy Smith <code@term.ie>
Andy Southgate <andy.southgate@citrix.com>
Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
@@ -21,6 +22,7 @@ Eldar Nugaev <enugaev@griddynamics.com>
Eric Day <eday@oddments.org>
Eric Windisch <eric@cloudscaling.com>
Ewan Mellor <ewan.mellor@citrix.com>
Gabe Westmaas <gabe.westmaas@rackspace.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com>
@@ -33,6 +35,7 @@ Jonathan Bryce <jbryce@jbryce.com>
Jordan Rinke <jordan@openstack.org>
Josh Durgin <joshd@hq.newdream.net>
Josh Kearney <josh@jk0.org>
Josh Kleinpeter <josh@kleinpeter.org>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Kei Masumoto <masumotok@nttdata.co.jp>
@@ -60,6 +63,7 @@ Ryan Lane <rlane@wikimedia.org>
Ryan Lucio <rlucio@internap.com>
Salvatore Orlando <salvatore.orlando@eu.citrix.com>
Sandy Walsh <sandy.walsh@rackspace.com>
Sateesh Chodapuneedi <sateesh.chodapuneedi@citrix.com>
Soren Hansen <soren.hansen@rackspace.com>
Thierry Carrez <thierry@openstack.org>
Todd Willey <todd@ansolabs.com>

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# pylint: disable=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# pylint: disable=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# pylint: disable=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -34,12 +34,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import compute
from nova import flags
from nova import log as logging
from nova import network
from nova import utils
from nova import volume
from nova import wsgi
from nova.api import direct
from nova.compute import api as compute_api
FLAGS = flags.FLAGS
@@ -50,13 +52,42 @@ flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
# An example of an API that only exposes read-only methods.
# In this case we're just limiting which methods are exposed.
class ReadOnlyCompute(direct.Limited):
"""Read-only Compute API."""
_allowed = ['get', 'get_all', 'get_console_output']
# An example of an API that provides a backwards compatibility layer.
# In this case we're overwriting the implementation to ensure
# compatibility with an older version. In reality we would want the
# "description=None" to be part of the actual API so that code
# like this isn't even necessary, but this example shows what one can
# do if that isn't the situation.
class VolumeVersionOne(direct.Limited):
_allowed = ['create', 'delete', 'update', 'get']
def create(self, context, size, name):
self.proxy.create(context, size, name, description=None)
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
direct.register_service('compute', compute_api.API())
direct.register_service('compute', compute.API())
direct.register_service('volume', volume.API())
direct.register_service('network', network.API())
direct.register_service('reflect', direct.Reflection())
# Here is how we could expose the code in the examples above.
#direct.register_service('compute-readonly',
# ReadOnlyCompute(compute.API()))
#direct.register_service('volume-v1', VolumeVersionOne(volume.API()))
router = direct.Router()
with_json = direct.JsonParamsMiddleware(router)
with_req = direct.PostParamsMiddleware(with_json)

View File

@@ -50,7 +50,7 @@ if __name__ == '__main__':
if __name__ == '__builtin__':
LOG.warn(_('Starting instance monitor'))
# pylint: disable-msg=C0103
# pylint: disable=C0103
monitor = monitor.InstanceMonitor()
# This is the parent service that twistd will be looking for when it

View File

@@ -97,6 +97,7 @@ flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DECLARE('images_path', 'nova.image.local')
flags.DECLARE('libvirt_type', 'nova.virt.libvirt_conn')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
@@ -518,11 +519,12 @@ class NetworkCommands(object):
network_size=None, vlan_start=None,
vpn_start=None, fixed_range_v6=None, label='public'):
"""Creates fixed ips for host by range
arguments: [fixed_range=FLAG], [num_networks=FLAG],
arguments: fixed_range=FLAG, [num_networks=FLAG],
[network_size=FLAG], [vlan_start=FLAG],
[vpn_start=FLAG], [fixed_range_v6=FLAG]"""
if not fixed_range:
fixed_range = FLAGS.fixed_range
raise TypeError(_('Fixed range in the form of 10.0.0.0/8 is '
'required to create networks.'))
if not num_networks:
num_networks = FLAGS.num_networks
if not network_size:
@@ -579,8 +581,10 @@ class VmCommands(object):
ctxt = context.get_admin_context()
instance_id = ec2utils.ec2_id_to_id(ec2_id)
if FLAGS.connection_type != 'libvirt':
msg = _('Only KVM is supported for now. Sorry!')
if (FLAGS.connection_type != 'libvirt' or
(FLAGS.connection_type == 'libvirt' and
FLAGS.libvirt_type not in ['kvm', 'qemu'])):
msg = _('Only KVM and QEmu are supported for now. Sorry!')
raise exception.Error(msg)
if (FLAGS.volume_driver != 'nova.volume.driver.AOEDriver' and \
@@ -607,7 +611,7 @@ class ServiceCommands(object):
args: [host] [service]"""
ctxt = context.get_admin_context()
now = datetime.datetime.utcnow()
services = db.service_get_all(ctxt) + db.service_get_all(ctxt, True)
services = db.service_get_all(ctxt)
if host:
services = [s for s in services if s['host'] == host]
if service:
@@ -872,7 +876,7 @@ class InstanceTypeCommands(object):
if name == None:
inst_types = instance_types.get_all_types()
elif name == "--all":
inst_types = instance_types.get_all_types(1)
inst_types = instance_types.get_all_types(True)
else:
inst_types = instance_types.get_instance_type(name)
except exception.DBError, e:

View File

@@ -36,9 +36,10 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import utils
from nova import twistd
from nova.objectstore import handler
from nova import wsgi
from nova.objectstore import s3server
FLAGS = flags.FLAGS
@@ -46,7 +47,9 @@ FLAGS = flags.FLAGS
if __name__ == '__main__':
utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
application = handler.get_application() # pylint: disable-msg=C0103
FLAGS(sys.argv)
logging.setup()
router = s3server.S3Application(FLAGS.buckets_path)
server = wsgi.Server()
server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
server.wait()

View File

@@ -59,11 +59,21 @@ USAGE = """usage: stack [options] <controller> <method> [arg1=value arg2=value]
def format_help(d):
"""Format help text, keys are labels and values are descriptions."""
MAX_INDENT = 30
indent = max([len(k) for k in d])
if indent > MAX_INDENT:
indent = MAX_INDENT - 6
out = []
for k, v in d.iteritems():
t = textwrap.TextWrapper(initial_indent=' %s ' % k.ljust(indent),
subsequent_indent=' ' * (indent + 6))
if (len(k) + 6) > MAX_INDENT:
out.extend([' %s' % k])
initial_indent = ' ' * (indent + 6)
else:
initial_indent = ' %s ' % k.ljust(indent)
subsequent_indent = ' ' * (indent + 6)
t = textwrap.TextWrapper(initial_indent=initial_indent,
subsequent_indent=subsequent_indent)
out.extend(t.wrap(v))
return out
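
For illustration, a hedged usage sketch of the reworked wrapper (the labels and descriptions below are invented, not part of this commit): a short label keeps the aligned-column layout, while a label longer than MAX_INDENT is emitted on its own line with its description wrapped underneath.

sample = {
    'servers/list': 'List all servers visible to the caller.',
    'servers/really_long_administrative_action_name':
        'Over-long labels fall back to their own line, with the '
        'description wrapped at the shared indent.',
}
print '\n'.join(format_help(sample))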

View File

@@ -162,6 +162,8 @@ class DbDriver(object):
values['description'] = description
db.project_update(context.get_admin_context(), project_id, values)
if not self.is_in_project(manager_uid, project_id):
self.add_to_project(manager_uid, project_id)
def add_to_project(self, uid, project_id):
"""Add user to project"""

View File

@@ -90,12 +90,12 @@ MOD_DELETE = 1
MOD_REPLACE = 2
class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103
class NO_SUCH_OBJECT(Exception): # pylint: disable=C0103
"""Duplicate exception class from real LDAP module."""
pass
class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103
class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103
"""Duplicate exception class from real LDAP module."""
pass
@@ -268,7 +268,7 @@ class FakeLDAP(object):
# get the attributes from the store
attrs = store.hgetall(key)
# turn the values from the store into lists
# pylint: disable-msg=E1103
# pylint: disable=E1103
attrs = dict([(k, _from_json(v))
for k, v in attrs.iteritems()])
# filter the objects by query
@@ -277,12 +277,12 @@ class FakeLDAP(object):
attrs = dict([(k, v) for k, v in attrs.iteritems()
if not fields or k in fields])
objects.append((key[len(self.__prefix):], attrs))
# pylint: enable-msg=E1103
# pylint: enable=E1103
if objects == []:
raise NO_SUCH_OBJECT()
return objects
@property
def __prefix(self): # pylint: disable-msg=R0201
def __prefix(self): # pylint: disable=R0201
"""Get the prefix to use for all keys."""
return 'ldap:'

View File

@@ -275,6 +275,8 @@ class LdapDriver(object):
attr.append((self.ldap.MOD_REPLACE, 'description', description))
dn = self.__project_to_dn(project_id)
self.conn.modify_s(dn, attr)
if not self.is_in_project(manager_uid, project_id):
self.add_to_project(manager_uid, project_id)
@sanitize
def add_to_project(self, uid, project_id):
@@ -632,6 +634,6 @@ class LdapDriver(object):
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
def __init__(self): # pylint: disable-msg=W0231
def __init__(self): # pylint: disable=W0231
__import__('nova.auth.fakeldap')
self.ldap = sys.modules['nova.auth.fakeldap']

View File

@@ -22,7 +22,7 @@ Nova authentication management
import os
import shutil
import string # pylint: disable-msg=W0402
import string # pylint: disable=W0402
import tempfile
import uuid
import zipfile
@@ -96,10 +96,19 @@ class AuthBase(object):
class User(AuthBase):
"""Object representing a user"""
"""Object representing a user
The following attributes are defined:
:id: A system identifier for the user. A string (for LDAP)
:name: The user name, potentially in some more friendly format
:access: The 'username' for EC2 authentication
:secret: The 'password' for EC2 authentication
:admin: ???
"""
def __init__(self, id, name, access, secret, admin):
AuthBase.__init__(self)
assert isinstance(id, basestring)
self.id = id
self.name = name
self.access = access

View File

@@ -298,10 +298,14 @@ DEFINE_string('ec2_dmz_host', '$my_ip', 'internal ip of api server')
DEFINE_integer('ec2_port', 8773, 'cloud controller port')
DEFINE_string('ec2_scheme', 'http', 'prefix for ec2')
DEFINE_string('ec2_path', '/services/Cloud', 'suffix for ec2')
DEFINE_string('osapi_extensions_path', '/var/lib/nova/extensions',
'default directory for nova extensions')
DEFINE_string('osapi_host', '$my_ip', 'ip of api server')
DEFINE_string('osapi_scheme', 'http', 'prefix for openstack')
DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
DEFINE_string('osapi_path', '/v1.0/', 'suffix for openstack')
DEFINE_integer('osapi_max_limit', 1000,
'max number of items returned in a collection response')
DEFINE_string('default_project', 'openstack', 'default project for openstack')
DEFINE_string('default_image', 'ami-11111',
@@ -358,5 +362,6 @@ DEFINE_string('node_availability_zone', 'nova',
'availability zone of this node')
DEFINE_string('zone_name', 'nova', 'name of this zone')
DEFINE_string('zone_capabilities', 'kypervisor:xenserver;os:linux',
'Key/Value tags which represent capabilities of this zone')
DEFINE_list('zone_capabilities',
['hypervisor=xenserver;kvm', 'os=linux;windows'],
'Key/Multi-value list representing capabilities of this zone')
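
For context, a hedged sketch of how the new 'key=value1;value2' entries could be turned into a dict of multi-valued capabilities. The helper below is hypothetical; the real consumer of this flag lives elsewhere in the tree.

def parse_zone_capabilities(cap_list):
    """Turn ['hypervisor=xenserver;kvm', 'os=linux;windows'] into a dict."""
    caps = {}
    for entry in cap_list:
        key, _sep, values = entry.partition('=')
        caps[key] = values.split(';') if values else []
    return caps

# parse_zone_capabilities(FLAGS.zone_capabilities)
# => {'hypervisor': ['xenserver', 'kvm'], 'os': ['linux', 'windows']}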

View File

@@ -62,7 +62,7 @@ class Connection(carrot_connection.BrokerConnection):
params['backend_cls'] = fakerabbit.Backend
# NOTE(vish): magic is fun!
# pylint: disable-msg=W0142
# pylint: disable=W0142
if new:
return cls(**params)
else:
@@ -114,7 +114,7 @@ class Consumer(messaging.Consumer):
if self.failed_connection:
# NOTE(vish): connection is defined in the parent class, we can
# recreate it as long as we create the backend too
# pylint: disable-msg=W0201
# pylint: disable=W0201
self.connection = Connection.recreate()
self.backend = self.connection.create_backend()
self.declare()
@@ -125,7 +125,7 @@ class Consumer(messaging.Consumer):
# NOTE(vish): This is catching all errors because we really don't
# want exceptions to be logged 10 times a second if some
# persistent failure occurs.
except Exception: # pylint: disable-msg=W0703
except Exception: # pylint: disable=W0703
if not self.failed_connection:
LOG.exception(_("Failed to fetch message from queue"))
self.failed_connection = True
@@ -137,24 +137,7 @@ class Consumer(messaging.Consumer):
return timer
class Publisher(messaging.Publisher):
"""Publisher base class"""
pass
class TopicConsumer(Consumer):
"""Consumes messages on a specific topic"""
exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast"):
self.queue = topic
self.routing_key = topic
self.exchange = FLAGS.control_exchange
self.durable = False
super(TopicConsumer, self).__init__(connection=connection)
class AdapterConsumer(TopicConsumer):
class AdapterConsumer(Consumer):
"""Calls methods on a proxy object based on method and args"""
def __init__(self, connection=None, topic="broadcast", proxy=None):
LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
@@ -207,6 +190,41 @@ class AdapterConsumer(TopicConsumer):
return
class Publisher(messaging.Publisher):
"""Publisher base class"""
pass
class TopicAdapterConsumer(AdapterConsumer):
"""Consumes messages on a specific topic"""
exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast", proxy=None):
self.queue = topic
self.routing_key = topic
self.exchange = FLAGS.control_exchange
self.durable = False
super(TopicAdapterConsumer, self).__init__(connection=connection,
topic=topic, proxy=proxy)
class FanoutAdapterConsumer(AdapterConsumer):
"""Consumes messages from a fanout exchange"""
exchange_type = "fanout"
def __init__(self, connection=None, topic="broadcast", proxy=None):
self.exchange = "%s_fanout" % topic
self.routing_key = topic
unique = uuid.uuid4().hex
self.queue = "%s_fanout_%s" % (topic, unique)
self.durable = False
LOG.info(_("Created '%(exchange)s' fanout exchange "
"with '%(key)s' routing key"),
dict(exchange=self.exchange, key=self.routing_key))
super(FanoutAdapterConsumer, self).__init__(connection=connection,
topic=topic, proxy=proxy)
class TopicPublisher(Publisher):
"""Publishes messages on a specific topic"""
exchange_type = "topic"
@@ -218,6 +236,19 @@ class TopicPublisher(Publisher):
super(TopicPublisher, self).__init__(connection=connection)
class FanoutPublisher(Publisher):
"""Publishes messages to a fanout exchange."""
exchange_type = "fanout"
def __init__(self, topic, connection=None):
self.exchange = "%s_fanout" % topic
self.queue = "%s_fanout" % topic
self.durable = False
LOG.info(_("Creating '%(exchange)s' fanout exchange"),
dict(exchange=self.exchange))
super(FanoutPublisher, self).__init__(connection=connection)
class DirectConsumer(Consumer):
"""Consumes messages directly on a channel specified by msg_id"""
exchange_type = "direct"
@@ -311,7 +342,7 @@ def _pack_context(msg, context):
def call(context, topic, msg):
"""Sends a message on a topic and wait for a response"""
LOG.debug(_("Making asynchronous call..."))
LOG.debug(_("Making asynchronous call on %s ..."), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_("MSG_ID is %s") % (msg_id))
@@ -352,7 +383,7 @@ def call(context, topic, msg):
def cast(context, topic, msg):
"""Sends a message on a topic without waiting for a response"""
LOG.debug(_("Making asynchronous cast..."))
LOG.debug(_("Making asynchronous cast on %s..."), topic)
_pack_context(msg, context)
conn = Connection.instance()
publisher = TopicPublisher(connection=conn, topic=topic)
@@ -360,6 +391,16 @@ def cast(context, topic, msg):
publisher.close()
def fanout_cast(context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response"""
LOG.debug(_("Making asynchronous fanout cast..."))
_pack_context(msg, context)
conn = Connection.instance()
publisher = FanoutPublisher(topic, connection=conn)
publisher.send(msg)
publisher.close()
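
For reference, a hedged sketch of how a caller uses the new fanout primitive; it mirrors update_service_capabilities in nova/scheduler/api.py later in this commit, and the capability values shown are invented.

# Broadcast to every scheduler worker bound to the 'scheduler' fanout
# exchange, without waiting for a reply:
rpc.fanout_cast(context, 'scheduler',
                {'method': 'update_service_capabilities',
                 'args': {'service_name': 'compute',
                          'host': 'host1',
                          'capabilities': {'free_ram_mb': 2048}}})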
def generic_response(message_data, message):
"""Logs a result and exits"""
LOG.debug(_('response %s'), message_data)

View File

@@ -17,18 +17,25 @@
Handles all requests relating to schedulers.
"""
import novaclient
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from eventlet import greenpool
FLAGS = flags.FLAGS
flags.DEFINE_bool('enable_zone_routing',
False,
'When True, routing to child zones will occur.')
LOG = logging.getLogger('nova.scheduler.api')
class API(object):
"""API for interacting with the scheduler."""
def _call_scheduler(self, method, context, params=None):
def _call_scheduler(method, context, params=None):
"""Generic handler for RPC calls to the scheduler.
:param params: Optional dictionary of arguments to be passed to the
@@ -42,8 +49,193 @@ class API(object):
kwargs = {'method': method, 'args': params}
return rpc.call(context, queue, kwargs)
def get_zone_list(self, context):
items = self._call_scheduler('get_zone_list', context)
def get_zone_list(context):
"""Return a list of zones assoicated with this zone."""
items = _call_scheduler('get_zone_list', context)
for item in items:
item['api_url'] = item['api_url'].replace('\\/', '/')
if not items:
items = db.zone_get_all(context)
return items
def zone_get(context, zone_id):
return db.zone_get(context, zone_id)
def zone_delete(context, zone_id):
return db.zone_delete(context, zone_id)
def zone_create(context, data):
return db.zone_create(context, data)
def zone_update(context, zone_id, data):
return db.zone_update(context, zone_id, data)
def get_zone_capabilities(context, service=None):
"""Returns a dict of key, value capabilities for this zone,
or for a particular class of services running in this zone."""
return _call_scheduler('get_zone_capabilities', context=context,
params=dict(service=service))
def update_service_capabilities(context, service_name, host, capabilities):
"""Send an update to all the scheduler services informing them
of the capabilities of this service."""
kwargs = dict(method='update_service_capabilities',
args=dict(service_name=service_name, host=host,
capabilities=capabilities))
return rpc.fanout_cast(context, 'scheduler', kwargs)
def _wrap_method(function, self):
"""Wrap method to supply self."""
def _wrap(*args, **kwargs):
return function(self, *args, **kwargs)
return _wrap
def _process(func, zone):
"""Worker stub for green thread pool. Give the worker
an authenticated nova client and zone info."""
nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
nova.authenticate()
return func(nova, zone)
def child_zone_helper(zone_list, func):
"""Fire off a command to each zone in the list.
The return is [novaclient return objects] from each child zone.
For example, if you are calling server.pause(), the list will
be whatever the response from server.pause() is. One entry
per child zone called."""
green_pool = greenpool.GreenPool()
return [result for result in green_pool.imap(
_wrap_method(_process, func), zone_list)]
def _issue_novaclient_command(nova, zone, collection, method_name, item_id):
"""Use novaclient to issue command to a single child zone.
One of these will be run in parallel for each child zone."""
manager = getattr(nova, collection)
result = None
try:
try:
result = manager.get(int(item_id))
except ValueError, e:
result = manager.find(name=item_id)
except novaclient.NotFound:
url = zone.api_url
LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" %
locals()))
return None
if method_name.lower() not in ['get', 'find']:
result = getattr(result, method_name)()
return result
def wrap_novaclient_function(f, collection, method_name, item_id):
"""Appends collection, method_name and item_id to the incoming
(nova, zone) call from child_zone_helper."""
def inner(nova, zone):
return f(nova, zone, collection, method_name, item_id)
return inner
class RedirectResult(exception.Error):
"""Used to the HTTP API know that these results are pre-cooked
and they can be returned to the caller directly."""
def __init__(self, results):
self.results = results
super(RedirectResult, self).__init__(
message=_("Uncaught Zone redirection exception"))
class reroute_compute(object):
"""Decorator used to indicate that the method should
delegate the call to the child zones if the db query
can't find anything."""
def __init__(self, method_name):
self.method_name = method_name
def __call__(self, f):
def wrapped_f(*args, **kwargs):
collection, context, item_id = \
self.get_collection_context_and_id(args, kwargs)
try:
# Call the original function ...
return f(*args, **kwargs)
except exception.InstanceNotFound, e:
LOG.debug(_("Instance %(item_id)s not found "
"locally: '%(e)s'" % locals()))
if not FLAGS.enable_zone_routing:
raise
zones = db.zone_get_all(context)
if not zones:
raise
# Ask the children to provide an answer ...
LOG.debug(_("Asking child zones ..."))
result = self._call_child_zones(zones,
wrap_novaclient_function(_issue_novaclient_command,
collection, self.method_name, item_id))
# Scrub the results and raise another exception
# so the API layers can bail out gracefully ...
raise RedirectResult(self.unmarshall_result(result))
return wrapped_f
def _call_child_zones(self, zones, function):
"""Ask the child zones to perform this operation.
Broken out for testing."""
return child_zone_helper(zones, function)
def get_collection_context_and_id(self, args, kwargs):
"""Returns a tuple of (novaclient collection name, security
context, and resource id). Derived classes should override this."""
context = kwargs.get('context', None)
instance_id = kwargs.get('instance_id', None)
if len(args) > 0 and not context:
context = args[1]
if len(args) > 1 and not instance_id:
instance_id = args[2]
return ("servers", context, instance_id)
def unmarshall_result(self, zone_responses):
"""Result is a list of responses from each child zone.
Each decorator derivation is responsible for turning this
into a format expected by the calling method. For
example, this one is expected to return a single Server
dict {'server':{k:v}}. Others may return a list of them, like
{'servers':[{k,v}]}"""
reduced_response = []
for zone_response in zone_responses:
if not zone_response:
continue
server = zone_response.__dict__
for k in server.keys():
if k[0] == '_' or k == 'manager':
del server[k]
reduced_response.append(dict(server=server))
if reduced_response:
return reduced_response[0] # first for now.
return {}
def redirect_handler(f):
def new_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except RedirectResult, e:
return e.results
return new_f
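
To show the intended wiring, a minimal sketch combining reroute_compute with redirect_handler. The class, method, and handler names below are assumptions; the real call sites are outside this hunk.

class ComputeApiSketch(object):
    """Hypothetical stand-in for a compute API class."""

    @reroute_compute("pause")
    def pause(self, context, instance_id=None):
        # Assumes the local lookup raises exception.InstanceNotFound when
        # the instance lives in a child zone; the decorator then issues
        # 'pause' against the child zones via novaclient and raises
        # RedirectResult with their answer.
        return db.instance_get(context, instance_id)


@redirect_handler
def pause_action(context, instance_id):
    # A RedirectResult raised by the decorator is caught here and its
    # pre-cooked results are handed straight back to the caller.
    return ComputeApiSketch().pause(context, instance_id=instance_id)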

View File

@@ -58,8 +58,9 @@ class ZoneState(object):
child zone."""
self.last_seen = datetime.now()
self.attempt = 0
self.name = zone_metadata["name"]
self.capabilities = zone_metadata["capabilities"]
self.name = zone_metadata.get("name", "n/a")
self.capabilities = ", ".join(["%s=%s" % (k, v)
for k, v in zone_metadata.iteritems() if k != 'name'])
self.is_active = True
def to_dict(self):
@@ -104,13 +105,37 @@ class ZoneManager(object):
"""Keeps the zone states updated."""
def __init__(self):
self.last_zone_db_check = datetime.min
self.zone_states = {}
self.zone_states = {} # { <zone_id> : ZoneState }
self.service_states = {} # { <service> : { <host> : { cap k : v }}}
self.green_pool = greenpool.GreenPool()
def get_zone_list(self):
"""Return the list of zones we know about."""
return [zone.to_dict() for zone in self.zone_states.values()]
def get_zone_capabilities(self, context, service=None):
"""Roll up all the individual host info to generic 'service'
capabilities. Each capability is aggregated into
<cap>_min and <cap>_max values."""
service_dict = self.service_states
if service:
service_dict = {service: self.service_states.get(service, {})}
# TODO(sandy) - be smarter about fabricating this structure.
# But it's likely to change once we understand what the Best-Match
# code will need better.
combined = {} # { <service>_<cap> : (min, max), ... }
for service_name, host_dict in service_dict.iteritems():
for host, caps_dict in host_dict.iteritems():
for cap, value in caps_dict.iteritems():
key = "%s_%s" % (service_name, cap)
min_value, max_value = combined.get(key, (value, value))
min_value = min(min_value, value)
max_value = max(max_value, value)
combined[key] = (min_value, max_value)
return combined
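
A hedged worked example of the min/max roll-up (the host names and values are invented):

zm = ZoneManager()
zm.service_states = {'compute': {'host1': {'free_ram_mb': 2048},
                                 'host2': {'free_ram_mb': 4096}}}
print zm.get_zone_capabilities(context=None)
# => {'compute_free_ram_mb': (2048, 4096)}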
def _refresh_from_db(self, context):
"""Make our zone state map match the db."""
# Add/update existing zones ...
@@ -141,3 +166,11 @@ class ZoneManager(object):
self.last_zone_db_check = datetime.now()
self._refresh_from_db(context)
self._poll_zones(context)
def update_service_capabilities(self, service_name, host, capabilities):
"""Update the per-service capabilities based on this notification."""
logging.debug(_("Received %(service_name)s service update from "
"%(host)s: %(capabilities)s") % locals())
service_caps = self.service_states.get(service_name, {})
service_caps[host] = capabilities
self.service_states[service_name] = service_caps

View File

@@ -51,7 +51,7 @@ class HyperVTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, instance)
conn = hyperv.get_connection(False)
conn._create_vm(instance_ref) # pylint: disable-msg=W0212
conn._create_vm(instance_ref) # pylint: disable=W0212
found = [n for n in conn.list_instances()
if n == instance_ref['name']]
self.assertTrue(len(found) == 1)

View File

@@ -1,315 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the S3 objectstore clone.
"""
import boto
import glob
import hashlib
import os
import shutil
import tempfile
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
from twisted.internet import reactor, threads, defer
from twisted.web import http, server
from nova import context
from nova import flags
from nova import objectstore
from nova import test
from nova.auth import manager
from nova.exception import NotEmpty, NotFound
from nova.objectstore import image
from nova.objectstore.handler import S3
FLAGS = flags.FLAGS
# Create a unique temporary directory. We don't delete after test to
# allow checking the contents after running tests. Users and/or tools
# running the tests need to remove the tests directories.
OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
# Create bucket/images path
os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
class ObjectStoreTestCase(test.TestCase):
"""Test objectstore API directly."""
def setUp(self):
"""Setup users and projects."""
super(ObjectStoreTestCase, self).setUp()
self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
images_path=os.path.join(OSS_TEMPDIR, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
self.auth_manager = manager.AuthManager()
self.auth_manager.create_user('user1')
self.auth_manager.create_user('user2')
self.auth_manager.create_user('admin_user', admin=True)
self.auth_manager.create_project('proj1', 'user1', 'a proj', ['user1'])
self.auth_manager.create_project('proj2', 'user2', 'a proj', ['user2'])
self.context = context.RequestContext('user1', 'proj1')
def tearDown(self):
"""Tear down users and projects."""
self.auth_manager.delete_project('proj1')
self.auth_manager.delete_project('proj2')
self.auth_manager.delete_user('user1')
self.auth_manager.delete_user('user2')
self.auth_manager.delete_user('admin_user')
super(ObjectStoreTestCase, self).tearDown()
def test_buckets(self):
"""Test the bucket API."""
objectstore.bucket.Bucket.create('new_bucket', self.context)
bucket = objectstore.bucket.Bucket('new_bucket')
# creator is authorized to use bucket
self.assert_(bucket.is_authorized(self.context))
# another user is not authorized
context2 = context.RequestContext('user2', 'proj2')
self.assertFalse(bucket.is_authorized(context2))
# admin is authorized to use bucket
admin_context = context.RequestContext('admin_user', None)
self.assertTrue(bucket.is_authorized(admin_context))
# new buckets are empty
self.assertTrue(bucket.list_keys()['Contents'] == [])
# storing keys works
bucket['foo'] = "bar"
self.assertEquals(len(bucket.list_keys()['Contents']), 1)
self.assertEquals(bucket['foo'].read(), 'bar')
# md5 of key works
self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest())
# deleting non-empty bucket should throw a NotEmpty exception
self.assertRaises(NotEmpty, bucket.delete)
# deleting key
del bucket['foo']
# deleting empty bucket
bucket.delete()
# accessing deleted bucket throws exception
self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')
def test_images(self):
self.do_test_images('1mb.manifest.xml', True,
'image_bucket1', 'i-testing1')
def test_images_no_kernel_or_ramdisk(self):
self.do_test_images('1mb.no_kernel_or_ramdisk.manifest.xml',
False, 'image_bucket2', 'i-testing2')
def do_test_images(self, manifest_file, expect_kernel_and_ramdisk,
image_bucket, image_name):
"Test the image API."
# create a bucket for our bundle
objectstore.bucket.Bucket.create(image_bucket, self.context)
bucket = objectstore.bucket.Bucket(image_bucket)
# upload an image manifest/parts
bundle_path = os.path.join(os.path.dirname(__file__), 'bundle')
for path in glob.glob(bundle_path + '/*'):
bucket[os.path.basename(path)] = open(path, 'rb').read()
# register an image
image.Image.register_aws_image(image_name,
'%s/%s' % (image_bucket, manifest_file),
self.context)
# verify image
my_img = image.Image(image_name)
result_image_file = os.path.join(my_img.path, 'image')
self.assertEqual(os.stat(result_image_file).st_size, 1048576)
sha = hashlib.sha1(open(result_image_file).read()).hexdigest()
self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3')
if expect_kernel_and_ramdisk:
# Verify the default kernel and ramdisk are set
self.assertEqual(my_img.metadata['kernelId'], 'aki-test')
self.assertEqual(my_img.metadata['ramdiskId'], 'ari-test')
else:
# Verify that the default kernel and ramdisk (the one from FLAGS)
# doesn't get embedded in the metadata
self.assertFalse('kernelId' in my_img.metadata)
self.assertFalse('ramdiskId' in my_img.metadata)
# verify image permissions
context2 = context.RequestContext('user2', 'proj2')
self.assertFalse(my_img.is_authorized(context2))
# change user-editable fields
my_img.update_user_editable_fields({'display_name': 'my cool image'})
self.assertEqual('my cool image', my_img.metadata['displayName'])
my_img.update_user_editable_fields({'display_name': ''})
self.assert_(not my_img.metadata['displayName'])
class TestHTTPChannel(http.HTTPChannel):
"""Dummy site required for twisted.web"""
def checkPersistence(self, _, __): # pylint: disable-msg=C0103
"""Otherwise we end up with an unclean reactor."""
return False
class TestSite(server.Site):
"""Dummy site required for twisted.web"""
protocol = TestHTTPChannel
class S3APITestCase(test.TestCase):
"""Test objectstore through S3 API."""
def setUp(self):
"""Setup users, projects, and start a test server."""
super(S3APITestCase, self).setUp()
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
FLAGS.buckets_path = os.path.join(OSS_TEMPDIR, 'buckets')
self.auth_manager = manager.AuthManager()
self.admin_user = self.auth_manager.create_user('admin', admin=True)
self.admin_project = self.auth_manager.create_project('admin',
self.admin_user)
shutil.rmtree(FLAGS.buckets_path)
os.mkdir(FLAGS.buckets_path)
root = S3()
self.site = TestSite(root)
# pylint: disable-msg=E1101
self.listening_port = reactor.listenTCP(0, self.site,
interface='127.0.0.1')
# pylint: enable-msg=E1101
self.tcp_port = self.listening_port.getHost().port
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.set('Boto', 'num_retries', '0')
self.conn = S3Connection(aws_access_key_id=self.admin_user.access,
aws_secret_access_key=self.admin_user.secret,
host='127.0.0.1',
port=self.tcp_port,
is_secure=False,
calling_format=OrdinaryCallingFormat())
def get_http_connection(host, is_secure):
"""Get a new S3 connection, don't attempt to reuse connections."""
return self.conn.new_http_connection(host, is_secure)
self.conn.get_http_connection = get_http_connection
def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111
self.assertEquals(len(buckets), 0, "Bucket list was not empty")
return True
def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111
self.assertEquals(len(buckets), 1,
"Bucket list didn't have exactly one element in it")
self.assertEquals(buckets[0].name, name, "Wrong name")
return True
def test_000_list_buckets(self):
"""Make sure we are starting with no buckets."""
deferred = threads.deferToThread(self.conn.get_all_buckets)
deferred.addCallback(self._ensure_no_buckets)
return deferred
def test_001_create_and_delete_bucket(self):
"""Test bucket creation and deletion."""
bucket_name = 'testbucket'
deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
deferred.addCallback(lambda _:
threads.deferToThread(self.conn.get_all_buckets))
deferred.addCallback(self._ensure_one_bucket, bucket_name)
deferred.addCallback(lambda _:
threads.deferToThread(self.conn.delete_bucket,
bucket_name))
deferred.addCallback(lambda _:
threads.deferToThread(self.conn.get_all_buckets))
deferred.addCallback(self._ensure_no_buckets)
return deferred
def test_002_create_bucket_and_key_and_delete_key_again(self):
"""Test key operations on buckets."""
bucket_name = 'testbucket'
key_name = 'somekey'
key_contents = 'somekey'
deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
deferred.addCallback(lambda b:
threads.deferToThread(b.new_key, key_name))
deferred.addCallback(lambda k:
threads.deferToThread(k.set_contents_from_string,
key_contents))
def ensure_key_contents(bucket_name, key_name, contents):
"""Verify contents for a key in the given bucket."""
bucket = self.conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
self.assertEquals(key.get_contents_as_string(), contents,
"Bad contents")
deferred.addCallback(lambda _:
threads.deferToThread(ensure_key_contents,
bucket_name, key_name,
key_contents))
def delete_key(bucket_name, key_name):
"""Delete a key for the given bucket."""
bucket = self.conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.delete()
deferred.addCallback(lambda _:
threads.deferToThread(delete_key, bucket_name,
key_name))
deferred.addCallback(lambda _:
threads.deferToThread(self.conn.get_bucket,
bucket_name))
deferred.addCallback(lambda b: threads.deferToThread(b.get_all_keys))
deferred.addCallback(self._ensure_no_buckets)
return deferred
def tearDown(self):
"""Tear down auth and test server."""
self.auth_manager.delete_user('admin')
self.auth_manager.delete_project('admin')
stop_listening = defer.maybeDeferred(self.listening_port.stopListening)
super(S3APITestCase, self).tearDown()
return defer.DeferredList([stop_listening])

View File

@@ -20,6 +20,7 @@
import boto
from boto.ec2 import regioninfo
from boto.exception import EC2ResponseError
import datetime
import httplib
import random
@@ -124,7 +125,7 @@ class ApiEc2TestCase(test.TestCase):
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
self.http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable-msg=E1103
# pylint: disable=E1103
self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
return self.http
@@ -177,6 +178,17 @@ class ApiEc2TestCase(test.TestCase):
self.manager.delete_project(project)
self.manager.delete_user(user)
def test_terminate_invalid_instance(self):
"""Attempt to terminate an invalid instance"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
self.assertRaises(EC2ResponseError, self.ec2.terminate_instances,
"i-00000005")
self.manager.delete_project(project)
self.manager.delete_user(user)
def test_get_all_key_pairs(self):
"""Test that, after creating a user and project and generating
a key pair, the API call to list key pairs works properly"""

View File

@@ -299,6 +299,13 @@ class _AuthManagerBaseTestCase(test.TestCase):
self.assertEqual('test2', project.project_manager_id)
self.assertEqual('new desc', project.description)
def test_modify_project_adds_new_manager(self):
with user_and_project_generator(self.manager):
with user_generator(self.manager, name='test2'):
self.manager.modify_project('testproj', 'test2', 'new desc')
project = self.manager.get_project('testproj')
self.assertTrue('test2' in project.member_ids)
def test_can_delete_project(self):
with user_generator(self.manager):
self.manager.create_project('testproj', 'test1')

View File

@@ -35,31 +35,22 @@ from nova import log as logging
from nova import rpc
from nova import service
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import power_state
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.image import local
from nova.objectstore import image
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.cloud')
# Temp dirs for working with image attributes through the cloud controller
# (stole this from objectstore_unittest.py)
OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images')
os.makedirs(IMAGES_PATH)
# TODO(termie): these tests are rather fragile, they should at the least be
# wiping database state after each run
class CloudTestCase(test.TestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
self.flags(connection_type='fake',
images_path=IMAGES_PATH)
self.flags(connection_type='fake')
self.conn = rpc.Connection.instance()
@@ -70,6 +61,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network')
self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
@@ -318,41 +310,6 @@ class CloudTestCase(test.TestCase):
LOG.debug(_("Terminating instance %s"), instance_id)
rv = self.compute.terminate_instance(instance_id)
@staticmethod
def _fake_set_image_description(ctxt, image_id, description):
from nova.objectstore import handler
class req:
pass
request = req()
request.context = ctxt
request.args = {'image_id': [image_id],
'description': [description]}
resource = handler.ImagesResource()
resource.render_POST(request)
def test_user_editable_image_endpoint(self):
pathdir = os.path.join(FLAGS.images_path, 'ami-testing')
os.mkdir(pathdir)
info = {'isPublic': False}
with open(os.path.join(pathdir, 'info.json'), 'w') as f:
json.dump(info, f)
img = image.Image('ami-testing')
# self.cloud.set_image_description(self.context, 'ami-testing',
# 'Foo Img')
# NOTE(vish): Above won't work unless we start objectstore or create
# a fake version of api/ec2/images.py conn that can
# call methods directly instead of going through boto.
# for now, just cheat and call the method directly
self._fake_set_image_description(self.context, 'ami-testing',
'Foo Img')
self.assertEqual('Foo Img', img.metadata['description'])
self._fake_set_image_description(self.context, 'ami-testing', '')
self.assertEqual('', img.metadata['description'])
shutil.rmtree(pathdir)
def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {})
ec2_id = ec2utils.id_to_ec2_id(inst['id'])

View File

@@ -44,6 +44,14 @@ flags.DECLARE('stub_network', 'nova.compute.manager')
flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
class FakeTime(object):
def __init__(self):
self.counter = 0
def sleep(self, t):
self.counter += t
class ComputeTestCase(test.TestCase):
"""Test case for compute"""
def setUp(self):
@@ -82,6 +90,21 @@ class ComputeTestCase(test.TestCase):
inst.update(params)
return db.instance_create(self.context, inst)['id']
def _create_instance_type(self, params={}):
"""Create a test instance"""
context = self.context.elevated()
inst = {}
inst['name'] = 'm1.small'
inst['memory_mb'] = '1024'
inst['vcpus'] = '1'
inst['local_gb'] = '20'
inst['flavorid'] = '1'
inst['swap'] = '2048'
inst['rxtx_quota'] = 100
inst['rxtx_cap'] = 200
inst.update(params)
return db.instance_type_create(context, inst)['id']
def _create_group(self):
values = {'name': 'testgroup',
'description': 'testgroup',
@@ -299,15 +322,53 @@ class ComputeTestCase(test.TestCase):
"""Ensure instance can be migrated/resized"""
instance_id = self._create_instance()
context = self.context.elevated()
self.compute.run_instance(self.context, instance_id)
db.instance_update(self.context, instance_id, {'host': 'foo'})
self.compute.prep_resize(context, instance_id)
self.compute.prep_resize(context, instance_id, 1)
migration_ref = db.migration_get_by_instance_and_status(context,
instance_id, 'pre-migrating')
self.compute.resize_instance(context, instance_id,
migration_ref['id'])
self.compute.terminate_instance(context, instance_id)
def test_resize_invalid_flavor_fails(self):
"""Ensure invalid flavors raise"""
instance_id = self._create_instance()
context = self.context.elevated()
self.compute.run_instance(self.context, instance_id)
self.assertRaises(exception.NotFound, self.compute_api.resize,
context, instance_id, 200)
self.compute.terminate_instance(context, instance_id)
def test_resize_down_fails(self):
"""Ensure resizing down raises and fails"""
context = self.context.elevated()
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
db.instance_update(self.context, instance_id,
{'instance_type': 'm1.xlarge'})
self.assertRaises(exception.ApiError, self.compute_api.resize,
context, instance_id, 1)
self.compute.terminate_instance(context, instance_id)
def test_resize_same_size_fails(self):
"""Ensure invalid flavors raise"""
context = self.context.elevated()
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.assertRaises(exception.ApiError, self.compute_api.resize,
context, instance_id, 1)
self.compute.terminate_instance(context, instance_id)
def test_get_by_flavor_id(self):
type = instance_types.get_by_flavor_id(1)
self.assertEqual(type, 'm1.tiny')
@@ -318,10 +379,8 @@ class ComputeTestCase(test.TestCase):
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.assertRaises(exception.Error, self.compute.prep_resize,
self.context, instance_id)
self.context, instance_id, 1)
self.compute.terminate_instance(self.context, instance_id)
type = instance_types.get_by_flavor_id("1")
self.assertEqual(type, 'm1.tiny')
def _setup_other_managers(self):
self.volume_manager = utils.import_object(FLAGS.volume_manager)
@@ -342,7 +401,7 @@ class ComputeTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.NotFound,
self.compute.pre_live_migration,
c, instance_ref['id'])
c, instance_ref['id'], time=FakeTime())
def test_pre_live_migration_instance_has_volume(self):
"""Confirm setup_compute_volume is called when volume is mounted."""
@@ -395,7 +454,7 @@ class ComputeTestCase(test.TestCase):
self.compute.driver = drivermock
self.mox.ReplayAll()
ret = self.compute.pre_live_migration(c, i_ref['id'])
ret = self.compute.pre_live_migration(c, i_ref['id'], time=FakeTime())
self.assertEqual(ret, None)
def test_pre_live_migration_setup_compute_node_fail(self):
@@ -428,7 +487,7 @@ class ComputeTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.ProcessExecutionError,
self.compute.pre_live_migration,
c, i_ref['id'])
c, i_ref['id'], time=FakeTime())
def test_live_migration_works_correctly_with_volume(self):
"""Confirm check_for_export to confirm volume health check."""
@@ -575,3 +634,24 @@ class ComputeTestCase(test.TestCase):
db.instance_destroy(c, instance_id)
db.volume_destroy(c, v_ref['id'])
db.floating_ip_destroy(c, flo_addr)
def test_run_kill_vm(self):
"""Detect when a vm is terminated behind the scenes"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
instance_name = instances[0].name
self.compute.driver.test_remove_vm(instance_name)
# Force the compute manager to do its periodic poll
error_list = self.compute.periodic_tasks(context.get_admin_context())
self.assertFalse(error_list)
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 0)

View File

@@ -25,12 +25,18 @@ import webob
from nova import compute
from nova import context
from nova import exception
from nova import network
from nova import test
from nova import volume
from nova import utils
from nova.api import direct
from nova.tests import test_cloud
class ArbitraryObject(object):
pass
class FakeService(object):
def echo(self, context, data):
return {'data': data}
@@ -39,6 +45,9 @@ class FakeService(object):
return {'user': context.user_id,
'project': context.project_id}
def invalid_return(self, context):
return ArbitraryObject()
class DirectTestCase(test.TestCase):
def setUp(self):
@@ -84,6 +93,12 @@ class DirectTestCase(test.TestCase):
resp_parsed = json.loads(resp.body)
self.assertEqual(resp_parsed['data'], 'foo')
def test_invalid(self):
req = webob.Request.blank('/fake/invalid_return')
req.environ['openstack.context'] = self.context
req.method = 'POST'
self.assertRaises(exception.Error, req.get_response, self.router)
def test_proxy(self):
proxy = direct.Proxy(self.router)
rv = proxy.fake.echo(self.context, data='baz')
@@ -93,12 +108,20 @@ class DirectTestCase(test.TestCase):
class DirectCloudTestCase(test_cloud.CloudTestCase):
def setUp(self):
super(DirectCloudTestCase, self).setUp()
compute_handle = compute.API(network_api=self.cloud.network_api,
volume_api=self.cloud.volume_api)
compute_handle = compute.API(image_service=self.cloud.image_service)
volume_handle = volume.API()
network_handle = network.API()
direct.register_service('compute', compute_handle)
direct.register_service('volume', volume_handle)
direct.register_service('network', network_handle)
self.router = direct.JsonParamsMiddleware(direct.Router())
proxy = direct.Proxy(self.router)
self.cloud.compute_api = proxy.compute
self.cloud.volume_api = proxy.volume
self.cloud.network_api = proxy.network
compute_handle.volume_api = proxy.volume
compute_handle.network_api = proxy.network
def tearDown(self):
super(DirectCloudTestCase, self).tearDown()

View File

@@ -0,0 +1,161 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for flat network code
"""
import IPy
import os
import unittest
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
from nova.tests.network import base
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.network')
class FlatNetworkTestCase(base.NetworkTestCase):
"""Test cases for network code"""
def test_public_network_association(self):
"""Makes sure that we can allocate a public ip"""
# TODO(vish): better way of adding floating ips
self.context._project = self.projects[0]
self.context.project_id = self.projects[0].id
pubnet = IPy.IP(flags.FLAGS.floating_range)
address = str(pubnet[0])
try:
db.floating_ip_get_by_address(context.get_admin_context(), address)
except exception.NotFound:
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'host': FLAGS.host})
self.assertRaises(NotImplementedError,
self.network.allocate_floating_ip,
self.context, self.projects[0].id)
fix_addr = self._create_address(0)
float_addr = address
self.assertRaises(NotImplementedError,
self.network.associate_floating_ip,
self.context, float_addr, fix_addr)
address = db.instance_get_floating_address(context.get_admin_context(),
self.instance_id)
self.assertEqual(address, None)
self.assertRaises(NotImplementedError,
self.network.disassociate_floating_ip,
self.context, float_addr)
address = db.instance_get_floating_address(context.get_admin_context(),
self.instance_id)
self.assertEqual(address, None)
self.assertRaises(NotImplementedError,
self.network.deallocate_floating_ip,
self.context, float_addr)
self.network.deallocate_fixed_ip(self.context, fix_addr)
db.floating_ip_destroy(context.get_admin_context(), float_addr)
def test_allocate_deallocate_fixed_ip(self):
"""Makes sure that we can allocate and deallocate a fixed ip"""
address = self._create_address(0)
self.assertTrue(self._is_allocated_in_project(address,
self.projects[0].id))
self._deallocate_address(0, address)
# check if the fixed ip address is really deallocated
self.assertFalse(self._is_allocated_in_project(address,
self.projects[0].id))
def test_side_effects(self):
"""Ensures allocating and releasing has no side effects"""
address = self._create_address(0)
address2 = self._create_address(1, self.instance2_id)
self.assertTrue(self._is_allocated_in_project(address,
self.projects[0].id))
self.assertTrue(self._is_allocated_in_project(address2,
self.projects[1].id))
self._deallocate_address(0, address)
self.assertFalse(self._is_allocated_in_project(address,
self.projects[0].id))
# First address release shouldn't affect the second
self.assertTrue(self._is_allocated_in_project(address2,
self.projects[0].id))
self._deallocate_address(1, address2)
self.assertFalse(self._is_allocated_in_project(address2,
self.projects[1].id))
def test_ips_are_reused(self):
"""Makes sure that ip addresses that are deallocated get reused"""
address = self._create_address(0)
self.network.deallocate_fixed_ip(self.context, address)
address2 = self._create_address(0)
self.assertEqual(address, address2)
self.network.deallocate_fixed_ip(self.context, address2)
def test_too_many_addresses(self):
"""Test for a NoMoreAddresses exception when all fixed ips are used.
"""
admin_context = context.get_admin_context()
network = db.project_get_network(admin_context, self.projects[0].id)
num_available_ips = db.network_count_available_ips(admin_context,
network['id'])
addresses = []
instance_ids = []
for i in range(num_available_ips):
instance_ref = self._create_instance(0)
instance_ids.append(instance_ref['id'])
address = self._create_address(0, instance_ref['id'])
addresses.append(address)
ip_count = db.network_count_available_ips(context.get_admin_context(),
network['id'])
self.assertEqual(ip_count, 0)
self.assertRaises(db.NoMoreAddresses,
self.network.allocate_fixed_ip,
self.context,
'foo')
for i in range(num_available_ips):
self.network.deallocate_fixed_ip(self.context, addresses[i])
db.instance_destroy(context.get_admin_context(), instance_ids[i])
ip_count = db.network_count_available_ips(context.get_admin_context(),
network['id'])
self.assertEqual(ip_count, num_available_ips)
def run(self, result=None):
if(FLAGS.network_manager == 'nova.network.manager.FlatManager'):
super(FlatNetworkTestCase, self).run(result)

View File

@@ -21,9 +21,10 @@ import sys
import unittest
import nova
from nova import test
class LocalizationTestCase(unittest.TestCase):
class LocalizationTestCase(test.TestCase):
def test_multiple_positional_format_placeholders(self):
pat = re.compile("\W_\(")
single_pat = re.compile("\W%\W")

View File

@@ -40,12 +40,12 @@ def conditional_forbid(req):
class LockoutTestCase(test.TestCase):
"""Test case for the Lockout middleware."""
def setUp(self): # pylint: disable-msg=C0103
def setUp(self): # pylint: disable=C0103
super(LockoutTestCase, self).setUp()
utils.set_time_override()
self.lockout = ec2.Lockout(conditional_forbid)
def tearDown(self): # pylint: disable-msg=C0103
def tearDown(self): # pylint: disable=C0103
utils.clear_time_override()
super(LockoutTestCase, self).tearDown()

View File

@@ -18,8 +18,12 @@ import errno
import os
import select
from eventlet import greenpool
from eventlet import greenthread
from nova import test
from nova.utils import parse_mailmap, str_dict_replace, synchronized
from nova import utils
from nova.utils import parse_mailmap, str_dict_replace
class ProjectTestCase(test.TestCase):
@@ -63,7 +67,7 @@ class ProjectTestCase(test.TestCase):
class LockTestCase(test.TestCase):
def test_synchronized_wrapped_function_metadata(self):
@synchronized('whatever')
@utils.synchronized('whatever')
def foo():
"""Bar"""
pass
@@ -72,11 +76,42 @@ class LockTestCase(test.TestCase):
self.assertEquals(foo.__name__, 'foo', "Wrapped function's name "
"got mangled")
def test_synchronized(self):
def test_synchronized_internally(self):
"""We can lock across multiple green threads"""
saved_sem_num = len(utils._semaphores)
seen_threads = list()
@utils.synchronized('testlock2', external=False)
def f(id):
for x in range(10):
seen_threads.append(id)
greenthread.sleep(0)
threads = []
pool = greenpool.GreenPool(10)
for i in range(10):
threads.append(pool.spawn(f, i))
for thread in threads:
thread.wait()
self.assertEquals(len(seen_threads), 100)
# Looking at the seen threads, split it into chunks of 10, and verify
# that the last 9 match the first in each chunk.
for i in range(10):
for j in range(9):
self.assertEquals(seen_threads[i * 10],
seen_threads[i * 10 + 1 + j])
self.assertEqual(saved_sem_num, len(utils._semaphores),
"Semaphore leak detected")
def test_synchronized_externally(self):
"""We can lock across multiple processes"""
rpipe1, wpipe1 = os.pipe()
rpipe2, wpipe2 = os.pipe()
@synchronized('testlock')
@utils.synchronized('testlock1', external=True)
def f(rpipe, wpipe):
try:
os.write(wpipe, "foo")

View File

@@ -20,21 +20,10 @@ Unit Tests for network code
"""
import IPy
import os
import time
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
from nova.network import linux_net
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.network')
class IptablesManagerTestCase(test.TestCase):
sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
@@ -175,363 +164,3 @@ class IptablesManagerTestCase(test.TestCase):
self.assertTrue('-A %s -j run_tests.py-%s' \
% (chain, chain) in new_lines,
"Built-in chain %s not wrapped" % (chain,))
class NetworkTestCase(test.TestCase):
"""Test cases for network code"""
def setUp(self):
super(NetworkTestCase, self).setUp()
# NOTE(vish): if you change these flags, make sure to change the
# flags in the corresponding section in nova-dhcpbridge
self.flags(connection_type='fake',
fake_call=True,
fake_network=True)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
self.network = utils.import_object(FLAGS.network_manager)
self.context = context.RequestContext(project=None, user=self.user)
for i in range(FLAGS.num_networks):
name = 'project%s' % i
project = self.manager.create_project(name, 'netuser', name)
self.projects.append(project)
# create the necessary network data for the project
user_context = context.RequestContext(project=self.projects[i],
user=self.user)
host = self.network.get_network_host(user_context.elevated())
instance_ref = self._create_instance(0)
self.instance_id = instance_ref['id']
instance_ref = self._create_instance(1)
self.instance2_id = instance_ref['id']
def tearDown(self):
# TODO(termie): this should really be instantiating clean datastores
# in between runs, one failure kills all the tests
db.instance_destroy(context.get_admin_context(), self.instance_id)
db.instance_destroy(context.get_admin_context(), self.instance2_id)
for project in self.projects:
self.manager.delete_project(project)
self.manager.delete_user(self.user)
super(NetworkTestCase, self).tearDown()
def _create_instance(self, project_num, mac=None):
if not mac:
mac = utils.generate_mac()
project = self.projects[project_num]
self.context._project = project
self.context.project_id = project.id
return db.instance_create(self.context,
{'project_id': project.id,
'mac_address': mac})
def _create_address(self, project_num, instance_id=None):
"""Create an address in given project num"""
if instance_id is None:
instance_id = self.instance_id
self.context._project = self.projects[project_num]
self.context.project_id = self.projects[project_num].id
return self.network.allocate_fixed_ip(self.context, instance_id)
def _deallocate_address(self, project_num, address):
self.context._project = self.projects[project_num]
self.context.project_id = self.projects[project_num].id
self.network.deallocate_fixed_ip(self.context, address)
def test_private_ipv6(self):
"""Make sure ipv6 is OK"""
if FLAGS.use_ipv6:
instance_ref = self._create_instance(0)
address = self._create_address(0, instance_ref['id'])
network_ref = db.project_get_network(
context.get_admin_context(),
self.context.project_id)
address_v6 = db.instance_get_fixed_address_v6(
context.get_admin_context(),
instance_ref['id'])
self.assertEqual(instance_ref['mac_address'],
utils.to_mac(address_v6))
instance_ref2 = db.fixed_ip_get_instance_v6(
context.get_admin_context(),
address_v6)
self.assertEqual(instance_ref['id'], instance_ref2['id'])
self.assertEqual(address_v6,
utils.to_global_ipv6(
network_ref['cidr_v6'],
instance_ref['mac_address']))
self._deallocate_address(0, address)
db.instance_destroy(context.get_admin_context(),
instance_ref['id'])
def test_public_network_association(self):
"""Makes sure that we can allocaate a public ip"""
# TODO(vish): better way of adding floating ips
self.context._project = self.projects[0]
self.context.project_id = self.projects[0].id
pubnet = IPy.IP(flags.FLAGS.floating_range)
address = str(pubnet[0])
try:
db.floating_ip_get_by_address(context.get_admin_context(), address)
except exception.NotFound:
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'host': FLAGS.host})
float_addr = self.network.allocate_floating_ip(self.context,
self.projects[0].id)
fix_addr = self._create_address(0)
lease_ip(fix_addr)
self.assertEqual(float_addr, str(pubnet[0]))
self.network.associate_floating_ip(self.context, float_addr, fix_addr)
address = db.instance_get_floating_address(context.get_admin_context(),
self.instance_id)
self.assertEqual(address, float_addr)
self.network.disassociate_floating_ip(self.context, float_addr)
address = db.instance_get_floating_address(context.get_admin_context(),
self.instance_id)
self.assertEqual(address, None)
self.network.deallocate_floating_ip(self.context, float_addr)
self.network.deallocate_fixed_ip(self.context, fix_addr)
release_ip(fix_addr)
db.floating_ip_destroy(context.get_admin_context(), float_addr)
def test_allocate_deallocate_fixed_ip(self):
"""Makes sure that we can allocate and deallocate a fixed ip"""
address = self._create_address(0)
self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
lease_ip(address)
self._deallocate_address(0, address)
# Doesn't go away until it's dhcp released
self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
release_ip(address)
self.assertFalse(is_allocated_in_project(address, self.projects[0].id))
def test_side_effects(self):
"""Ensures allocating and releasing has no side effects"""
address = self._create_address(0)
address2 = self._create_address(1, self.instance2_id)
self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))
self.assertFalse(is_allocated_in_project(address, self.projects[1].id))
# Addresses are allocated before they're issued
lease_ip(address)
lease_ip(address2)
self._deallocate_address(0, address)
release_ip(address)
self.assertFalse(is_allocated_in_project(address, self.projects[0].id))
# First address release shouldn't affect the second
self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))
self._deallocate_address(1, address2)
release_ip(address2)
self.assertFalse(is_allocated_in_project(address2,
self.projects[1].id))
def test_subnet_edge(self):
"""Makes sure that private ips don't overlap"""
first = self._create_address(0)
lease_ip(first)
instance_ids = []
for i in range(1, FLAGS.num_networks):
instance_ref = self._create_instance(i, mac=utils.generate_mac())
instance_ids.append(instance_ref['id'])
address = self._create_address(i, instance_ref['id'])
instance_ref = self._create_instance(i, mac=utils.generate_mac())
instance_ids.append(instance_ref['id'])
address2 = self._create_address(i, instance_ref['id'])
instance_ref = self._create_instance(i, mac=utils.generate_mac())
instance_ids.append(instance_ref['id'])
address3 = self._create_address(i, instance_ref['id'])
lease_ip(address)
lease_ip(address2)
lease_ip(address3)
self.context._project = self.projects[i]
self.context.project_id = self.projects[i].id
self.assertFalse(is_allocated_in_project(address,
self.projects[0].id))
self.assertFalse(is_allocated_in_project(address2,
self.projects[0].id))
self.assertFalse(is_allocated_in_project(address3,
self.projects[0].id))
self.network.deallocate_fixed_ip(self.context, address)
self.network.deallocate_fixed_ip(self.context, address2)
self.network.deallocate_fixed_ip(self.context, address3)
release_ip(address)
release_ip(address2)
release_ip(address3)
for instance_id in instance_ids:
db.instance_destroy(context.get_admin_context(), instance_id)
self.context._project = self.projects[0]
self.context.project_id = self.projects[0].id
self.network.deallocate_fixed_ip(self.context, first)
self._deallocate_address(0, first)
release_ip(first)
def test_vpn_ip_and_port_looks_valid(self):
"""Ensure the vpn ip and port are reasonable"""
self.assert_(self.projects[0].vpn_ip)
self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start)
self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start +
FLAGS.num_networks)
def test_too_many_networks(self):
"""Ensure error is raised if we run out of networks"""
projects = []
networks_left = (FLAGS.num_networks -
db.network_count(context.get_admin_context()))
for i in range(networks_left):
project = self.manager.create_project('many%s' % i, self.user)
projects.append(project)
db.project_get_network(context.get_admin_context(), project.id)
project = self.manager.create_project('last', self.user)
projects.append(project)
self.assertRaises(db.NoMoreNetworks,
db.project_get_network,
context.get_admin_context(),
project.id)
for project in projects:
self.manager.delete_project(project)
def test_ips_are_reused(self):
"""Makes sure that ip addresses that are deallocated get reused"""
address = self._create_address(0)
lease_ip(address)
self.network.deallocate_fixed_ip(self.context, address)
release_ip(address)
address2 = self._create_address(0)
self.assertEqual(address, address2)
lease_ip(address)
self.network.deallocate_fixed_ip(self.context, address2)
release_ip(address)
def test_available_ips(self):
"""Make sure the number of available ips for the network is correct
The number of available IP addresses depends on the test
environment's setup.
Network size is set in the test fixture's setUp method.
IPs are reserved at the bottom and top of the range for
services (network, gateway, CloudPipe, broadcast).
"""
network = db.project_get_network(context.get_admin_context(),
self.projects[0].id)
net_size = flags.FLAGS.network_size
admin_context = context.get_admin_context()
total_ips = (db.network_count_available_ips(admin_context,
network['id']) +
db.network_count_reserved_ips(admin_context,
network['id']) +
db.network_count_allocated_ips(admin_context,
network['id']))
self.assertEqual(total_ips, net_size)
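In other words, the test asserts a simple accounting identity over the fixture's network (the numbers below are illustrative, not the actual flag values):

    network_size = 32  # e.g. a /27; the real value comes from FLAGS.network_size
    reserved = 4       # network, gateway, CloudPipe/vpn, broadcast
    allocated = 3      # fixed ips currently handed out to instances
    available = network_size - reserved - allocated
    assert available + reserved + allocated == network_size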
def test_too_many_addresses(self):
"""Test for a NoMoreAddresses exception when all fixed ips are used.
"""
admin_context = context.get_admin_context()
network = db.project_get_network(admin_context, self.projects[0].id)
num_available_ips = db.network_count_available_ips(admin_context,
network['id'])
addresses = []
instance_ids = []
for i in range(num_available_ips):
instance_ref = self._create_instance(0)
instance_ids.append(instance_ref['id'])
address = self._create_address(0, instance_ref['id'])
addresses.append(address)
lease_ip(address)
ip_count = db.network_count_available_ips(context.get_admin_context(),
network['id'])
self.assertEqual(ip_count, 0)
self.assertRaises(db.NoMoreAddresses,
self.network.allocate_fixed_ip,
self.context,
'foo')
for i in range(num_available_ips):
self.network.deallocate_fixed_ip(self.context, addresses[i])
release_ip(addresses[i])
db.instance_destroy(context.get_admin_context(), instance_ids[i])
ip_count = db.network_count_available_ips(context.get_admin_context(),
network['id'])
self.assertEqual(ip_count, num_available_ips)
def test_dhcp_lease_output(self):
admin_ctxt = context.get_admin_context()
address = self._create_address(0, self.instance_id)
lease_ip(address)
network_ref = db.network_get_by_instance(admin_ctxt, self.instance_id)
leases = linux_net.get_dhcp_leases(context.get_admin_context(),
network_ref['id'])
for line in leases.split('\n'):
seconds, mac, ip, hostname, client_id = line.split(' ')
self.assertTrue(int(seconds) > time.time(), 'Lease expires in '
'the past')
octets = mac.split(':')
self.assertEqual(len(octets), 6, "Wrong number of octets "
"in %s" % (max,))
for octet in octets:
self.assertEqual(len(octet), 2, "Oddly sized octet: %s"
% (octet,))
# This will throw an exception if the octet is invalid
int(octet, 16)
# And this will raise an exception in case of an invalid IP
IPy.IP(ip)
release_ip(address)
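The lease format parsed above is dnsmasq's: expiry time, MAC, IP, hostname and client id separated by single spaces. A standalone illustration of the same checks, using a made-up sample line:

    line = "1300000000 aa:bb:cc:dd:ee:ff 10.0.0.3 host-10-0-0-3 *"
    seconds, mac, ip, hostname, client_id = line.split(' ')
    octets = mac.split(':')
    assert len(octets) == 6
    for octet in octets:
        assert len(octet) == 2
        int(octet, 16)  # raises ValueError for a non-hex octet
    parts = [int(p) for p in ip.split('.')]
    assert len(parts) == 4 and all(0 <= p <= 255 for p in parts)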
def is_allocated_in_project(address, project_id):
"""Returns true if address is in specified project"""
project_net = db.project_get_network(context.get_admin_context(),
project_id)
network = db.fixed_ip_get_network(context.get_admin_context(), address)
instance = db.fixed_ip_get_instance(context.get_admin_context(), address)
# instance exists until release
return instance is not None and network['id'] == project_net['id']
def binpath(script):
"""Returns the absolute path to a script in bin"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
def lease_ip(private_ip):
"""Run add command on dhcpbridge"""
network_ref = db.fixed_ip_get_network(context.get_admin_context(),
private_ip)
instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
private_ip)
cmd = (binpath('nova-dhcpbridge'), 'add',
instance_ref['mac_address'],
private_ip, 'fake')
env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(*cmd, addl_env=env)
LOG.debug("ISSUE_IP: %s, %s ", out, err)
def release_ip(private_ip):
"""Run del command on dhcpbridge"""
network_ref = db.fixed_ip_get_network(context.get_admin_context(),
private_ip)
instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
private_ip)
cmd = (binpath('nova-dhcpbridge'), 'del',
instance_ref['mac_address'],
private_ip, 'fake')
env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(*cmd, addl_env=env)
LOG.debug("RELEASE_IP: %s, %s ", out, err)

View File

@@ -36,7 +36,7 @@ class RpcTestCase(test.TestCase):
super(RpcTestCase, self).setUp()
self.conn = rpc.Connection.instance(True)
self.receiver = TestReceiver()
self.consumer = rpc.AdapterConsumer(connection=self.conn,
self.consumer = rpc.TopicAdapterConsumer(connection=self.conn,
topic='test',
proxy=self.receiver)
self.consumer.attach_to_eventlet()
@@ -97,7 +97,7 @@ class RpcTestCase(test.TestCase):
nested = Nested()
conn = rpc.Connection.instance(True)
consumer = rpc.AdapterConsumer(connection=conn,
consumer = rpc.TopicAdapterConsumer(connection=conn,
topic='nested',
proxy=nested)
consumer.attach_to_eventlet()

View File

@@ -21,6 +21,9 @@ Tests For Scheduler
import datetime
import mox
import novaclient.exceptions
import stubout
import webob
from mox import IgnoreArg
from nova import context
@@ -32,6 +35,7 @@ from nova import test
from nova import rpc
from nova import utils
from nova.auth import manager as auth_manager
from nova.scheduler import api
from nova.scheduler import manager
from nova.scheduler import driver
from nova.compute import power_state
@@ -937,3 +941,160 @@ class SimpleDriverTestCase(test.TestCase):
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])
class FakeZone(object):
def __init__(self, api_url, username, password):
self.api_url = api_url
self.username = username
self.password = password
def zone_get_all(context):
return [
FakeZone('http://example.com', 'bob', 'xxx'),
]
class FakeRerouteCompute(api.reroute_compute):
def _call_child_zones(self, zones, function):
return []
def get_collection_context_and_id(self, args, kwargs):
return ("servers", None, 1)
def unmarshall_result(self, zone_responses):
return dict(magic="found me")
def go_boom(self, context, instance):
raise exception.InstanceNotFound("boom message", instance)
def found_instance(self, context, instance):
return dict(name='myserver')
class FakeResource(object):
def __init__(self, attribute_dict):
for k, v in attribute_dict.iteritems():
setattr(self, k, v)
def pause(self):
pass
class ZoneRedirectTest(test.TestCase):
def setUp(self):
super(ZoneRedirectTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(db, 'zone_get_all', zone_get_all)
self.enable_zone_routing = FLAGS.enable_zone_routing
FLAGS.enable_zone_routing = True
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.enable_zone_routing = self.enable_zone_routing
super(ZoneRedirectTest, self).tearDown()
def test_trap_found_locally(self):
decorator = FakeRerouteCompute("foo")
try:
result = decorator(found_instance)(None, None, 1)
except api.RedirectResult, e:
self.fail(_("Successful database hit should succeed"))
def test_trap_not_found_locally(self):
decorator = FakeRerouteCompute("foo")
try:
result = decorator(go_boom)(None, None, 1)
self.assertFail(_("Should have rerouted."))
except api.RedirectResult, e:
self.assertEquals(e.results['magic'], 'found me')
def test_routing_flags(self):
FLAGS.enable_zone_routing = False
decorator = FakeRerouteCompute("foo")
try:
result = decorator(go_boom)(None, None, 1)
self.assertFail(_("Should have thrown exception."))
except exception.InstanceNotFound, e:
self.assertEquals(e.message, 'boom message')
def test_get_collection_context_and_id(self):
decorator = api.reroute_compute("foo")
self.assertEquals(decorator.get_collection_context_and_id(
(None, 10, 20), {}), ("servers", 10, 20))
self.assertEquals(decorator.get_collection_context_and_id(
(None, 11,), dict(instance_id=21)), ("servers", 11, 21))
self.assertEquals(decorator.get_collection_context_and_id(
(None,), dict(context=12, instance_id=22)), ("servers", 12, 22))
def test_unmarshal_single_server(self):
decorator = api.reroute_compute("foo")
self.assertEquals(decorator.unmarshall_result([]), {})
self.assertEquals(decorator.unmarshall_result(
[FakeResource(dict(a=1, b=2)), ]),
dict(server=dict(a=1, b=2)))
self.assertEquals(decorator.unmarshall_result(
[FakeResource(dict(a=1, _b=2)), ]),
dict(server=dict(a=1,)))
self.assertEquals(decorator.unmarshall_result(
[FakeResource(dict(a=1, manager=2)), ]),
dict(server=dict(a=1,)))
self.assertEquals(decorator.unmarshall_result(
[FakeResource(dict(_a=1, manager=2)), ]),
dict(server={}))
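The unmarshalling behaviour those assertions pin down can be summarised by a small sketch (illustrative only, not the nova.scheduler.api code itself): keep the public attributes of the single returned resource, dropping names that start with an underscore and the 'manager' attribute.

    def unmarshall_single(zone_responses):
        if not zone_responses:
            return {}
        item = zone_responses[0]
        server = dict((k, v) for k, v in vars(item).items()
                      if not k.startswith('_') and k != 'manager')
        return dict(server=server)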
class FakeServerCollection(object):
def get(self, instance_id):
return FakeResource(dict(a=10, b=20))
def find(self, name):
return FakeResource(dict(a=11, b=22))
class FakeEmptyServerCollection(object):
def get(self, f):
raise novaclient.NotFound(1)
def find(self, name):
raise novaclient.NotFound(2)
class FakeNovaClient(object):
def __init__(self, collection):
self.servers = collection
class DynamicNovaClientTest(test.TestCase):
def test_issue_novaclient_command_found(self):
zone = FakeZone('http://example.com', 'bob', 'xxx')
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeServerCollection()),
zone, "servers", "get", 100).a, 10)
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeServerCollection()),
zone, "servers", "find", "name").b, 22)
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeServerCollection()),
zone, "servers", "pause", 100), None)
def test_issue_novaclient_command_not_found(self):
zone = FakeZone('http://example.com', 'bob', 'xxx')
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeEmptyServerCollection()),
zone, "servers", "get", 100), None)
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeEmptyServerCollection()),
zone, "servers", "find", "name"), None)
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeEmptyServerCollection()),
zone, "servers", "any", "name"), None)

View File

@@ -77,13 +77,11 @@ class CacheConcurrencyTestCase(test.TestCase):
eventlet.sleep(0)
try:
self.assertFalse(done2.ready())
self.assertTrue('fname' in conn._image_sems)
finally:
wait1.send()
done1.wait()
eventlet.sleep(0)
self.assertTrue(done2.ready())
self.assertFalse('fname' in conn._image_sems)
def test_different_fname_concurrency(self):
"""Ensures that two different fname caches are concurrent"""
@@ -429,6 +427,15 @@ class LibvirtConnTestCase(test.TestCase):
def fake_raise(self):
raise libvirt.libvirtError('ERR')
class FakeTime(object):
def __init__(self):
self.counter = 0
def sleep(self, t):
self.counter += t
fake_timer = FakeTime()
self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise)
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -438,11 +445,15 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_conn.LibvirtConnection(False)
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
conn.ensure_filtering_rules_for_instance(instance_ref)
conn.ensure_filtering_rules_for_instance(instance_ref,
time=fake_timer)
except exception.Error, e:
c1 = (0 <= e.message.find('Timeout migrating for'))
self.assertTrue(c1)
self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
"amount of time")
db.instance_destroy(self.context, instance_ref['id'])
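The injected FakeTime makes the retry behaviour observable: with the filter lookup always raising, a counter of 29 matches a loop of 30 attempts that sleeps one second between attempts before giving up. A rough sketch of that shape (the retry count is an assumption, not the actual driver code):

    def wait_for_filtering_rules(check, timer, retries=30):
        # Poll check(); sleep via the injectable timer between attempts.
        for attempt in range(retries):
            if check():
                return
            if attempt < retries - 1:
                timer.sleep(1)  # 29 sleeps for 30 failed attempts
        raise Exception('Timeout migrating for instance')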
def test_live_migration_raises_exception(self):
@@ -785,7 +796,8 @@ class NWFilterTestCase(test.TestCase):
instance_ref = db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake'})
'project_id': 'fake',
'mac_address': '00:A0:C9:14:C8:29'})
inst_id = instance_ref['id']
ip = '10.11.12.13'
@@ -802,7 +814,8 @@ class NWFilterTestCase(test.TestCase):
'instance_id': instance_ref['id']})
def _ensure_all_called():
instance_filter = 'nova-instance-%s' % instance_ref['name']
instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
'00A0C914C829')
secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
for required in [secgroup_filter, 'allow-dhcp-server',
'no-arp-spoofing', 'no-ip-spoofing',

View File

@@ -0,0 +1,242 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for vlan network code
"""
import IPy
import os
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
from nova.tests.network import base
from nova.tests.network import binpath,\
lease_ip, release_ip
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.network')
class VlanNetworkTestCase(base.NetworkTestCase):
"""Test cases for network code"""
def test_public_network_association(self):
"""Makes sure that we can allocaate a public ip"""
# TODO(vish): better way of adding floating ips
self.context._project = self.projects[0]
self.context.project_id = self.projects[0].id
pubnet = IPy.IP(flags.FLAGS.floating_range)
address = str(pubnet[0])
try:
db.floating_ip_get_by_address(context.get_admin_context(), address)
except exception.NotFound:
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'host': FLAGS.host})
float_addr = self.network.allocate_floating_ip(self.context,
self.projects[0].id)
fix_addr = self._create_address(0)
lease_ip(fix_addr)
self.assertEqual(float_addr, str(pubnet[0]))
self.network.associate_floating_ip(self.context, float_addr, fix_addr)
address = db.instance_get_floating_address(context.get_admin_context(),
self.instance_id)
self.assertEqual(address, float_addr)
self.network.disassociate_floating_ip(self.context, float_addr)
address = db.instance_get_floating_address(context.get_admin_context(),
self.instance_id)
self.assertEqual(address, None)
self.network.deallocate_floating_ip(self.context, float_addr)
self.network.deallocate_fixed_ip(self.context, fix_addr)
release_ip(fix_addr)
db.floating_ip_destroy(context.get_admin_context(), float_addr)
def test_allocate_deallocate_fixed_ip(self):
"""Makes sure that we can allocate and deallocate a fixed ip"""
address = self._create_address(0)
self.assertTrue(self._is_allocated_in_project(address,
self.projects[0].id))
lease_ip(address)
self._deallocate_address(0, address)
# Doesn't go away until it's dhcp released
self.assertTrue(self._is_allocated_in_project(address,
self.projects[0].id))
release_ip(address)
self.assertFalse(self._is_allocated_in_project(address,
self.projects[0].id))
def test_side_effects(self):
"""Ensures allocating and releasing has no side effects"""
address = self._create_address(0)
address2 = self._create_address(1, self.instance2_id)
self.assertTrue(self._is_allocated_in_project(address,
self.projects[0].id))
self.assertTrue(self._is_allocated_in_project(address2,
self.projects[1].id))
self.assertFalse(self._is_allocated_in_project(address,
self.projects[1].id))
# Addresses are allocated before they're issued
lease_ip(address)
lease_ip(address2)
self._deallocate_address(0, address)
release_ip(address)
self.assertFalse(self._is_allocated_in_project(address,
self.projects[0].id))
# First address release shouldn't affect the second
self.assertTrue(self._is_allocated_in_project(address2,
self.projects[1].id))
self._deallocate_address(1, address2)
release_ip(address2)
self.assertFalse(self._is_allocated_in_project(address2,
self.projects[1].id))
def test_subnet_edge(self):
"""Makes sure that private ips don't overlap"""
first = self._create_address(0)
lease_ip(first)
instance_ids = []
for i in range(1, FLAGS.num_networks):
instance_ref = self._create_instance(i, mac=utils.generate_mac())
instance_ids.append(instance_ref['id'])
address = self._create_address(i, instance_ref['id'])
instance_ref = self._create_instance(i, mac=utils.generate_mac())
instance_ids.append(instance_ref['id'])
address2 = self._create_address(i, instance_ref['id'])
instance_ref = self._create_instance(i, mac=utils.generate_mac())
instance_ids.append(instance_ref['id'])
address3 = self._create_address(i, instance_ref['id'])
lease_ip(address)
lease_ip(address2)
lease_ip(address3)
self.context._project = self.projects[i]
self.context.project_id = self.projects[i].id
self.assertFalse(self._is_allocated_in_project(address,
self.projects[0].id))
self.assertFalse(self._is_allocated_in_project(address2,
self.projects[0].id))
self.assertFalse(self._is_allocated_in_project(address3,
self.projects[0].id))
self.network.deallocate_fixed_ip(self.context, address)
self.network.deallocate_fixed_ip(self.context, address2)
self.network.deallocate_fixed_ip(self.context, address3)
release_ip(address)
release_ip(address2)
release_ip(address3)
for instance_id in instance_ids:
db.instance_destroy(context.get_admin_context(), instance_id)
self.context._project = self.projects[0]
self.context.project_id = self.projects[0].id
self.network.deallocate_fixed_ip(self.context, first)
self._deallocate_address(0, first)
release_ip(first)
def test_vpn_ip_and_port_looks_valid(self):
"""Ensure the vpn ip and port are reasonable"""
self.assert_(self.projects[0].vpn_ip)
self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start)
self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start +
FLAGS.num_networks)
def test_too_many_networks(self):
"""Ensure error is raised if we run out of networks"""
projects = []
networks_left = (FLAGS.num_networks -
db.network_count(context.get_admin_context()))
for i in range(networks_left):
project = self.manager.create_project('many%s' % i, self.user)
projects.append(project)
db.project_get_network(context.get_admin_context(), project.id)
project = self.manager.create_project('last', self.user)
projects.append(project)
self.assertRaises(db.NoMoreNetworks,
db.project_get_network,
context.get_admin_context(),
project.id)
for project in projects:
self.manager.delete_project(project)
def test_ips_are_reused(self):
"""Makes sure that ip addresses that are deallocated get reused"""
address = self._create_address(0)
lease_ip(address)
self.network.deallocate_fixed_ip(self.context, address)
release_ip(address)
address2 = self._create_address(0)
self.assertEqual(address, address2)
lease_ip(address)
self.network.deallocate_fixed_ip(self.context, address2)
release_ip(address)
def test_too_many_addresses(self):
"""Test for a NoMoreAddresses exception when all fixed ips are used.
"""
admin_context = context.get_admin_context()
network = db.project_get_network(admin_context, self.projects[0].id)
num_available_ips = db.network_count_available_ips(admin_context,
network['id'])
addresses = []
instance_ids = []
for i in range(num_available_ips):
instance_ref = self._create_instance(0)
instance_ids.append(instance_ref['id'])
address = self._create_address(0, instance_ref['id'])
addresses.append(address)
lease_ip(address)
ip_count = db.network_count_available_ips(context.get_admin_context(),
network['id'])
self.assertEqual(ip_count, 0)
self.assertRaises(db.NoMoreAddresses,
self.network.allocate_fixed_ip,
self.context,
'foo')
for i in range(num_available_ips):
self.network.deallocate_fixed_ip(self.context, addresses[i])
release_ip(addresses[i])
db.instance_destroy(context.get_admin_context(), instance_ids[i])
ip_count = db.network_count_available_ips(context.get_admin_context(),
network['id'])
self.assertEqual(ip_count, num_available_ips)
def _is_allocated_in_project(self, address, project_id):
"""Returns true if address is in specified project"""
project_net = db.project_get_network(context.get_admin_context(),
project_id)
network = db.fixed_ip_get_network(context.get_admin_context(),
address)
instance = db.fixed_ip_get_instance(context.get_admin_context(),
address)
# instance exists until release
return instance is not None and network['id'] == project_net['id']
def run(self, result=None):
if(FLAGS.network_manager == 'nova.network.manager.VlanManager'):
super(VlanNetworkTestCase, self).run(result)

View File

@@ -0,0 +1,252 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMWareAPI.
"""
import stubout
from nova import context
from nova import db
from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import power_state
from nova.tests.glance import stubs as glance_stubs
from nova.tests.vmwareapi import db_fakes
from nova.tests.vmwareapi import stubs
from nova.virt import vmwareapi_conn
from nova.virt.vmwareapi import fake as vmwareapi_fake
FLAGS = flags.FLAGS
class VMWareAPIVMTestCase(test.TestCase):
"""Unit tests for Vmware API connection calls."""
def setUp(self):
super(VMWareAPIVMTestCase, self).setUp()
self.flags(vmwareapi_host_ip='test_url',
vmwareapi_host_username='test_username',
vmwareapi_host_password='test_pass')
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
self.stubs = stubout.StubOutForTesting()
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
self.conn = vmwareapi_conn.get_connection(False)
def _create_instance_in_the_db(self):
values = {'name': 1,
'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
}
self.instance = db.instance_create(values)
def _create_vm(self):
"""Create and spawn the VM."""
self._create_instance_in_the_db()
self.type_data = db.instance_type_get_by_name(None, 'm1.large')
self.conn.spawn(self.instance)
self._check_vm_record()
def _check_vm_record(self):
"""
Check if the spawned VM's properties correspond to the instance in
the db.
"""
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
# Get Nova record for VM
vm_info = self.conn.get_info(1)
# Get record for VM
vms = vmwareapi_fake._get_objects("VirtualMachine")
vm = vms[0]
# Check that m1.large above turned into the right thing.
mem_kib = long(self.type_data['memory_mb']) << 10
vcpus = self.type_data['vcpus']
self.assertEquals(vm_info['max_mem'], mem_kib)
self.assertEquals(vm_info['mem'], mem_kib)
self.assertEquals(vm.get("summary.config.numCpu"), vcpus)
self.assertEquals(vm.get("summary.config.memorySizeMB"),
self.type_data['memory_mb'])
# Check that the VM is running according to Nova
self.assertEquals(vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to vSphere API.
self.assertEquals(vm.get("runtime.powerState"), 'poweredOn')
def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
"""
Check if the get_info returned values correspond to the instance
object in the db.
"""
mem_kib = long(self.type_data['memory_mb']) << 10
self.assertEquals(info["state"], pwr_state)
self.assertEquals(info["max_mem"], mem_kib)
self.assertEquals(info["mem"], mem_kib)
self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
def test_list_instances(self):
instances = self.conn.list_instances()
self.assertEquals(len(instances), 0)
def test_list_instances_1(self):
self._create_vm()
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
def test_spawn(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
def test_snapshot(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
self.conn.snapshot(self.instance, "Test-Snapshot")
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
def test_snapshot_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(Exception, self.conn.snapshot, self.instance,
"Test-Snapshot")
def test_reboot(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
self.conn.reboot(self.instance)
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(Exception, self.conn.reboot, self.instance)
def test_reboot_not_poweredon(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance, self.dummy_callback_handler)
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.PAUSED)
self.assertRaises(Exception, self.conn.reboot, self.instance)
def test_suspend(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance, self.dummy_callback_handler)
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.PAUSED)
def test_suspend_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(Exception, self.conn.suspend, self.instance,
self.dummy_callback_handler)
def test_resume(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance, self.dummy_callback_handler)
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.PAUSED)
self.conn.resume(self.instance, self.dummy_callback_handler)
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
def test_resume_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(Exception, self.conn.resume, self.instance,
self.dummy_callback_handler)
def test_resume_not_suspended(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
self.assertRaises(Exception, self.conn.resume, self.instance,
self.dummy_callback_handler)
def test_get_info(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
def test_destroy(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
self.conn.destroy(self.instance)
instances = self.conn.list_instances()
self.assertEquals(len(instances), 0)
def test_destroy_non_existent(self):
self._create_instance_in_the_db()
self.assertEquals(self.conn.destroy(self.instance), None)
def test_pause(self):
pass
def test_unpause(self):
pass
def test_diagnostics(self):
pass
def test_get_console_output(self):
pass
def test_get_ajax_console(self):
pass
def dummy_callback_handler(self, ret):
"""
Dummy callback function to be passed to suspend, resume, etc., calls.
"""
pass
def tearDown(self):
super(VMWareAPIVMTestCase, self).tearDown()
vmwareapi_fake.cleanup()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
self.stubs.UnsetAll()

View File

@@ -336,8 +336,8 @@ class ISCSITestCase(DriverTestCase):
self.mox.StubOutWithMock(self.volume.driver, '_execute')
for i in volume_id_list:
tid = db.volume_get_iscsi_target_num(self.context, i)
self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
% locals())
self.volume.driver._execute("sudo", "ietadm", "--op", "show",
"--tid=%(tid)d" % locals())
self.stream.truncate(0)
self.mox.ReplayAll()
@@ -355,8 +355,9 @@ class ISCSITestCase(DriverTestCase):
# the first vblade process isn't running
tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
self.mox.StubOutWithMock(self.volume.driver, '_execute')
self.volume.driver._execute("sudo ietadm --op show --tid=%(tid)d"
% locals()).AndRaise(exception.ProcessExecutionError())
self.volume.driver._execute("sudo", "ietadm", "--op", "show",
"--tid=%(tid)d" % locals()).AndRaise(
exception.ProcessExecutionError())
self.mox.ReplayAll()
self.assertRaises(exception.ProcessExecutionError,

View File

@@ -19,11 +19,15 @@ Test suite for XenAPI
"""
import functools
import os
import re
import stubout
import ast
from nova import db
from nova import context
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
@@ -38,6 +42,9 @@ from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
from nova.tests import fake_utils
LOG = logging.getLogger('nova.tests.test_xenapi')
FLAGS = flags.FLAGS
@@ -64,13 +71,14 @@ class XenAPIVolumeTestCase(test.TestCase):
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.context = context.RequestContext('fake', 'fake', False)
FLAGS.target_host = '127.0.0.1'
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
self.values = {'name': 1, 'id': 1,
self.values = {'id': 1,
'project_id': 'fake',
'user_id': 'fake',
'image_id': 1,
@@ -90,7 +98,7 @@ class XenAPIVolumeTestCase(test.TestCase):
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(context.get_admin_context(), vol)
return db.volume_create(self.context, vol)
def test_create_iscsi_storage(self):
""" This shows how to test helper classes' methods """
@@ -126,7 +134,7 @@ class XenAPIVolumeTestCase(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.values)
instance = db.instance_create(self.context, self.values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')
@@ -146,7 +154,7 @@ class XenAPIVolumeTestCase(test.TestCase):
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.values)
instance = db.instance_create(self.context, self.values)
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(Exception,
conn.attach_volume,
@@ -175,8 +183,9 @@ class XenAPIVMTestCase(test.TestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
self.stubs = stubout.StubOutForTesting()
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
instance_name_template='%d')
xenapi_fake.reset()
xenapi_fake.create_local_srs()
db_fakes.stub_out_db_instance_api(self.stubs)
@@ -186,8 +195,11 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_stream_disk(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(VMOps, 'reset_network', reset_network)
stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
fake_utils.stub_out_utils_execute(self.stubs)
self.context = context.RequestContext('fake', 'fake', False)
self.conn = xenapi_conn.get_connection(False)
def test_list_instances_0(self):
@@ -212,7 +224,7 @@ class XenAPIVMTestCase(test.TestCase):
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEquals(vm_labels, [1])
self.assertEquals(vm_labels, ['1'])
def ensure_vbd_was_torn_down():
vbd_labels = []
@@ -220,7 +232,7 @@ class XenAPIVMTestCase(test.TestCase):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEquals(vbd_labels, [1])
self.assertEquals(vbd_labels, ['1'])
def ensure_vdi_was_torn_down():
for vdi_ref in xenapi_fake.get_all('VDI'):
@@ -237,11 +249,10 @@ class XenAPIVMTestCase(test.TestCase):
def create_vm_record(self, conn, os_type):
instances = conn.list_instances()
self.assertEquals(instances, [1])
self.assertEquals(instances, ['1'])
# Get Nova record for VM
vm_info = conn.get_info(1)
# Get XenAPI record for VM
vms = [rec for ref, rec
in xenapi_fake.get_all_records('VM').iteritems()
@@ -250,7 +261,7 @@ class XenAPIVMTestCase(test.TestCase):
self.vm_info = vm_info
self.vm = vm
def check_vm_record(self, conn):
def check_vm_record(self, conn, check_injection=False):
# Check that m1.large above turned into the right thing.
instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
@@ -270,6 +281,25 @@ class XenAPIVMTestCase(test.TestCase):
# Check that the VM is running according to XenAPI.
self.assertEquals(self.vm['power_state'], 'Running')
if check_injection:
xenstore_data = self.vm['xenstore_data']
key = 'vm-data/networking/aabbccddeeff'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertEquals(tcpip_data, {
'label': 'test_network',
'broadcast': '10.0.0.255',
'ips': [{'ip': '10.0.0.3',
'netmask':'255.255.255.0',
'enabled':'1'}],
'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff',
'netmask': '120',
'enabled': '1',
'gateway': 'fe80::a00:1'}],
'mac': 'aa:bb:cc:dd:ee:ff',
'dns': ['10.0.0.2'],
'gateway': '10.0.0.1'})
def check_vm_params_for_windows(self):
self.assertEquals(self.vm['platform']['nx'], 'true')
self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
@@ -303,10 +333,10 @@ class XenAPIVMTestCase(test.TestCase):
self.assertEquals(self.vm['HVM_boot_policy'], '')
def _test_spawn(self, image_id, kernel_id, ramdisk_id,
instance_type="m1.large", os_type="linux"):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
values = {'name': 1,
'id': 1,
instance_type="m1.large", os_type="linux",
check_injection=False):
stubs.stubout_loopingcall_start(self.stubs)
values = {'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': image_id,
@@ -315,12 +345,10 @@ class XenAPIVMTestCase(test.TestCase):
'instance_type': instance_type,
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': os_type}
conn = xenapi_conn.get_connection(False)
instance = db.instance_create(values)
conn.spawn(instance)
self.create_vm_record(conn, os_type)
self.check_vm_record(conn)
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
self.create_vm_record(self.conn, os_type)
self.check_vm_record(self.conn, check_injection)
def test_spawn_not_enough_memory(self):
FLAGS.xenapi_image_service = 'glance'
@@ -361,6 +389,105 @@ class XenAPIVMTestCase(test.TestCase):
glance_stubs.FakeGlance.IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_netinject_file(self):
FLAGS.xenapi_image_service = 'glance'
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _tee_handler(cmd, **kwargs):
input = kwargs.get('process_input', None)
self.assertNotEqual(input, None)
config = [line.strip() for line in input.split("\n")]
# Find the start of eth0 configuration and check it
index = config.index('auto eth0')
self.assertEquals(config[index + 1:index + 8], [
'iface eth0 inet static',
'address 10.0.0.3',
'netmask 255.255.255.0',
'broadcast 10.0.0.255',
'gateway 10.0.0.1',
'dns-nameservers 10.0.0.2',
''])
self._tee_executed = True
return '', ''
fake_utils.fake_execute_set_repliers([
# Capture the sudo tee .../etc/network/interfaces command
(r'(sudo\s+)?tee.*interfaces', _tee_handler),
])
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
check_injection=True)
self.assertTrue(self._tee_executed)
def test_spawn_netinject_xenstore(self):
FLAGS.xenapi_image_service = 'glance'
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
# When mounting, create real files under the mountpoint to simulate
# files in the mounted filesystem
# mount point will be the last item of the command list
self._tmpdir = cmd[len(cmd) - 1]
LOG.debug(_('Creating files in %s to simulate guest agent' %
self._tmpdir))
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'), 'w').close()
return '', ''
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
# Umount would normally make files in the mounted filesystem
# disappear, so do that here
LOG.debug(_('Removing simulated guest agent files in %s' %
self._tmpdir))
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
os.rmdir(os.path.join(self._tmpdir, 'usr'))
return '', ''
def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
self._tee_executed = True
return '', ''
fake_utils.fake_execute_set_repliers([
(r'(sudo\s+)?mount', _mount_handler),
(r'(sudo\s+)?umount', _umount_handler),
(r'(sudo\s+)?tee.*interfaces', _tee_handler)])
self._test_spawn(1, 2, 3, check_injection=True)
# tee must not run in this case, where an injection-capable
# guest agent is detected
self.assertFalse(self._tee_executed)
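fake_utils.fake_execute_set_repliers pairs a regex with a handler that fakes a command's (stdout, stderr). A rough sketch of that dispatch pattern (not the actual nova.tests.fake_utils implementation):

    import re

    _repliers = []

    def fake_execute_set_repliers(repliers):
        # each entry is (regex, handler); handler(cmd, **kwargs) -> (out, err)
        _repliers[:] = repliers

    def fake_execute(*cmd_parts, **kwargs):
        cmd = ' '.join(str(part) for part in cmd_parts)
        for regex, handler in _repliers:
            if re.search(regex, cmd):
                return handler(cmd, **kwargs)
        return '', ''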
def test_spawn_with_network_qos(self):
self._create_instance()
for vif_ref in xenapi_fake.get_all('VIF'):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
str(4 * 1024))
def test_rescue(self):
self.flags(xenapi_inject_image=False)
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
conn.rescue(instance, None)
def test_unrescue(self):
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(Exception, conn.unrescue, instance, None)
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
self.manager.delete_project(self.project)
@@ -371,8 +498,8 @@ class XenAPIVMTestCase(test.TestCase):
def _create_instance(self):
"""Creates and spawns a test instance"""
stubs.stubout_loopingcall_start(self.stubs)
values = {
'name': 1,
'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
@@ -382,7 +509,7 @@ class XenAPIVMTestCase(test.TestCase):
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}
instance = db.instance_create(values)
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
return instance
@@ -427,21 +554,26 @@ class XenAPIMigrateInstance(test.TestCase):
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.values = {'name': 1, 'id': 1,
self.context = context.RequestContext('fake', 'fake', False)
self.values = {'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': 1,
'kernel_id': None,
'ramdisk_id': None,
'local_gb': 5,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
@@ -452,14 +584,15 @@ class XenAPIMigrateInstance(test.TestCase):
self.stubs.UnsetAll()
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.values)
instance = db.instance_create(self.context, self.values)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn.migrate_disk_and_power_off(instance, '127.0.0.1')
def test_finish_resize(self):
instance = db.instance_create(self.values)
instance = db.instance_create(self.context, self.values)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'))

View File

@@ -76,6 +76,40 @@ class ZoneManagerTestCase(test.TestCase):
self.assertEquals(len(zm.zone_states), 1)
self.assertEquals(zm.zone_states[1].username, 'user1')
def test_service_capabilities(self):
zm = zone_manager.ZoneManager()
caps = zm.get_zone_capabilities(self, None)
self.assertEquals(caps, {})
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
caps = zm.get_zone_capabilities(self, None)
self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
zm.update_service_capabilities("svc1", "host1", dict(a=2, b=3))
caps = zm.get_zone_capabilities(self, None)
self.assertEquals(caps, dict(svc1_a=(2, 2), svc1_b=(3, 3)))
zm.update_service_capabilities("svc1", "host2", dict(a=20, b=30))
caps = zm.get_zone_capabilities(self, None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30)))
zm.update_service_capabilities("svc10", "host1", dict(a=99, b=99))
caps = zm.get_zone_capabilities(self, None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
svc10_a=(99, 99), svc10_b=(99, 99)))
zm.update_service_capabilities("svc1", "host3", dict(c=5))
caps = zm.get_zone_capabilities(self, None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
svc1_c=(5, 5), svc10_a=(99, 99),
svc10_b=(99, 99)))
caps = zm.get_zone_capabilities(self, 'svc1')
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
svc1_c=(5, 5)))
caps = zm.get_zone_capabilities(self, 'svc10')
self.assertEquals(caps, dict(svc10_a=(99, 99), svc10_b=(99, 99)))
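What the assertions above pin down is a min/max fold per '<service>_<key>' across the most recent report from each host; a compact sketch of that aggregation (illustrative, not the ZoneManager code itself):

    def aggregate_capabilities(reports):
        # reports: iterable of (service, host, {key: value}) tuples
        caps = {}
        for service, _host, values in reports:
            for key, value in values.items():
                name = '%s_%s' % (service, key)
                low, high = caps.get(name, (value, value))
                caps[name] = (min(low, value), max(high, value))
        return caps

    aggregate_capabilities([('svc1', 'host1', dict(a=2, b=3)),
                            ('svc1', 'host2', dict(a=20, b=30))])
    # -> {'svc1_a': (2, 20), 'svc1_b': (3, 30)}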
def test_refresh_from_db_replace_existing(self):
zm = zone_manager.ZoneManager()
zone_state = zone_manager.ZoneState()

View File

@@ -0,0 +1,21 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`vmwareapi` -- Stubs for VMware API
=======================================
"""

View File

@@ -0,0 +1,109 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts, mocks and fixtures for the test suite
"""
import time
from nova import db
from nova import utils
def stub_out_db_instance_api(stubs):
"""Stubs out the db API for creating Instances."""
INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
'm1.medium':
dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
'm1.xlarge':
dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
class FakeModel(object):
"""Stubs out for model."""
def __init__(self, values):
self.values = values
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
if key in self.values:
return self.values[key]
else:
raise NotImplementedError()
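FakeModel just lets a plain dict stand in for a model row, so the stubbed db calls can be consumed through either attribute or item access (hypothetical values):

    row = FakeModel({'id': 1, 'mac_address': 'aa:bb:cc:dd:ee:ff'})
    assert row.id == 1
    assert row['mac_address'] == 'aa:bb:cc:dd:ee:ff'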
def fake_instance_create(values):
"""Stubs out the db.instance_create method."""
type_data = INSTANCE_TYPES[values['instance_type']]
base_options = {
'name': values['name'],
'id': values['id'],
'reservation_id': utils.generate_uid('r'),
'image_id': values['image_id'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
'state_description': 'scheduling',
'user_id': values['user_id'],
'project_id': values['project_id'],
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'instance_type': values['instance_type'],
'memory_mb': type_data['memory_mb'],
'mac_address': values['mac_address'],
'vcpus': type_data['vcpus'],
'local_gb': type_data['local_gb'],
}
return FakeModel(base_options)
def fake_network_get_by_instance(context, instance_id):
"""Stubs out the db.network_get_by_instance method."""
fields = {
'bridge': 'vmnet0',
'netmask': '255.255.255.0',
'gateway': '10.10.10.1',
'vlan': 100}
return FakeModel(fields)
def fake_instance_action_create(context, action):
"""Stubs out the db.instance_action_create method."""
pass
def fake_instance_get_fixed_address(context, instance_id):
"""Stubs out the db.instance_get_fixed_address method."""
return '10.10.10.10'
def fake_instance_type_get_all(context, inactive=0):
return INSTANCE_TYPES
def fake_instance_type_get_by_name(context, name):
return INSTANCE_TYPES[name]
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
stubs.Set(db, 'instance_action_create', fake_instance_action_create)
stubs.Set(db, 'instance_get_fixed_address',
fake_instance_get_fixed_address)
stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)

View File

@@ -0,0 +1,46 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts for the test suite
"""
from nova.virt import vmwareapi_conn
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vmware_images
def fake_get_vim_object(arg):
"""Stubs out the VMWareAPISession's get_vim_object method."""
return fake.FakeVim()
def fake_is_vim_object(arg, module):
"""Stubs out the VMWareAPISession's is_vim_object method."""
return isinstance(module, fake.FakeVim)
def set_stubs(stubs):
"""Set the stubs."""
stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
fake.fake_get_vmdk_size_and_properties)
stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
stubs.Set(vmwareapi_conn.VMWareAPISession, "_get_vim_object",
fake_get_vim_object)
stubs.Set(vmwareapi_conn.VMWareAPISession, "_is_vim_object",
fake_is_vim_object)

View File

@@ -21,6 +21,7 @@ from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova import utils
def stubout_instance_snapshot(stubs):
@@ -137,14 +138,17 @@ def stubout_is_vdi_pv(stubs):
stubs.Set(vm_utils, '_is_vdi_pv', f)
def stubout_loopingcall_start(stubs):
def fake_start(self, interval, now=True):
self.f(*self.args, **self.kw)
stubs.Set(utils.LoopingCall, 'start', fake_start)
class FakeSessionForVMTests(fake.SessionBase):
""" Stubs out a XenAPISession for VM tests """
def __init__(self, uri):
super(FakeSessionForVMTests, self).__init__(uri)
def network_get_all_records_where(self, _1, _2):
return self.xenapi.network.get_all_records()
def host_call_plugin(self, _1, _2, _3, _4, _5):
sr_ref = fake.get_all('SR')[0]
vdi_ref = fake.create_vdi('', False, sr_ref, False)
@@ -185,6 +189,25 @@ class FakeSessionForVMTests(fake.SessionBase):
pass
def stub_out_vm_methods(stubs):
def fake_shutdown(self, inst, vm, method="clean"):
pass
def fake_acquire_bootlock(self, vm):
pass
def fake_release_bootlock(self, vm):
pass
def fake_spawn_rescue(self, inst):
inst._rescue = False
stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown)
stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)
stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock)
stubs.Set(vmops.VMOps, "spawn_rescue", fake_spawn_rescue)
class FakeSessionForVolumeTests(fake.SessionBase):
""" Stubs out a XenAPISession for Volume tests """
def __init__(self, uri):
@@ -228,6 +251,9 @@ class FakeSessionForMigrationTests(fake.SessionBase):
def VDI_get_by_uuid(*args):
return 'hurr'
def VDI_resize_online(*args):
pass
def VM_start(self, _1, ref, _2, _3):
vm = fake.get_record('VM', ref)
if vm['power_state'] != 'Halted':
@@ -240,7 +266,7 @@ class FakeSessionForMigrationTests(fake.SessionBase):
def stub_out_migration_methods(stubs):
def fake_get_snapshot(self, instance):
return 'foo', 'bar'
return 'vm_ref', dict(image='foo', snap='bar')
@classmethod
def fake_get_vdi(cls, session, vm_ref):
@@ -249,7 +275,7 @@ def stub_out_migration_methods(stubs):
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
return vdi_ref, {'uuid': vdi_rec['uuid'], }
def fake_shutdown(self, inst, vm, method='clean'):
def fake_shutdown(self, inst, vm, hard=True):
pass
@classmethod

View File

@@ -300,7 +300,7 @@ msgstr ""
msgid "instance %s: starting..."
msgstr ""
#. pylint: disable-msg=W0702
#. pylint: disable=W0702
#: ../nova/compute/manager.py:219
#, python-format
msgid "instance %s: Failed to spawn"
@@ -440,7 +440,7 @@ msgid ""
"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
#. pylint: disable-msg=W0702
#. pylint: disable=W0702
#. NOTE(vish): The inline callback eats the exception info so we
#. log the traceback here and reraise the same
#. exception below.
@@ -591,7 +591,7 @@ msgstr ""
msgid "Starting Bridge interface for %s"
msgstr ""
#. pylint: disable-msg=W0703
#. pylint: disable=W0703
#: ../nova/network/linux_net.py:314
#, python-format
msgid "Hupping dnsmasq threw %s"
@@ -602,7 +602,7 @@ msgstr ""
msgid "Pid %d is stale, relaunching dnsmasq"
msgstr ""
#. pylint: disable-msg=W0703
#. pylint: disable=W0703
#: ../nova/network/linux_net.py:358
#, python-format
msgid "killing radvd threw %s"
@@ -613,7 +613,7 @@ msgstr ""
msgid "Pid %d is stale, relaunching radvd"
msgstr ""
#. pylint: disable-msg=W0703
#. pylint: disable=W0703
#: ../nova/network/linux_net.py:449
#, python-format
msgid "Killing dnsmasq threw %s"

View File

@@ -60,6 +60,8 @@ import os
import unittest
import sys
gettext.install('nova', unicode=1)
from nose import config
from nose import core
from nose import result