tests and merge with trunk

Ilya Alekseyev
2011-08-05 07:49:29 +04:00
63 changed files with 16618 additions and 2338 deletions


@@ -13,3 +13,7 @@ nova/vcsversion.py
clean.sqlite
run_tests.log
tests.sqlite
nova/tests/instance-*
tags
.coverage
covhtml


@@ -14,6 +14,7 @@
<code@term.ie> <github@anarkystic.com>
<code@term.ie> <termie@preciousroy.local>
<corywright@gmail.com> <cory.wright@rackspace.com>
<dan@nicira.com> <danwent@dan-xs3-cs>
<devin.carlen@gmail.com> <devcamcar@illian.local>
<ewan.mellor@citrix.com> <emellor@silver>
<itoumsn@nttdata.co.jp> <itoumsn@shayol>

Authors

@@ -1,4 +1,7 @@
Adam Gandelman <adamg@canonical.com>
Adam Johnson <adjohn@gmail.com>
Alex Meade <alex.meade@rackspace.com>
Alexander Sakhnov <asakhnov@mirantis.com>
Andrey Brindeyev <abrindeyev@griddynamics.com>
Andy Smith <code@term.ie>
Andy Southgate <andy.southgate@citrix.com>
@@ -6,6 +9,7 @@ Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Arvind Somya <asomya@cisco.com>
Bilal Akhtar <bilalakhtar@ubuntu.com>
Brian Lamar <brian.lamar@rackspace.com>
Brian Schott <bschott@isi.edu>
@@ -17,6 +21,7 @@ Christian Berendt <berendt@b1-systems.de>
Chuck Short <zulcss@ubuntu.com>
Cory Wright <corywright@gmail.com>
Dan Prince <dan.prince@rackspace.com>
Dan Wendlandt <dan@nicira.com>
Dave Walker <DaveWalker@ubuntu.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
@@ -59,11 +64,14 @@ Kirill Shileev <kshileev@gmail.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Lvov Maxim <usrleon@gmail.com>
Mandell Degerness <mdegerne@gmail.com>
Mark Washenberger <mark.washenberger@rackspace.com>
Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
Matthew Hooker <matt@cloudscaling.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Mike Scherbakov <mihgen@gmail.com>
Mohammed Naser <mnaser@vexxhost.com>
Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
MORITA Kazutaka <morita.kazutaka@gmail.com>
@@ -80,10 +88,13 @@ Rick Harris <rconradharris@gmail.com>
Rob Kost <kost@isi.edu>
Ryan Lane <rlane@wikimedia.org>
Ryan Lucio <rlucio@internap.com>
Ryu Ishimoto <ryu@midokura.jp>
Salvatore Orlando <salvatore.orlando@eu.citrix.com>
Sandy Walsh <sandy.walsh@rackspace.com>
Sateesh Chodapuneedi <sateesh.chodapuneedi@citrix.com>
Scott Moser <smoser@ubuntu.com>
Soren Hansen <soren.hansen@rackspace.com>
Stephanie Reese <reese.sm@gmail.com>
Thierry Carrez <thierry@openstack.org>
Todd Willey <todd@ansolabs.com>
Trey Morris <trey.morris@rackspace.com>
@@ -96,3 +107,4 @@ Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Yuriy Taraday <yorik.sar@gmail.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
Zed Shaw <zedshaw@zedshaw.com>

HACKING

@@ -5,12 +5,23 @@ Step 1: Read http://www.python.org/dev/peps/pep-0008/
Step 2: Read http://www.python.org/dev/peps/pep-0008/ again
Step 3: Read on
General
-------
- Put two newlines between top-level code (funcs, classes, etc)
- Put one newline between methods in classes and anywhere else
- Do not write "except:", use "except Exception:" at the very least
- Include your name with TODOs as in "#TODO(termie)"
- Do not name anything the same name as a built-in or reserved word
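For illustration, a minimal hypothetical module that follows all of the rules
above (every name in it is made up):
Example:
    # hypothetical_example.py -- illustrates the General rules above
    import os


    def describe(path):
        """Return a short description of a path."""
        # TODO(yourname): handle paths that do not exist
        return '%s (%d bytes)' % (path, os.path.getsize(path))


    class PathInspector(object):
        """Helper whose methods are separated by a single newline."""

        def exists(self, path):
            return os.path.exists(path)

        def size_or_none(self, path):
            try:
                return os.path.getsize(path)
            except Exception:
                # at minimum "except Exception:", never a bare "except:"
                return None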
Imports
-------
- thou shalt not import objects, only modules
- thou shalt not import more than one module per line
- thou shalt not make relative imports
- thou shalt organize your imports according to the following template
- Do not import objects, only modules
- Do not import more than one module per line
- Do not make relative imports
- Order your imports by the full module path
- Organize your imports according to the following template
::
# vim: tabstop=4 shiftwidth=4 softtabstop=4
@@ -22,16 +33,6 @@ Imports
{{begin your code}}
General
-------
- thou shalt put two newlines twixt toplevel code (funcs, classes, etc)
- thou shalt put one newline twixt methods in classes and anywhere else
- thou shalt not write "except:", use "except Exception:" at the very least
- thou shalt include your name with TODOs as in "TODO(termie)"
- thou shalt not name anything the same name as a builtin or reserved word
- thou shalt not violate causality in our time cone, or else
Human Alphabetical Order Examples
---------------------------------
::
@@ -42,11 +43,13 @@ Human Alphabetical Order Examples
import time
import unittest
from nova import flags
from nova import test
import nova.api.ec2
from nova.api import openstack
from nova.auth import users
from nova.endpoint import api
import nova.flags
from nova.endpoint import cloud
from nova import test
Docstrings
----------
@@ -70,6 +73,88 @@ Docstrings
:param foo: the foo parameter
:param bar: the bar parameter
:returns: return_type -- description of the return value
:returns: description of the return value
:raises: AttributeError, KeyError
"""
Dictionaries/Lists
------------------
If a dictionary (dict) or list object is longer than 80 characters, its
items should be split with newlines. Embedded iterables should have their
items indented. Additionally, the last item in the dictionary should have
a trailing comma. This increases readability and simplifies future diffs.
Example:
my_dictionary = {
"image": {
"name": "Just a Snapshot",
"size": 2749573,
"properties": {
"user_id": 12,
"arch": "x86_64",
},
"things": [
"thing_one",
"thing_two",
],
"status": "ACTIVE",
},
}
Calling Methods
---------------
Method calls that would run 80 characters or longer should put each
argument on its own line. This is not a requirement, but a guideline.
unnecessarily_long_function_name('string one',
'string two',
kwarg1=constants.ACTIVE,
kwarg2=['a', 'b', 'c'])
Rather than constructing parameters inline, it is better to break things up:
list_of_strings = [
'what_a_long_string',
'not as long',
]
dict_of_numbers = {
'one': 1,
'two': 2,
'twenty four': 24,
}
object_one.call_a_method('string three',
'string four',
kwarg1=list_of_strings,
kwarg2=dict_of_numbers)
Internationalization (i18n) Strings
-----------------------------------
To support multiple languages, we have a mechanism for automatic
translation of exception and log strings.
Example:
msg = _("An error occurred")
raise HTTPBadRequest(explanation=msg)
If you have a variable to place within the string, first internationalize
the template string, then do the replacement.
Example:
msg = _("Missing parameter: %s") % ("flavor",)
LOG.error(msg)
If you have multiple variables to place in the string, use keyword
parameters. This helps our translators reorder parameters when needed.
Example:
msg = _("The server with id %(s_id)s has no key %(m_key)s")
LOG.error(msg % {"s_id": "1234", "m_key": "imageId"})


@@ -114,11 +114,11 @@ class AjaxConsoleProxy(object):
AjaxConsoleProxy.tokens[kwargs['token']] = \
{'args': kwargs, 'last_activity': time.time()}
conn = rpc.Connection.instance(new=True)
consumer = rpc.TopicAdapterConsumer(
connection=conn,
proxy=TopicProxy,
topic=FLAGS.ajax_console_proxy_topic)
conn = rpc.create_connection(new=True)
consumer = rpc.create_consumer(
conn,
FLAGS.ajax_console_proxy_topic,
TopicProxy)
def delete_expired_tokens():
now = time.time()


@@ -91,7 +91,7 @@ def init_leases(interface):
"""Get the list of hosts for an interface."""
ctxt = context.get_admin_context()
network_ref = db.network_get_by_bridge(ctxt, interface)
return linux_net.get_dhcp_leases(ctxt, network_ref['id'])
return linux_net.get_dhcp_leases(ctxt, network_ref)
def main():


@@ -1,59 +0,0 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Daemon for Nova RRD based instance resource monitoring.
"""
import gettext
import os
import sys
from twisted.application import service
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import log as logging
from nova import utils
from nova import twistd
from nova.compute import monitor
LOG = logging.getLogger('nova.instancemonitor')
if __name__ == '__main__':
utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
LOG.warn(_('Starting instance monitor'))
# pylint: disable=C0103
monitor = monitor.InstanceMonitor()
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals below
application = service.Application('nova-instancemonitor')
monitor.setServiceParent(application)


@@ -81,7 +81,6 @@ class LogReader(object):
if level == 'ERROR':
self.handle_logged_error(line)
elif level == '[-]' and self.last_error:
# twisted stack trace line
clean_line = " ".join(line.split(" ")[6:])
self.last_error.trace = self.last_error.trace + clean_line
else:

File diff suppressed because it is too large


@@ -18,7 +18,7 @@
# under the License.
"""
Twisted daemon for nova objectstore. Supports S3 API.
Daemon for nova objectstore. Supports S3 API.
"""
import gettext


@@ -127,7 +127,7 @@ class DbDriver(object):
try:
project = db.project_create(context.get_admin_context(), values)
except exception.Duplicate:
except exception.DBError:
raise exception.ProjectExists(project=name)
for member in members:


@@ -518,6 +518,14 @@ class AuthManager(object):
return drv.get_user_roles(User.safe_id(user),
Project.safe_id(project))
def get_active_roles(self, user, project=None):
"""Get all active roles for context"""
if project:
roles = FLAGS.allowed_roles + ['projectmanager']
else:
roles = FLAGS.global_roles
return [role for role in roles if self.has_role(user, role, project)]
def get_project(self, pid):
"""Get project object by id"""
with self.driver() as drv:
@@ -730,10 +738,6 @@ class AuthManager(object):
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
@staticmethod
def get_key_pairs(context):
return db.key_pair_get_all_by_user(context.elevated(), context.user_id)
def get_credentials(self, user, project=None, use_dmz=True):
"""Get credential zip for user in project"""
if not isinstance(user, User):
@@ -785,7 +789,7 @@ class AuthManager(object):
return read_buffer
def get_environment_rc(self, user, project=None, use_dmz=True):
"""Get credential zip for user in project"""
"""Get environment rc for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:


@@ -343,7 +343,7 @@ DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'),
'Directory for lock files')
DEFINE_string('logdir', None, 'output to a per-service log file in named '
'directory')
DEFINE_integer('logfile_mode', 0644, 'Default file mode of the logs.')
DEFINE_string('sqlite_db', 'nova.sqlite', 'file name for sqlite')
DEFINE_string('sql_connection',
'sqlite:///$state_path/$sqlite_db',
@@ -387,3 +387,8 @@ DEFINE_list('zone_capabilities',
'Key/Multi-value list representing capabilities of this zone')
DEFINE_string('build_plan_encryption_key', None,
'128bit (hex) encryption key for scheduler build plans.')
DEFINE_bool('start_guests_on_host_boot', False,
'Whether to restart guests when the host reboots')
DEFINE_bool('resume_guests_state_on_host_boot', False,
'Whether to start guests that were running before the host reboot')


@@ -43,8 +43,8 @@ from nova import version
FLAGS = flags.FLAGS
flags.DEFINE_string('logging_context_format_string',
'%(asctime)s %(levelname)s %(name)s '
'[%(request_id)s %(user)s '
'%(project)s] %(message)s',
'[%(request_id)s %(user_id)s '
'%(project_id)s] %(message)s',
'format string to use for log messages with context')
flags.DEFINE_string('logging_default_format_string',
'%(asctime)s %(levelname)s %(name)s [-] '
@@ -257,6 +257,7 @@ class NovaRootLogger(NovaLogger):
self.filelog = WatchedFileHandler(logpath)
self.addHandler(self.filelog)
self.logpath = logpath
os.chmod(self.logpath, FLAGS.logfile_mode)
else:
self.removeHandler(self.filelog)
self.addHandler(self.streamlog)


@@ -17,7 +17,9 @@ import uuid
from nova import flags
from nova import utils
from nova import log as logging
LOG = logging.getLogger('nova.exception')
FLAGS = flags.FLAGS
@@ -37,6 +39,12 @@ class BadPriorityException(Exception):
pass
def publisher_id(service, host=None):
if not host:
host = FLAGS.host
return "%s.%s" % (service, host)
def notify(publisher_id, event_type, priority, payload):
"""
Sends a notification using the specified driver
@@ -72,6 +80,10 @@ def notify(publisher_id, event_type, priority, payload):
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities' % priority))
# Ensure everything is JSON serializable.
payload = utils.to_primitive(payload, convert_instances=True)
driver = utils.import_object(FLAGS.notification_driver)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id,
@@ -79,4 +91,8 @@ def notify(publisher_id, event_type, priority, payload):
priority=priority,
payload=payload,
timestamp=str(utils.utcnow()))
driver.notify(msg)
try:
driver.notify(msg)
except Exception, e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system." % locals()))

nova/rpc/__init__.py (new file)

@@ -0,0 +1,66 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.utils import import_object
from nova.rpc.common import RemoteError, LOG
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('rpc_backend',
'nova.rpc.amqp',
"The messaging module to use, defaults to AMQP.")
RPCIMPL = import_object(FLAGS.rpc_backend)
def create_connection(new=True):
return RPCIMPL.Connection.instance(new=new)
def create_consumer(conn, topic, proxy, fanout=False):
if fanout:
return RPCIMPL.FanoutAdapterConsumer(
connection=conn,
topic=topic,
proxy=proxy)
else:
return RPCIMPL.TopicAdapterConsumer(
connection=conn,
topic=topic,
proxy=proxy)
def create_consumer_set(conn, consumers):
return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers)
def call(context, topic, msg):
return RPCIMPL.call(context, topic, msg)
def cast(context, topic, msg):
return RPCIMPL.cast(context, topic, msg)
def fanout_cast(context, topic, msg):
return RPCIMPL.fanout_cast(context, topic, msg)
def multicall(context, topic, msg):
return RPCIMPL.multicall(context, topic, msg)
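For illustration, a hedged sketch of how a service might use this new facade;
create_connection(), create_consumer(), and cast() are defined above, while the
proxy class, topic string, and message format are assumptions rather than code
from this commit:
    from nova import context
    from nova import rpc


    class EchoProxy(object):
        """Hypothetical proxy; incoming messages are dispatched to its methods."""

        def echo(self, ctxt, value):
            return value


    conn = rpc.create_connection(new=True)
    consumer = rpc.create_consumer(conn, 'echo.myhost', EchoProxy(), fanout=False)
    # (the service would then run the consumer loop; elided here)

    # A client addresses the same topic; the message names the proxy method
    # and its keyword arguments (format assumed from nova's rpc conventions).
    ctxt = context.get_admin_context()
    rpc.cast(ctxt, 'echo.myhost', {'method': 'echo', 'args': {'value': 'hi'}})
Passing fanout=True instead builds a FanoutAdapterConsumer, so every consumer
listening on the topic receives the message.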


@@ -44,9 +44,7 @@ from nova import fakerabbit
from nova import flags
from nova import log as logging
from nova import utils
LOG = logging.getLogger('nova.rpc')
from nova.rpc.common import RemoteError, LOG
FLAGS = flags.FLAGS
@@ -219,7 +217,7 @@ class AdapterConsumer(Consumer):
return
self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args)
@exception.wrap_exception
@exception.wrap_exception()
def _process_data(self, msg_id, ctxt, method, args):
"""Thread that maigcally looks for a method on the proxy
object and calls it.
@@ -418,25 +416,6 @@ def msg_reply(msg_id, reply=None, failure=None):
publisher.close()
class RemoteError(exception.Error):
"""Signifies that a remote class has raised an exception.
Contains a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
contains all of the relevant info.
"""
def __init__(self, exc_type, value, traceback):
self.exc_type = exc_type
self.value = value
self.traceback = traceback
super(RemoteError, self).__init__('%s %s\n%s' % (exc_type,
value,
traceback))
def _unpack_context(msg):
"""Unpack context from msg."""
context_dict = {}

nova/rpc/common.py (new file)

@@ -0,0 +1,23 @@
from nova import exception
from nova import log as logging
LOG = logging.getLogger('nova.rpc')
class RemoteError(exception.Error):
"""Signifies that a remote class has raised an exception.
Contains a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
contains all of the relevant info.
"""
def __init__(self, exc_type, value, traceback):
self.exc_type = exc_type
self.value = value
self.traceback = traceback
super(RemoteError, self).__init__('%s %s\n%s' % (exc_type,
value,
traceback))


@@ -28,6 +28,7 @@ from nova import flags
from nova import log as logging
from nova.scheduler import zone_aware_scheduler
from nova import utils
from nova import exception
LOG = logging.getLogger('nova.scheduler.least_cost')


@@ -81,7 +81,7 @@ class ZoneAwareScheduler(driver.Scheduler):
decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
try:
json_entry = decryptor(blob)
return json.dumps(entry)
return json.dumps(json_entry)
except M2Crypto.EVP.EVPError:
pass
return None


@@ -42,3 +42,4 @@ FLAGS['iscsi_num_targets'].SetDefault(8)
FLAGS['verbose'].SetDefault(True)
FLAGS['sqlite_db'].SetDefault("tests.sqlite")
FLAGS['use_ipv6'].SetDefault(True)
FLAGS['flat_network_bridge'].SetDefault('br100')


@@ -21,24 +21,18 @@ import random
from nova import context
from nova import db
from nova import flags
from nova import test
from nova.auth import manager
from nova.virt import hyperv
FLAGS = flags.FLAGS
FLAGS.connection_type = 'hyperv'
class HyperVTestCase(test.TestCase):
"""Test cases for the Hyper-V driver"""
def setUp(self):
super(HyperVTestCase, self).setUp()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext(self.user, self.project)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.flags(connection_type='hyperv')
def test_create_destroy(self):
"""Create a VM and destroy it"""


@@ -19,12 +19,9 @@ Tests For Scheduler Host Filters.
import json
from nova import exception
from nova import flags
from nova import test
from nova.scheduler import host_filter
FLAGS = flags.FLAGS
class FakeZoneManager:
pass
@@ -57,9 +54,9 @@ class HostFilterTestCase(test.TestCase):
'host_name-label': 'xs-%s' % multiplier}
def setUp(self):
self.old_flag = FLAGS.default_host_filter
FLAGS.default_host_filter = \
'nova.scheduler.host_filter.AllHostsFilter'
super(HostFilterTestCase, self).setUp()
default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter'
self.flags(default_host_filter=default_host_filter)
self.instance_type = dict(name='tiny',
memory_mb=50,
vcpus=10,
@@ -98,9 +95,6 @@ class HostFilterTestCase(test.TestCase):
host09['xpu_arch'] = 'fermi'
host09['xpu_info'] = 'Tesla 2150'
def tearDown(self):
FLAGS.default_host_filter = self.old_flag
def test_choose_filter(self):
# Test default filter ...
hf = host_filter.choose_host_filter()


@@ -16,13 +16,11 @@
Tests For Least Cost Scheduler
"""
from nova import flags
from nova import test
from nova.scheduler import least_cost
from nova.tests.scheduler import test_zone_aware_scheduler
MB = 1024 * 1024
FLAGS = flags.FLAGS
class FakeHost(object):
@@ -95,10 +93,9 @@ class LeastCostSchedulerTestCase(test.TestCase):
self.assertWeights(expected, num, request_spec, hosts)
def test_noop_cost_fn(self):
FLAGS.least_cost_scheduler_cost_functions = [
'nova.scheduler.least_cost.noop_cost_fn',
]
FLAGS.noop_cost_fn_weight = 1
self.flags(least_cost_scheduler_cost_functions=[
'nova.scheduler.least_cost.noop_cost_fn'],
noop_cost_fn_weight=1)
num = 1
request_spec = {}
@@ -109,10 +106,9 @@ class LeastCostSchedulerTestCase(test.TestCase):
self.assertWeights(expected, num, request_spec, hosts)
def test_cost_fn_weights(self):
FLAGS.least_cost_scheduler_cost_functions = [
'nova.scheduler.least_cost.noop_cost_fn',
]
FLAGS.noop_cost_fn_weight = 2
self.flags(least_cost_scheduler_cost_functions=[
'nova.scheduler.least_cost.noop_cost_fn'],
noop_cost_fn_weight=2)
num = 1
request_spec = {}
@@ -123,10 +119,9 @@ class LeastCostSchedulerTestCase(test.TestCase):
self.assertWeights(expected, num, request_spec, hosts)
def test_compute_fill_first_cost_fn(self):
FLAGS.least_cost_scheduler_cost_functions = [
'nova.scheduler.least_cost.compute_fill_first_cost_fn',
]
FLAGS.compute_fill_first_cost_fn_weight = 1
self.flags(least_cost_scheduler_cost_functions=[
'nova.scheduler.least_cost.compute_fill_first_cost_fn'],
compute_fill_first_cost_fn_weight=1)
num = 1
instance_type = {'memory_mb': 1024}


@@ -16,6 +16,8 @@
Tests For Zone Aware Scheduler.
"""
import json
import nova.db
from nova import exception
@@ -327,3 +329,19 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
sched._provision_resource_from_blob(None, request_spec, 1,
request_spec, {})
self.assertTrue(was_called)
def test_decrypt_blob(self):
"""Test that the decrypt method works."""
fixture = FakeZoneAwareScheduler()
test_data = {"foo": "bar"}
class StubDecryptor(object):
def decryptor(self, key):
return lambda blob: blob
self.stubs.Set(zone_aware_scheduler, 'crypto',
StubDecryptor())
self.assertEqual(fixture._decrypt_blob(test_data),
json.dumps(test_data))


@@ -16,7 +16,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import webob
from nova import context
@@ -41,7 +40,7 @@ class FakeApiRequest(object):
class AccessTestCase(test.TestCase):
def _env_for(self, ctxt, action):
env = {}
env['ec2.context'] = ctxt
env['nova.context'] = ctxt
env['ec2.request'] = FakeApiRequest(action)
return env
@@ -93,7 +92,11 @@ class AccessTestCase(test.TestCase):
super(AccessTestCase, self).tearDown()
def response_status(self, user, methodName):
ctxt = context.RequestContext(user, self.project)
roles = manager.AuthManager().get_active_roles(user, self.project)
ctxt = context.RequestContext(user.id,
self.project.id,
is_admin=user.is_admin(),
roles=roles)
environ = self._env_for(ctxt, methodName)
req = webob.Request.blank('/', environ)
resp = req.get_response(self.mw)
@@ -105,30 +108,26 @@ class AccessTestCase(test.TestCase):
def shouldDeny(self, user, methodName):
self.assertEqual(401, self.response_status(user, methodName))
def test_001_allow_all(self):
def test_allow_all(self):
users = [self.testadmin, self.testpmsys, self.testnet, self.testsys]
for user in users:
self.shouldAllow(user, '_allow_all')
def test_002_allow_none(self):
def test_allow_none(self):
self.shouldAllow(self.testadmin, '_allow_none')
users = [self.testpmsys, self.testnet, self.testsys]
for user in users:
self.shouldDeny(user, '_allow_none')
def test_003_allow_project_manager(self):
def test_allow_project_manager(self):
for user in [self.testadmin, self.testpmsys]:
self.shouldAllow(user, '_allow_project_manager')
for user in [self.testnet, self.testsys]:
self.shouldDeny(user, '_allow_project_manager')
def test_004_allow_sys_and_net(self):
def test_allow_sys_and_net(self):
for user in [self.testadmin, self.testnet, self.testsys]:
self.shouldAllow(user, '_allow_sys_and_net')
# denied because it doesn't have the per project sysadmin
for user in [self.testpmsys]:
self.shouldDeny(user, '_allow_sys_and_net')
if __name__ == "__main__":
# TODO: Implement use_fake as an option
unittest.main()


@@ -25,7 +25,6 @@ from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
from nova.api.ec2 import admin
from nova.image import fake
@@ -39,7 +38,7 @@ class AdminApiTestCase(test.TestCase):
super(AdminApiTestCase, self).setUp()
self.flags(connection_type='fake')
self.conn = rpc.Connection.instance()
self.conn = rpc.create_connection()
# set up our cloud
self.api = admin.AdminController()
@@ -51,11 +50,11 @@ class AdminApiTestCase(test.TestCase):
self.volume = self.start_service('volume')
self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('proj', 'admin', 'proj')
self.context = context.RequestContext(user=self.user,
project=self.project)
self.user_id = 'admin'
self.project_id = 'admin'
self.context = context.RequestContext(self.user_id,
self.project_id,
True)
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
@@ -73,11 +72,6 @@ class AdminApiTestCase(test.TestCase):
self.stubs.Set(rpc, 'cast', finish_cast)
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(AdminApiTestCase, self).tearDown()
def test_block_external_ips(self):
"""Make sure provider firewall rules are created."""
result = self.api.block_external_addresses(self.context, '1.1.1.1/32')


@@ -30,11 +30,11 @@ import webob
from nova import context
from nova import exception
from nova import test
from nova import wsgi
from nova.api import ec2
from nova.api.ec2 import apirequest
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.auth import manager
class FakeHttplibSocket(object):
@@ -92,7 +92,9 @@ class XmlConversionTestCase(test.TestCase):
conv = ec2utils._try_convert
self.assertEqual(conv('None'), None)
self.assertEqual(conv('True'), True)
self.assertEqual(conv('true'), True)
self.assertEqual(conv('False'), False)
self.assertEqual(conv('false'), False)
self.assertEqual(conv('0'), 0)
self.assertEqual(conv('42'), 42)
self.assertEqual(conv('3.14'), 3.14)
@@ -107,6 +109,8 @@ class Ec2utilsTestCase(test.TestCase):
def test_ec2_id_to_id(self):
self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)
def test_bad_ec2_id(self):
self.assertRaises(exception.InvalidEc2Id,
@@ -116,16 +120,85 @@ class Ec2utilsTestCase(test.TestCase):
def test_id_to_ec2_id(self):
self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')
def test_dict_from_dotted_str(self):
in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
expected_dict = {
'block_device_mapping': {
'1': {'device_name': '/dev/sda1',
'ebs': {'snapshot_id': 'snap-0000001c',
'volume_size': 80,
'delete_on_termination': False}},
'2': {'device_name': '/dev/sdc',
'virtual_name': 'ephemeral0'}}}
out_dict = ec2utils.dict_from_dotted_str(in_str)
self.assertDictMatch(out_dict, expected_dict)
def test_properties_root_device_name(self):
mappings = [{"device": "/dev/sda1", "virtual": "root"}]
properties0 = {'mappings': mappings}
properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
root_device_name = ec2utils.properties_root_device_name(properties0)
self.assertEqual(root_device_name, '/dev/sda1')
root_device_name = ec2utils.properties_root_device_name(properties1)
self.assertEqual(root_device_name, '/dev/sdb')
def test_mapping_prepend_dev(self):
mappings = [
{'virtual': 'ami',
'device': 'sda1'},
{'virtual': 'root',
'device': '/dev/sda1'},
{'virtual': 'swap',
'device': 'sdb1'},
{'virtual': 'swap',
'device': '/dev/sdb2'},
{'virtual': 'ephemeral0',
'device': 'sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
expected_result = [
{'virtual': 'ami',
'device': 'sda1'},
{'virtual': 'root',
'device': '/dev/sda1'},
{'virtual': 'swap',
'device': '/dev/sdb1'},
{'virtual': 'swap',
'device': '/dev/sdb2'},
{'virtual': 'ephemeral0',
'device': '/dev/sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
self.assertDictListMatch(ec2utils.mappings_prepend_dev(mappings),
expected_result)
class ApiEc2TestCase(test.TestCase):
"""Unit test for the cloud controller on an EC2 API"""
def setUp(self):
super(ApiEc2TestCase, self).setUp()
self.manager = manager.AuthManager()
self.host = '127.0.0.1'
self.app = ec2.Authenticate(ec2.Requestify(ec2.Executor(),
'nova.api.ec2.cloud.CloudController'))
# NOTE(vish): skipping the Authorizer
roles = ['sysadmin', 'netadmin']
ctxt = context.RequestContext('fake', 'fake', roles=roles)
self.app = wsgi.InjectContext(ctxt,
ec2.Requestify(ec2.Authorizer(ec2.Executor()),
'nova.api.ec2.cloud.CloudController'))
def expect_http(self, host=None, is_secure=False, api_version=None):
"""Returns a new EC2 connection"""
@@ -143,7 +216,11 @@ class ApiEc2TestCase(test.TestCase):
self.http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable=E1103
self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
if boto.Version >= '2':
self.ec2.new_http_connection(host or '%s:8773' % (self.host),
is_secure).AndReturn(self.http)
else:
self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
return self.http
def test_return_valid_isoformat(self):
@@ -172,39 +249,25 @@ class ApiEc2TestCase(test.TestCase):
self.expect_http(api_version='2010-10-30')
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
# Any request should be fine
self.ec2.get_all_instances()
self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
'The version in the xmlns of the response does '
'not match the API version given in the request.')
self.manager.delete_project(project)
self.manager.delete_user(user)
def test_describe_instances(self):
"""Test that, after creating a user and a project, the describe
instances call to the API works properly"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
self.assertEqual(self.ec2.get_all_instances(), [])
self.manager.delete_project(project)
self.manager.delete_user(user)
def test_terminate_invalid_instance(self):
"""Attempt to terminate an invalid instance"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
self.assertRaises(EC2ResponseError, self.ec2.terminate_instances,
"i-00000005")
self.manager.delete_project(project)
self.manager.delete_user(user)
def test_get_all_key_pairs(self):
"""Test that, after creating a user and project and generating
@@ -213,16 +276,12 @@ class ApiEc2TestCase(test.TestCase):
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
# NOTE(vish): create depends on pool, so call helper directly
cloud._gen_key(context.get_admin_context(), user.id, keyname)
cloud._gen_key(context.get_admin_context(), 'fake', keyname)
rv = self.ec2.get_all_key_pairs()
results = [k for k in rv if k.name == keyname]
self.assertEquals(len(results), 1)
self.manager.delete_project(project)
self.manager.delete_user(user)
def test_create_duplicate_key_pair(self):
"""Test that, after successfully generating a keypair,
@@ -231,8 +290,6 @@ class ApiEc2TestCase(test.TestCase):
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
# NOTE(vish): create depends on pool, so call helper directly
self.ec2.create_key_pair('test')
@@ -251,27 +308,16 @@ class ApiEc2TestCase(test.TestCase):
"""Test that we can retrieve security groups"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
project = self.manager.create_project('fake', 'fake', 'fake')
rv = self.ec2.get_all_security_groups()
self.assertEquals(len(rv), 1)
self.assertEquals(rv[0].name, 'default')
self.manager.delete_project(project)
self.manager.delete_user(user)
def test_create_delete_security_group(self):
"""Test that we can create a security group"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
project = self.manager.create_project('fake', 'fake', 'fake')
# At the moment, you need both of these to actually be netadmin
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
@@ -290,9 +336,6 @@ class ApiEc2TestCase(test.TestCase):
self.ec2.delete_security_group(security_group_name)
self.manager.delete_project(project)
self.manager.delete_user(user)
def test_authorize_revoke_security_group_cidr(self):
"""
Test that we can add and remove CIDR based rules
@@ -300,12 +343,6 @@ class ApiEc2TestCase(test.TestCase):
"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
# At the moment, you need both of these to actually be netadmin
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
@@ -352,9 +389,6 @@ class ApiEc2TestCase(test.TestCase):
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
self.manager.delete_project(project)
self.manager.delete_user(user)
return
def test_authorize_revoke_security_group_cidr_v6(self):
@@ -364,12 +398,6 @@ class ApiEc2TestCase(test.TestCase):
"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
# At the moment, you need both of these to actually be netadmin
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
@@ -415,9 +443,6 @@ class ApiEc2TestCase(test.TestCase):
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
self.manager.delete_project(project)
self.manager.delete_user(user)
return
def test_authorize_revoke_security_group_foreign_group(self):
@@ -427,12 +452,6 @@ class ApiEc2TestCase(test.TestCase):
"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
project = self.manager.create_project('fake', 'fake', 'fake')
# At the moment, you need both of these to actually be netadmin
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')
rand_string = 'sdiuisudfsdcnpaqwertasd'
security_group_name = "".join(random.choice(rand_string)
@@ -486,8 +505,3 @@ class ApiEc2TestCase(test.TestCase):
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.manager.delete_project(project)
self.manager.delete_user(user)
return


@@ -83,9 +83,9 @@ class user_and_project_generator(object):
class _AuthManagerBaseTestCase(test.TestCase):
def setUp(self):
FLAGS.auth_driver = self.auth_driver
super(_AuthManagerBaseTestCase, self).setUp()
self.flags(connection_type='fake')
self.flags(auth_driver=self.auth_driver,
connection_type='fake')
self.manager = manager.AuthManager(new=True)
self.manager.mc.cache = {}
@@ -102,7 +102,7 @@ class _AuthManagerBaseTestCase(test.TestCase):
self.assertEqual('classified', u.secret)
self.assertEqual('private-party', u.access)
def test_004_signature_is_valid(self):
def test_signature_is_valid(self):
with user_generator(self.manager, name='admin', secret='admin',
access='admin'):
with project_generator(self.manager, name="admin",
@@ -141,15 +141,14 @@ class _AuthManagerBaseTestCase(test.TestCase):
'127.0.0.1',
'/services/Cloud'))
def test_005_can_get_credentials(self):
return
credentials = self.manager.get_user('test1').get_credentials()
self.assertEqual(credentials,
'export EC2_ACCESS_KEY="access"\n' +
'export EC2_SECRET_KEY="secret"\n' +
'export EC2_URL="http://127.0.0.1:8773/services/Cloud"\n' +
'export S3_URL="http://127.0.0.1:3333/"\n' +
'export EC2_USER_ID="test1"\n')
def test_can_get_credentials(self):
st = {'access': 'access', 'secret': 'secret'}
with user_and_project_generator(self.manager, user_state=st) as (u, p):
credentials = self.manager.get_environment_rc(u, p)
LOG.debug(credentials)
self.assertTrue('export EC2_ACCESS_KEY="access:testproj"\n'
in credentials)
self.assertTrue('export EC2_SECRET_KEY="secret"\n' in credentials)
def test_can_list_users(self):
with user_generator(self.manager):


@@ -15,6 +15,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from base64 import b64decode
from M2Crypto import BIO
@@ -29,10 +30,10 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import network
from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.image import fake
@@ -45,9 +46,10 @@ LOG = logging.getLogger('nova.tests.cloud')
class CloudTestCase(test.TestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
self.flags(connection_type='fake')
self.flags(connection_type='fake',
stub_network=True)
self.conn = rpc.Connection.instance()
self.conn = rpc.create_connection()
# set up our cloud
self.cloud = cloud.CloudController()
@@ -59,12 +61,11 @@ class CloudTestCase(test.TestCase):
self.volume = self.start_service('volume')
self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('proj', 'admin', 'proj')
self.context = context.RequestContext(user=self.user,
project=self.project)
host = self.network.host
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
True)
def fake_show(meh, context, id):
return {'id': 1, 'container_format': 'ami',
@@ -84,27 +85,23 @@ class CloudTestCase(test.TestCase):
self.stubs.Set(rpc, 'cast', finish_cast)
def tearDown(self):
networks = db.project_get_networks(self.context, self.project.id,
networks = db.project_get_networks(self.context, self.project_id,
associate=False)
for network in networks:
db.network_disassociate(self.context, network['id'])
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(CloudTestCase, self).tearDown()
def _create_key(self, name):
# NOTE(vish): create depends on pool, so just call helper directly
return cloud._gen_key(self.context, self.context.user.id, name)
return cloud._gen_key(self.context, self.context.user_id, name)
def test_describe_regions(self):
"""Makes sure describe regions runs without raising an exception"""
result = self.cloud.describe_regions(self.context)
self.assertEqual(len(result['regionInfo']), 1)
regions = FLAGS.region_list
FLAGS.region_list = ["one=test_host1", "two=test_host2"]
self.flags(region_list=["one=test_host1", "two=test_host2"])
result = self.cloud.describe_regions(self.context)
self.assertEqual(len(result['regionInfo']), 2)
FLAGS.region_list = regions
def test_describe_addresses(self):
"""Makes sure describe addresses runs without raising an exception"""
@@ -118,7 +115,6 @@ class CloudTestCase(test.TestCase):
public_ip=address)
db.floating_ip_destroy(self.context, address)
@test.skip_test("Skipping this pending future merge")
def test_allocate_address(self):
address = "10.10.10.10"
allocate = self.cloud.allocate_address
@@ -131,13 +127,37 @@ class CloudTestCase(test.TestCase):
allocate,
self.context)
@test.skip_test("Skipping this pending future merge")
def test_associate_disassociate_address(self):
"""Verifies associate runs cleanly without raising an exception"""
def test_release_address(self):
address = "10.10.10.10"
allocate = self.cloud.allocate_address
db.floating_ip_create(self.context,
{'address': address,
'host': self.network.host})
result = self.cloud.release_address(self.context, address)
self.assertEqual(result['releaseResponse'], ['Address released.'])
def test_release_address_still_associated(self):
address = "10.10.10.10"
fixed_ip = {'instance': {'id': 1}}
floating_ip = {'id': 0,
'address': address,
'fixed_ip_id': 0,
'fixed_ip': fixed_ip,
'project_id': None,
'auto_assigned': False}
network_api = network.api.API()
self.mox.StubOutWithMock(network_api.db, 'floating_ip_get_by_address')
network_api.db.floating_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(floating_ip)
self.mox.ReplayAll()
release = self.cloud.release_address
# ApiError: Floating ip is in use. Disassociate it before releasing.
self.assertRaises(exception.ApiError, release, self.context, address)
def test_associate_disassociate_address(self):
"""Verifies associate runs cleanly without raising an exception"""
address = "10.10.10.10"
db.floating_ip_create(self.context, {'address': address})
self.cloud.allocate_address(self.context)
# TODO(jkoelker) Probably need to query for instance_type_id and
# make sure we get a valid one
@@ -145,11 +165,14 @@ class CloudTestCase(test.TestCase):
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
self.network.set_network_host(self.context, network['id'])
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
type_id = inst['instance_type_id']
ips = self.network.allocate_for_instance(self.context,
instance_id=inst['id'],
host=inst['host'],
vpn=None,
instance_type_id=type_id,
project_id=project_id)
# TODO(jkoelker) Make this mas bueno
@@ -239,12 +262,61 @@ class CloudTestCase(test.TestCase):
delete = self.cloud.delete_security_group
self.assertRaises(exception.ApiError, delete, self.context)
def test_authorize_revoke_security_group_ingress(self):
def test_authorize_security_group_ingress(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
authz(self.context, group_name=sec['name'], **kwargs)
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'ip_ranges':
{'1': {'cidr_ip': u'0.0.0.0/0'},
'2': {'cidr_ip': u'10.10.10.10/32'}},
'ip_protocol': u'tcp'}]}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_authorize_security_group_fail_missing_source_group(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'ip_ranges':{'1': {'cidr_ip': u'0.0.0.0/0'},
'2': {'cidr_ip': u'10.10.10.10/32'}},
'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'}},
'ip_protocol': u'tcp'}]}
self.assertRaises(exception.SecurityGroupNotFound, authz,
self.context, group_name=sec['name'], **kwargs)
def test_authorize_security_group_ingress_ip_permissions_groups(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context,
{'project_id': 'someuser',
'name': 'somegroup1'})
sec = db.security_group_create(self.context,
{'project_id': 'someuser',
'name': 'othergroup2'})
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'},
'2': {'user_id': u'someuser',
'group_name': u'othergroup2'}},
'ip_protocol': u'tcp'}]}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_revoke_security_group_ingress(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
authz(self.context, group_id=sec['id'], **kwargs)
revoke = self.cloud.revoke_security_group_ingress
self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
@@ -290,7 +362,7 @@ class CloudTestCase(test.TestCase):
vol2 = db.volume_create(self.context, {})
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x')
volume_id = ec2utils.id_to_ec2_vol_id(vol2['id'])
result = self.cloud.describe_volumes(self.context,
volume_id=[volume_id])
self.assertEqual(len(result['volumeSet']), 1)
@@ -306,7 +378,7 @@ class CloudTestCase(test.TestCase):
snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
'volume_size': vol['size'],
'status': "available"})
snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id'])
result = self.cloud.create_volume(self.context,
snapshot_id=snapshot_id)
@@ -336,8 +408,6 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
# NOTE(jkoelker): this test relies on fixed_ip being in instances
@test.skip_test("EC2 stuff needs fixed_ip in instance_ref")
def test_describe_snapshots(self):
"""Makes sure describe_snapshots works and filters results."""
vol = db.volume_create(self.context, {})
@@ -345,7 +415,7 @@ class CloudTestCase(test.TestCase):
snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']})
result = self.cloud.describe_snapshots(self.context)
self.assertEqual(len(result['snapshotSet']), 2)
snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x')
snapshot_id = ec2utils.id_to_ec2_snap_id(snap2['id'])
result = self.cloud.describe_snapshots(self.context,
snapshot_id=[snapshot_id])
self.assertEqual(len(result['snapshotSet']), 1)
@@ -359,7 +429,7 @@ class CloudTestCase(test.TestCase):
def test_create_snapshot(self):
"""Makes sure create_snapshot works."""
vol = db.volume_create(self.context, {'status': "available"})
volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
volume_id = ec2utils.id_to_ec2_vol_id(vol['id'])
result = self.cloud.create_snapshot(self.context,
volume_id=volume_id)
@@ -376,7 +446,7 @@ class CloudTestCase(test.TestCase):
vol = db.volume_create(self.context, {'status': "available"})
snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
'status': "available"})
snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id'])
result = self.cloud.delete_snapshot(self.context,
snapshot_id=snapshot_id)
@@ -415,6 +485,185 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp1['id'])
db.service_destroy(self.context, comp2['id'])
def _block_device_mapping_create(self, instance_id, mappings):
volumes = []
for bdm in mappings:
db.block_device_mapping_create(self.context, bdm)
if 'volume_id' in bdm:
values = {'id': bdm['volume_id']}
for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'),
('snapshot_size', 'volume_size'),
('delete_on_termination',
'delete_on_termination')]:
if bdm_key in bdm:
values[vol_key] = bdm[bdm_key]
vol = db.volume_create(self.context, values)
db.volume_attached(self.context, vol['id'],
instance_id, bdm['device_name'])
volumes.append(vol)
return volumes
def _setUpBlockDeviceMapping(self):
inst1 = db.instance_create(self.context,
{'image_ref': 1,
'root_device_name': '/dev/sdb1'})
inst2 = db.instance_create(self.context,
{'image_ref': 2,
'root_device_name': '/dev/sdc1'})
instance_id = inst1['id']
mappings0 = [
{'instance_id': instance_id,
'device_name': '/dev/sdb1',
'snapshot_id': '1',
'volume_id': '2'},
{'instance_id': instance_id,
'device_name': '/dev/sdb2',
'volume_id': '3',
'volume_size': 1},
{'instance_id': instance_id,
'device_name': '/dev/sdb3',
'delete_on_termination': True,
'snapshot_id': '4',
'volume_id': '5'},
{'instance_id': instance_id,
'device_name': '/dev/sdb4',
'delete_on_termination': False,
'snapshot_id': '6',
'volume_id': '7'},
{'instance_id': instance_id,
'device_name': '/dev/sdb5',
'snapshot_id': '8',
'volume_id': '9',
'volume_size': 0},
{'instance_id': instance_id,
'device_name': '/dev/sdb6',
'snapshot_id': '10',
'volume_id': '11',
'volume_size': 1},
{'instance_id': instance_id,
'device_name': '/dev/sdb7',
'no_device': True},
{'instance_id': instance_id,
'device_name': '/dev/sdb8',
'virtual_name': 'swap'},
{'instance_id': instance_id,
'device_name': '/dev/sdb9',
'virtual_name': 'ephemeral3'}]
volumes = self._block_device_mapping_create(instance_id, mappings0)
return (inst1, inst2, volumes)
def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
for vol in volumes:
db.volume_destroy(self.context, vol['id'])
for id in (inst1['id'], inst2['id']):
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, id):
db.block_device_mapping_destroy(self.context, bdm['id'])
db.instance_destroy(self.context, inst2['id'])
db.instance_destroy(self.context, inst1['id'])
_expected_instance_bdm1 = {
'instanceId': 'i-00000001',
'rootDeviceName': '/dev/sdb1',
'rootDeviceType': 'ebs'}
_expected_block_device_mapping0 = [
{'deviceName': '/dev/sdb1',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': 2,
}},
{'deviceName': '/dev/sdb2',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': 3,
}},
{'deviceName': '/dev/sdb3',
'ebs': {'status': 'in-use',
'deleteOnTermination': True,
'volumeId': 5,
}},
{'deviceName': '/dev/sdb4',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': 7,
}},
{'deviceName': '/dev/sdb5',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': 9,
}},
{'deviceName': '/dev/sdb6',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': 11, }}]
# NOTE(yamahata): swap/ephemeral device case isn't supported yet.
_expected_instance_bdm2 = {
'instanceId': 'i-00000002',
'rootDeviceName': '/dev/sdc1',
'rootDeviceType': 'instance-store'}
def test_format_instance_bdm(self):
(inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
result = {}
self.cloud._format_instance_bdm(self.context, inst1['id'], '/dev/sdb1',
result)
self.assertSubDictMatch(
{'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
result)
self._assertEqualBlockDeviceMapping(
self._expected_block_device_mapping0, result['blockDeviceMapping'])
result = {}
self.cloud._format_instance_bdm(self.context, inst2['id'], '/dev/sdc1',
result)
self.assertSubDictMatch(
{'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']},
result)
self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
def _assertInstance(self, instance_id):
ec2_instance_id = ec2utils.id_to_ec2_id(instance_id)
result = self.cloud.describe_instances(self.context,
instance_id=[ec2_instance_id])
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
result = result['instancesSet'][0]
self.assertEqual(result['instanceId'], ec2_instance_id)
return result
def _assertEqualBlockDeviceMapping(self, expected, result):
self.assertEqual(len(expected), len(result))
for x in expected:
found = False
for y in result:
if x['deviceName'] == y['deviceName']:
self.assertSubDictMatch(x, y)
found = True
break
self.assertTrue(found)
def test_describe_instances_bdm(self):
"""Make sure describe_instances works with root_device_name and
block device mappings
"""
(inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
result = self._assertInstance(inst1['id'])
self.assertSubDictMatch(self._expected_instance_bdm1, result)
self._assertEqualBlockDeviceMapping(
self._expected_block_device_mapping0, result['blockDeviceMapping'])
result = self._assertInstance(inst2['id'])
self.assertSubDictMatch(self._expected_instance_bdm2, result)
self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
def test_describe_images(self):
describe_images = self.cloud.describe_images
@@ -445,6 +694,161 @@ class CloudTestCase(test.TestCase):
self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
def assertDictListUnorderedMatch(self, L1, L2, key):
self.assertEqual(len(L1), len(L2))
for d1 in L1:
self.assertTrue(key in d1)
for d2 in L2:
self.assertTrue(key in d2)
if d1[key] == d2[key]:
self.assertDictMatch(d1, d2)
def _setUpImageSet(self, create_volumes_and_snapshots=False):
mappings1 = [
{'device': '/dev/sda1', 'virtual': 'root'},
{'device': 'sdb0', 'virtual': 'ephemeral0'},
{'device': 'sdb1', 'virtual': 'ephemeral1'},
{'device': 'sdb2', 'virtual': 'ephemeral2'},
{'device': 'sdb3', 'virtual': 'ephemeral3'},
{'device': 'sdb4', 'virtual': 'ephemeral4'},
{'device': 'sdc0', 'virtual': 'swap'},
{'device': 'sdc1', 'virtual': 'swap'},
{'device': 'sdc2', 'virtual': 'swap'},
{'device': 'sdc3', 'virtual': 'swap'},
{'device': 'sdc4', 'virtual': 'swap'}]
block_device_mapping1 = [
{'device_name': '/dev/sdb1', 'snapshot_id': 01234567},
{'device_name': '/dev/sdb2', 'volume_id': 01234567},
{'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
{'device_name': '/dev/sdb4', 'no_device': True},
{'device_name': '/dev/sdc1', 'snapshot_id': 12345678},
{'device_name': '/dev/sdc2', 'volume_id': 12345678},
{'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
{'device_name': '/dev/sdc4', 'no_device': True}]
image1 = {
'id': 1,
'properties': {
'kernel_id': 1,
'type': 'machine',
'image_state': 'available',
'mappings': mappings1,
'block_device_mapping': block_device_mapping1,
}
}
mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
block_device_mapping2 = [{'device_name': '/dev/sdb1',
'snapshot_id': 01234567}]
image2 = {
'id': 2,
'properties': {
'kernel_id': 2,
'type': 'machine',
'root_device_name': '/dev/sdb1',
'mappings': mappings2,
'block_device_mapping': block_device_mapping2}}
def fake_show(meh, context, image_id):
for i in [image1, image2]:
if i['id'] == image_id:
return i
raise exception.ImageNotFound(image_id=image_id)
def fake_detail(meh, context):
return [image1, image2]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
volumes = []
snapshots = []
if create_volumes_and_snapshots:
for bdm in block_device_mapping1:
if 'volume_id' in bdm:
vol = self._volume_create(bdm['volume_id'])
volumes.append(vol['id'])
if 'snapshot_id' in bdm:
snap = db.snapshot_create(self.context,
{'id': bdm['snapshot_id'],
'volume_id': 76543210,
'status': "available",
'volume_size': 1})
snapshots.append(snap['id'])
return (volumes, snapshots)
def _assertImageSet(self, result, root_device_type, root_device_name):
self.assertEqual(1, len(result['imagesSet']))
result = result['imagesSet'][0]
self.assertTrue('rootDeviceType' in result)
self.assertEqual(result['rootDeviceType'], root_device_type)
self.assertTrue('rootDeviceName' in result)
self.assertEqual(result['rootDeviceName'], root_device_name)
self.assertTrue('blockDeviceMapping' in result)
return result
_expected_root_device_name1 = '/dev/sda1'
# NOTE(yamahata): noDevice doesn't make sense when returning a mapping.
#                 It only makes sense when the user is overriding an
#                 existing mapping.
_expected_bdms1 = [
{'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
{'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
'snap-00053977'}},
{'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
'vol-00053977'}},
{'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
# {'deviceName': '/dev/sdb4', 'noDevice': True},
{'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
{'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
'snap-00bc614e'}},
{'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
'vol-00bc614e'}},
{'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
# {'deviceName': '/dev/sdc4', 'noDevice': True}
]
_expected_root_device_name2 = '/dev/sdb1'
_expected_bdms2 = [{'deviceName': '/dev/sdb1',
'ebs': {'snapshotId': 'snap-00053977'}}]
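# NOTE: editor clarification, not in the original test. The EC2-style ids in
# the expected mappings follow from the numeric ids used in _setUpImageSet:
# 01234567 is a Python 2 octal literal (342391 decimal), which the
# 'snap-%08x' / 'vol-%08x' formatting used elsewhere in these tests renders
# as the zero-padded hex suffix '00053977', and 12345678 decimal renders as
# '00bc614e'.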
# NOTE(yamahata):
# InstanceBlockDeviceMappingItemType
# rootDeviceType
# rootDeviceName
# blockDeviceMapping
# deviceName
# virtualName
# ebs
# snapshotId
# volumeSize
# deleteOnTermination
# noDevice
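# Illustrative only (added for clarity, not original test data): a fully
# populated entry combining the fields enumerated above might look like
#   {'deviceName': '/dev/sdb1',
#    'ebs': {'snapshotId': 'snap-00053977',
#            'volumeSize': 1,
#            'deleteOnTermination': False}}
# with 'virtualName' or 'noDevice' used instead of 'ebs' for ephemeral/swap
# or suppressed devices respectively.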
def test_describe_image_mapping(self):
"""test for rootDeviceName and blockDeiceMapping"""
describe_images = self.cloud.describe_images
self._setUpImageSet()
result = describe_images(self.context, ['ami-00000001'])
result = self._assertImageSet(result, 'instance-store',
self._expected_root_device_name1)
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms1, 'deviceName')
result = describe_images(self.context, ['ami-00000002'])
result = self._assertImageSet(result, 'ebs',
self._expected_root_device_name2)
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms2, 'deviceName')
self.stubs.UnsetAll()
def test_describe_image_attribute(self):
describe_image_attribute = self.cloud.describe_image_attribute
@@ -459,6 +863,32 @@ class CloudTestCase(test.TestCase):
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
def test_describe_image_attribute_root_device_name(self):
describe_image_attribute = self.cloud.describe_image_attribute
self._setUpImageSet()
result = describe_image_attribute(self.context, 'ami-00000001',
'rootDeviceName')
self.assertEqual(result['rootDeviceName'],
self._expected_root_device_name1)
result = describe_image_attribute(self.context, 'ami-00000002',
'rootDeviceName')
self.assertEqual(result['rootDeviceName'],
self._expected_root_device_name2)
def test_describe_image_attribute_block_device_mapping(self):
describe_image_attribute = self.cloud.describe_image_attribute
self._setUpImageSet()
result = describe_image_attribute(self.context, 'ami-00000001',
'blockDeviceMapping')
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms1, 'deviceName')
result = describe_image_attribute(self.context, 'ami-00000002',
'blockDeviceMapping')
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms2, 'deviceName')
def test_modify_image_attribute(self):
modify_image_attribute = self.cloud.modify_image_attribute
@@ -546,7 +976,7 @@ class CloudTestCase(test.TestCase):
key = RSA.load_key_string(private_key, callback=lambda: None)
bio = BIO.MemoryBuffer()
public_key = db.key_pair_get(self.context,
self.context.user.id,
self.context.user_id,
'test')['public_key']
key.save_pub_key_bio(bio)
converted = crypto.ssl_pub_to_ssh_pub(bio.read())
@@ -570,7 +1000,7 @@ class CloudTestCase(test.TestCase):
'mytestfprint')
self.assertTrue(result1)
keydata = db.key_pair_get(self.context,
self.context.user.id,
self.context.user_id,
'testimportkey1')
self.assertEqual('mytestpubkey', keydata['public_key'])
self.assertEqual('mytestfprint', keydata['fingerprint'])
@@ -587,7 +1017,7 @@ class CloudTestCase(test.TestCase):
dummypub)
self.assertTrue(result2)
keydata = db.key_pair_get(self.context,
self.context.user.id,
self.context.user_id,
'testimportkey2')
self.assertEqual(dummypub, keydata['public_key'])
self.assertEqual(dummyfprint, keydata['fingerprint'])
@@ -597,12 +1027,6 @@ class CloudTestCase(test.TestCase):
self.cloud.delete_key_pair(self.context, 'test')
def test_run_instances(self):
# stub out the rpc call
def stub_cast(*args, **kwargs):
pass
self.stubs.Set(rpc, 'cast', stub_cast)
kwargs = {'image_id': FLAGS.default_image,
'instance_type': FLAGS.default_instance_type,
'max_count': 1}
@@ -612,7 +1036,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(instance['imageId'], 'ami-00000001')
self.assertEqual(instance['displayName'], 'Server 1')
self.assertEqual(instance['instanceId'], 'i-00000001')
self.assertEqual(instance['instanceState']['name'], 'scheduling')
self.assertEqual(instance['instanceState']['name'], 'running')
self.assertEqual(instance['instanceType'], 'm1.small')
def test_run_instances_image_state_none(self):
@@ -684,22 +1108,21 @@ class CloudTestCase(test.TestCase):
self.assertEqual('c00l 1m4g3', inst['display_name'])
db.instance_destroy(self.context, inst['id'])
# NOTE(jkoelker): This test relies on mac_address in instance
@test.skip_test("EC2 stuff needs mac_address in instance_ref")
def test_update_of_instance_wont_update_private_fields(self):
inst = db.instance_create(self.context, {})
host = inst['host']
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3',
mac_address='DE:AD:BE:EF')
host='otherhost')
inst = db.instance_get(self.context, inst['id'])
self.assertEqual(None, inst['mac_address'])
self.assertEqual(host, inst['host'])
db.instance_destroy(self.context, inst['id'])
def test_update_of_volume_display_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
ec2utils.id_to_ec2_vol_id(vol['id']),
display_name='c00l v0lum3')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual('c00l v0lum3', vol['display_name'])
@@ -708,7 +1131,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_volume_wont_update_private_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
ec2utils.id_to_ec2_vol_id(vol['id']),
mountpoint='/not/here')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint'])
@@ -749,7 +1172,6 @@ class CloudTestCase(test.TestCase):
elevated = self.context.elevated(read_deleted=True)
self._wait_for_state(elevated, instance_id, is_deleted)
@test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_stop_start_instance(self):
"""Makes sure stop/start instance works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
@@ -786,11 +1208,13 @@ class CloudTestCase(test.TestCase):
self._restart_compute_service()
def _volume_create(self):
def _volume_create(self, volume_id=None):
kwargs = {'status': 'available',
'host': self.volume.host,
'size': 1,
'attach_status': 'detached', }
if volume_id:
kwargs['id'] = volume_id
return db.volume_create(self.context, kwargs)
def _assert_volume_attached(self, vol, instance_id, mountpoint):
@@ -805,7 +1229,6 @@ class CloudTestCase(test.TestCase):
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
@test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_stop_start_with_volume(self):
"""Make sure run instance with block device mapping works"""
@@ -819,10 +1242,10 @@ class CloudTestCase(test.TestCase):
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'volume_id': vol1['id'],
'delete_on_termination': False, },
'delete_on_termination': False},
{'device_name': '/dev/vdc',
'volume_id': vol2['id'],
'delete_on_termination': True, },
'delete_on_termination': True},
]}
ec2_instance_id = self._run_instance_wait(**kwargs)
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
@@ -874,7 +1297,6 @@ class CloudTestCase(test.TestCase):
self._restart_compute_service()
@test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_stop_with_attached_volume(self):
"""Make sure attach info is reflected to block device mapping"""
# enforce periodic tasks run in short time to avoid wait for 60s.
@@ -950,11 +1372,10 @@ class CloudTestCase(test.TestCase):
greenthread.sleep(0.3)
return result['snapshotId']
@test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_run_with_snapshot(self):
"""Makes sure run/stop/start instance with snapshot works."""
vol = self._volume_create()
ec2_volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
ec2_volume_id = ec2utils.id_to_ec2_vol_id(vol['id'])
ec2_snapshot1_id = self._create_snapshot(ec2_volume_id)
snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id)
@@ -1013,3 +1434,33 @@ class CloudTestCase(test.TestCase):
self.cloud.delete_snapshot(self.context, snapshot_id)
greenthread.sleep(0.3)
db.volume_destroy(self.context, vol['id'])
def test_create_image(self):
"""Make sure that CreateImage works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1}
ec2_instance_id = self._run_instance_wait(**kwargs)
# TODO(yamahata): s3._s3_create() can't be tested easily by unit test
# as there is no unit test for s3.create()
## result = self.cloud.create_image(self.context, ec2_instance_id,
## no_reboot=True)
## ec2_image_id = result['imageId']
## created_image = self.cloud.describe_images(self.context,
## [ec2_image_id])
self.cloud.terminate_instances(self.context, [ec2_instance_id])
for vol in volumes:
db.volume_destroy(self.context, vol)
for snap in snapshots:
db.snapshot_destroy(self.context, snap)
# TODO(yamahata): clean up snapshot created by CreateImage.
self._restart_compute_service()

View File

@@ -19,10 +19,6 @@
Tests For Compute
"""
import mox
import stubout
from nova.auth import manager
from nova import compute
from nova.compute import instance_types
from nova.compute import manager as compute_manager
@@ -67,10 +63,9 @@ class ComputeTestCase(test.TestCase):
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
self.compute_api = compute.API()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake', False)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
test_notifier.NOTIFICATIONS = []
def fake_show(meh, context, id):
@@ -78,19 +73,14 @@ class ComputeTestCase(test.TestCase):
self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)
def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
super(ComputeTestCase, self).tearDown()
def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
@@ -115,8 +105,8 @@ class ComputeTestCase(test.TestCase):
def _create_group(self):
values = {'name': 'testgroup',
'description': 'testgroup',
'user_id': self.user.id,
'project_id': self.project.id}
'user_id': self.user_id,
'project_id': self.project_id}
return db.security_group_create(self.context, values)
def _get_dummy_instance(self):
@@ -350,8 +340,8 @@ class ComputeTestCase(test.TestCase):
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.create')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project.id)
self.assertEquals(payload['user_id'], self.user.id)
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance_id)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
@@ -374,8 +364,8 @@ class ComputeTestCase(test.TestCase):
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.delete')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project.id)
self.assertEquals(payload['user_id'], self.user.id)
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance_id)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
@@ -420,15 +410,16 @@ class ComputeTestCase(test.TestCase):
def fake(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'finish_resize', fake)
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake)
context = self.context.elevated()
instance_id = self._create_instance()
self.compute.prep_resize(context, instance_id, 1)
instance_ref = db.instance_get(context, instance_id)
self.compute.prep_resize(context, instance_ref['uuid'], 1)
migration_ref = db.migration_get_by_instance_and_status(context,
instance_id, 'pre-migrating')
instance_ref['uuid'], 'pre-migrating')
try:
self.compute.finish_resize(context, instance_id,
self.compute.finish_resize(context, instance_ref['uuid'],
int(migration_ref['id']), {})
except KeyError, e:
# Only catch key errors. We want other reasons for the test to
@@ -441,22 +432,23 @@ class ComputeTestCase(test.TestCase):
"""Ensure notifications on instance migrate/resize"""
instance_id = self._create_instance()
context = self.context.elevated()
inst_ref = db.instance_get(context, instance_id)
self.compute.run_instance(self.context, instance_id)
test_notifier.NOTIFICATIONS = []
db.instance_update(self.context, instance_id, {'host': 'foo'})
self.compute.prep_resize(context, instance_id, 1)
self.compute.prep_resize(context, inst_ref['uuid'], 1)
migration_ref = db.migration_get_by_instance_and_status(context,
instance_id, 'pre-migrating')
inst_ref['uuid'], 'pre-migrating')
self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.resize.prep')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project.id)
self.assertEquals(payload['user_id'], self.user.id)
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance_id)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
@@ -471,13 +463,15 @@ class ComputeTestCase(test.TestCase):
"""Ensure instance can be migrated/resized"""
instance_id = self._create_instance()
context = self.context.elevated()
inst_ref = db.instance_get(context, instance_id)
self.compute.run_instance(self.context, instance_id)
db.instance_update(self.context, instance_id, {'host': 'foo'})
self.compute.prep_resize(context, instance_id, 1)
db.instance_update(self.context, inst_ref['uuid'],
{'host': 'foo'})
self.compute.prep_resize(context, inst_ref['uuid'], 1)
migration_ref = db.migration_get_by_instance_and_status(context,
instance_id, 'pre-migrating')
self.compute.resize_instance(context, instance_id,
inst_ref['uuid'], 'pre-migrating')
self.compute.resize_instance(context, inst_ref['uuid'],
migration_ref['id'])
self.compute.terminate_instance(context, instance_id)
@@ -502,8 +496,8 @@ class ComputeTestCase(test.TestCase):
db.instance_update(self.context, instance_id,
{'instance_type_id': inst_type['id']})
self.assertRaises(exception.ApiError, self.compute_api.resize,
context, instance_id, 1)
self.assertRaises(exception.CannotResizeToSmallerSize,
self.compute_api.resize, context, instance_id, 1)
self.compute.terminate_instance(context, instance_id)
@@ -514,8 +508,61 @@ class ComputeTestCase(test.TestCase):
self.compute.run_instance(self.context, instance_id)
self.assertRaises(exception.ApiError, self.compute_api.resize,
context, instance_id, 1)
self.assertRaises(exception.CannotResizeToSameSize,
self.compute_api.resize, context, instance_id, 1)
self.compute.terminate_instance(context, instance_id)
def test_finish_revert_resize(self):
"""Ensure that the flavor is reverted to the original on revert"""
context = self.context.elevated()
instance_id = self._create_instance()
def fake(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
self.stubs.Set(self.compute.driver, 'revert_migration', fake)
self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake)
self.compute.run_instance(self.context, instance_id)
# Confirm the instance size before the resize starts
inst_ref = db.instance_get(context, instance_id)
instance_type_ref = db.instance_type_get(context,
inst_ref['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], 1)
db.instance_update(self.context, instance_id, {'host': 'foo'})
new_instance_type_ref = db.instance_type_get_by_flavor_id(context, 3)
self.compute.prep_resize(context, inst_ref['uuid'],
new_instance_type_ref['id'])
migration_ref = db.migration_get_by_instance_and_status(context,
inst_ref['uuid'], 'pre-migrating')
self.compute.resize_instance(context, inst_ref['uuid'],
migration_ref['id'])
self.compute.finish_resize(context, inst_ref['uuid'],
int(migration_ref['id']), {})
# Prove that the instance size is now the new size
inst_ref = db.instance_get(context, instance_id)
instance_type_ref = db.instance_type_get(context,
inst_ref['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], 3)
# Finally, revert and confirm the old flavor has been applied
self.compute.revert_resize(context, inst_ref['uuid'],
migration_ref['id'])
self.compute.finish_revert_resize(context, inst_ref['uuid'],
migration_ref['id'])
inst_ref = db.instance_get(context, instance_id)
instance_type_ref = db.instance_type_get(context,
inst_ref['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], 1)
self.compute.terminate_instance(context, instance_id)
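# Summary comment added for clarity (not in the original test): the revert
# path exercised above is prep_resize -> resize_instance -> finish_resize
# (the instance now reports flavorid 3), followed by revert_resize ->
# finish_revert_resize (the instance reports the original flavorid 1 again).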
@@ -528,8 +575,9 @@ class ComputeTestCase(test.TestCase):
the same host"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
inst_ref = db.instance_get(self.context, instance_id)
self.assertRaises(exception.Error, self.compute.prep_resize,
self.context, instance_id, 1)
self.context, inst_ref['uuid'], 1)
self.compute.terminate_instance(self.context, instance_id)
def test_migrate(self):
@@ -569,7 +617,6 @@ class ComputeTestCase(test.TestCase):
self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
volmock = self.mox.CreateMock(self.volume_manager)
netmock = self.mox.CreateMock(self.network_manager)
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
@@ -577,12 +624,11 @@ class ComputeTestCase(test.TestCase):
for i in range(len(i_ref['volumes'])):
vid = i_ref['volumes'][i]['id']
volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
netmock.setup_compute_network(c, i_ref['id'])
drivermock.plug_vifs(i_ref, [])
drivermock.ensure_filtering_rules_for_instance(i_ref)
self.compute.db = dbmock
self.compute.volume_manager = volmock
self.compute.network_manager = netmock
self.compute.driver = drivermock
self.mox.ReplayAll()
@@ -597,18 +643,16 @@ class ComputeTestCase(test.TestCase):
self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
netmock = self.mox.CreateMock(self.network_manager)
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy')
self.mox.StubOutWithMock(compute_manager.LOG, 'info')
compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
netmock.setup_compute_network(c, i_ref['id'])
drivermock.plug_vifs(i_ref, [])
drivermock.ensure_filtering_rules_for_instance(i_ref)
self.compute.db = dbmock
self.compute.network_manager = netmock
self.compute.driver = drivermock
self.mox.ReplayAll()
@@ -629,18 +673,20 @@ class ComputeTestCase(test.TestCase):
dbmock = self.mox.CreateMock(db)
netmock = self.mox.CreateMock(self.network_manager)
volmock = self.mox.CreateMock(self.volume_manager)
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy')
for i in range(len(i_ref['volumes'])):
volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
for i in range(FLAGS.live_migration_retry_count):
netmock.setup_compute_network(c, i_ref['id']).\
drivermock.plug_vifs(i_ref, []).\
AndRaise(exception.ProcessExecutionError())
self.compute.db = dbmock
self.compute.network_manager = netmock
self.compute.volume_manager = volmock
self.compute.driver = drivermock
self.mox.ReplayAll()
self.assertRaises(exception.ProcessExecutionError,
@@ -775,7 +821,7 @@ class ComputeTestCase(test.TestCase):
for v in i_ref['volumes']:
self.compute.volume_manager.remove_compute_volume(c, v['id'])
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(i_ref)
self.compute.driver.unfilter_instance(i_ref, [])
# executing
self.mox.ReplayAll()
@@ -795,7 +841,6 @@ class ComputeTestCase(test.TestCase):
def test_run_kill_vm(self):
"""Detect when a vm is terminated behind the scenes"""
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(compute_manager.ComputeManager,
'_report_driver_status', nop_report_driver_status)
@@ -818,3 +863,114 @@ class ComputeTestCase(test.TestCase):
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.assertEqual(power_state.SHUTOFF, instances[0]['state'])
@staticmethod
def _parse_db_block_device_mapping(bdm_ref):
attr_list = ('delete_on_termination', 'device_name', 'no_device',
'virtual_name', 'volume_id', 'volume_size', 'snapshot_id')
bdm = {}
for attr in attr_list:
val = bdm_ref.get(attr, None)
if val:
bdm[attr] = val
return bdm
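# Hypothetical illustration (values invented, not part of the original test):
# because the helper only copies truthy attributes, falsy values such as
# delete_on_termination=False are dropped, e.g.
#   _parse_db_block_device_mapping({'device_name': '/dev/sdb2',
#                                   'snapshot_id': 0x23456789,
#                                   'delete_on_termination': False,
#                                   'no_device': None})
#   => {'device_name': '/dev/sdb2', 'snapshot_id': 0x23456789}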
def test_update_block_device_mapping(self):
instance_id = self._create_instance()
mappings = [
{'virtual': 'ami', 'device': 'sda1'},
{'virtual': 'root', 'device': '/dev/sda1'},
{'virtual': 'swap', 'device': 'sdb1'},
{'virtual': 'swap', 'device': 'sdb2'},
{'virtual': 'swap', 'device': 'sdb3'},
{'virtual': 'swap', 'device': 'sdb4'},
{'virtual': 'ephemeral0', 'device': 'sdc1'},
{'virtual': 'ephemeral1', 'device': 'sdc2'},
{'virtual': 'ephemeral2', 'device': 'sdc3'}]
block_device_mapping = [
# root
{'device_name': '/dev/sda1',
'snapshot_id': 0x12345678,
'delete_on_termination': False},
# overwrite swap
{'device_name': '/dev/sdb2',
'snapshot_id': 0x23456789,
'delete_on_termination': False},
{'device_name': '/dev/sdb3',
'snapshot_id': 0x3456789A},
{'device_name': '/dev/sdb4',
'no_device': True},
# overwrite ephemeral
{'device_name': '/dev/sdc2',
'snapshot_id': 0x456789AB,
'delete_on_termination': False},
{'device_name': '/dev/sdc3',
'snapshot_id': 0x56789ABC},
{'device_name': '/dev/sdc4',
'no_device': True},
# volume
{'device_name': '/dev/sdd1',
'snapshot_id': 0x87654321,
'delete_on_termination': False},
{'device_name': '/dev/sdd2',
'snapshot_id': 0x98765432},
{'device_name': '/dev/sdd3',
'snapshot_id': 0xA9875463},
{'device_name': '/dev/sdd4',
'no_device': True}]
self.compute_api._update_image_block_device_mapping(
self.context, instance_id, mappings)
bdms = [self._parse_db_block_device_mapping(bdm_ref)
for bdm_ref in db.block_device_mapping_get_all_by_instance(
self.context, instance_id)]
expected_result = [
{'virtual_name': 'swap', 'device_name': '/dev/sdb1'},
{'virtual_name': 'swap', 'device_name': '/dev/sdb2'},
{'virtual_name': 'swap', 'device_name': '/dev/sdb3'},
{'virtual_name': 'swap', 'device_name': '/dev/sdb4'},
{'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1'},
{'virtual_name': 'ephemeral1', 'device_name': '/dev/sdc2'},
{'virtual_name': 'ephemeral2', 'device_name': '/dev/sdc3'}]
bdms.sort()
expected_result.sort()
self.assertDictListMatch(bdms, expected_result)
self.compute_api._update_block_device_mapping(
self.context, instance_id, block_device_mapping)
bdms = [self._parse_db_block_device_mapping(bdm_ref)
for bdm_ref in db.block_device_mapping_get_all_by_instance(
self.context, instance_id)]
expected_result = [
{'snapshot_id': 0x12345678, 'device_name': '/dev/sda1'},
{'virtual_name': 'swap', 'device_name': '/dev/sdb1'},
{'snapshot_id': 0x23456789, 'device_name': '/dev/sdb2'},
{'snapshot_id': 0x3456789A, 'device_name': '/dev/sdb3'},
{'no_device': True, 'device_name': '/dev/sdb4'},
{'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1'},
{'snapshot_id': 0x456789AB, 'device_name': '/dev/sdc2'},
{'snapshot_id': 0x56789ABC, 'device_name': '/dev/sdc3'},
{'no_device': True, 'device_name': '/dev/sdc4'},
{'snapshot_id': 0x87654321, 'device_name': '/dev/sdd1'},
{'snapshot_id': 0x98765432, 'device_name': '/dev/sdd2'},
{'snapshot_id': 0xA9875463, 'device_name': '/dev/sdd3'},
{'no_device': True, 'device_name': '/dev/sdd4'}]
bdms.sort()
expected_result.sort()
self.assertDictListMatch(bdms, expected_result)
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, instance_id):
db.block_device_mapping_destroy(self.context, bdm['id'])
self.compute.terminate_instance(self.context, instance_id)

View File

@@ -26,10 +26,9 @@ from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.console import manager as console_manager
FLAGS = flags.FLAGS
flags.DECLARE('console_driver', 'nova.console.manager')
class ConsoleTestCase(test.TestCase):
@@ -39,17 +38,11 @@ class ConsoleTestCase(test.TestCase):
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
stub_compute=True)
self.console = utils.import_object(FLAGS.console_manager)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.get_admin_context()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.host = 'test_compute_host'
def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
super(ConsoleTestCase, self).tearDown()
def _create_instance(self):
"""Create a test instance"""
inst = {}
@@ -58,8 +51,8 @@ class ConsoleTestCase(test.TestCase):
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = 1
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id']

78 nova/tests/test_db_api.py Normal file
View File

@@ -0,0 +1,78 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the DB API"""
from nova import test
from nova import context
from nova import db
from nova import flags
FLAGS = flags.FLAGS
def _setup_networking(instance_id, ip='1.2.3.4', flo_addr='1.2.1.2'):
ctxt = context.get_admin_context()
network_ref = db.project_get_networks(ctxt,
'fake',
associate=True)[0]
vif = {'address': '56:12:12:12:12:12',
'network_id': network_ref['id'],
'instance_id': instance_id}
vif_ref = db.virtual_interface_create(ctxt, vif)
fixed_ip = {'address': ip,
'network_id': network_ref['id'],
'virtual_interface_id': vif_ref['id'],
'allocated': True,
'instance_id': instance_id}
db.fixed_ip_create(ctxt, fixed_ip)
fix_ref = db.fixed_ip_get_by_address(ctxt, ip)
db.floating_ip_create(ctxt, {'address': flo_addr,
'fixed_ip_id': fix_ref['id']})
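# Descriptive note added for clarity (not in the original file): the helper
# above wires up a full chain for one instance: project network -> virtual
# interface -> allocated fixed IP -> floating IP, so the join-heavy queries
# exercised below have real rows to traverse.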
class DbApiTestCase(test.TestCase):
def setUp(self):
super(DbApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def test_instance_get_project_vpn(self):
values = {'instance_type_id': FLAGS.default_instance_type,
'image_ref': FLAGS.vpn_image_id,
'project_id': self.project_id,
}
instance = db.instance_create(self.context, values)
result = db.instance_get_project_vpn(self.context.elevated(),
self.project_id)
self.assertEqual(instance['id'], result['id'])
def test_instance_get_project_vpn_joins(self):
values = {'instance_type_id': FLAGS.default_instance_type,
'image_ref': FLAGS.vpn_image_id,
'project_id': self.project_id,
}
instance = db.instance_create(self.context, values)
_setup_networking(instance['id'])
result = db.instance_get_project_vpn(self.context.elevated(),
self.project_id)
self.assertEqual(instance['id'], result['id'])
self.assertEqual(result['fixed_ips'][0]['floating_ips'][0].address,
'1.2.1.2')

View File

@@ -19,12 +19,9 @@ Tests For Scheduler Host Filters.
import json
from nova import exception
from nova import flags
from nova import test
from nova.scheduler import host_filter
FLAGS = flags.FLAGS
class FakeZoneManager:
pass
@@ -57,9 +54,9 @@ class HostFilterTestCase(test.TestCase):
'host_name-label': 'xs-%s' % multiplier}
def setUp(self):
self.old_flag = FLAGS.default_host_filter
FLAGS.default_host_filter = \
'nova.scheduler.host_filter.AllHostsFilter'
super(HostFilterTestCase, self).setUp()
default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter'
self.flags(default_host_filter=default_host_filter)
self.instance_type = dict(name='tiny',
memory_mb=50,
vcpus=10,
@@ -76,9 +73,6 @@ class HostFilterTestCase(test.TestCase):
states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
self.zone_manager.service_states = states
def tearDown(self):
FLAGS.default_host_filter = self.old_flag
def test_choose_filter(self):
# Test default filter ...
hf = host_filter.choose_host_filter()

View File

@@ -32,14 +32,12 @@ from nova import flags
from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
from nova.compute import power_state
from nova.virt.libvirt import connection
from nova.virt.libvirt import firewall
libvirt = None
FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')
def _concurrency(wait, done, target):
@@ -54,10 +52,15 @@ def _create_network_info(count=1, ipv6=None):
fake_ip = '0.0.0.0/0'
fake_ip_2 = '0.0.0.1/0'
fake_ip_3 = '0.0.0.1/0'
fake_vlan = 100
fake_bridge_interface = 'eth0'
network = {'bridge': fake,
'cidr': fake_ip,
'cidr_v6': fake_ip}
'cidr_v6': fake_ip,
'vlan': fake_vlan,
'bridge_interface': fake_bridge_interface}
mapping = {'mac': fake,
'dhcp_server': fake,
'gateway': fake,
'gateway6': fake,
'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
@@ -83,12 +86,13 @@ def _setup_networking(instance_id, ip='1.2.3.4'):
'virtual_interface_id': vif_ref['id']}
db.fixed_ip_create(ctxt, fixed_ip)
db.fixed_ip_update(ctxt, ip, {'allocated': True,
'instance_id': instance_id})
'instance_id': instance_id})
class CacheConcurrencyTestCase(test.TestCase):
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
self.flags(instances_path='nova.compute.manager')
def fake_exists(fname):
basedir = os.path.join(FLAGS.instances_path, '_base')
@@ -149,36 +153,15 @@ class LibvirtConnTestCase(test.TestCase):
super(LibvirtConnTestCase, self).setUp()
connection._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
try:
pjs = self.manager.get_projects()
pjs = [p for p in pjs if p.name == 'fake']
if 0 != len(pjs):
self.manager.delete_project(pjs[0])
users = self.manager.get_users()
users = [u for u in users if u.name == 'fake']
if 0 != len(users):
self.manager.delete_user(users[0])
except Exception, e:
pass
users = self.manager.get_users()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = utils.import_object(FLAGS.network_manager)
self.context = context.get_admin_context()
FLAGS.instances_path = ''
self.flags(instances_path='')
self.call_libvirt_dependant_setup = False
self.test_ip = '10.11.12.13'
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(LibvirtConnTestCase, self).tearDown()
test_ip = '10.11.12.13'
test_instance = {'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
@@ -218,9 +201,29 @@ class LibvirtConnTestCase(test.TestCase):
def setattr(self, key, val):
self.__setattr__(key, val)
# A fake VIF driver
class FakeVIFDriver(object):
def __init__(self, **kwargs):
pass
def setattr(self, key, val):
self.__setattr__(key, val)
def plug(self, instance, network, mapping):
return {
'id': 'fake',
'bridge_name': 'fake',
'mac_address': 'fake',
'ip_address': 'fake',
'dhcp_server': 'fake',
'extra_params': 'fake',
}
# Creating mocks
fake = FakeLibvirtConnection()
fakeip = FakeIptablesFirewallDriver
fakevif = FakeVIFDriver()
# Customizing above fake if necessary
for key, val in kwargs.items():
fake.__setattr__(key, val)
@@ -228,6 +231,8 @@ class LibvirtConnTestCase(test.TestCase):
# Inevitable mocks for connection.LibvirtConnection
self.mox.StubOutWithMock(connection.utils, 'import_class')
connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
self.mox.StubOutWithMock(connection.utils, 'import_object')
connection.utils.import_object(mox.IgnoreArg()).AndReturn(fakevif)
self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
connection.LibvirtConnection._conn = fake
@@ -263,7 +268,6 @@ class LibvirtConnTestCase(test.TestCase):
return db.service_create(context.get_admin_context(), service_ref)
@test.skip_test("Please review this test to ensure intent")
def test_preparing_xml_info(self):
conn = connection.LibvirtConnection(True)
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -279,43 +283,23 @@ class LibvirtConnTestCase(test.TestCase):
_create_network_info(2))
self.assertTrue(len(result['nics']) == 2)
def test_get_nic_for_xml_v4(self):
conn = connection.LibvirtConnection(True)
network, mapping = _create_network_info()[0]
self.flags(use_ipv6=False)
params = conn._get_nic_for_xml(network, mapping)['extra_params']
self.assertTrue(params.find('PROJNETV6') == -1)
self.assertTrue(params.find('PROJMASKV6') == -1)
def test_get_nic_for_xml_v6(self):
conn = connection.LibvirtConnection(True)
network, mapping = _create_network_info()[0]
self.flags(use_ipv6=True)
params = conn._get_nic_for_xml(network, mapping)['extra_params']
self.assertTrue(params.find('PROJNETV6') > -1)
self.assertTrue(params.find('PROJMASKV6') > -1)
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=False)
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
@@ -323,7 +307,6 @@ class LibvirtConnTestCase(test.TestCase):
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True)
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
@@ -331,7 +314,6 @@ class LibvirtConnTestCase(test.TestCase):
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=True)
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_lxc_container_and_uri(self):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
@@ -340,7 +322,7 @@ class LibvirtConnTestCase(test.TestCase):
if not self.lazy_load_library_exists():
return
FLAGS.image_service = 'nova.image.fake.FakeImageService'
self.flags(image_service='nova.image.fake.FakeImageService')
# Start test
image_service = utils.import_object(FLAGS.image_service)
@@ -364,7 +346,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
conn.snapshot(instance_ref, recv_meta['id'])
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
self.assertEquals(snapshot['properties']['image_state'], 'available')
@@ -375,7 +357,7 @@ class LibvirtConnTestCase(test.TestCase):
if not self.lazy_load_library_exists():
return
FLAGS.image_service = 'nova.image.fake.FakeImageService'
self.flags(image_service='nova.image.fake.FakeImageService')
# Start test
image_service = utils.import_object(FLAGS.image_service)
@@ -404,13 +386,22 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
conn.snapshot(instance_ref, recv_meta['id'])
conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_attach_invalid_device(self):
self.create_fake_libvirt_mock()
connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
self.assertRaises(exception.InvalidDevicePath,
conn.attach_volume,
"fake", "bad/device/path", "/dev/fake")
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _create_network_info(2)
@@ -428,27 +419,10 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(parameters[1].get('value'), 'fake')
def _check_xml_and_container(self, instance):
user_context = context.RequestContext(project=self.project,
user=self.user)
user_context = context.RequestContext(self.user_id,
self.project_id)
instance_ref = db.instance_create(user_context, instance)
# Re-get the instance so it's bound to an actual session
instance_ref = db.instance_get(user_context, instance_ref['id'])
network_ref = db.project_get_networks(context.get_admin_context(),
self.project.id)[0]
vif = {'address': '56:12:12:12:12:12',
'network_id': network_ref['id'],
'instance_id': instance_ref['id']}
vif_ref = db.virtual_interface_create(self.context, vif)
fixed_ip = {'address': self.test_ip,
'network_id': network_ref['id'],
'virtual_interface_id': vif_ref['id']}
ctxt = context.get_admin_context()
fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
db.fixed_ip_update(ctxt, self.test_ip,
{'allocated': True,
'instance_id': instance_ref['id']})
_setup_networking(instance_ref['id'], self.test_ip)
self.flags(libvirt_type='lxc')
conn = connection.LibvirtConnection(True)
@@ -474,13 +448,12 @@ class LibvirtConnTestCase(test.TestCase):
def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
rescue=False):
user_context = context.RequestContext(project=self.project,
user=self.user)
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, instance)
network_ref = db.project_get_networks(context.get_admin_context(),
self.project.id)[0]
self.project_id)[0]
_setup_networking(instance_ref['id'], ip=self.test_ip)
_setup_networking(instance_ref['id'], self.test_ip)
type_uri_map = {'qemu': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'qemu'),
@@ -548,7 +521,7 @@ class LibvirtConnTestCase(test.TestCase):
'disk.local')]
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
self.flags(libvirt_type=libvirt_type)
conn = connection.LibvirtConnection(True)
uri = conn.get_uri()
@@ -573,9 +546,9 @@ class LibvirtConnTestCase(test.TestCase):
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
self.flags(libvirt_uri=testuri)
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
self.flags(libvirt_type=libvirt_type)
conn = connection.LibvirtConnection(True)
uri = conn.get_uri()
self.assertEquals(uri, testuri)
@@ -583,8 +556,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_update_available_resource_works_correctly(self):
"""Confirm compute_node table is updated successfully."""
org_path = FLAGS.instances_path = ''
FLAGS.instances_path = '.'
self.flags(instances_path='.')
# Prepare mocks
def getVersion():
@@ -631,12 +603,10 @@ class LibvirtConnTestCase(test.TestCase):
self.assertTrue(compute_node['hypervisor_version'] > 0)
db.service_destroy(self.context, service_ref['id'])
FLAGS.instances_path = org_path
def test_update_resource_info_no_compute_record_found(self):
"""Raise exception if no recorde found on services table."""
org_path = FLAGS.instances_path = ''
FLAGS.instances_path = '.'
self.flags(instances_path='.')
self.create_fake_libvirt_mock()
self.mox.ReplayAll()
@@ -645,8 +615,6 @@ class LibvirtConnTestCase(test.TestCase):
conn.update_available_resource,
self.context, 'dummy')
FLAGS.instances_path = org_path
def test_ensure_filtering_rules_for_instance_timeout(self):
"""ensure_filtering_fules_for_instance() finishes with timeout."""
# Skip if non-libvirt environment
@@ -721,6 +689,9 @@ class LibvirtConnTestCase(test.TestCase):
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
self.mox.StubOutWithMock(self.compute, "recover_live_migration")
self.compute.recover_live_migration(self.context, instance_ref,
dest='dest')
# Start test
self.mox.ReplayAll()
@@ -739,7 +710,6 @@ class LibvirtConnTestCase(test.TestCase):
db.volume_destroy(self.context, volume_ref['id'])
db.instance_destroy(self.context, instance_ref['id'])
@test.skip_test("test needs rewrite: instance no longer has mac_address")
def test_spawn_with_network_info(self):
# Skip if non-libvirt environment
if not self.lazy_load_library_exists():
@@ -758,20 +728,10 @@ class LibvirtConnTestCase(test.TestCase):
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
network = db.project_get_networks(context.get_admin_context(),
self.project.id)[0]
ip_dict = {'ip': self.test_ip,
'netmask': network['netmask'],
'enabled': '1'}
mapping = {'label': network['label'],
'gateway': network['gateway'],
'mac': instance['mac_address'],
'dns': [network['dns']],
'ips': [ip_dict]}
network_info = [(network, mapping)]
network_info = _create_network_info()
try:
conn.spawn(instance, network_info)
conn.spawn(self.context, instance, network_info)
except Exception, e:
count = (0 <= str(e.message).find('Unexpected method call'))
@@ -814,11 +774,9 @@ class IptablesFirewallTestCase(test.TestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = utils.import_object(FLAGS.network_manager)
class FakeLibvirtConnection(object):
@@ -826,6 +784,7 @@ class IptablesFirewallTestCase(test.TestCase):
"""setup_basic_rules in nwfilter calls this."""
pass
self.fake_libvirt_connection = FakeLibvirtConnection()
self.test_ip = '10.11.12.13'
self.fw = firewall.IptablesFirewallDriver(
get_connection=lambda: self.fake_libvirt_connection)
@@ -843,11 +802,6 @@ class IptablesFirewallTestCase(test.TestCase):
connection.libxml2 = __import__('libxml2')
return True
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(IptablesFirewallTestCase, self).tearDown()
in_nat_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
@@ -891,27 +845,11 @@ class IptablesFirewallTestCase(test.TestCase):
'project_id': 'fake',
'instance_type_id': 1})
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
ip = '10.11.12.13'
_setup_networking(instance_ref['id'], self.test_ip)
network_ref = db.project_get_networks(self.context,
'fake',
associate=True)[0]
vif = {'address': '56:12:12:12:12:12',
'network_id': network_ref['id'],
'instance_id': instance_ref['id']}
vif_ref = db.virtual_interface_create(self.context, vif)
fixed_ip = {'address': ip,
'network_id': network_ref['id'],
'virtual_interface_id': vif_ref['id']}
admin_ctxt = context.get_admin_context()
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
'instance_id': instance_ref['id']})
secgroup = db.security_group_create(admin_ctxt,
{'user_id': 'fake',
'project_id': 'fake',
@@ -1043,7 +981,6 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_network * networks_count)
@test.skip_test("skipping libvirt tests")
def test_do_refresh_security_group_rules(self):
instance_ref = self._create_instance_ref()
self.mox.StubOutWithMock(self.fw,
@@ -1054,7 +991,6 @@ class IptablesFirewallTestCase(test.TestCase):
self.mox.ReplayAll()
self.fw.do_refresh_security_group_rules("fake")
@test.skip_test("skip libvirt test project_get_network no longer exists")
def test_unfilter_instance_undefines_nwfilter(self):
# Skip if non-libvirt environment
if not self.lazy_load_library_exists():
@@ -1068,38 +1004,24 @@ class IptablesFirewallTestCase(test.TestCase):
self.fw.nwfilter._conn.nwfilterLookupByName =\
fakefilter.nwfilterLookupByName
instance_ref = self._create_instance_ref()
inst_id = instance_ref['id']
instance = db.instance_get(self.context, inst_id)
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context, 'fake')
fixed_ip = {'address': ip, 'network_id': network_ref['id']}
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
'instance_id': inst_id})
self.fw.setup_basic_filtering(instance)
self.fw.prepare_instance_filter(instance)
self.fw.apply_instance_filter(instance)
_setup_networking(instance_ref['id'], self.test_ip)
self.fw.setup_basic_filtering(instance_ref)
self.fw.prepare_instance_filter(instance_ref)
self.fw.apply_instance_filter(instance_ref)
original_filter_count = len(fakefilter.filters)
self.fw.unfilter_instance(instance)
self.fw.unfilter_instance(instance_ref)
# should undefine just the instance filter
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
db.instance_destroy(admin_ctxt, instance_ref['id'])
@test.skip_test("skip libvirt test project_get_network no longer exists")
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
nw_info = _create_network_info(1)
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context, 'fake')
admin_ctxt = context.get_admin_context()
fixed_ip = {'address': ip, 'network_id': network_ref['id']}
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
'instance_id': instance_ref['id']})
_setup_networking(instance_ref['id'], self.test_ip)
# FRAGILE: peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
@@ -1111,6 +1033,7 @@ class IptablesFirewallTestCase(test.TestCase):
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
@@ -1161,22 +1084,16 @@ class NWFilterTestCase(test.TestCase):
class Mock(object):
pass
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext(self.user, self.project)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.fake_libvirt_connection = Mock()
self.test_ip = '10.11.12.13'
self.fw = firewall.NWFilterFirewall(
lambda: self.fake_libvirt_connection)
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(NWFilterTestCase, self).tearDown()
def test_cidr_rule_nwfilter_xml(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
@@ -1255,7 +1172,6 @@ class NWFilterTestCase(test.TestCase):
inst.update(params)
return db.instance_type_create(context, inst)['id']
@test.skip_test('Skipping this test')
def test_creates_base_rule_first(self):
# These come pre-defined by libvirt
self.defined_filters = ['no-mac-spoofing',
@@ -1287,21 +1203,11 @@ class NWFilterTestCase(test.TestCase):
instance_ref = self._create_instance()
inst_id = instance_ref['id']
ip = '10.11.12.13'
#network_ref = db.project_get_networks(self.context, 'fake')[0]
#fixed_ip = {'address': ip, 'network_id': network_ref['id']}
#admin_ctxt = context.get_admin_context()
#db.fixed_ip_create(admin_ctxt, fixed_ip)
#db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
# 'instance_id': inst_id})
self._setup_networking(instance_ref['id'], ip=ip)
_setup_networking(instance_ref['id'], self.test_ip)
def _ensure_all_called():
instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
'00A0C914C829')
'561212121212')
secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
for required in [secgroup_filter, 'allow-dhcp-server',
'no-arp-spoofing', 'no-ip-spoofing',
@@ -1322,7 +1228,7 @@ class NWFilterTestCase(test.TestCase):
self.fw.apply_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()
db.instance_destroy(admin_ctxt, instance_ref['id'])
db.instance_destroy(context.get_admin_context(), instance_ref['id'])
def test_create_network_filters(self):
instance_ref = self._create_instance()
@@ -1332,7 +1238,6 @@ class NWFilterTestCase(test.TestCase):
"fake")
self.assertEquals(len(result), 3)
@test.skip_test("skip libvirt test project_get_network no longer exists")
def test_unfilter_instance_undefines_nwfilters(self):
admin_ctxt = context.get_admin_context()
@@ -1350,12 +1255,7 @@ class NWFilterTestCase(test.TestCase):
instance = db.instance_get(self.context, inst_id)
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context, 'fake')
fixed_ip = {'address': ip, 'network_id': network_ref['id']}
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
'instance_id': inst_id})
_setup_networking(instance_ref['id'], self.test_ip)
self.fw.setup_basic_filtering(instance)
self.fw.prepare_instance_filter(instance)
self.fw.apply_instance_filter(instance)

View File

@@ -16,7 +16,7 @@
# under the License.
from nova import db
from nova import flags
from nova import exception
from nova import log as logging
from nova import test
from nova.network import manager as network_manager
@@ -25,7 +25,6 @@ from nova.network import manager as network_manager
import mox
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.network')
@@ -44,6 +43,7 @@ class FakeModel(dict):
networks = [{'id': 0,
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
@@ -53,7 +53,8 @@ networks = [{'id': 0,
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns': '192.168.0.1',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': None,
'project_id': 'fake_project',
@@ -61,6 +62,7 @@ networks = [{'id': 0,
{'id': 1,
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
@@ -70,7 +72,8 @@ networks = [{'id': 0,
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns': '192.168.0.1',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': None,
'project_id': 'fake_project',
@@ -121,34 +124,20 @@ class FlatNetworkTestCase(test.TestCase):
self.network = network_manager.FlatManager(host=HOST)
self.network.db = db
def test_set_network_hosts(self):
self.mox.StubOutWithMock(db, 'network_get_all')
self.mox.StubOutWithMock(db, 'network_set_host')
self.mox.StubOutWithMock(db, 'network_update')
db.network_get_all(mox.IgnoreArg()).AndReturn([networks[0]])
db.network_set_host(mox.IgnoreArg(),
networks[0]['id'],
mox.IgnoreArg()).AndReturn(HOST)
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.set_network_hosts(None)
def test_get_instance_nw_info(self):
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_instance')
self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
self.mox.StubOutWithMock(db, 'instance_type_get_by_id')
self.mox.StubOutWithMock(db, 'instance_type_get')
db.fixed_ip_get_by_instance(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(fixed_ips)
db.virtual_interface_get_by_instance(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(vifs)
db.instance_type_get_by_id(mox.IgnoreArg(),
db.instance_type_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(flavor)
self.mox.ReplayAll()
nw_info = self.network.get_instance_nw_info(None, 0, 0)
nw_info = self.network.get_instance_nw_info(None, 0, 0, None)
self.assertTrue(nw_info)
@@ -158,11 +147,15 @@ class FlatNetworkTestCase(test.TestCase):
'cidr': '192.168.%s.0/24' % i,
'cidr_v6': '2001:db%s::/64' % i8,
'id': i,
'injected': 'DONTCARE'}
'multi_host': False,
'injected': 'DONTCARE',
'bridge_interface': 'fake_fa%s' % i,
'vlan': None}
self.assertDictMatch(nw[0], check)
check = {'broadcast': '192.168.%s.255' % i,
'dhcp_server': '192.168.%s.1' % i,
'dns': 'DONTCARE',
'gateway': '192.168.%s.1' % i,
'gateway6': '2001:db%s::1' % i8,
@@ -170,7 +163,9 @@ class FlatNetworkTestCase(test.TestCase):
'ips': 'DONTCARE',
'label': 'test%s' % i,
'mac': 'DE:AD:BE:EF:00:0%s' % i,
'rxtx_cap': 'DONTCARE'}
'rxtx_cap': 'DONTCARE',
'should_create_vlan': False,
'should_create_bridge': False}
self.assertDictMatch(nw[1], check)
check = [{'enabled': 'DONTCARE',
@@ -238,3 +233,35 @@ class VlanNetworkTestCase(test.TestCase):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
class CommonNetworkTestCase(test.TestCase):
class FakeNetworkManager(network_manager.NetworkManager):
"""This NetworkManager doesn't call the base class so we can bypass all
inherited service cruft and just perform unit tests.
"""
class FakeDB:
def fixed_ip_get_by_instance(self, context, instance_id):
return [dict(address='10.0.0.0'), dict(address='10.0.0.1'),
dict(address='10.0.0.2')]
def __init__(self):
self.db = self.FakeDB()
self.deallocate_called = None
def deallocate_fixed_ip(self, context, address):
self.deallocate_called = address
def test_remove_fixed_ip_from_instance(self):
manager = self.FakeNetworkManager()
manager.remove_fixed_ip_from_instance(None, 99, '10.0.0.1')
self.assertEquals(manager.deallocate_called, '10.0.0.1')
def test_remove_fixed_ip_from_instance_bad_input(self):
manager = self.FakeNetworkManager()
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
None, 99, 'bad input')
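The FakeNetworkManager above shows the pattern these tests rely on: subclass the real manager, skip its __init__ so no service wiring happens, and hand it an in-memory DB plus an override that records what was called. A stripped-down sketch of the same idea with illustrative names (none of these are Nova APIs):

    class RealManager(object):
        """Stand-in for a manager whose __init__ would wire up services."""
        def __init__(self):
            raise RuntimeError("would connect to the queue and DB here")

        def remove_address(self, context, instance_id, address):
            for fixed in self.db.fixed_ips(instance_id):
                if fixed == address:
                    self.deallocate(context, address)
                    return
            raise ValueError("address %s not assigned" % address)

    class FakeManager(RealManager):
        class FakeDB(object):
            def fixed_ips(self, instance_id):
                return ['10.0.0.0', '10.0.0.1', '10.0.0.2']

        def __init__(self):
            # Deliberately does not call RealManager.__init__.
            self.db = self.FakeDB()
            self.deallocate_called = None

        def deallocate(self, context, address):
            self.deallocate_called = address

    manager = FakeManager()
    manager.remove_address(None, 99, '10.0.0.1')
    assert manager.deallocate_called == '10.0.0.1'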

View File

@@ -20,24 +20,23 @@ Unit Tests for remote procedure calls using queue
"""
from nova import context
from nova import flags
from nova import log as logging
from nova import rpc
from nova import test
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.rpc')
class RpcTestCase(test.TestCase):
def setUp(self):
super(RpcTestCase, self).setUp()
self.conn = rpc.Connection.instance(True)
self.conn = rpc.create_connection(True)
self.receiver = TestReceiver()
self.consumer = rpc.TopicAdapterConsumer(connection=self.conn,
topic='test',
proxy=self.receiver)
self.consumer = rpc.create_consumer(self.conn,
'test',
self.receiver,
False)
self.consumer.attach_to_eventlet()
self.context = context.get_admin_context()
@@ -129,6 +128,8 @@ class RpcTestCase(test.TestCase):
"""Calls echo in the passed queue"""
LOG.debug(_("Nested received %(queue)s, %(value)s")
% locals())
# TODO: so, it will replay the context and use the same REQID?
# that's bizarre.
ret = rpc.call(context,
queue,
{"method": "echo",
@@ -137,10 +138,11 @@ class RpcTestCase(test.TestCase):
return value
nested = Nested()
conn = rpc.Connection.instance(True)
consumer = rpc.TopicAdapterConsumer(connection=conn,
topic='nested',
proxy=nested)
conn = rpc.create_connection(True)
consumer = rpc.create_consumer(conn,
'nested',
nested,
False)
consumer.attach_to_eventlet()
value = 42
result = rpc.call(self.context,
@@ -149,47 +151,6 @@ class RpcTestCase(test.TestCase):
"value": value}})
self.assertEqual(value, result)
def test_connectionpool_single(self):
"""Test that ConnectionPool recycles a single connection."""
conn1 = rpc.ConnectionPool.get()
rpc.ConnectionPool.put(conn1)
conn2 = rpc.ConnectionPool.get()
rpc.ConnectionPool.put(conn2)
self.assertEqual(conn1, conn2)
def test_connectionpool_double(self):
"""Test that ConnectionPool returns and reuses separate connections.
When called consecutively we should get separate connections and upon
returning them those connections should be reused for future calls
before generating a new connection.
"""
conn1 = rpc.ConnectionPool.get()
conn2 = rpc.ConnectionPool.get()
self.assertNotEqual(conn1, conn2)
rpc.ConnectionPool.put(conn1)
rpc.ConnectionPool.put(conn2)
conn3 = rpc.ConnectionPool.get()
conn4 = rpc.ConnectionPool.get()
self.assertEqual(conn1, conn3)
self.assertEqual(conn2, conn4)
def test_connectionpool_limit(self):
"""Test connection pool limit and connection uniqueness."""
max_size = FLAGS.rpc_conn_pool_size
conns = []
for i in xrange(max_size):
conns.append(rpc.ConnectionPool.get())
self.assertFalse(rpc.ConnectionPool.free_items)
self.assertEqual(rpc.ConnectionPool.current_size,
rpc.ConnectionPool.max_size)
self.assertEqual(len(set(conns)), max_size)
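Taken together, these pool tests pin down the contract: a returned connection is handed out again before any new one is created, connections come back in the order they were returned, and the pool never grows past FLAGS.rpc_conn_pool_size. A queue-based sketch of that contract (Nova's actual pool is built on eventlet and blocks when exhausted; this sketch raises instead so it stays self-contained):

    import collections

    class SketchPool(object):
        """Reuse returned connections, in order, before creating new ones."""
        def __init__(self, create, max_size):
            self.create = create
            self.max_size = max_size
            self.current_size = 0
            self.free_items = collections.deque()

        def get(self):
            if self.free_items:
                return self.free_items.popleft()    # oldest returned item first
            if self.current_size >= self.max_size:
                raise RuntimeError("pool exhausted")
            self.current_size += 1
            return self.create()

        def put(self, conn):
            self.free_items.append(conn)

    pool = SketchPool(create=object, max_size=2)
    conn1 = pool.get()
    pool.put(conn1)
    assert pool.get() is conn1          # a single connection is recycled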
class TestReceiver(object):
"""Simple Proxy class so the consumer has methods to call.

View File

@@ -0,0 +1,88 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For RPC AMQP.
"""
from nova import context
from nova import log as logging
from nova import rpc
from nova.rpc import amqp
from nova import test
LOG = logging.getLogger('nova.tests.rpc')
class RpcAMQPTestCase(test.TestCase):
def setUp(self):
super(RpcAMQPTestCase, self).setUp()
self.conn = rpc.create_connection(True)
self.receiver = TestReceiver()
self.consumer = rpc.create_consumer(self.conn,
'test',
self.receiver,
False)
self.consumer.attach_to_eventlet()
self.context = context.get_admin_context()
def test_connectionpool_single(self):
"""Test that ConnectionPool recycles a single connection."""
conn1 = amqp.ConnectionPool.get()
amqp.ConnectionPool.put(conn1)
conn2 = amqp.ConnectionPool.get()
amqp.ConnectionPool.put(conn2)
self.assertEqual(conn1, conn2)
class TestReceiver(object):
"""Simple Proxy class so the consumer has methods to call.
Uses static methods because we aren't actually storing any state.
"""
@staticmethod
def echo(context, value):
"""Simply returns whatever value is sent in."""
LOG.debug(_("Received %s"), value)
return value
@staticmethod
def context(context, value):
"""Returns dictionary version of context."""
LOG.debug(_("Received %s"), context)
return context.to_dict()
@staticmethod
def echo_three_times(context, value):
context.reply(value)
context.reply(value + 1)
context.reply(value + 2)
@staticmethod
def echo_three_times_yield(context, value):
yield value
yield value + 1
yield value + 2
@staticmethod
def fail(context, value):
"""Raises an exception with the value sent in."""
raise Exception(value)
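The TestReceiver docstring explains why a plain class with static methods is enough: the consumer looks up the named method on its proxy object and calls it with the context and unpacked args, so no state or base class is needed. A rough sketch of that dispatch step (the real consumer also deserializes the context and sends replies back over AMQP):

    def dispatch(proxy, message):
        """Map an rpc-style message onto a method of the proxy object."""
        method = getattr(proxy, message['method'])
        return method(message.get('context'), **message.get('args', {}))

    class Receiver(object):
        @staticmethod
        def echo(context, value):
            return value

    assert dispatch(Receiver(), {'method': 'echo', 'args': {'value': 42}}) == 42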

View File

@@ -1,53 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import StringIO
import sys
from nova import twistd
from nova import exception
from nova import flags
from nova import test
FLAGS = flags.FLAGS
class TwistdTestCase(test.TestCase):
def setUp(self):
super(TwistdTestCase, self).setUp()
self.Options = twistd.WrapTwistedOptions(twistd.TwistdServerOptions)
sys.stdout = StringIO.StringIO()
def tearDown(self):
super(TwistdTestCase, self).tearDown()
sys.stdout = sys.__stdout__
def test_basic(self):
options = self.Options()
argv = options.parseOptions()
def test_logfile(self):
options = self.Options()
argv = options.parseOptions(['--logfile=foo'])
self.assertEqual(FLAGS.logfile, 'foo')
def test_help(self):
options = self.Options()
self.assertRaises(SystemExit, options.parseOptions, ['--help'])
self.assert_('pidfile' in sys.stdout.getvalue())

View File

@@ -19,14 +19,11 @@
Test suite for VMWareAPI.
"""
import stubout
from nova import context
from nova import db
from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import power_state
from nova.tests.glance import stubs as glance_stubs
from nova.tests.vmwareapi import db_fakes
@@ -41,51 +38,64 @@ FLAGS = flags.FLAGS
class VMWareAPIVMTestCase(test.TestCase):
"""Unit tests for Vmware API connection calls."""
# NOTE(jkoelker): This is leaking stubs into the db module.
# Commenting out until updated for multi-nic.
#def setUp(self):
# super(VMWareAPIVMTestCase, self).setUp()
# self.flags(vmwareapi_host_ip='test_url',
# vmwareapi_host_username='test_username',
# vmwareapi_host_password='test_pass')
# self.manager = manager.AuthManager()
# self.user = self.manager.create_user('fake', 'fake', 'fake',
# admin=True)
# self.project = self.manager.create_project('fake', 'fake', 'fake')
# self.network = utils.import_object(FLAGS.network_manager)
# self.stubs = stubout.StubOutForTesting()
# vmwareapi_fake.reset()
# db_fakes.stub_out_db_instance_api(self.stubs)
# stubs.set_stubs(self.stubs)
# glance_stubs.stubout_glance_client(self.stubs,
# glance_stubs.FakeGlance)
# self.conn = vmwareapi_conn.get_connection(False)
def setUp(self):
super(VMWareAPIVMTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake', False)
self.flags(vmwareapi_host_ip='test_url',
vmwareapi_host_username='test_username',
vmwareapi_host_password='test_pass')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = utils.import_object(FLAGS.network_manager)
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
self.conn = vmwareapi_conn.get_connection(False)
# NOTE(vish): none of the network plugging code is actually
# being tested
self.network_info = [({'bridge': 'fa0',
'id': 0,
'vlan': None,
'bridge_interface': None,
'injected': True},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
#def tearDown(self):
# super(VMWareAPIVMTestCase, self).tearDown()
# vmwareapi_fake.cleanup()
# self.manager.delete_project(self.project)
# self.manager.delete_user(self.user)
# self.stubs.UnsetAll()
def tearDown(self):
super(VMWareAPIVMTestCase, self).tearDown()
vmwareapi_fake.cleanup()
def _create_instance_in_the_db(self):
values = {'name': 1,
'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': "1",
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
}
self.instance = db.instance_create(values)
self.instance = db.instance_create(None, values)
def _create_vm(self):
"""Create and spawn the VM."""
self._create_instance_in_the_db()
self.type_data = db.instance_type_get_by_name(None, 'm1.large')
self.conn.spawn(self.instance)
self.conn.spawn(self.context, self.instance, self.network_info)
self._check_vm_record()
def _check_vm_record(self):
@@ -129,53 +139,45 @@ class VMWareAPIVMTestCase(test.TestCase):
self.assertEquals(info["mem"], mem_kib)
self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_list_instances(self):
instances = self.conn.list_instances()
self.assertEquals(len(instances), 0)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_list_instances_1(self):
self._create_vm()
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_spawn(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_snapshot(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
self.conn.snapshot(self.instance, "Test-Snapshot")
self.conn.snapshot(self.context, self.instance, "Test-Snapshot")
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_snapshot_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(Exception, self.conn.snapshot, self.instance,
"Test-Snapshot")
self.assertRaises(Exception, self.conn.snapshot, self.context,
self.instance, "Test-Snapshot")
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_reboot(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
self.conn.reboot(self.instance)
self.conn.reboot(self.instance, self.network_info)
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_reboot_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(Exception, self.conn.reboot, self.instance)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_reboot_not_poweredon(self):
self._create_vm()
info = self.conn.get_info(1)
@@ -185,7 +187,6 @@ class VMWareAPIVMTestCase(test.TestCase):
self._check_vm_info(info, power_state.PAUSED)
self.assertRaises(Exception, self.conn.reboot, self.instance)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_suspend(self):
self._create_vm()
info = self.conn.get_info(1)
@@ -194,13 +195,11 @@ class VMWareAPIVMTestCase(test.TestCase):
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.PAUSED)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_suspend_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(Exception, self.conn.suspend, self.instance,
self.dummy_callback_handler)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_resume(self):
self._create_vm()
info = self.conn.get_info(1)
@@ -212,13 +211,11 @@ class VMWareAPIVMTestCase(test.TestCase):
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_resume_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(Exception, self.conn.resume, self.instance,
self.dummy_callback_handler)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_resume_not_suspended(self):
self._create_vm()
info = self.conn.get_info(1)
@@ -226,49 +223,41 @@ class VMWareAPIVMTestCase(test.TestCase):
self.assertRaises(Exception, self.conn.resume, self.instance,
self.dummy_callback_handler)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_get_info(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_destroy(self):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEquals(len(instances), 1)
self.conn.destroy(self.instance)
self.conn.destroy(self.instance, self.network_info)
instances = self.conn.list_instances()
self.assertEquals(len(instances), 0)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_destroy_non_existent(self):
self._create_instance_in_the_db()
self.assertEquals(self.conn.destroy(self.instance), None)
self.assertEquals(self.conn.destroy(self.instance, self.network_info),
None)
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_pause(self):
pass
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_unpause(self):
pass
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_diagnostics(self):
pass
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_get_console_output(self):
pass
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def test_get_ajax_console(self):
pass
@test.skip_test("DB stubbing not removed, needs updating for multi-nic")
def dummy_callback_handler(self, ret):
"""
Dummy callback function to be passed to suspend, resume, etc., calls.

View File

@@ -27,8 +27,10 @@ from nova import exception
from nova import db
from nova import flags
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
from nova import volume
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.volume')
@@ -43,6 +45,11 @@ class VolumeTestCase(test.TestCase):
self.flags(connection_type='fake')
self.volume = utils.import_object(FLAGS.volume_manager)
self.context = context.get_admin_context()
self.instance_id = db.instance_create(self.context, {})['id']
def tearDown(self):
db.instance_destroy(self.context, self.instance_id)
super(VolumeTestCase, self).tearDown()
@staticmethod
def _create_volume(size='0', snapshot_id=None):
@@ -223,6 +230,30 @@ class VolumeTestCase(test.TestCase):
snapshot_id)
self.volume.delete_volume(self.context, volume_id)
def test_create_snapshot_force(self):
"""Test snapshot in use can be created forcibly."""
def fake_cast(ctxt, topic, msg):
pass
self.stubs.Set(rpc, 'cast', fake_cast)
volume_id = self._create_volume()
self.volume.create_volume(self.context, volume_id)
db.volume_attached(self.context, volume_id, self.instance_id,
'/dev/sda1')
volume_api = volume.api.API()
self.assertRaises(exception.ApiError,
volume_api.create_snapshot,
self.context, volume_id,
'fake_name', 'fake_description')
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume_id,
'fake_name',
'fake_description')
db.snapshot_destroy(self.context, snapshot_ref['id'])
db.volume_destroy(self.context, volume_id)
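The test above encodes the API contract: snapshotting an attached ('in-use') volume must fail through the normal call and succeed only through the explicit force variant. A small sketch of that guard with illustrative names (Nova exposes it as a separate create_snapshot_force method rather than a keyword flag):

    class ApiError(Exception):
        pass

    def create_snapshot(volume, name, force=False):
        """Refuse to snapshot an attached volume unless explicitly forced."""
        if volume['status'] == 'in-use' and not force:
            raise ApiError("volume %s is attached" % volume['id'])
        return {'volume_id': volume['id'], 'display_name': name}

    vol = {'id': 1, 'status': 'in-use'}
    try:
        create_snapshot(vol, 'fake_name')           # plain call must refuse
    except ApiError:
        pass
    snapshot = create_snapshot(vol, 'fake_name', force=True)   # forced call succeeds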
class DriverTestCase(test.TestCase):
"""Base Test class for Drivers."""

View File

@@ -30,7 +30,6 @@ from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova import exception
@@ -69,15 +68,17 @@ class XenAPIVolumeTestCase(test.TestCase):
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.context = context.RequestContext('fake', 'fake', False)
FLAGS.target_host = '127.0.0.1'
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass')
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
self.values = {'id': 1,
'project_id': 'fake',
'project_id': self.project_id,
'user_id': 'fake',
'image_ref': 1,
'kernel_id': 2,
@@ -169,14 +170,14 @@ def reset_network(*args):
pass
def _find_rescue_vbd_ref(*args):
pass
class XenAPIVMTestCase(test.TestCase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
self.stubs = stubout.StubOutForTesting()
self.flags(xenapi_connection_url='test_url',
@@ -192,10 +193,14 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_stream_disk(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(vmops.VMOps, 'reset_network', reset_network)
self.stubs.Set(vmops.VMOps, '_find_rescue_vbd_ref',
_find_rescue_vbd_ref)
stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
self.context = context.RequestContext('fake', 'fake', False)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.get_connection(False)
def test_parallel_builds(self):
@@ -227,10 +232,10 @@ class XenAPIVMTestCase(test.TestCase):
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
instance = db.instance_create(self.context, values)
self.conn.spawn(instance, network_info)
self.conn.spawn(self.context, instance, network_info)
gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id)
gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id)
gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id)
gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id)
gt1.wait()
gt2.wait()
@@ -257,14 +262,15 @@ class XenAPIVMTestCase(test.TestCase):
instance = self._create_instance()
name = "MySnapshot"
self.assertRaises(exception.Error, self.conn.snapshot, instance, name)
self.assertRaises(exception.Error, self.conn.snapshot,
self.context, instance, name)
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
name = "MySnapshot"
template_vm_ref = self.conn.snapshot(instance, name)
template_vm_ref = self.conn.snapshot(self.context, instance, name)
def ensure_vm_was_torn_down():
vm_labels = []
@@ -396,18 +402,22 @@ class XenAPIVMTestCase(test.TestCase):
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
architecture="x86-64", instance_id=1,
check_injection=False):
check_injection=False,
create_record=True, empty_dns=False):
stubs.stubout_loopingcall_start(self.stubs)
values = {'id': instance_id,
'project_id': self.project.id,
'user_id': self.user.id,
'image_ref': image_ref,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'instance_type_id': instance_type_id,
'os_type': os_type,
'architecture': architecture}
instance = db.instance_create(self.context, values)
if create_record:
values = {'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': image_ref,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'instance_type_id': instance_type_id,
'os_type': os_type,
'architecture': architecture}
instance = db.instance_create(self.context, values)
else:
instance = db.instance_get(self.context, instance_id)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
@@ -422,14 +432,23 @@ class XenAPIVMTestCase(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
self.conn.spawn(instance, network_info)
if empty_dns:
network_info[0][1]['dns'] = []
self.conn.spawn(self.context, instance, network_info)
self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance.os_type)
self.assertTrue(instance.architecture)
def test_spawn_empty_dns(self):
""""Test spawning with an empty dns list"""
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
self.check_vm_params_for_linux()
def test_spawn_not_enough_memory(self):
FLAGS.xenapi_image_service = 'glance'
self.assertRaises(Exception,
self._test_spawn,
1, 2, 3, "4") # m1.xlarge
@@ -441,7 +460,6 @@ class XenAPIVMTestCase(test.TestCase):
"""
vdi_recs_start = self._list_vdis()
FLAGS.xenapi_image_service = 'glance'
stubs.stubout_fetch_image_glance_disk(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
@@ -456,7 +474,6 @@ class XenAPIVMTestCase(test.TestCase):
"""
vdi_recs_start = self._list_vdis()
FLAGS.xenapi_image_service = 'glance'
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
@@ -464,22 +481,12 @@ class XenAPIVMTestCase(test.TestCase):
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
def test_spawn_raw_objectstore(self):
FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, None, None)
def test_spawn_objectstore(self):
FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, 2, 3)
@stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_linux(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
@@ -508,20 +515,17 @@ class XenAPIVMTestCase(test.TestCase):
self.assertEqual(len(self.vm['VBDs']), 1)
def test_spawn_vhd_glance_windows(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_netinject_file(self):
FLAGS.xenapi_image_service = 'glance'
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
@@ -547,7 +551,6 @@ class XenAPIVMTestCase(test.TestCase):
# Capture the sudo tee .../etc/network/interfaces command
(r'(sudo\s+)?tee.*interfaces', _tee_handler),
])
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
@@ -555,7 +558,6 @@ class XenAPIVMTestCase(test.TestCase):
self.assertTrue(self._tee_executed)
def test_spawn_netinject_xenstore(self):
FLAGS.xenapi_image_service = 'glance'
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
@@ -599,41 +601,38 @@ class XenAPIVMTestCase(test.TestCase):
# guest agent is detected
self.assertFalse(self._tee_executed)
@test.skip_test("Never gets an address, not sure why")
def test_spawn_vlanmanager(self):
self.flags(xenapi_image_service='glance',
self.flags(image_service='nova.image.glance.GlanceImageService',
network_manager='nova.network.manager.VlanManager',
network_driver='nova.network.xenapi_net',
vlan_interface='fake0')
def dummy(*args, **kwargs):
pass
self.stubs.Set(VMOps, 'create_vifs', dummy)
self.stubs.Set(vmops.VMOps, 'create_vifs', dummy)
# Reset network table
xenapi_fake.reset_table('network')
# Instance id = 2 will use vlan network (see db/fakes.py)
ctxt = self.context.elevated()
instance_ref = self._create_instance(2)
network_bk = self.network
# Ensure we use xenapi_net driver
self.network = utils.import_object(FLAGS.network_manager)
instance = self._create_instance(2, False)
networks = self.network.db.network_get_all(ctxt)
for network in networks:
self.network.set_network_host(ctxt, network['id'])
self.network.set_network_host(ctxt, network)
self.network.allocate_for_instance(ctxt, instance_id=instance_ref.id,
instance_type_id=1, project_id=self.project.id)
self.network.setup_compute_network(ctxt, instance_ref.id)
self.network.allocate_for_instance(ctxt,
instance_id=2,
host=FLAGS.host,
vpn=None,
instance_type_id=1,
project_id=self.project_id)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
instance_id=instance_ref.id,
instance_id=2,
create_record=False)
# TODO(salvatore-orlando): a complete test here would require
# a check for making sure the bridge for the VM's VIF is
# consistent with bridge specified in nova db
self.network = network_bk
def test_spawn_with_network_qos(self):
self._create_instance()
@@ -644,10 +643,10 @@ class XenAPIVMTestCase(test.TestCase):
str(3 * 1024))
def test_rescue(self):
self.flags(xenapi_inject_image=False)
self.flags(flat_injected=False)
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
conn.rescue(instance, None)
conn.rescue(self.context, instance, None, [])
def test_unrescue(self):
instance = self._create_instance()
@@ -655,21 +654,13 @@ class XenAPIVMTestCase(test.TestCase):
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(Exception, conn.unrescue, instance, None)
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
self.vm_info = None
self.vm = None
self.stubs.UnsetAll()
def _create_instance(self, instance_id=1):
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
stubs.stubout_loopingcall_start(self.stubs)
values = {
'id': instance_id,
'project_id': self.project.id,
'user_id': self.user.id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
@@ -691,7 +682,8 @@ class XenAPIVMTestCase(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
self.conn.spawn(instance, network_info)
if spawn:
self.conn.spawn(self.context, instance, network_info)
return instance
@@ -743,21 +735,19 @@ class XenAPIMigrateInstance(test.TestCase):
def setUp(self):
super(XenAPIMigrateInstance, self).setUp()
self.stubs = stubout.StubOutForTesting()
FLAGS.target_host = '127.0.0.1'
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass')
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake', False)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.values = {'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
@@ -771,20 +761,27 @@ class XenAPIMigrateInstance(test.TestCase):
stubs.stubout_get_this_vm_uuid(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
def tearDown(self):
super(XenAPIMigrateInstance, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
self.stubs.UnsetAll()
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.values)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn.migrate_disk_and_power_off(instance, '127.0.0.1')
def test_finish_resize(self):
def test_finish_migrate(self):
instance = db.instance_create(self.context, self.values)
self.called = False
self.fake_vm_start_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
@@ -802,8 +799,90 @@ class XenAPIMigrateInstance(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'),
network_info)
conn.finish_migration(self.context, instance,
dict(base_copy='hurr', cow='durr'),
network_info, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
def test_finish_migrate_no_local_storage(self):
tiny_type_id = \
instance_types.get_instance_type_by_name('m1.tiny')['id']
self.values.update({'instance_type_id': tiny_type_id, 'local_gb': 0})
instance = db.instance_create(self.context, self.values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
conn.finish_migration(self.context, instance,
dict(base_copy='hurr', cow='durr'),
network_info, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = db.instance_create(self.context, self.values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
# Resize instance would be determined by the compute call
conn.finish_migration(self.context, instance,
dict(base_copy='hurr', cow='durr'),
network_info, resize_instance=False)
class XenAPIImageTypeTestCase(test.TestCase):
"""Test ImageType class."""
def test_to_string(self):
"""Can convert from type id to type string."""
self.assertEquals(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
def test_from_string(self):
"""Can convert from string to type id."""
self.assertEquals(
vm_utils.ImageType.from_string(vm_utils.ImageType.KERNEL_STR),
vm_utils.ImageType.KERNEL)
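The two tests above only assert a round trip between numeric image-type ids and their string names. A minimal mapping that satisfies it (the member values here are illustrative, not copied from nova.virt.xenapi.vm_utils):

    class ImageType(object):
        KERNEL, RAMDISK, DISK = range(3)
        KERNEL_STR, RAMDISK_STR, DISK_STR = 'kernel', 'ramdisk', 'os'

        _ID_TO_STR = {KERNEL: KERNEL_STR, RAMDISK: RAMDISK_STR, DISK: DISK_STR}
        _STR_TO_ID = dict((s, i) for i, s in _ID_TO_STR.items())

        @classmethod
        def to_string(cls, image_type_id):
            return cls._ID_TO_STR[image_type_id]

        @classmethod
        def from_string(cls, image_type_str):
            return cls._STR_TO_ID[image_type_str]

    assert ImageType.to_string(ImageType.KERNEL) == ImageType.KERNEL_STR
    assert ImageType.from_string(ImageType.KERNEL_STR) == ImageType.KERNEL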
class XenAPIDetermineDiskImageTestCase(test.TestCase):
@@ -827,7 +906,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
def test_instance_disk(self):
"""If a kernel is specified, the image type is DISK (aka machine)."""
FLAGS.xenapi_image_service = 'objectstore'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE
self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
self.assert_disk_type(vm_utils.ImageType.DISK)
@@ -837,7 +915,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
If the kernel isn't specified, and we're not using Glance, then
DISK_RAW is assumed.
"""
FLAGS.xenapi_image_service = 'objectstore'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
@@ -847,7 +924,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
If we're using Glance, then defer to the image_type field, which in
this case will be 'raw'.
"""
FLAGS.xenapi_image_service = 'glance'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
@@ -857,7 +933,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
If we're using Glance, then defer to the image_type field, which in
this case will be 'vhd'.
"""
FLAGS.xenapi_image_service = 'glance'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)

View File

@@ -70,8 +70,8 @@ def stub_out_db_instance_api(stubs):
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
'instance_type': values['instance_type'],
'memory_mb': type_data['memory_mb'],
'mac_address': values['mac_address'],
'vcpus': type_data['vcpus'],
'mac_addresses': [{'address': values['mac_address']}],
'local_gb': type_data['local_gb'],
}
return FakeModel(base_options)
@@ -83,6 +83,8 @@ def stub_out_db_instance_api(stubs):
'bridge': 'vmnet0',
'netmask': '255.255.255.0',
'gateway': '10.10.10.1',
'broadcast': '10.10.10.255',
'dns1': 'fake',
'vlan': 100}
return FakeModel(fields)
@@ -90,7 +92,7 @@ def stub_out_db_instance_api(stubs):
"""Stubs out the db.instance_action_create method."""
pass
def fake_instance_get_fixed_address(context, instance_id):
def fake_instance_get_fixed_addresses(context, instance_id):
"""Stubs out the db.instance_get_fixed_address method."""
return '10.10.10.10'
@@ -103,7 +105,7 @@ def stub_out_db_instance_api(stubs):
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
stubs.Set(db, 'instance_action_create', fake_instance_action_create)
stubs.Set(db, 'instance_get_fixed_address',
fake_instance_get_fixed_address)
stubs.Set(db, 'instance_get_fixed_addresses',
fake_instance_get_fixed_addresses)
stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)

View File

@@ -22,6 +22,8 @@ Stubouts for the test suite
from nova.virt import vmwareapi_conn
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vmware_images
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import network_utils
def fake_get_vim_object(arg):
@@ -36,10 +38,15 @@ def fake_is_vim_object(arg, module):
def set_stubs(stubs):
"""Set the stubs."""
stubs.Set(vmops.VMWareVMOps, 'plug_vifs', fake.fake_plug_vifs)
stubs.Set(network_utils, 'get_network_with_the_name',
fake.fake_get_network)
stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
fake.fake_get_vmdk_size_and_properties)
stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
stubs.Set(vmwareapi_conn.VMWareAPISession, "_get_vim_object",
fake_get_vim_object)
stubs.Set(vmwareapi_conn.VMWareAPISession, "_get_vim_object",
fake_get_vim_object)
stubs.Set(vmwareapi_conn.VMWareAPISession, "_is_vim_object",

View File

@@ -28,8 +28,8 @@ from nova import utils
def stubout_instance_snapshot(stubs):
@classmethod
def fake_fetch_image(cls, session, instance_id, image, user, project,
type):
def fake_fetch_image(cls, context, session, instance_id, image, user,
project, type):
from nova.virt.xenapi.fake import create_vdi
name_label = "instance-%s" % instance_id
#TODO: create fake SR record
@@ -227,7 +227,7 @@ def stub_out_vm_methods(stubs):
def fake_release_bootlock(self, vm):
pass
def fake_spawn_rescue(self, inst):
def fake_spawn_rescue(self, context, inst, network_info):
inst._rescue = False
stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown)

View File

@@ -1,267 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Twisted daemon helpers, specifically to parse out gFlags from twisted flags,
manage pid files and support syslogging.
"""
import gflags
import os
import signal
import sys
import time
from twisted.scripts import twistd
from twisted.python import log
from twisted.python import reflect
from twisted.python import runtime
from twisted.python import usage
from nova import flags
from nova import log as logging
if runtime.platformType == "win32":
from twisted.scripts._twistw import ServerOptions
else:
from twisted.scripts._twistd_unix import ServerOptions
FLAGS = flags.FLAGS
class TwistdServerOptions(ServerOptions):
def parseArgs(self, *args):
return
class FlagParser(object):
# this is a required attribute for gflags
syntactic_help = ''
def __init__(self, parser):
self.parser = parser
def Parse(self, s):
return self.parser(s)
def WrapTwistedOptions(wrapped):
class TwistedOptionsToFlags(wrapped):
subCommands = None
def __init__(self):
# NOTE(termie): _data exists because Twisted stuff expects
# to be able to set arbitrary things that are
# not actual flags
self._data = {}
self._flagHandlers = {}
self._paramHandlers = {}
# Absorb the twistd flags into our FLAGS
self._absorbFlags()
self._absorbParameters()
self._absorbHandlers()
wrapped.__init__(self)
def _absorbFlags(self):
twistd_flags = []
reflect.accumulateClassList(self.__class__, 'optFlags',
twistd_flags)
for flag in twistd_flags:
key = flag[0].replace('-', '_')
if hasattr(FLAGS, key):
continue
flags.DEFINE_boolean(key, None, str(flag[-1]))
def _absorbParameters(self):
twistd_params = []
reflect.accumulateClassList(self.__class__, 'optParameters',
twistd_params)
for param in twistd_params:
key = param[0].replace('-', '_')
if hasattr(FLAGS, key):
continue
if len(param) > 4:
flags.DEFINE(FlagParser(param[4]),
key, param[2], str(param[3]),
serializer=gflags.ArgumentSerializer())
else:
flags.DEFINE_string(key, param[2], str(param[3]))
def _absorbHandlers(self):
twistd_handlers = {}
reflect.addMethodNamesToDict(self.__class__, twistd_handlers,
"opt_")
# NOTE(termie): Much of the following is derived/copied from
# twisted.python.usage with the express purpose of
# providing compatibility
for name in twistd_handlers.keys():
method = getattr(self, 'opt_' + name)
takesArg = not usage.flagFunction(method, name)
doc = getattr(method, '__doc__', None)
if not doc:
doc = 'undocumented'
if not takesArg:
if name not in FLAGS:
flags.DEFINE_boolean(name, None, doc)
self._flagHandlers[name] = method
else:
if name not in FLAGS:
flags.DEFINE_string(name, None, doc)
self._paramHandlers[name] = method
def _doHandlers(self):
for flag, handler in self._flagHandlers.iteritems():
if self[flag]:
handler()
for param, handler in self._paramHandlers.iteritems():
if self[param] is not None:
handler(self[param])
def __str__(self):
return str(FLAGS)
def parseOptions(self, options=None):
if options is None:
options = sys.argv
else:
options.insert(0, '')
args = FLAGS(options)
logging.setup()
argv = args[1:]
# ignore subcommands
try:
self.parseArgs(*argv)
except TypeError:
raise usage.UsageError(_("Wrong number of arguments."))
self.postOptions()
return args
def parseArgs(self, *args):
# TODO(termie): figure out a decent way of dealing with args
#return
wrapped.parseArgs(self, *args)
def postOptions(self):
self._doHandlers()
wrapped.postOptions(self)
def __getitem__(self, key):
key = key.replace('-', '_')
try:
return getattr(FLAGS, key)
except (AttributeError, KeyError):
return self._data[key]
def __setitem__(self, key, value):
key = key.replace('-', '_')
try:
return setattr(FLAGS, key, value)
except (AttributeError, KeyError):
self._data[key] = value
def get(self, key, default):
key = key.replace('-', '_')
try:
return getattr(FLAGS, key)
except (AttributeError, KeyError):
self._data.get(key, default)
return TwistedOptionsToFlags
def stop(pidfile):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = _("pidfile %s does not exist. Daemon not running?\n")
sys.stderr.write(message % pidfile)
# Not an error in a restart
return
# Try killing the daemon process
try:
while 1:
os.kill(pid, signal.SIGKILL)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find(_("No such process")) > 0:
if os.path.exists(pidfile):
os.remove(pidfile)
else:
print str(err)
sys.exit(1)
def serve(filename):
logging.debug(_("Serving %s") % filename)
name = os.path.basename(filename)
OptionsClass = WrapTwistedOptions(TwistdServerOptions)
options = OptionsClass()
argv = options.parseOptions()
FLAGS.python = filename
FLAGS.no_save = True
if not FLAGS.pidfile:
FLAGS.pidfile = '%s.pid' % name
elif FLAGS.pidfile.endswith('twistd.pid'):
FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
if not FLAGS.prefix:
FLAGS.prefix = name
elif FLAGS.prefix.endswith('twisted'):
FLAGS.prefix = FLAGS.prefix.replace('twisted', name)
action = 'start'
if len(argv) > 1:
action = argv.pop()
if action == 'stop':
stop(FLAGS.pidfile)
sys.exit()
elif action == 'restart':
stop(FLAGS.pidfile)
elif action == 'start':
pass
else:
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
logging.audit(_("Starting %s"), name)
twistd.runApp(options)

View File

@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-03-19 06:18+0000\n"
"X-Generator: Launchpad (build 12559)\n"
"X-Launchpad-Export-Date: 2011-07-23 05:11+0000\n"
"X-Generator: Launchpad (build 13405)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
#: ../nova/twistd.py:157
msgid "Wrong number of arguments."
msgstr ""
#: ../nova/twistd.py:209
#, python-format
msgid "pidfile %s does not exist. Daemon not running?\n"
msgstr ""
#: ../nova/twistd.py:221
msgid "No such process"
msgstr ""
#: ../nova/twistd.py:230 ../nova/service.py:224
#, python-format
msgid "Serving %s"
msgstr ""
#: ../nova/twistd.py:262 ../nova/service.py:225
msgid "Full set of FLAGS:"
msgstr ""
#: ../nova/twistd.py:266
#, python-format
msgid "Starting %s"
msgstr ""
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
#: ../nova/compute/monitor.py:259
#, python-format
msgid "updating %s..."
msgstr ""
#: ../nova/compute/monitor.py:289
msgid "unexpected error during update"
msgstr ""
#: ../nova/compute/monitor.py:356
#, python-format
msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:379
#, python-format
msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:414
msgid "unexpected exception getting connection"
msgstr ""
#: ../nova/compute/monitor.py:429
#, python-format
msgid "Found instance: %s"
msgstr ""
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2263,10 +2208,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
#: ../bin/nova-instancemonitor.py:55
msgid "Starting instance monitor"
msgstr ""
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""

View File

@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-03-19 06:18+0000\n"
"X-Generator: Launchpad (build 12559)\n"
"X-Launchpad-Export-Date: 2011-07-23 05:11+0000\n"
"X-Generator: Launchpad (build 13405)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
#: ../nova/twistd.py:157
msgid "Wrong number of arguments."
msgstr ""
#: ../nova/twistd.py:209
#, python-format
msgid "pidfile %s does not exist. Daemon not running?\n"
msgstr ""
#: ../nova/twistd.py:221
msgid "No such process"
msgstr ""
#: ../nova/twistd.py:230 ../nova/service.py:224
#, python-format
msgid "Serving %s"
msgstr ""
#: ../nova/twistd.py:262 ../nova/service.py:225
msgid "Full set of FLAGS:"
msgstr ""
#: ../nova/twistd.py:266
#, python-format
msgid "Starting %s"
msgstr ""
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
#: ../nova/compute/monitor.py:259
#, python-format
msgid "updating %s..."
msgstr ""
#: ../nova/compute/monitor.py:289
msgid "unexpected error during update"
msgstr ""
#: ../nova/compute/monitor.py:356
#, python-format
msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:379
#, python-format
msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:414
msgid "unexpected exception getting connection"
msgstr ""
#: ../nova/compute/monitor.py:429
#, python-format
msgid "Found instance: %s"
msgstr ""
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2265,10 +2210,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
#: ../bin/nova-instancemonitor.py:55
msgid "Starting instance monitor"
msgstr ""
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""

View File

@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-03-19 06:18+0000\n"
"X-Generator: Launchpad (build 12559)\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
#: ../nova/twistd.py:157
msgid "Wrong number of arguments."
msgstr ""
#: ../nova/twistd.py:209
#, python-format
msgid "pidfile %s does not exist. Daemon not running?\n"
msgstr ""
#: ../nova/twistd.py:221
msgid "No such process"
msgstr ""
#: ../nova/twistd.py:230 ../nova/service.py:224
#, python-format
msgid "Serving %s"
msgstr ""
#: ../nova/twistd.py:262 ../nova/service.py:225
msgid "Full set of FLAGS:"
msgstr ""
#: ../nova/twistd.py:266
#, python-format
msgid "Starting %s"
msgstr ""
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
#: ../nova/compute/monitor.py:259
#, python-format
msgid "updating %s..."
msgstr ""
#: ../nova/compute/monitor.py:289
msgid "unexpected error during update"
msgstr ""
#: ../nova/compute/monitor.py:356
#, python-format
msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:379
#, python-format
msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:414
msgid "unexpected exception getting connection"
msgstr ""
#: ../nova/compute/monitor.py:429
#, python-format
msgid "Found instance: %s"
msgstr ""
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2263,10 +2208,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
#: ../bin/nova-instancemonitor.py:55
msgid "Starting instance monitor"
msgstr ""
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""

104
po/de.po
View File

@@ -8,14 +8,14 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
"PO-Revision-Date: 2011-04-03 19:42+0000\n"
"Last-Translator: Matthias Loidolt <kedapperdrake@googlemail.com>\n"
"PO-Revision-Date: 2011-06-06 07:58+0000\n"
"Last-Translator: Christian Berendt <Unknown>\n"
"Language-Team: German <de@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-04-04 05:19+0000\n"
"X-Generator: Launchpad (build 12559)\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -85,6 +85,7 @@ msgstr ""
#, python-format
msgid "%(param)s property not found for image %(_image_id)s"
msgstr ""
"Die Property %(param)s konnte im Image %(_image_id)s nicht gefunden werden"
#: ../nova/api/openstack/servers.py:168
msgid "No keypairs defined"
@@ -130,33 +131,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
#: ../nova/twistd.py:157
msgid "Wrong number of arguments."
msgstr ""
#: ../nova/twistd.py:209
#, python-format
msgid "pidfile %s does not exist. Daemon not running?\n"
msgstr "PID-Datei %s existiert nicht. Läuft der Daemon nicht?\n"
#: ../nova/twistd.py:221
msgid "No such process"
msgstr ""
#: ../nova/twistd.py:230 ../nova/service.py:224
#, python-format
msgid "Serving %s"
msgstr ""
#: ../nova/twistd.py:262 ../nova/service.py:225
msgid "Full set of FLAGS:"
msgstr "Alle vorhandenen FLAGS:"
#: ../nova/twistd.py:266
#, python-format
msgid "Starting %s"
msgstr "%s wird gestartet"
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -183,12 +157,13 @@ msgstr ""
#: ../nova/virt/xenapi/volumeops.py:91
#, python-format
msgid "Unable to attach volume to instance %s"
msgstr ""
msgstr "Nicht möglich Volumen zur Instanze %s hinzuzufügen"
#: ../nova/virt/xenapi/volumeops.py:93
#, python-format
msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
msgstr ""
"Einhängepunkt%(mountpoint)s zur Instanze %(instance_name)s hinzugefügt"
#. Detach VBD from VM
#: ../nova/virt/xenapi/volumeops.py:104
@@ -199,7 +174,7 @@ msgstr ""
#: ../nova/virt/xenapi/volumeops.py:112
#, python-format
msgid "Unable to locate volume %s"
msgstr ""
msgstr "Nicht möglich volume %s zufinden"
#: ../nova/virt/xenapi/volumeops.py:120
#, python-format
@@ -214,7 +189,7 @@ msgstr ""
#: ../nova/compute/instance_types.py:41
#, python-format
msgid "Unknown instance type: %s"
msgstr ""
msgstr "Unbekannter Instanztyp: %s"
#: ../nova/crypto.py:46
msgid "Filename of root CA"
@@ -230,7 +205,7 @@ msgstr "Dateiname der Certificate Revocation List"
#: ../nova/crypto.py:53
msgid "Where we keep our keys"
msgstr ""
msgstr "Wo wir unsere Schlüssel aufbewahren"
#: ../nova/crypto.py:55
msgid "Where we keep our root CA"
@@ -298,12 +273,12 @@ msgstr ""
#: ../nova/compute/manager.py:179
msgid "Instance has already been created"
msgstr ""
msgstr "Instanz wurde bereits erstellt"
#: ../nova/compute/manager.py:180
#, python-format
msgid "instance %s: starting..."
msgstr ""
msgstr "Instanz %s startet..."
#. pylint: disable=W0702
#: ../nova/compute/manager.py:219
@@ -314,7 +289,7 @@ msgstr ""
#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286
#, python-format
msgid "Terminating instance %s"
msgstr ""
msgstr "Beende Instanz %s"
#: ../nova/compute/manager.py:255
#, python-format
@@ -377,7 +352,7 @@ msgstr ""
#: ../nova/compute/manager.py:372
#, python-format
msgid "instance %s: rescuing"
msgstr ""
msgstr "Instanz %s: Rettung"
#: ../nova/compute/manager.py:387
#, python-format
@@ -387,12 +362,12 @@ msgstr ""
#: ../nova/compute/manager.py:406
#, python-format
msgid "instance %s: pausing"
msgstr ""
msgstr "Instanz %s pausiert"
#: ../nova/compute/manager.py:423
#, python-format
msgid "instance %s: unpausing"
msgstr ""
msgstr "Instanz %s wird fortgesetzt"
#: ../nova/compute/manager.py:440
#, python-format
@@ -584,7 +559,7 @@ msgstr ""
#: ../nova/virt/connection.py:73
msgid "Failed to open connection to the hypervisor"
msgstr ""
msgstr "Konnte Verbindung zum Hypervisor nicht öffnen"
#: ../nova/network/linux_net.py:187
#, python-format
@@ -637,7 +612,7 @@ msgstr "Klasse %s konnte nicht gefunden werden"
#: ../nova/utils.py:118
#, python-format
msgid "Fetching %s"
msgstr ""
msgstr "Hole %s"
#: ../nova/utils.py:130
#, python-format
@@ -1783,34 +1758,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
#: ../nova/compute/monitor.py:259
#, python-format
msgid "updating %s..."
msgstr ""
#: ../nova/compute/monitor.py:289
msgid "unexpected error during update"
msgstr ""
#: ../nova/compute/monitor.py:356
#, python-format
msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:379
#, python-format
msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:414
msgid "unexpected exception getting connection"
msgstr ""
#: ../nova/compute/monitor.py:429
#, python-format
msgid "Found instance: %s"
msgstr ""
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2268,10 +2215,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
#: ../bin/nova-instancemonitor.py:55
msgid "Starting instance monitor"
msgstr ""
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
@@ -2562,7 +2505,7 @@ msgstr ""
#: ../nova/auth/manager.py:270
#, python-format
msgid "Using project name = user name (%s)"
msgstr ""
msgstr "Verwende Project-Name = User-Name (%s)"
#: ../nova/auth/manager.py:277
#, python-format
@@ -2572,7 +2515,7 @@ msgstr ""
#: ../nova/auth/manager.py:279
#, python-format
msgid "No project called %s could be found"
msgstr ""
msgstr "Es konnte kein Projekt mit dem Namen %s gefunden werden"
#: ../nova/auth/manager.py:287
#, python-format
@@ -2696,6 +2639,7 @@ msgstr ""
#: ../nova/service.py:195
msgid "The service database object disappeared, Recreating it."
msgstr ""
"Das Service-Datenbank-Objekt ist verschwunden, es wird erneut erzeugt."
#: ../nova/service.py:207
msgid "Recovered model server connection!"
@@ -2723,7 +2667,7 @@ msgstr ""
#: ../nova/auth/ldapdriver.py:472
#, python-format
msgid "Group can't be created because group %s already exists"
msgstr ""
msgstr "Die Gruppe %s kann nicht angelegt werde, da sie bereits existiert"
#: ../nova/auth/ldapdriver.py:478
#, python-format
@@ -2739,6 +2683,7 @@ msgstr ""
#, python-format
msgid "User %s can't be added to the group because the user doesn't exist"
msgstr ""
"Der User %s kann nicht zur Gruppe hinzugefügt werde, da er nicht existiert"
#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521
#, python-format
@@ -2755,6 +2700,7 @@ msgstr ""
msgid ""
"User %s can't be removed from the group because the user doesn't exist"
msgstr ""
"Der User %s kann nicht aus der Gruppe entfernt werden, da er nicht existiert"
#: ../nova/auth/ldapdriver.py:528
#, python-format
@@ -2840,7 +2786,7 @@ msgstr ""
#: ../nova/api/ec2/admin.py:200
#, python-format
msgid "Delete project: %s"
msgstr ""
msgstr "Lösche Projekt %s"
#: ../nova/api/ec2/admin.py:214
#, python-format

2789
po/en_AU.po Normal file

File diff suppressed because it is too large

2814
po/en_GB.po Normal file

File diff suppressed because it is too large

188
po/es.po
View File

@@ -8,14 +8,14 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
"PO-Revision-Date: 2011-03-17 15:54+0000\n"
"Last-Translator: Erick Huezo <erickhuezo@gmail.com>\n"
"PO-Revision-Date: 2011-06-30 16:42+0000\n"
"Last-Translator: David Caro <Unknown>\n"
"Language-Team: Spanish <es@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-03-19 06:19+0000\n"
"X-Generator: Launchpad (build 12559)\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -36,10 +36,15 @@ msgid ""
"Stdout: %(stdout)r\n"
"Stderr: %(stderr)r"
msgstr ""
"%(description)s\n"
"Comando: %(cmd)s\n"
"Código de salida: %(exit_code)s\n"
"Stdout: %(stdout)r\n"
"Stderr: %(stderr)r"
#: ../nova/exception.py:107
msgid "DB exception wrapped"
msgstr ""
msgstr "Excepción DB encapsulada"
#. exc_type, exc_value, exc_traceback = sys.exc_info()
#: ../nova/exception.py:120
@@ -49,12 +54,12 @@ msgstr "Excepción no controlada"
#: ../nova/volume/api.py:45
#, python-format
msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume"
msgstr ""
msgstr "Cuota excedida por %(pid)s, se intentó crear el volumen %(size)sG"
#: ../nova/volume/api.py:47
#, python-format
msgid "Volume quota exceeded. You cannot create a volume of size %sG"
msgstr "Cuota excedida. No puedes crear un volumen con tamaño %sG"
msgstr "Cuota excedida. No puede crear un volumen con tamaño %sG"
#: ../nova/volume/api.py:71 ../nova/volume/api.py:96
msgid "Volume status must be available"
@@ -83,7 +88,7 @@ msgstr "%(param)s propiedad no encontrada para la imagen %(_image_id)s"
#: ../nova/api/openstack/servers.py:168
msgid "No keypairs defined"
msgstr "No se definio una Keypairs"
msgstr "No se definio un par de llaves (Keypair)"
#: ../nova/api/openstack/servers.py:238
#, python-format
@@ -103,7 +108,7 @@ msgstr "Compute.api::get_lock %s"
#: ../nova/api/openstack/servers.py:281
#, python-format
msgid "Compute.api::reset_network %s"
msgstr ""
msgstr "Compute.api::reset_network %s"
#: ../nova/api/openstack/servers.py:292
#, python-format
@@ -125,33 +130,6 @@ msgstr "compute.api::suspend %s"
msgid "compute.api::resume %s"
msgstr "compute.api::resume %s"
#: ../nova/twistd.py:157
msgid "Wrong number of arguments."
msgstr "Numero de argumentos incorrectos"
#: ../nova/twistd.py:209
#, python-format
msgid "pidfile %s does not exist. Daemon not running?\n"
msgstr "el pidfile %s no existe. ¿No estará el demonio parado?\n"
#: ../nova/twistd.py:221
msgid "No such process"
msgstr "No se encontró proceso"
#: ../nova/twistd.py:230 ../nova/service.py:224
#, python-format
msgid "Serving %s"
msgstr "Sirviendo %s"
#: ../nova/twistd.py:262 ../nova/service.py:225
msgid "Full set of FLAGS:"
msgstr "Conjunto completo de opciones:"
#: ../nova/twistd.py:266
#, python-format
msgid "Starting %s"
msgstr "Comenzando %s"
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -163,17 +141,19 @@ msgstr "La instancia %s no se ha encontrado"
#: ../nova/virt/xenapi/volumeops.py:51
#, python-format
msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s"
msgstr ""
msgstr "Volumen_unido: %(instance_name)s, %(device_path)s, %(mountpoint)s"
#: ../nova/virt/xenapi/volumeops.py:69
#, python-format
msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s"
msgstr ""
"No es posible crear el VDI en SR %(sr_ref)s para la instancia "
"%(instance_name)s"
#: ../nova/virt/xenapi/volumeops.py:80
#, python-format
msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s"
msgstr ""
msgstr "No es posible usar SR %(sr_ref)s para la instancia %(instance_name)s"
#: ../nova/virt/xenapi/volumeops.py:91
#, python-format
@@ -184,12 +164,14 @@ msgstr "Imposible adjuntar volumen a la instancia %s"
#, python-format
msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
msgstr ""
"El punto de montaje %(mountpoint)s esta unido a la instancia "
"%(instance_name)s"
#. Detach VBD from VM
#: ../nova/virt/xenapi/volumeops.py:104
#, python-format
msgid "Detach_volume: %(instance_name)s, %(mountpoint)s"
msgstr ""
msgstr "Volume_separado: %(instance_name)s, %(mountpoint)s"
#: ../nova/virt/xenapi/volumeops.py:112
#, python-format
@@ -205,6 +187,8 @@ msgstr "Imposible desasociar volumen %s"
#, python-format
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
msgstr ""
"El punto de montaje %(mountpoint)s se desligó de la instancia "
"%(instance_name)s"
#: ../nova/compute/instance_types.py:41
#, python-format
@@ -259,7 +243,7 @@ msgstr ""
#: ../nova/crypto.py:258
#, python-format
msgid "Flags path: %s"
msgstr ""
msgstr "Ruta a las opciones: %s"
#: ../nova/scheduler/manager.py:69
#, python-format
@@ -276,6 +260,7 @@ msgstr "check_instance_lock: decorating: |%s|"
msgid ""
"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|"
msgstr ""
"check_instance_lock: argumentos: |%(self)s| |%(context)s| |%(instance_id)s|"
#: ../nova/compute/manager.py:84
#, python-format
@@ -338,6 +323,8 @@ msgid ""
"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s "
"expected: %(running)s)"
msgstr ""
"intentando reiniciar una instancia no ejecutada: %(instance_id)s (state: "
"%(state)s expected: %(running)s)"
#: ../nova/compute/manager.py:311
#, python-format
@@ -350,6 +337,8 @@ msgid ""
"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s "
"expected: %(running)s)"
msgstr ""
"intentando crear una imagen instantanea(snapshot) de una maquina no "
"ejecutada: %(instance_id)s (state: %(state)s expected: %(running)s)"
#: ../nova/compute/manager.py:332
#, python-format
@@ -357,11 +346,13 @@ msgid ""
"trying to reset the password on a non-running instance: %(instance_id)s "
"(state: %(instance_state)s expected: %(expected_state)s)"
msgstr ""
"intentando restablecer el password en una instancia: %(instance_id)s "
"(estado: %(instance_state)s esperado: %(expected_state)s)"
#: ../nova/compute/manager.py:335
#, python-format
msgid "instance %s: setting admin password"
msgstr ""
msgstr "instancia %s: estableciendo password de administrador"
#: ../nova/compute/manager.py:353
#, python-format
@@ -369,11 +360,13 @@ msgid ""
"trying to inject a file into a non-running instance: %(instance_id)s (state: "
"%(instance_state)s expected: %(expected_state)s)"
msgstr ""
"intentando inyectar un archivo dentro de una instancia parada: "
"%(instance_id)s (estado: %(instance_state)s esperado: %(expected_state)s)"
#: ../nova/compute/manager.py:362
#, python-format
msgid "instance %(nm)s: injecting file to %(plain_path)s"
msgstr ""
msgstr "instancia %(nm)s: inyectando archivo en %(plain_path)s"
#: ../nova/compute/manager.py:372
#, python-format
@@ -393,7 +386,7 @@ msgstr "instancia %s: pausando"
#: ../nova/compute/manager.py:423
#, python-format
msgid "instance %s: unpausing"
msgstr "instnacia %s: continuando tras pausa"
msgstr "instancia %s: continuando tras pausa"
#: ../nova/compute/manager.py:440
#, python-format
@@ -403,7 +396,7 @@ msgstr "instancia %s: obteniendo los diagnosticos"
#: ../nova/compute/manager.py:453
#, python-format
msgid "instance %s: suspending"
msgstr ""
msgstr "instancia %s: suspendiendo"
#: ../nova/compute/manager.py:472
#, python-format
@@ -501,7 +494,7 @@ msgstr "Exportando de nuevo los volumenes %s"
#: ../nova/volume/manager.py:90
#, python-format
msgid "volume %s: skipping export"
msgstr ""
msgstr "volume %s: saltando exportación"
#: ../nova/volume/manager.py:96
#, python-format
@@ -511,7 +504,7 @@ msgstr "volumen %s: creando"
#: ../nova/volume/manager.py:108
#, python-format
msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG"
msgstr ""
msgstr "volume %(vol_name)s: creando lv del tamaño %(vol_size)sG"
#: ../nova/volume/manager.py:112
#, python-format
@@ -549,7 +542,7 @@ msgstr "volumen %s: eliminado satisfactoriamente"
#: ../nova/virt/xenapi/fake.py:74
#, python-format
msgid "%(text)s: _db_content => %(content)s"
msgstr ""
msgstr "%(text)s: _db_content => %(content)s"
#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404
#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478
@@ -564,7 +557,7 @@ msgstr "xenapi.fake no tiene una implementación para %s"
#: ../nova/virt/xenapi/fake.py:341
#, python-format
msgid "Calling %(localname)s %(impl)s"
msgstr ""
msgstr "Llamando %(localname)s %(impl)s"
#: ../nova/virt/xenapi/fake.py:346
#, python-format
@@ -618,12 +611,12 @@ msgstr "El pid %d está pasado, relanzando dnsmasq"
#: ../nova/network/linux_net.py:358
#, python-format
msgid "killing radvd threw %s"
msgstr ""
msgstr "Matando radvd lanzado %s"
#: ../nova/network/linux_net.py:360
#, python-format
msgid "Pid %d is stale, relaunching radvd"
msgstr ""
msgstr "Pid %d corrupto, relanzando radvd"
#. pylint: disable=W0703
#: ../nova/network/linux_net.py:449
@@ -659,7 +652,7 @@ msgstr "El resultado fue %s"
#: ../nova/utils.py:159
#, python-format
msgid "Running cmd (SSH): %s"
msgstr ""
msgstr "corriendo cmd (SSH): %s"
#: ../nova/utils.py:217
#, python-format
@@ -674,12 +667,12 @@ msgstr "Ejecutando %s"
#: ../nova/utils.py:262
#, python-format
msgid "Link Local address is not found.:%s"
msgstr ""
msgstr "No se encuentra la dirección del enlace local.:%s"
#: ../nova/utils.py:265
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
msgstr "No se pudo obtener enlace de la ip local de %(interface)s :%(ex)s"
#: ../nova/utils.py:363
#, python-format
@@ -694,7 +687,7 @@ msgstr "backend %s"
#: ../nova/fakerabbit.py:49
#, python-format
msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s"
msgstr ""
msgstr "(%(nm)s) publica (key: %(routing_key)s) %(message)s"
#: ../nova/fakerabbit.py:54
#, python-format
@@ -714,12 +707,12 @@ msgstr "Declarando intercambio %s"
#: ../nova/fakerabbit.py:96
#, python-format
msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s"
msgstr ""
msgstr "Enlazando %(queue)s a %(exchange)s con la llave %(routing_key)s"
#: ../nova/fakerabbit.py:121
#, python-format
msgid "Getting from %(queue)s: %(message)s"
msgstr ""
msgstr "Obtendiendo desde %(queue)s: %(message)s"
#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171
#, python-format
@@ -729,17 +722,17 @@ msgstr "Creada VM %s..."
#: ../nova/virt/xenapi/vm_utils.py:138
#, python-format
msgid "Created VM %(instance_name)s as %(vm_ref)s."
msgstr ""
msgstr "VM creada %(instance_name)s como %(vm_ref)s."
#: ../nova/virt/xenapi/vm_utils.py:168
#, python-format
msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... "
msgstr ""
msgstr "Creando VBD para VM %(vm_ref)s, VDI %(vdi_ref)s ... "
#: ../nova/virt/xenapi/vm_utils.py:171
#, python-format
msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s."
msgstr ""
msgstr "Creado el VBD %(vbd_ref)s para VM %(vm_ref)s, VDI %(vdi_ref)s"
#: ../nova/virt/xenapi/vm_utils.py:187
#, python-format
@@ -759,12 +752,12 @@ msgstr "Imposible destruir VBD %s"
#: ../nova/virt/xenapi/vm_utils.py:224
#, python-format
msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s."
msgstr ""
msgstr "Creando VIF para VM %(vm_ref)s, red %(network_ref)s."
#: ../nova/virt/xenapi/vm_utils.py:227
#, python-format
msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s."
msgstr ""
msgstr "Creado el VIF %(vif_ref)s para VM %(vm_ref)s, red %(network_ref)s."
#: ../nova/virt/xenapi/vm_utils.py:246
#, python-format
@@ -772,50 +765,52 @@ msgid ""
"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on "
"%(sr_ref)s."
msgstr ""
"VDI creado %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) "
"sobre %(sr_ref)s."
#. TODO(sirp): Add quiesce and VSS locking support when Windows support
#. is added
#: ../nova/virt/xenapi/vm_utils.py:258
#, python-format
msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..."
msgstr ""
msgstr "Creando snapshot de la VM %(vm_ref)s con etiqueta '%(label)s'..."
#: ../nova/virt/xenapi/vm_utils.py:272
#, python-format
msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s."
msgstr ""
msgstr "Instantánea creada %(template_vm_ref)s de la VM %(vm_ref)s."
#: ../nova/virt/xenapi/vm_utils.py:286
#, python-format
msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s"
msgstr ""
msgstr "Pidiendo xapi a subir %(vdi_uuids)s como ID %(image_id)s"
#: ../nova/virt/xenapi/vm_utils.py:327
#, python-format
msgid "Size for image %(image)s:%(virtual_size)d"
msgstr ""
msgstr "Tamaño para imagen %(image)s:%(virtual_size)d"
#: ../nova/virt/xenapi/vm_utils.py:332
#, python-format
msgid "Glance image %s"
msgstr ""
msgstr "Imagen Glance %s"
#. we need to invoke a plugin for copying VDI's
#. content into proper path
#: ../nova/virt/xenapi/vm_utils.py:342
#, python-format
msgid "Copying VDI %s to /boot/guest on dom0"
msgstr ""
msgstr "Copiando VDI %s a /boot/guest on dom0"
#: ../nova/virt/xenapi/vm_utils.py:352
#, python-format
msgid "Kernel/Ramdisk VDI %s destroyed"
msgstr ""
msgstr "Kernel/Ramdisk VDI %s destruído"
#: ../nova/virt/xenapi/vm_utils.py:361
#, python-format
msgid "Asking xapi to fetch %(url)s as %(access)s"
msgstr ""
msgstr "Pidiendo a xapi que descargue %(url)s como %(access)s"
#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402
#, python-format
@@ -825,21 +820,21 @@ msgstr "Buscando vid %s para el kernel PV"
#: ../nova/virt/xenapi/vm_utils.py:397
#, python-format
msgid "PV Kernel in VDI:%s"
msgstr ""
msgstr "Kernel PV en VDI:%s"
#: ../nova/virt/xenapi/vm_utils.py:405
#, python-format
msgid "Running pygrub against %s"
msgstr ""
msgstr "Ejecutando pygrub contra %s"
#: ../nova/virt/xenapi/vm_utils.py:411
#, python-format
msgid "Found Xen kernel %s"
msgstr ""
msgstr "Kernel Xen Encontrado %s"
#: ../nova/virt/xenapi/vm_utils.py:413
msgid "No Xen kernel found. Booting HVM."
msgstr ""
msgstr "Kernel Xen no encontrado. Reiniciando HVM"
#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431
#, python-format
@@ -864,7 +859,7 @@ msgstr "(VM_UTILS) xenapi power_state -> |%s|"
#: ../nova/virt/xenapi/vm_utils.py:525
#, python-format
msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s"
msgstr ""
msgstr "VHD %(vdi_uuid)s tiene origen en %(parent_ref)s"
#: ../nova/virt/xenapi/vm_utils.py:542
#, python-format
@@ -893,18 +888,19 @@ msgstr "No se han encontrado VDI's para VM %s"
#, python-format
msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s"
msgstr ""
"Numero de VDIs inesperado (%(num_vdis)s) encontrados por VM %(vm_ref)s"
#: ../nova/virt/xenapi/vm_utils.py:653
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188
#, python-format
msgid "Creating VBD for VDI %s ... "
msgstr ""
msgstr "Creando VBD para VDI %s ... "
#: ../nova/virt/xenapi/vm_utils.py:655
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190
#, python-format
msgid "Creating VBD for VDI %s done."
msgstr ""
msgstr "Creando VBF para VDI %s terminado"
#: ../nova/virt/xenapi/vm_utils.py:657
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192
@@ -1796,34 +1792,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr "Obtenida excepción %s"
#: ../nova/compute/monitor.py:259
#, python-format
msgid "updating %s..."
msgstr "actualizando %s..."
#: ../nova/compute/monitor.py:289
msgid "unexpected error during update"
msgstr "error inesperado durante la actualización"
#: ../nova/compute/monitor.py:356
#, python-format
msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:379
#, python-format
msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:414
msgid "unexpected exception getting connection"
msgstr "excepción inexperada al obtener la conexión"
#: ../nova/compute/monitor.py:429
#, python-format
msgid "Found instance: %s"
msgstr "Encontrada interfaz: %s"
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2286,10 +2254,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
#: ../bin/nova-instancemonitor.py:55
msgid "Starting instance monitor"
msgstr ""
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
@@ -2850,12 +2814,12 @@ msgstr ""
#: ../nova/api/ec2/admin.py:177
#, python-format
msgid "Create project %(name)s managed by %(manager_user)s"
msgstr ""
msgstr "Crear proyecto %(name)s administrador por %(manager_user)s"
#: ../nova/api/ec2/admin.py:190
#, python-format
msgid "Modify project: %(name)s managed by %(manager_user)s"
msgstr ""
msgstr "Modificar proyecto: %(name)s administrado por %(manager_user)s"
#: ../nova/api/ec2/admin.py:200
#, python-format
@@ -2865,12 +2829,12 @@ msgstr "Borrar proyecto: %s"
#: ../nova/api/ec2/admin.py:214
#, python-format
msgid "Adding user %(user)s to project %(project)s"
msgstr ""
msgstr "Agregando usuario %(user)s al proyecto %(project)s"
#: ../nova/api/ec2/admin.py:218
#, python-format
msgid "Removing user %(user)s from project %(project)s"
msgstr ""
msgstr "Eliminando el usuario %(user)s del proyecto %(project)s"
#, python-format
#~ msgid ""

2931
po/fr.po Normal file

File diff suppressed because it is too large

View File

@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-03-19 06:19+0000\n"
"X-Generator: Launchpad (build 12559)\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -134,34 +134,6 @@ msgstr "compute.api::suspend %s"
msgid "compute.api::resume %s"
msgstr "compute.api::resume %s"
#: ../nova/twistd.py:157
msgid "Wrong number of arguments."
msgstr "Numero errato di argomenti"
#: ../nova/twistd.py:209
#, python-format
msgid "pidfile %s does not exist. Daemon not running?\n"
msgstr ""
"Il pidfile %s non esiste. Assicurarsi che il demone é in esecuzione.\n"
#: ../nova/twistd.py:221
msgid "No such process"
msgstr "Nessun processo trovato"
#: ../nova/twistd.py:230 ../nova/service.py:224
#, python-format
msgid "Serving %s"
msgstr "Servire %s"
#: ../nova/twistd.py:262 ../nova/service.py:225
msgid "Full set of FLAGS:"
msgstr "Insieme di FLAGS:"
#: ../nova/twistd.py:266
#, python-format
msgid "Starting %s"
msgstr "Avvio di %s"
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1791,34 +1763,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
#: ../nova/compute/monitor.py:259
#, python-format
msgid "updating %s..."
msgstr ""
#: ../nova/compute/monitor.py:289
msgid "unexpected error during update"
msgstr ""
#: ../nova/compute/monitor.py:356
#, python-format
msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:379
#, python-format
msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:414
msgid "unexpected exception getting connection"
msgstr ""
#: ../nova/compute/monitor.py:429
#, python-format
msgid "Found instance: %s"
msgstr ""
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2278,10 +2222,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
#: ../bin/nova-instancemonitor.py:55
msgid "Starting instance monitor"
msgstr ""
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""

533
po/ja.po

File diff suppressed because it is too large

View File

@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
#: ../nova/twistd.py:157
msgid "Wrong number of arguments."
msgstr ""
#: ../nova/twistd.py:209
#, python-format
msgid "pidfile %s does not exist. Daemon not running?\n"
msgstr ""
#: ../nova/twistd.py:221
msgid "No such process"
msgstr ""
#: ../nova/twistd.py:230 ../nova/service.py:224
#, python-format
msgid "Serving %s"
msgstr ""
#: ../nova/twistd.py:262 ../nova/service.py:225
msgid "Full set of FLAGS:"
msgstr ""
#: ../nova/twistd.py:266
#, python-format
msgid "Starting %s"
msgstr ""
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
#: ../nova/compute/monitor.py:259
#, python-format
msgid "updating %s..."
msgstr ""
#: ../nova/compute/monitor.py:289
msgid "unexpected error during update"
msgstr ""
#: ../nova/compute/monitor.py:356
#, python-format
msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:379
#, python-format
msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:414
msgid "unexpected exception getting connection"
msgstr ""
#: ../nova/compute/monitor.py:429
#, python-format
msgid "Found instance: %s"
msgstr ""
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2263,10 +2208,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
#: ../bin/nova-instancemonitor.py:55
msgid "Starting instance monitor"
msgstr ""
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""

View File

@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-03-25 05:22+0000\n"
"X-Generator: Launchpad (build 12559)\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -126,34 +126,6 @@ msgstr "compute.api::suspend %s"
msgid "compute.api::resume %s"
msgstr "compute.api::resume %s"
#: ../nova/twistd.py:157
msgid "Wrong number of arguments."
msgstr "Número errado de argumentos."
#: ../nova/twistd.py:209
#, python-format
msgid "pidfile %s does not exist. Daemon not running?\n"
msgstr ""
"Arquivo do id do processo (pidfile) %s não existe. O Daemon está parado?\n"
#: ../nova/twistd.py:221
msgid "No such process"
msgstr "Processo inexistente"
#: ../nova/twistd.py:230 ../nova/service.py:224
#, python-format
msgid "Serving %s"
msgstr "Servindo %s"
#: ../nova/twistd.py:262 ../nova/service.py:225
msgid "Full set of FLAGS:"
msgstr "Conjunto completo de FLAGS:"
#: ../nova/twistd.py:266
#, python-format
msgid "Starting %s"
msgstr "Iniciando %s"
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1804,34 +1776,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
#: ../nova/compute/monitor.py:259
#, python-format
msgid "updating %s..."
msgstr ""
#: ../nova/compute/monitor.py:289
msgid "unexpected error during update"
msgstr ""
#: ../nova/compute/monitor.py:356
#, python-format
msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:379
#, python-format
msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:414
msgid "unexpected exception getting connection"
msgstr ""
#: ../nova/compute/monitor.py:429
#, python-format
msgid "Found instance: %s"
msgstr ""
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2290,10 +2234,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
#: ../bin/nova-instancemonitor.py:55
msgid "Starting instance monitor"
msgstr ""
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""

View File

@@ -8,20 +8,20 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
"PO-Revision-Date: 2011-03-30 07:06+0000\n"
"Last-Translator: Andrey Olykainen <Unknown>\n"
"PO-Revision-Date: 2011-07-09 07:20+0000\n"
"Last-Translator: ilya kislicyn <Unknown>\n"
"Language-Team: Russian <ru@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-03-31 05:58+0000\n"
"X-Generator: Launchpad (build 12559)\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
#: ../nova/scheduler/simple.py:122
msgid "No hosts found"
msgstr ""
msgstr "Узлы не найдены"
#: ../nova/exception.py:33
msgid "Unexpected error while running command."
@@ -54,7 +54,7 @@ msgstr ""
#: ../nova/volume/api.py:47
#, python-format
msgid "Volume quota exceeded. You cannot create a volume of size %sG"
msgstr ""
msgstr "Квота тома превышена. Вы не можете создать том размером %sG"
#: ../nova/volume/api.py:71 ../nova/volume/api.py:96
msgid "Volume status must be available"
@@ -62,19 +62,19 @@ msgstr ""
#: ../nova/volume/api.py:98
msgid "Volume is already attached"
msgstr ""
msgstr "Том уже смотирован"
#: ../nova/volume/api.py:104
msgid "Volume is already detached"
msgstr ""
msgstr "Том уже отмонтирован"
#: ../nova/api/openstack/servers.py:72
msgid "Failed to read private ip"
msgstr ""
msgstr "Ошибка чтения приватного IP адреса"
#: ../nova/api/openstack/servers.py:79
msgid "Failed to read public ip(s)"
msgstr ""
msgstr "Ошибка чтения публичных IP адресов"
#: ../nova/api/openstack/servers.py:152
#, python-format
@@ -83,7 +83,7 @@ msgstr ""
#: ../nova/api/openstack/servers.py:168
msgid "No keypairs defined"
msgstr ""
msgstr "Не определены ключевые пары"
#: ../nova/api/openstack/servers.py:238
#, python-format
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
#: ../nova/twistd.py:157
msgid "Wrong number of arguments."
msgstr "Неверное число аргументов."
#: ../nova/twistd.py:209
#, python-format
msgid "pidfile %s does not exist. Daemon not running?\n"
msgstr "pidfile %s не обнаружен. Демон не запущен?\n"
#: ../nova/twistd.py:221
msgid "No such process"
msgstr ""
#: ../nova/twistd.py:230 ../nova/service.py:224
#, python-format
msgid "Serving %s"
msgstr ""
#: ../nova/twistd.py:262 ../nova/service.py:225
msgid "Full set of FLAGS:"
msgstr ""
#: ../nova/twistd.py:266
#, python-format
msgid "Starting %s"
msgstr "Запускается %s"
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1779,34 +1752,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
#: ../nova/compute/monitor.py:259
#, python-format
msgid "updating %s..."
msgstr "обновление %s..."
#: ../nova/compute/monitor.py:289
msgid "unexpected error during update"
msgstr "неожиданная ошибка во время обновления"
#: ../nova/compute/monitor.py:356
#, python-format
msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:379
#, python-format
msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:414
msgid "unexpected exception getting connection"
msgstr ""
#: ../nova/compute/monitor.py:429
#, python-format
msgid "Found instance: %s"
msgstr ""
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2264,10 +2209,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
#: ../bin/nova-instancemonitor.py:55
msgid "Starting instance monitor"
msgstr ""
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""

2796
po/tl.po Normal file

File diff suppressed because it is too large

View File

@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-03-19 06:19+0000\n"
"X-Generator: Launchpad (build 12559)\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
#: ../nova/twistd.py:157
msgid "Wrong number of arguments."
msgstr ""
#: ../nova/twistd.py:209
#, python-format
msgid "pidfile %s does not exist. Daemon not running?\n"
msgstr ""
#: ../nova/twistd.py:221
msgid "No such process"
msgstr ""
#: ../nova/twistd.py:230 ../nova/service.py:224
#, python-format
msgid "Serving %s"
msgstr "Обслуговування %s"
#: ../nova/twistd.py:262 ../nova/service.py:225
msgid "Full set of FLAGS:"
msgstr ""
#: ../nova/twistd.py:266
#, python-format
msgid "Starting %s"
msgstr "Запускається %s"
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
#: ../nova/compute/monitor.py:259
#, python-format
msgid "updating %s..."
msgstr ""
#: ../nova/compute/monitor.py:289
msgid "unexpected error during update"
msgstr ""
#: ../nova/compute/monitor.py:356
#, python-format
msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:379
#, python-format
msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:414
msgid "unexpected exception getting connection"
msgstr ""
#: ../nova/compute/monitor.py:429
#, python-format
msgid "Found instance: %s"
msgstr ""
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2263,10 +2208,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
#: ../bin/nova-instancemonitor.py:55
msgid "Starting instance monitor"
msgstr ""
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""

View File

@@ -8,25 +8,20 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
"PO-Revision-Date: 2011-04-07 05:01+0000\n"
"Last-Translator: ben <Unknown>\n"
"PO-Revision-Date: 2011-06-14 14:44+0000\n"
"Last-Translator: chong <Unknown>\n"
"Language-Team: Chinese (Simplified) <zh_CN@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-04-08 05:28+0000\n"
"X-Generator: Launchpad (build 12735)\n"
#: ../nova/twistd.py:266
#, python-format
msgid "Starting %s"
msgstr "启动 %s 中"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
#: ../nova/scheduler/simple.py:122
msgid "No hosts found"
msgstr "找到主机"
msgstr "没有找到主机"
#: ../nova/exception.py:33
msgid "Unexpected error while running command."
@@ -41,6 +36,11 @@ msgid ""
"Stdout: %(stdout)r\n"
"Stderr: %(stderr)r"
msgstr ""
"%(description)s\n"
"命令: %(cmd)s\n"
"退出代码: %(exit_code)s\n"
"标准输出: %(stdout)r\n"
"标准出错: %(stderr)r"
#: ../nova/exception.py:107
msgid "DB exception wrapped"
@@ -130,28 +130,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
#: ../nova/twistd.py:157
msgid "Wrong number of arguments."
msgstr "错误参数个数。"
#: ../nova/twistd.py:209
#, python-format
msgid "pidfile %s does not exist. Daemon not running?\n"
msgstr "pidfile %s 不存在守护进程是否运行\n"
#: ../nova/twistd.py:221
msgid "No such process"
msgstr "没有该进程"
#: ../nova/twistd.py:230 ../nova/service.py:224
#, python-format
msgid "Serving %s"
msgstr "正在为 %s 服务"
#: ../nova/twistd.py:262 ../nova/service.py:225
msgid "Full set of FLAGS:"
msgstr "FLAGS全集"
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -309,17 +287,17 @@ msgstr ""
#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286
#, python-format
msgid "Terminating instance %s"
msgstr ""
msgstr "正在结束实例 %s"
#: ../nova/compute/manager.py:255
#, python-format
msgid "Deallocating address %s"
msgstr ""
msgstr "取消分配地址 %s"
#: ../nova/compute/manager.py:268
#, python-format
msgid "trying to destroy already destroyed instance: %s"
msgstr ""
msgstr "尝试销毁已经销毁的实例: %s"
#: ../nova/compute/manager.py:282
#, python-format
@@ -331,12 +309,12 @@ msgstr "重启虚拟机 %s"
msgid ""
"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s "
"expected: %(running)s)"
msgstr ""
msgstr "尝试重启没有在运行中实例: %(instance_id)s (状态: %(state)s 预料: %(running)s)"
#: ../nova/compute/manager.py:311
#, python-format
msgid "instance %s: snapshotting"
msgstr ""
msgstr "实例 %s: 快照中"
#: ../nova/compute/manager.py:316
#, python-format
@@ -351,6 +329,8 @@ msgid ""
"trying to reset the password on a non-running instance: %(instance_id)s "
"(state: %(instance_state)s expected: %(expected_state)s)"
msgstr ""
"尝试对没有在运行的实例重置密码: %(instance_id)s (状态: %(instance_state)s 预料: "
"%(expected_state)s)"
#: ../nova/compute/manager.py:335
#, python-format
@@ -1778,34 +1758,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
#: ../nova/compute/monitor.py:259
#, python-format
msgid "updating %s..."
msgstr ""
#: ../nova/compute/monitor.py:289
msgid "unexpected error during update"
msgstr ""
#: ../nova/compute/monitor.py:356
#, python-format
msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:379
#, python-format
msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
msgstr ""
#: ../nova/compute/monitor.py:414
msgid "unexpected exception getting connection"
msgstr ""
#: ../nova/compute/monitor.py:429
#, python-format
msgid "Found instance: %s"
msgstr ""
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2263,10 +2215,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
#: ../bin/nova-instancemonitor.py:55
msgid "Starting instance monitor"
msgstr ""
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""

2789
po/zh_TW.po Normal file

File diff suppressed because it is too large