rebase trunk

This commit is contained in:
Ken Pepple
2011-04-21 10:29:11 -07:00
31 changed files with 27905 additions and 18518 deletions

View File

@@ -5,13 +5,7 @@ _trial_temp
keys keys
networks networks
nova.sqlite nova.sqlite
CA/cacert.pem CA
CA/crl.pem
CA/index.txt*
CA/openssl.cnf
CA/serial*
CA/newcerts/*.pem
CA/private/cakey.pem
nova/vcsversion.py nova/vcsversion.py
*.DS_Store *.DS_Store
.project .project

View File

@@ -4,6 +4,7 @@
<anotherjesse@gmail.com> <jesse@dancelamb> <anotherjesse@gmail.com> <jesse@dancelamb>
<anotherjesse@gmail.com> <jesse@gigantor.local> <anotherjesse@gmail.com> <jesse@gigantor.local>
<anotherjesse@gmail.com> <jesse@ubuntu> <anotherjesse@gmail.com> <jesse@ubuntu>
<anotherjesse@gmail.com> <jesse@aire.local>
<ant@openstack.org> <amesserl@rackspace.com> <ant@openstack.org> <amesserl@rackspace.com>
<Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com> <Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
<brian.lamar@rackspace.com> <brian.lamar@gmail.com> <brian.lamar@rackspace.com> <brian.lamar@gmail.com>

View File

@@ -27,10 +27,12 @@ Gabe Westmaas <gabe.westmaas@rackspace.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp> Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com> Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com> Ilya Alekseyev <ialekseev@griddynamics.com>
Jason Koelker <jason@koelker.net>
Jay Pipes <jaypipes@gmail.com> Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com> Jesse Andrews <anotherjesse@gmail.com>
Joe Heck <heckj@mac.com> Joe Heck <heckj@mac.com>
Joel Moore <joelbm24@gmail.com> Joel Moore <joelbm24@gmail.com>
Johannes Erdfelt <johannes.erdfelt@rackspace.com>
John Dewey <john@dewey.ws> John Dewey <john@dewey.ws>
John Tran <jtran@attinteractive.com> John Tran <jtran@attinteractive.com>
Jonathan Bryce <jbryce@jbryce.com> Jonathan Bryce <jbryce@jbryce.com>
@@ -73,5 +75,6 @@ Trey Morris <trey.morris@rackspace.com>
Tushar Patil <tushar.vitthal.patil@gmail.com> Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org> Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com> Vishvananda Ishaya <vishvananda@gmail.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com> Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com> Zhixue Wu <Zhixue.Wu@citrix.com>

17
HACKING
View File

@@ -50,17 +50,24 @@ Human Alphabetical Order Examples
Docstrings Docstrings
---------- ----------
"""Summary of the function, class or method, less than 80 characters. """A one line docstring looks like this and ends in a period."""
New paragraph after newline that explains in more detail any general
information about the function, class or method. After this, if defining """A multiline docstring has a one-line summary, less than 80 characters.
parameters and return types use the Sphinx format. After that an extra
newline then close the quotations. Then a new paragraph after a newline that explains in more detail any
general information about the function, class or method. Example usages
are also great to have here if it is a complex class or function. After
you have finished your descriptions add an extra newline and close the
quotations.
When writing the docstring for a class, an extra line should be placed When writing the docstring for a class, an extra line should be placed
after the closing quotations. For more in-depth explanations for these after the closing quotations. For more in-depth explanations for these
decisions see http://www.python.org/dev/peps/pep-0257/ decisions see http://www.python.org/dev/peps/pep-0257/
If you are going to describe parameters and return values, use Sphinx, the
appropriate syntax is as follows.
:param foo: the foo parameter :param foo: the foo parameter
:param bar: the bar parameter :param bar: the bar parameter
:returns: description of the return value :returns: description of the return value

View File

@@ -28,11 +28,11 @@ import sys
# If ../nova/__init__.py exists, add ../ to Python search path, so that # If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python... # it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir, os.pardir,
os.pardir)) os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, POSSIBLE_TOPDIR)
gettext.install('nova', unicode=1) gettext.install('nova', unicode=1)

View File

@@ -58,7 +58,6 @@ import gettext
import glob import glob
import json import json
import os import os
import re
import sys import sys
import time import time
@@ -66,11 +65,11 @@ import IPy
# If ../nova/__init__.py exists, add ../ to Python search path, so that # If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python... # it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir, os.pardir,
os.pardir)) os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, POSSIBLE_TOPDIR)
gettext.install('nova', unicode=1) gettext.install('nova', unicode=1)
@@ -449,7 +448,7 @@ class FixedIpCommands(object):
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
try: try:
if host == None: if host is None:
fixed_ips = db.fixed_ip_get_all(ctxt) fixed_ips = db.fixed_ip_get_all(ctxt)
else: else:
fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host) fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host)
@@ -499,7 +498,7 @@ class FloatingIpCommands(object):
"""Lists all floating ips (optionally by host) """Lists all floating ips (optionally by host)
arguments: [host]""" arguments: [host]"""
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
if host == None: if host is None:
floating_ips = db.floating_ip_get_all(ctxt) floating_ips = db.floating_ip_get_all(ctxt)
else: else:
floating_ips = db.floating_ip_get_all_by_host(ctxt, host) floating_ips = db.floating_ip_get_all_by_host(ctxt, host)
@@ -570,6 +569,49 @@ class NetworkCommands(object):
class VmCommands(object): class VmCommands(object):
"""Class for mangaging VM instances.""" """Class for mangaging VM instances."""
def list(self, host=None):
"""Show a list of all instances
:param host: show all instance on specified host.
:param instance: show the specified instance.
"""
print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
" %-10s %-10s %-10s %-5s" % (
_('instance'),
_('node'),
_('type'),
_('state'),
_('launched'),
_('image'),
_('kernel'),
_('ramdisk'),
_('project'),
_('user'),
_('zone'),
_('index'))
if host is None:
instances = db.instance_get_all(context.get_admin_context())
else:
instances = db.instance_get_all_by_host(
context.get_admin_context(), host)
for instance in instances:
print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
" %-10s %-10s %-10s %-5d" % (
instance['hostname'],
instance['host'],
instance['instance_type'],
instance['state_description'],
instance['launched_at'],
instance['image_id'],
instance['kernel_id'],
instance['ramdisk_id'],
instance['project_id'],
instance['user_id'],
instance['availability_zone'],
instance['launch_index'])
def live_migration(self, ec2_id, dest): def live_migration(self, ec2_id, dest):
"""Migrates a running instance to a new machine. """Migrates a running instance to a new machine.
@@ -701,15 +743,6 @@ class ServiceCommands(object):
{"method": "update_available_resource"}) {"method": "update_available_resource"})
class LogCommands(object):
def request(self, request_id, logfile='/var/log/nova.log'):
"""Show all fields in the log for the given request. Assumes you
haven't changed the log format too much.
ARGS: request_id [logfile]"""
lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id))
print re.sub('#012', "\n", "\n".join(lines))
class DbCommands(object): class DbCommands(object):
"""Class for managing the database.""" """Class for managing the database."""
@@ -725,49 +758,6 @@ class DbCommands(object):
print migration.db_version() print migration.db_version()
class InstanceCommands(object):
"""Class for managing instances."""
def list(self, host=None, instance=None):
"""Show a list of all instances"""
print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
" %-10s %-10s %-10s %-5s" % (
_('instance'),
_('node'),
_('type'),
_('state'),
_('launched'),
_('image'),
_('kernel'),
_('ramdisk'),
_('project'),
_('user'),
_('zone'),
_('index'))
if host == None:
instances = db.instance_get_all(context.get_admin_context())
else:
instances = db.instance_get_all_by_host(
context.get_admin_context(), host)
for instance in instances:
print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
" %-10s %-10s %-10s %-5d" % (
instance['hostname'],
instance['host'],
instance['instance_type'],
instance['state_description'],
instance['launched_at'],
instance['image_id'],
instance['kernel_id'],
instance['ramdisk_id'],
instance['project_id'],
instance['user_id'],
instance['availability_zone'],
instance['launch_index'])
class VolumeCommands(object): class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state""" """Methods for dealing with a cloud in an odd state"""
@@ -818,11 +808,11 @@ class VolumeCommands(object):
class InstanceTypeCommands(object): class InstanceTypeCommands(object):
"""Class for managing instance types / flavors.""" """Class for managing instance types / flavors."""
def _print_instance_types(self, n, val): def _print_instance_types(self, name, val):
deleted = ('', ', inactive')[val["deleted"] == 1] deleted = ('', ', inactive')[val["deleted"] == 1]
print ("%s: Memory: %sMB, VCPUS: %s, Storage: %sGB, FlavorID: %s, " print ("%s: Memory: %sMB, VCPUS: %s, Storage: %sGB, FlavorID: %s, "
"Swap: %sGB, RXTX Quota: %sGB, RXTX Cap: %sMB%s") % ( "Swap: %sGB, RXTX Quota: %sGB, RXTX Cap: %sMB%s") % (
n, val["memory_mb"], val["vcpus"], val["local_gb"], name, val["memory_mb"], val["vcpus"], val["local_gb"],
val["flavorid"], val["swap"], val["rxtx_quota"], val["flavorid"], val["swap"], val["rxtx_quota"],
val["rxtx_cap"], deleted) val["rxtx_cap"], deleted)
@@ -873,12 +863,12 @@ class InstanceTypeCommands(object):
"""Lists all active or specific instance types / flavors """Lists all active or specific instance types / flavors
arguments: [name]""" arguments: [name]"""
try: try:
if name == None: if name is None:
inst_types = instance_types.get_all_types() inst_types = instance_types.get_all_types()
elif name == "--all": elif name == "--all":
inst_types = instance_types.get_all_types(True) inst_types = instance_types.get_all_types(True)
else: else:
inst_types = instance_types.get_instance_type(name) inst_types = instance_types.get_instance_type_by_name(name)
except exception.DBError, e: except exception.DBError, e:
_db_error(e) _db_error(e)
if isinstance(inst_types.values()[0], dict): if isinstance(inst_types.values()[0], dict):
@@ -894,20 +884,17 @@ class ImageCommands(object):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
self.image_service = utils.import_object(FLAGS.image_service) self.image_service = utils.import_object(FLAGS.image_service)
def _register(self, image_type, disk_format, container_format, def _register(self, container_format, disk_format,
path, owner, name=None, is_public='T', path, owner, name=None, is_public='T',
architecture='x86_64', kernel_id=None, ramdisk_id=None): architecture='x86_64', kernel_id=None, ramdisk_id=None):
meta = {'is_public': True, meta = {'is_public': (is_public == 'T'),
'name': name, 'name': name,
'disk_format': disk_format,
'container_format': container_format, 'container_format': container_format,
'disk_format': disk_format,
'properties': {'image_state': 'available', 'properties': {'image_state': 'available',
'owner_id': owner, 'project_id': owner,
'type': image_type,
'architecture': architecture, 'architecture': architecture,
'image_location': 'local', 'image_location': 'local'}}
'is_public': (is_public == 'T')}}
print image_type, meta
if kernel_id: if kernel_id:
meta['properties']['kernel_id'] = int(kernel_id) meta['properties']['kernel_id'] = int(kernel_id)
if ramdisk_id: if ramdisk_id:
@@ -932,16 +919,18 @@ class ImageCommands(object):
ramdisk_id = self.ramdisk_register(ramdisk, owner, None, ramdisk_id = self.ramdisk_register(ramdisk, owner, None,
is_public, architecture) is_public, architecture)
self.image_register(image, owner, name, is_public, self.image_register(image, owner, name, is_public,
architecture, kernel_id, ramdisk_id) architecture, 'ami', 'ami',
kernel_id, ramdisk_id)
def image_register(self, path, owner, name=None, is_public='T', def image_register(self, path, owner, name=None, is_public='T',
architecture='x86_64', kernel_id=None, ramdisk_id=None, architecture='x86_64', container_format='bare',
disk_format='ami', container_format='ami'): disk_format='raw', kernel_id=None, ramdisk_id=None):
"""Uploads an image into the image_service """Uploads an image into the image_service
arguments: path owner [name] [is_public='T'] [architecture='x86_64'] arguments: path owner [name] [is_public='T'] [architecture='x86_64']
[container_format='bare'] [disk_format='raw']
[kernel_id=None] [ramdisk_id=None] [kernel_id=None] [ramdisk_id=None]
[disk_format='ami'] [container_format='ami']""" """
return self._register('machine', disk_format, container_format, path, return self._register(container_format, disk_format, path,
owner, name, is_public, architecture, owner, name, is_public, architecture,
kernel_id, ramdisk_id) kernel_id, ramdisk_id)
@@ -950,7 +939,7 @@ class ImageCommands(object):
"""Uploads a kernel into the image_service """Uploads a kernel into the image_service
arguments: path owner [name] [is_public='T'] [architecture='x86_64'] arguments: path owner [name] [is_public='T'] [architecture='x86_64']
""" """
return self._register('kernel', 'aki', 'aki', path, owner, name, return self._register('aki', 'aki', path, owner, name,
is_public, architecture) is_public, architecture)
def ramdisk_register(self, path, owner, name=None, is_public='T', def ramdisk_register(self, path, owner, name=None, is_public='T',
@@ -958,7 +947,7 @@ class ImageCommands(object):
"""Uploads a ramdisk into the image_service """Uploads a ramdisk into the image_service
arguments: path owner [name] [is_public='T'] [architecture='x86_64'] arguments: path owner [name] [is_public='T'] [architecture='x86_64']
""" """
return self._register('ramdisk', 'ari', 'ari', path, owner, name, return self._register('ari', 'ari', path, owner, name,
is_public, architecture) is_public, architecture)
def _lookup(self, old_image_id): def _lookup(self, old_image_id):
@@ -975,16 +964,17 @@ class ImageCommands(object):
'ramdisk': 'ari'} 'ramdisk': 'ari'}
container_format = mapping[old['type']] container_format = mapping[old['type']]
disk_format = container_format disk_format = container_format
if container_format == 'ami' and not old.get('kernelId'):
container_format = 'bare'
disk_format = 'raw'
new = {'disk_format': disk_format, new = {'disk_format': disk_format,
'container_format': container_format, 'container_format': container_format,
'is_public': True, 'is_public': old['isPublic'],
'name': old['imageId'], 'name': old['imageId'],
'properties': {'image_state': old['imageState'], 'properties': {'image_state': old['imageState'],
'owner_id': old['imageOwnerId'], 'project_id': old['imageOwnerId'],
'architecture': old['architecture'], 'architecture': old['architecture'],
'type': old['type'], 'image_location': old['imageLocation']}}
'image_location': old['imageLocation'],
'is_public': old['isPublic']}}
if old.get('kernelId'): if old.get('kernelId'):
new['properties']['kernel_id'] = self._lookup(old['kernelId']) new['properties']['kernel_id'] = self._lookup(old['kernelId'])
if old.get('ramdiskId'): if old.get('ramdiskId'):
@@ -1018,7 +1008,7 @@ class ImageCommands(object):
if (FLAGS.image_service == 'nova.image.local.LocalImageService' if (FLAGS.image_service == 'nova.image.local.LocalImageService'
and directory == os.path.abspath(FLAGS.images_path)): and directory == os.path.abspath(FLAGS.images_path)):
new_dir = "%s_bak" % directory new_dir = "%s_bak" % directory
os.move(directory, new_dir) os.rename(directory, new_dir)
os.mkdir(directory) os.mkdir(directory)
directory = new_dir directory = new_dir
for fn in glob.glob("%s/*/info.json" % directory): for fn in glob.glob("%s/*/info.json" % directory):
@@ -1030,7 +1020,7 @@ class ImageCommands(object):
machine_images[image_path] = image_metadata machine_images[image_path] = image_metadata
else: else:
other_images[image_path] = image_metadata other_images[image_path] = image_metadata
except Exception as exc: except Exception:
print _("Failed to load %(fn)s.") % locals() print _("Failed to load %(fn)s.") % locals()
# NOTE(vish): do kernels and ramdisks first so images # NOTE(vish): do kernels and ramdisks first so images
self._convert_images(other_images) self._convert_images(other_images)
@@ -1049,13 +1039,11 @@ CATEGORIES = [
('network', NetworkCommands), ('network', NetworkCommands),
('vm', VmCommands), ('vm', VmCommands),
('service', ServiceCommands), ('service', ServiceCommands),
('log', LogCommands),
('db', DbCommands), ('db', DbCommands),
('volume', VolumeCommands), ('volume', VolumeCommands),
('instance_type', InstanceTypeCommands), ('instance_type', InstanceTypeCommands),
('image', ImageCommands), ('image', ImageCommands),
('flavor', InstanceTypeCommands), ('flavor', InstanceTypeCommands)]
('instance', InstanceCommands)]
def lazy_match(name, key_value_tuples): def lazy_match(name, key_value_tuples):

View File

@@ -115,7 +115,7 @@ class DbDriver(object):
# on to create the project. This way we won't have to destroy # on to create the project. This way we won't have to destroy
# the project again because a user turns out to be invalid. # the project again because a user turns out to be invalid.
members = set([manager]) members = set([manager])
if member_uids != None: if member_uids is not None:
for member_uid in member_uids: for member_uid in member_uids:
member = db.user_get(context.get_admin_context(), member_uid) member = db.user_get(context.get_admin_context(), member_uid)
if not member: if not member:

View File

@@ -268,7 +268,7 @@ class AuthManager(object):
LOG.debug(_('Looking up user: %r'), access_key) LOG.debug(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key) user = self.get_user_from_access_key(access_key)
LOG.debug('user: %r', user) LOG.debug('user: %r', user)
if user == None: if user is None:
LOG.audit(_("Failed authorization for access key %s"), access_key) LOG.audit(_("Failed authorization for access key %s"), access_key)
raise exception.NotFound(_('No user found for access key %s') raise exception.NotFound(_('No user found for access key %s')
% access_key) % access_key)
@@ -280,7 +280,7 @@ class AuthManager(object):
project_id = user.name project_id = user.name
project = self.get_project(project_id) project = self.get_project(project_id)
if project == None: if project is None:
pjid = project_id pjid = project_id
uname = user.name uname = user.name
LOG.audit(_("failed authorization: no project named %(pjid)s" LOG.audit(_("failed authorization: no project named %(pjid)s"
@@ -646,9 +646,9 @@ class AuthManager(object):
@rtype: User @rtype: User
@return: The new user. @return: The new user.
""" """
if access == None: if access is None:
access = str(uuid.uuid4()) access = str(uuid.uuid4())
if secret == None: if secret is None:
secret = str(uuid.uuid4()) secret = str(uuid.uuid4())
with self.driver() as drv: with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin) user_dict = drv.create_user(name, access, secret, admin)

View File

@@ -18,14 +18,14 @@
"""Super simple fake memcache client.""" """Super simple fake memcache client."""
import utils from nova import utils
class Client(object): class Client(object):
"""Replicates a tiny subset of memcached client interface.""" """Replicates a tiny subset of memcached client interface."""
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
"""Ignores the passed in args""" """Ignores the passed in args."""
self.cache = {} self.cache = {}
def get(self, key): def get(self, key):

View File

@@ -16,9 +16,13 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Command-line flag library.
Wraps gflags.
Package-level global flags are defined here, the rest are defined Package-level global flags are defined here, the rest are defined
where they're used. where they're used.
""" """
import getopt import getopt
@@ -145,10 +149,12 @@ class FlagValues(gflags.FlagValues):
class StrWrapper(object): class StrWrapper(object):
"""Wrapper around FlagValues objects """Wrapper around FlagValues objects.
Wraps FlagValues objects for string.Template so that we're Wraps FlagValues objects for string.Template so that we're
sure to return strings.""" sure to return strings.
"""
def __init__(self, context_objs): def __init__(self, context_objs):
self.context_objs = context_objs self.context_objs = context_objs
@@ -169,6 +175,7 @@ def _GetCallingModule():
We generally use this function to get the name of the module calling a We generally use this function to get the name of the module calling a
DEFINE_foo... function. DEFINE_foo... function.
""" """
# Walk down the stack to find the first globals dict that's not ours. # Walk down the stack to find the first globals dict that's not ours.
for depth in range(1, sys.getrecursionlimit()): for depth in range(1, sys.getrecursionlimit()):
@@ -192,6 +199,7 @@ def __GetModuleName(globals_dict):
Returns: Returns:
A string (the name of the module) or None (if the module could not A string (the name of the module) or None (if the module could not
be identified). be identified).
""" """
for name, module in sys.modules.iteritems(): for name, module in sys.modules.iteritems():
if getattr(module, '__dict__', None) is globals_dict: if getattr(module, '__dict__', None) is globals_dict:
@@ -326,7 +334,7 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
"Top-level directory for maintaining nova's state") "Top-level directory for maintaining nova's state")
DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'), DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'),
"Directory for lock files") 'Directory for lock files')
DEFINE_string('logdir', None, 'output to a per-service log file in named ' DEFINE_string('logdir', None, 'output to a per-service log file in named '
'directory') 'directory')

View File

@@ -16,16 +16,15 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Nova logging handler.
Nova logging handler.
This module adds to logging functionality by adding the option to specify This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object a context object when calling the various log methods. If the context object
is not specified, default formatting is used. is not specified, default formatting is used.
It also allows setting of formatting information through flags. It also allows setting of formatting information through flags.
"""
"""
import cStringIO import cStringIO
import inspect import inspect
@@ -41,34 +40,28 @@ from nova import version
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DEFINE_string('logging_context_format_string', flags.DEFINE_string('logging_context_format_string',
'%(asctime)s %(levelname)s %(name)s ' '%(asctime)s %(levelname)s %(name)s '
'[%(request_id)s %(user)s ' '[%(request_id)s %(user)s '
'%(project)s] %(message)s', '%(project)s] %(message)s',
'format string to use for log messages with context') 'format string to use for log messages with context')
flags.DEFINE_string('logging_default_format_string', flags.DEFINE_string('logging_default_format_string',
'%(asctime)s %(levelname)s %(name)s [-] ' '%(asctime)s %(levelname)s %(name)s [-] '
'%(message)s', '%(message)s',
'format string to use for log messages without context') 'format string to use for log messages without context')
flags.DEFINE_string('logging_debug_format_suffix', flags.DEFINE_string('logging_debug_format_suffix',
'from (pid=%(process)d) %(funcName)s' 'from (pid=%(process)d) %(funcName)s'
' %(pathname)s:%(lineno)d', ' %(pathname)s:%(lineno)d',
'data to append to log format when level is DEBUG') 'data to append to log format when level is DEBUG')
flags.DEFINE_string('logging_exception_prefix', flags.DEFINE_string('logging_exception_prefix',
'(%(name)s): TRACE: ', '(%(name)s): TRACE: ',
'prefix each line of exception output with this format') 'prefix each line of exception output with this format')
flags.DEFINE_list('default_log_levels', flags.DEFINE_list('default_log_levels',
['amqplib=WARN', ['amqplib=WARN',
'sqlalchemy=WARN', 'sqlalchemy=WARN',
'boto=WARN', 'boto=WARN',
'eventlet.wsgi.server=WARN'], 'eventlet.wsgi.server=WARN'],
'list of logger=LEVEL pairs') 'list of logger=LEVEL pairs')
flags.DEFINE_bool('use_syslog', False, 'output to syslog') flags.DEFINE_bool('use_syslog', False, 'output to syslog')
flags.DEFINE_string('logfile', None, 'output to named file') flags.DEFINE_string('logfile', None, 'output to named file')
@@ -83,6 +76,8 @@ WARN = logging.WARN
INFO = logging.INFO INFO = logging.INFO
DEBUG = logging.DEBUG DEBUG = logging.DEBUG
NOTSET = logging.NOTSET NOTSET = logging.NOTSET
# methods # methods
getLogger = logging.getLogger getLogger = logging.getLogger
debug = logging.debug debug = logging.debug
@@ -93,6 +88,8 @@ error = logging.error
exception = logging.exception exception = logging.exception
critical = logging.critical critical = logging.critical
log = logging.log log = logging.log
# handlers # handlers
StreamHandler = logging.StreamHandler StreamHandler = logging.StreamHandler
WatchedFileHandler = logging.handlers.WatchedFileHandler WatchedFileHandler = logging.handlers.WatchedFileHandler
@@ -106,7 +103,7 @@ logging.addLevelName(AUDIT, 'AUDIT')
def _dictify_context(context): def _dictify_context(context):
if context == None: if context is None:
return None return None
if not isinstance(context, dict) \ if not isinstance(context, dict) \
and getattr(context, 'to_dict', None): and getattr(context, 'to_dict', None):
@@ -127,17 +124,18 @@ def _get_log_file_path(binary=None):
class NovaLogger(logging.Logger): class NovaLogger(logging.Logger):
""" """NovaLogger manages request context and formatting.
NovaLogger manages request context and formatting.
This becomes the class that is instantiated by logging.getLogger. This becomes the class that is instantiated by logging.getLogger.
""" """
def __init__(self, name, level=NOTSET): def __init__(self, name, level=NOTSET):
logging.Logger.__init__(self, name, level) logging.Logger.__init__(self, name, level)
self.setup_from_flags() self.setup_from_flags()
def setup_from_flags(self): def setup_from_flags(self):
"""Setup logger from flags""" """Setup logger from flags."""
level = NOTSET level = NOTSET
for pair in FLAGS.default_log_levels: for pair in FLAGS.default_log_levels:
logger, _sep, level_name = pair.partition('=') logger, _sep, level_name = pair.partition('=')
@@ -148,7 +146,7 @@ class NovaLogger(logging.Logger):
self.setLevel(level) self.setLevel(level)
def _log(self, level, msg, args, exc_info=None, extra=None, context=None): def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
"""Extract context from any log call""" """Extract context from any log call."""
if not extra: if not extra:
extra = {} extra = {}
if context: if context:
@@ -157,17 +155,17 @@ class NovaLogger(logging.Logger):
return logging.Logger._log(self, level, msg, args, exc_info, extra) return logging.Logger._log(self, level, msg, args, exc_info, extra)
def addHandler(self, handler): def addHandler(self, handler):
"""Each handler gets our custom formatter""" """Each handler gets our custom formatter."""
handler.setFormatter(_formatter) handler.setFormatter(_formatter)
return logging.Logger.addHandler(self, handler) return logging.Logger.addHandler(self, handler)
def audit(self, msg, *args, **kwargs): def audit(self, msg, *args, **kwargs):
"""Shortcut for our AUDIT level""" """Shortcut for our AUDIT level."""
if self.isEnabledFor(AUDIT): if self.isEnabledFor(AUDIT):
self._log(AUDIT, msg, args, **kwargs) self._log(AUDIT, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs): def exception(self, msg, *args, **kwargs):
"""Logging.exception doesn't handle kwargs, so breaks context""" """Logging.exception doesn't handle kwargs, so breaks context."""
if not kwargs.get('exc_info'): if not kwargs.get('exc_info'):
kwargs['exc_info'] = 1 kwargs['exc_info'] = 1
self.error(msg, *args, **kwargs) self.error(msg, *args, **kwargs)
@@ -181,14 +179,13 @@ class NovaLogger(logging.Logger):
for k in env.keys(): for k in env.keys():
if not isinstance(env[k], str): if not isinstance(env[k], str):
env.pop(k) env.pop(k)
message = "Environment: %s" % json.dumps(env) message = 'Environment: %s' % json.dumps(env)
kwargs.pop('exc_info') kwargs.pop('exc_info')
self.error(message, **kwargs) self.error(message, **kwargs)
class NovaFormatter(logging.Formatter): class NovaFormatter(logging.Formatter):
""" """A nova.context.RequestContext aware formatter configured through flags.
A nova.context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify and logging_default_format_string. You can also specify
@@ -197,10 +194,11 @@ class NovaFormatter(logging.Formatter):
For information about what variables are available for the formatter see: For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter http://docs.python.org/library/logging.html#formatter
""" """
def format(self, record): def format(self, record):
"""Uses contextstring if request_id is set, otherwise default""" """Uses contextstring if request_id is set, otherwise default."""
if record.__dict__.get('request_id', None): if record.__dict__.get('request_id', None):
self._fmt = FLAGS.logging_context_format_string self._fmt = FLAGS.logging_context_format_string
else: else:
@@ -214,20 +212,21 @@ class NovaFormatter(logging.Formatter):
return logging.Formatter.format(self, record) return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None): def formatException(self, exc_info, record=None):
"""Format exception output with FLAGS.logging_exception_prefix""" """Format exception output with FLAGS.logging_exception_prefix."""
if not record: if not record:
return logging.Formatter.formatException(self, exc_info) return logging.Formatter.formatException(self, exc_info)
stringbuffer = cStringIO.StringIO() stringbuffer = cStringIO.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer) None, stringbuffer)
lines = stringbuffer.getvalue().split("\n") lines = stringbuffer.getvalue().split('\n')
stringbuffer.close() stringbuffer.close()
formatted_lines = [] formatted_lines = []
for line in lines: for line in lines:
pl = FLAGS.logging_exception_prefix % record.__dict__ pl = FLAGS.logging_exception_prefix % record.__dict__
fl = "%s%s" % (pl, line) fl = '%s%s' % (pl, line)
formatted_lines.append(fl) formatted_lines.append(fl)
return "\n".join(formatted_lines) return '\n'.join(formatted_lines)
_formatter = NovaFormatter() _formatter = NovaFormatter()
@@ -241,7 +240,7 @@ class NovaRootLogger(NovaLogger):
NovaLogger.__init__(self, name, level) NovaLogger.__init__(self, name, level)
def setup_from_flags(self): def setup_from_flags(self):
"""Setup logger from flags""" """Setup logger from flags."""
global _filelog global _filelog
if FLAGS.use_syslog: if FLAGS.use_syslog:
self.syslog = SysLogHandler(address='/dev/log') self.syslog = SysLogHandler(address='/dev/log')

View File

@@ -16,9 +16,12 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """AMQP-based RPC.
AMQP-based RPC. Queues have consumers and publishers.
Queues have consumers and publishers.
No fan-out support yet. No fan-out support yet.
""" """
import json import json
@@ -40,17 +43,19 @@ from nova import log as logging
from nova import utils from nova import utils
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.rpc') LOG = logging.getLogger('nova.rpc')
FLAGS = flags.FLAGS
flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool')
class Connection(carrot_connection.BrokerConnection): class Connection(carrot_connection.BrokerConnection):
"""Connection instance object""" """Connection instance object."""
@classmethod @classmethod
def instance(cls, new=True): def instance(cls, new=True):
"""Returns the instance""" """Returns the instance."""
if new or not hasattr(cls, '_instance'): if new or not hasattr(cls, '_instance'):
params = dict(hostname=FLAGS.rabbit_host, params = dict(hostname=FLAGS.rabbit_host,
port=FLAGS.rabbit_port, port=FLAGS.rabbit_port,
@@ -71,9 +76,11 @@ class Connection(carrot_connection.BrokerConnection):
@classmethod @classmethod
def recreate(cls): def recreate(cls):
"""Recreates the connection instance """Recreates the connection instance.
This is necessary to recover from some network errors/disconnects""" This is necessary to recover from some network errors/disconnects.
"""
try: try:
del cls._instance del cls._instance
except AttributeError, e: except AttributeError, e:
@@ -84,10 +91,12 @@ class Connection(carrot_connection.BrokerConnection):
class Consumer(messaging.Consumer): class Consumer(messaging.Consumer):
"""Consumer base class """Consumer base class.
Contains methods for connecting the fetch method to async loops.
Contains methods for connecting the fetch method to async loops
""" """
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
for i in xrange(FLAGS.rabbit_max_retries): for i in xrange(FLAGS.rabbit_max_retries):
if i > 0: if i > 0:
@@ -100,19 +109,18 @@ class Consumer(messaging.Consumer):
fl_host = FLAGS.rabbit_host fl_host = FLAGS.rabbit_host
fl_port = FLAGS.rabbit_port fl_port = FLAGS.rabbit_port
fl_intv = FLAGS.rabbit_retry_interval fl_intv = FLAGS.rabbit_retry_interval
LOG.error(_("AMQP server on %(fl_host)s:%(fl_port)d is" LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is'
" unreachable: %(e)s. Trying again in %(fl_intv)d" ' unreachable: %(e)s. Trying again in %(fl_intv)d'
" seconds.") ' seconds.') % locals())
% locals())
self.failed_connection = True self.failed_connection = True
if self.failed_connection: if self.failed_connection:
LOG.error(_("Unable to connect to AMQP server " LOG.error(_('Unable to connect to AMQP server '
"after %d tries. Shutting down."), 'after %d tries. Shutting down.'),
FLAGS.rabbit_max_retries) FLAGS.rabbit_max_retries)
sys.exit(1) sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
"""Wraps the parent fetch with some logic for failed connections""" """Wraps the parent fetch with some logic for failed connection."""
# TODO(vish): the logic for failed connections and logging should be # TODO(vish): the logic for failed connections and logging should be
# refactored into some sort of connection manager object # refactored into some sort of connection manager object
try: try:
@@ -125,14 +133,14 @@ class Consumer(messaging.Consumer):
self.declare() self.declare()
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
if self.failed_connection: if self.failed_connection:
LOG.error(_("Reconnected to queue")) LOG.error(_('Reconnected to queue'))
self.failed_connection = False self.failed_connection = False
# NOTE(vish): This is catching all errors because we really don't # NOTE(vish): This is catching all errors because we really don't
# want exceptions to be logged 10 times a second if some # want exceptions to be logged 10 times a second if some
# persistent failure occurs. # persistent failure occurs.
except Exception, e: # pylint: disable=W0703 except Exception, e: # pylint: disable=W0703
if not self.failed_connection: if not self.failed_connection:
LOG.exception(_("Failed to fetch message from queue: %s" % e)) LOG.exception(_('Failed to fetch message from queue: %s' % e))
self.failed_connection = True self.failed_connection = True
def attach_to_eventlet(self): def attach_to_eventlet(self):
@@ -143,8 +151,9 @@ class Consumer(messaging.Consumer):
class AdapterConsumer(Consumer): class AdapterConsumer(Consumer):
"""Calls methods on a proxy object based on method and args""" """Calls methods on a proxy object based on method and args."""
def __init__(self, connection=None, topic="broadcast", proxy=None):
def __init__(self, connection=None, topic='broadcast', proxy=None):
LOG.debug(_('Initing the Adapter Consumer for %s') % topic) LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
self.proxy = proxy self.proxy = proxy
self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
@@ -156,13 +165,14 @@ class AdapterConsumer(Consumer):
@exception.wrap_exception @exception.wrap_exception
def _receive(self, message_data, message): def _receive(self, message_data, message):
"""Magically looks for a method on the proxy object and calls it """Magically looks for a method on the proxy object and calls it.
Message data should be a dictionary with two keys: Message data should be a dictionary with two keys:
method: string representing the method to call method: string representing the method to call
args: dictionary of arg: value args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}} Example: {'method': 'echo', 'args': {'value': 42}}
""" """
LOG.debug(_('received %s') % message_data) LOG.debug(_('received %s') % message_data)
msg_id = message_data.pop('_msg_id', None) msg_id = message_data.pop('_msg_id', None)
@@ -189,22 +199,23 @@ class AdapterConsumer(Consumer):
if msg_id: if msg_id:
msg_reply(msg_id, rval, None) msg_reply(msg_id, rval, None)
except Exception as e: except Exception as e:
logging.exception("Exception during message handling") logging.exception('Exception during message handling')
if msg_id: if msg_id:
msg_reply(msg_id, None, sys.exc_info()) msg_reply(msg_id, None, sys.exc_info())
return return
class Publisher(messaging.Publisher): class Publisher(messaging.Publisher):
"""Publisher base class""" """Publisher base class."""
pass pass
class TopicAdapterConsumer(AdapterConsumer): class TopicAdapterConsumer(AdapterConsumer):
"""Consumes messages on a specific topic""" """Consumes messages on a specific topic."""
exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast", proxy=None): exchange_type = 'topic'
def __init__(self, connection=None, topic='broadcast', proxy=None):
self.queue = topic self.queue = topic
self.routing_key = topic self.routing_key = topic
self.exchange = FLAGS.control_exchange self.exchange = FLAGS.control_exchange
@@ -214,27 +225,29 @@ class TopicAdapterConsumer(AdapterConsumer):
class FanoutAdapterConsumer(AdapterConsumer): class FanoutAdapterConsumer(AdapterConsumer):
"""Consumes messages from a fanout exchange""" """Consumes messages from a fanout exchange."""
exchange_type = "fanout"
def __init__(self, connection=None, topic="broadcast", proxy=None): exchange_type = 'fanout'
self.exchange = "%s_fanout" % topic
def __init__(self, connection=None, topic='broadcast', proxy=None):
self.exchange = '%s_fanout' % topic
self.routing_key = topic self.routing_key = topic
unique = uuid.uuid4().hex unique = uuid.uuid4().hex
self.queue = "%s_fanout_%s" % (topic, unique) self.queue = '%s_fanout_%s' % (topic, unique)
self.durable = False self.durable = False
LOG.info(_("Created '%(exchange)s' fanout exchange " LOG.info(_('Created "%(exchange)s" fanout exchange '
"with '%(key)s' routing key"), 'with "%(key)s" routing key'),
dict(exchange=self.exchange, key=self.routing_key)) dict(exchange=self.exchange, key=self.routing_key))
super(FanoutAdapterConsumer, self).__init__(connection=connection, super(FanoutAdapterConsumer, self).__init__(connection=connection,
topic=topic, proxy=proxy) topic=topic, proxy=proxy)
class TopicPublisher(Publisher): class TopicPublisher(Publisher):
"""Publishes messages on a specific topic""" """Publishes messages on a specific topic."""
exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast"): exchange_type = 'topic'
def __init__(self, connection=None, topic='broadcast'):
self.routing_key = topic self.routing_key = topic
self.exchange = FLAGS.control_exchange self.exchange = FLAGS.control_exchange
self.durable = False self.durable = False
@@ -243,20 +256,22 @@ class TopicPublisher(Publisher):
class FanoutPublisher(Publisher): class FanoutPublisher(Publisher):
"""Publishes messages to a fanout exchange.""" """Publishes messages to a fanout exchange."""
exchange_type = "fanout"
exchange_type = 'fanout'
def __init__(self, topic, connection=None): def __init__(self, topic, connection=None):
self.exchange = "%s_fanout" % topic self.exchange = '%s_fanout' % topic
self.queue = "%s_fanout" % topic self.queue = '%s_fanout' % topic
self.durable = False self.durable = False
LOG.info(_("Creating '%(exchange)s' fanout exchange"), LOG.info(_('Creating "%(exchange)s" fanout exchange'),
dict(exchange=self.exchange)) dict(exchange=self.exchange))
super(FanoutPublisher, self).__init__(connection=connection) super(FanoutPublisher, self).__init__(connection=connection)
class DirectConsumer(Consumer): class DirectConsumer(Consumer):
"""Consumes messages directly on a channel specified by msg_id""" """Consumes messages directly on a channel specified by msg_id."""
exchange_type = "direct"
exchange_type = 'direct'
def __init__(self, connection=None, msg_id=None): def __init__(self, connection=None, msg_id=None):
self.queue = msg_id self.queue = msg_id
@@ -268,8 +283,9 @@ class DirectConsumer(Consumer):
class DirectPublisher(Publisher): class DirectPublisher(Publisher):
"""Publishes messages directly on a channel specified by msg_id""" """Publishes messages directly on a channel specified by msg_id."""
exchange_type = "direct"
exchange_type = 'direct'
def __init__(self, connection=None, msg_id=None): def __init__(self, connection=None, msg_id=None):
self.routing_key = msg_id self.routing_key = msg_id
@@ -279,9 +295,9 @@ class DirectPublisher(Publisher):
def msg_reply(msg_id, reply=None, failure=None): def msg_reply(msg_id, reply=None, failure=None):
"""Sends a reply or an error on the channel signified by msg_id """Sends a reply or an error on the channel signified by msg_id.
failure should be a sys.exc_info() tuple. Failure should be a sys.exc_info() tuple.
""" """
if failure: if failure:
@@ -303,17 +319,20 @@ def msg_reply(msg_id, reply=None, failure=None):
class RemoteError(exception.Error): class RemoteError(exception.Error):
"""Signifies that a remote class has raised an exception """Signifies that a remote class has raised an exception.
Containes a string representation of the type of the original exception, Containes a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception sent to the parent as a joined string so printing the exception
contains all of the relevent info.""" contains all of the relevent info.
"""
def __init__(self, exc_type, value, traceback): def __init__(self, exc_type, value, traceback):
self.exc_type = exc_type self.exc_type = exc_type
self.value = value self.value = value
self.traceback = traceback self.traceback = traceback
super(RemoteError, self).__init__("%s %s\n%s" % (exc_type, super(RemoteError, self).__init__('%s %s\n%s' % (exc_type,
value, value,
traceback)) traceback))
@@ -339,6 +358,7 @@ def _pack_context(msg, context):
context out into a bunch of separate keys. If we want to support context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same more arguments in rabbit messages, we may want to do the same
for args at some point. for args at some point.
""" """
context = dict([('_context_%s' % key, value) context = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()]) for (key, value) in context.to_dict().iteritems()])
@@ -346,11 +366,11 @@ def _pack_context(msg, context):
def call(context, topic, msg): def call(context, topic, msg):
"""Sends a message on a topic and wait for a response""" """Sends a message on a topic and wait for a response."""
LOG.debug(_("Making asynchronous call on %s ..."), topic) LOG.debug(_('Making asynchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id}) msg.update({'_msg_id': msg_id})
LOG.debug(_("MSG_ID is %s") % (msg_id)) LOG.debug(_('MSG_ID is %s') % (msg_id))
_pack_context(msg, context) _pack_context(msg, context)
class WaitMessage(object): class WaitMessage(object):
@@ -387,8 +407,8 @@ def call(context, topic, msg):
def cast(context, topic, msg): def cast(context, topic, msg):
"""Sends a message on a topic without waiting for a response""" """Sends a message on a topic without waiting for a response."""
LOG.debug(_("Making asynchronous cast on %s..."), topic) LOG.debug(_('Making asynchronous cast on %s...'), topic)
_pack_context(msg, context) _pack_context(msg, context)
conn = Connection.instance() conn = Connection.instance()
publisher = TopicPublisher(connection=conn, topic=topic) publisher = TopicPublisher(connection=conn, topic=topic)
@@ -397,8 +417,8 @@ def cast(context, topic, msg):
def fanout_cast(context, topic, msg): def fanout_cast(context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response""" """Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_("Making asynchronous fanout cast...")) LOG.debug(_('Making asynchronous fanout cast...'))
_pack_context(msg, context) _pack_context(msg, context)
conn = Connection.instance() conn = Connection.instance()
publisher = FanoutPublisher(topic, connection=conn) publisher = FanoutPublisher(topic, connection=conn)
@@ -407,14 +427,14 @@ def fanout_cast(context, topic, msg):
def generic_response(message_data, message): def generic_response(message_data, message):
"""Logs a result and exits""" """Logs a result and exits."""
LOG.debug(_('response %s'), message_data) LOG.debug(_('response %s'), message_data)
message.ack() message.ack()
sys.exit(0) sys.exit(0)
def send_message(topic, message, wait=True): def send_message(topic, message, wait=True):
"""Sends a message for testing""" """Sends a message for testing."""
msg_id = uuid.uuid4().hex msg_id = uuid.uuid4().hex
message.update({'_msg_id': msg_id}) message.update({'_msg_id': msg_id})
LOG.debug(_('topic is %s'), topic) LOG.debug(_('topic is %s'), topic)
@@ -425,14 +445,14 @@ def send_message(topic, message, wait=True):
queue=msg_id, queue=msg_id,
exchange=msg_id, exchange=msg_id,
auto_delete=True, auto_delete=True,
exchange_type="direct", exchange_type='direct',
routing_key=msg_id) routing_key=msg_id)
consumer.register_callback(generic_response) consumer.register_callback(generic_response)
publisher = messaging.Publisher(connection=Connection.instance(), publisher = messaging.Publisher(connection=Connection.instance(),
exchange=FLAGS.control_exchange, exchange=FLAGS.control_exchange,
durable=False, durable=False,
exchange_type="topic", exchange_type='topic',
routing_key=topic) routing_key=topic)
publisher.send(message) publisher.send(message)
publisher.close() publisher.close()
@@ -441,8 +461,8 @@ def send_message(topic, message, wait=True):
consumer.wait() consumer.wait()
if __name__ == "__main__": if __name__ == '__main__':
# NOTE(vish): you can send messages from the command line using # You can send messages from the command line using
# topic and a json sting representing a dictionary # topic and a json string representing a dictionary
# for the method # for the method
send_message(sys.argv[1], json.loads(sys.argv[2])) send_message(sys.argv[1], json.loads(sys.argv[2]))

View File

@@ -36,6 +36,7 @@ from nova import rpc
from nova import service from nova import service
from nova import test from nova import test
from nova import utils from nova import utils
from nova import exception
from nova.auth import manager from nova.auth import manager
from nova.compute import power_state from nova.compute import power_state
from nova.api.ec2 import cloud from nova.api.ec2 import cloud
@@ -247,6 +248,37 @@ class CloudTestCase(test.TestCase):
self.assertRaises(NotFound, describe_images, self.assertRaises(NotFound, describe_images,
self.context, ['ami-fake']) self.context, ['ami-fake'])
def test_describe_image_attribute(self):
describe_image_attribute = self.cloud.describe_image_attribute
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'is_public': True}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
def test_modify_image_attribute(self):
modify_image_attribute = self.cloud.modify_image_attribute
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'is_public': False}
def fake_update(meh, context, image_id, metadata, data=None):
return metadata
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
self.stubs.Set(local.LocalImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add',
user_group=['all'])
self.assertEqual(True, result['is_public'])
def test_console_output(self): def test_console_output(self):
instance_type = FLAGS.default_instance_type instance_type = FLAGS.default_instance_type
max_count = 1 max_count = 1
@@ -341,6 +373,19 @@ class CloudTestCase(test.TestCase):
LOG.debug(_("Terminating instance %s"), instance_id) LOG.debug(_("Terminating instance %s"), instance_id)
rv = self.compute.terminate_instance(instance_id) rv = self.compute.terminate_instance(instance_id)
def test_terminate_instances(self):
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_id': 1,
'host': 'host1'})
terminate_instances = self.cloud.terminate_instances
# valid instance_id
result = terminate_instances(self.context, ['i-00000001'])
self.assertTrue(result)
# non-existing instance_id
self.assertRaises(exception.InstanceNotFound, terminate_instances,
self.context, ['i-2'])
db.instance_destroy(self.context, inst1['id'])
def test_update_of_instance_display_fields(self): def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {}) inst = db.instance_create(self.context, {})
ec2_id = ec2utils.id_to_ec2_id(inst['id']) ec2_id = ec2utils.id_to_ec2_id(inst['id'])

View File

@@ -84,7 +84,8 @@ class ComputeTestCase(test.TestCase):
inst['launch_time'] = '10' inst['launch_time'] = '10'
inst['user_id'] = self.user.id inst['user_id'] = self.user.id
inst['project_id'] = self.project.id inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny' type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['mac_address'] = utils.generate_mac() inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0 inst['ami_launch_index'] = 0
inst.update(params) inst.update(params)
@@ -132,7 +133,7 @@ class ComputeTestCase(test.TestCase):
cases = [dict(), dict(display_name=None)] cases = [dict(), dict(display_name=None)]
for instance in cases: for instance in cases:
ref = self.compute_api.create(self.context, ref = self.compute_api.create(self.context,
FLAGS.default_instance_type, None, **instance) instance_types.get_default_instance_type(), None, **instance)
try: try:
self.assertNotEqual(ref[0]['display_name'], None) self.assertNotEqual(ref[0]['display_name'], None)
finally: finally:
@@ -143,7 +144,7 @@ class ComputeTestCase(test.TestCase):
group = self._create_group() group = self._create_group()
ref = self.compute_api.create( ref = self.compute_api.create(
self.context, self.context,
instance_type=FLAGS.default_instance_type, instance_type=instance_types.get_default_instance_type(),
image_id=None, image_id=None,
security_group=['testgroup']) security_group=['testgroup'])
try: try:
@@ -161,7 +162,7 @@ class ComputeTestCase(test.TestCase):
ref = self.compute_api.create( ref = self.compute_api.create(
self.context, self.context,
instance_type=FLAGS.default_instance_type, instance_type=instance_types.get_default_instance_type(),
image_id=None, image_id=None,
security_group=['testgroup']) security_group=['testgroup'])
try: try:
@@ -177,7 +178,7 @@ class ComputeTestCase(test.TestCase):
ref = self.compute_api.create( ref = self.compute_api.create(
self.context, self.context,
instance_type=FLAGS.default_instance_type, instance_type=instance_types.get_default_instance_type(),
image_id=None, image_id=None,
security_group=['testgroup']) security_group=['testgroup'])
@@ -359,8 +360,9 @@ class ComputeTestCase(test.TestCase):
instance_id = self._create_instance() instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id) self.compute.run_instance(self.context, instance_id)
inst_type = instance_types.get_instance_type_by_name('m1.xlarge')
db.instance_update(self.context, instance_id, db.instance_update(self.context, instance_id,
{'instance_type': 'm1.xlarge'}) {'instance_type_id': inst_type['id']})
self.assertRaises(exception.ApiError, self.compute_api.resize, self.assertRaises(exception.ApiError, self.compute_api.resize,
context, instance_id, 1) context, instance_id, 1)
@@ -380,8 +382,8 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(context, instance_id) self.compute.terminate_instance(context, instance_id)
def test_get_by_flavor_id(self): def test_get_by_flavor_id(self):
type = instance_types.get_by_flavor_id(1) type = instance_types.get_instance_type_by_flavor_id(1)
self.assertEqual(type, 'm1.tiny') self.assertEqual(type['name'], 'm1.tiny')
def test_resize_same_source_fails(self): def test_resize_same_source_fails(self):
"""Ensure instance fails to migrate when source and destination are """Ensure instance fails to migrate when source and destination are
@@ -664,4 +666,5 @@ class ComputeTestCase(test.TestCase):
instances = db.instance_get_all(context.get_admin_context()) instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After force-killing instances: %s"), instances) LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 0) self.assertEqual(len(instances), 1)
self.assertEqual(power_state.SHUTOFF, instances[0]['state'])

View File

@@ -62,7 +62,7 @@ class ConsoleTestCase(test.TestCase):
inst['launch_time'] = '10' inst['launch_time'] = '10'
inst['user_id'] = self.user.id inst['user_id'] = self.user.id
inst['project_id'] = self.project.id inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny' inst['instance_type_id'] = 1
inst['mac_address'] = utils.generate_mac() inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0 inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id'] return db.instance_create(self.context, inst)['id']

View File

@@ -40,7 +40,11 @@ class InstanceTypeTestCase(test.TestCase):
max_flavorid = session.query(models.InstanceTypes).\ max_flavorid = session.query(models.InstanceTypes).\
order_by("flavorid desc").\ order_by("flavorid desc").\
first() first()
max_id = session.query(models.InstanceTypes).\
order_by("id desc").\
first()
self.flavorid = max_flavorid["flavorid"] + 1 self.flavorid = max_flavorid["flavorid"] + 1
self.id = max_id["id"] + 1
self.name = str(int(time.time())) self.name = str(int(time.time()))
def test_instance_type_create_then_delete(self): def test_instance_type_create_then_delete(self):
@@ -53,7 +57,7 @@ class InstanceTypeTestCase(test.TestCase):
'instance type was not created') 'instance type was not created')
instance_types.destroy(self.name) instance_types.destroy(self.name)
self.assertEqual(1, self.assertEqual(1,
instance_types.get_instance_type(self.name)["deleted"]) instance_types.get_instance_type(self.id)["deleted"])
self.assertEqual(starting_inst_list, instance_types.get_all_types()) self.assertEqual(starting_inst_list, instance_types.get_all_types())
instance_types.purge(self.name) instance_types.purge(self.name)
self.assertEqual(len(starting_inst_list), self.assertEqual(len(starting_inst_list),

View File

@@ -263,7 +263,7 @@ class SimpleDriverTestCase(test.TestCase):
inst['reservation_id'] = 'r-fakeres' inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id inst['user_id'] = self.user.id
inst['project_id'] = self.project.id inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny' inst['instance_type_id'] = '1'
inst['mac_address'] = utils.generate_mac() inst['mac_address'] = utils.generate_mac()
inst['vcpus'] = kwargs.get('vcpus', 1) inst['vcpus'] = kwargs.get('vcpus', 1)
inst['ami_launch_index'] = 0 inst['ami_launch_index'] = 0
@@ -737,7 +737,7 @@ class SimpleDriverTestCase(test.TestCase):
ret = self.scheduler.driver._live_migration_src_check(self.context, ret = self.scheduler.driver._live_migration_src_check(self.context,
i_ref) i_ref)
self.assertTrue(ret == None) self.assertTrue(ret is None)
db.instance_destroy(self.context, instance_id) db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref['id'])
@@ -805,7 +805,7 @@ class SimpleDriverTestCase(test.TestCase):
ret = self.scheduler.driver._live_migration_dest_check(self.context, ret = self.scheduler.driver._live_migration_dest_check(self.context,
i_ref, i_ref,
'somewhere') 'somewhere')
self.assertTrue(ret == None) self.assertTrue(ret is None)
db.instance_destroy(self.context, instance_id) db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref['id'])

View File

@@ -140,7 +140,7 @@ class LibvirtConnTestCase(test.TestCase):
'vcpus': 2, 'vcpus': 2,
'project_id': 'fake', 'project_id': 'fake',
'bridge': 'br101', 'bridge': 'br101',
'instance_type': 'm1.small'} 'instance_type_id': '5'} # m1.small
def lazy_load_library_exists(self): def lazy_load_library_exists(self):
"""check if libvirt is available.""" """check if libvirt is available."""
@@ -479,7 +479,7 @@ class LibvirtConnTestCase(test.TestCase):
fake_timer = FakeTime() fake_timer = FakeTime()
self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise) self.create_fake_libvirt_mock()
instance_ref = db.instance_create(self.context, self.test_instance) instance_ref = db.instance_create(self.context, self.test_instance)
# Start test # Start test
@@ -488,6 +488,7 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_conn.LibvirtConnection(False) conn = libvirt_conn.LibvirtConnection(False)
conn.firewall_driver.setattr('setup_basic_filtering', fake_none) conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
conn.firewall_driver.setattr('prepare_instance_filter', fake_none) conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
conn.firewall_driver.setattr('instance_filter_exists', fake_none)
conn.ensure_filtering_rules_for_instance(instance_ref, conn.ensure_filtering_rules_for_instance(instance_ref,
time=fake_timer) time=fake_timer)
except exception.Error, e: except exception.Error, e:

View File

@@ -106,7 +106,7 @@ class VolumeTestCase(test.TestCase):
inst['launch_time'] = '10' inst['launch_time'] = '10'
inst['user_id'] = 'fake' inst['user_id'] = 'fake'
inst['project_id'] = 'fake' inst['project_id'] = 'fake'
inst['instance_type'] = 'm1.tiny' inst['instance_type_id'] = '2' # m1.tiny
inst['mac_address'] = utils.generate_mac() inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0 inst['ami_launch_index'] = 0
instance_id = db.instance_create(self.context, inst)['id'] instance_id = db.instance_create(self.context, inst)['id']

View File

@@ -80,7 +80,7 @@ class XenAPIVolumeTestCase(test.TestCase):
'image_id': 1, 'image_id': 1,
'kernel_id': 2, 'kernel_id': 2,
'ramdisk_id': 3, 'ramdisk_id': 3,
'instance_type': 'm1.large', 'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff', 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'} 'os_type': 'linux'}
@@ -289,11 +289,11 @@ class XenAPIVMTestCase(test.TestCase):
'enabled':'1'}], 'enabled':'1'}],
'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff', 'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff',
'netmask': '120', 'netmask': '120',
'enabled': '1', 'enabled': '1'}],
'gateway': 'fe80::a00:1'}],
'mac': 'aa:bb:cc:dd:ee:ff', 'mac': 'aa:bb:cc:dd:ee:ff',
'dns': ['10.0.0.2'], 'dns': ['10.0.0.2'],
'gateway': '10.0.0.1'}) 'gateway': '10.0.0.1',
'gateway6': 'fe80::a00:1'})
def check_vm_params_for_windows(self): def check_vm_params_for_windows(self):
self.assertEquals(self.vm['platform']['nx'], 'true') self.assertEquals(self.vm['platform']['nx'], 'true')
@@ -328,7 +328,7 @@ class XenAPIVMTestCase(test.TestCase):
self.assertEquals(self.vm['HVM_boot_policy'], '') self.assertEquals(self.vm['HVM_boot_policy'], '')
def _test_spawn(self, image_id, kernel_id, ramdisk_id, def _test_spawn(self, image_id, kernel_id, ramdisk_id,
instance_type="m1.large", os_type="linux", instance_type_id="3", os_type="linux",
instance_id=1, check_injection=False): instance_id=1, check_injection=False):
stubs.stubout_loopingcall_start(self.stubs) stubs.stubout_loopingcall_start(self.stubs)
values = {'id': instance_id, values = {'id': instance_id,
@@ -337,7 +337,7 @@ class XenAPIVMTestCase(test.TestCase):
'image_id': image_id, 'image_id': image_id,
'kernel_id': kernel_id, 'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id, 'ramdisk_id': ramdisk_id,
'instance_type': instance_type, 'instance_type_id': instance_type_id,
'mac_address': 'aa:bb:cc:dd:ee:ff', 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': os_type} 'os_type': os_type}
instance = db.instance_create(self.context, values) instance = db.instance_create(self.context, values)
@@ -349,7 +349,7 @@ class XenAPIVMTestCase(test.TestCase):
FLAGS.xenapi_image_service = 'glance' FLAGS.xenapi_image_service = 'glance'
self.assertRaises(Exception, self.assertRaises(Exception,
self._test_spawn, self._test_spawn,
1, 2, 3, "m1.xlarge") 1, 2, 3, "4") # m1.xlarge
def test_spawn_raw_objectstore(self): def test_spawn_raw_objectstore(self):
FLAGS.xenapi_image_service = 'objectstore' FLAGS.xenapi_image_service = 'objectstore'
@@ -523,7 +523,7 @@ class XenAPIVMTestCase(test.TestCase):
'image_id': 1, 'image_id': 1,
'kernel_id': 2, 'kernel_id': 2,
'ramdisk_id': 3, 'ramdisk_id': 3,
'instance_type': 'm1.large', 'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff', 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'} 'os_type': 'linux'}
instance = db.instance_create(self.context, values) instance = db.instance_create(self.context, values)
@@ -580,7 +580,7 @@ class XenAPIMigrateInstance(test.TestCase):
'kernel_id': None, 'kernel_id': None,
'ramdisk_id': None, 'ramdisk_id': None,
'local_gb': 5, 'local_gb': 5,
'instance_type': 'm1.large', 'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff', 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'} 'os_type': 'linux'}

3888
po/ast.po

File diff suppressed because it is too large Load Diff

3915
po/cs.po

File diff suppressed because it is too large Load Diff

3892
po/da.po

File diff suppressed because it is too large Load Diff

3978
po/de.po

File diff suppressed because it is too large Load Diff

4732
po/es.po

File diff suppressed because it is too large Load Diff

4077
po/it.po

File diff suppressed because it is too large Load Diff

4662
po/ja.po

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

4055
po/ru.po

File diff suppressed because it is too large Load Diff

3935
po/uk.po

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff