merge trunk and upgrade to cheetah templating

This commit is contained in:
Vishvananda Ishaya 2010-12-22 20:59:53 +00:00
parent 2504643977
commit 81191660cf
65 changed files with 976 additions and 574 deletions

View File

@ -19,6 +19,7 @@
<mordred@inaugust.com> <mordred@hudson>
<paul@openstack.org> <pvoccio@castor.local>
<paul@openstack.org> <paul.voccio@rackspace.com>
<soren.hansen@rackspace.com> <soren@linux2go.dk>
<todd@ansolabs.com> <todd@lapex>
<todd@ansolabs.com> <todd@rubidine.com>
<vishvananda@gmail.com> <vishvananda@yahoo.com>

View File

@ -6,6 +6,7 @@ Chris Behrens <cbehrens@codestud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
Eldar Nugaev <enugaev@griddynamics.com>
Eric Day <eday@oddments.org>
Ewan Mellor <ewan.mellor@citrix.com>
@ -14,6 +15,7 @@ Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
Joe Heck <heckj@mac.com>
Joel Moore <joelbm24@gmail.com>
Jonathan Bryce <jbryce@jbryce.com>
Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>

View File

@ -13,7 +13,7 @@ include nova/cloudpipe/client.ovpn.template
include nova/compute/fakevirtinstance.xml
include nova/compute/interfaces.template
include nova/virt/interfaces.template
include nova/virt/libvirt.*.xml.template
include nova/virt/libvirt*.xml.template
include nova/tests/CA/
include nova/tests/CA/cacert.pem
include nova/tests/CA/private/

View File

@ -194,6 +194,7 @@ class HostInfo(object):
class NovaAdminClient(object):
def __init__(
self,
clc_url=DEFAULT_CLC_URL,

View File

@ -45,7 +45,7 @@ class API(wsgi.Application):
def __call__(self, req):
if req.method == 'POST':
return self.sign_csr(req)
_log.debug("Cloudpipe path is %s" % req.path_info)
_log.debug(_("Cloudpipe path is %s") % req.path_info)
if req.path_info.endswith("/getca/"):
return self.send_root_ca(req)
return webob.exc.HTTPNotFound()
@ -56,7 +56,7 @@ class API(wsgi.Application):
return instance['project_id']
def send_root_ca(self, req):
_log.debug("Getting root ca")
_log.debug(_("Getting root ca"))
project_id = self.get_project_id_from_ip(req.remote_addr)
res = webob.Response()
res.headers["Content-Type"] = "text/plain"

View File

@ -77,7 +77,7 @@ class Authenticate(wsgi.Middleware):
req.host,
req.path)
except exception.Error, ex:
logging.debug("Authentication Failure: %s" % ex)
logging.debug(_("Authentication Failure: %s") % ex)
raise webob.exc.HTTPForbidden()
# Authenticated!
@ -120,9 +120,9 @@ class Router(wsgi.Middleware):
except:
raise webob.exc.HTTPBadRequest()
_log.debug('action: %s' % action)
_log.debug(_('action: %s') % action)
for key, value in args.items():
_log.debug('arg: %s\t\tval: %s' % (key, value))
_log.debug(_('arg: %s\t\tval: %s') % (key, value))
# Success!
req.environ['ec2.controller'] = controller

View File

@ -168,6 +168,7 @@ class AdminController(object):
# FIXME(vish): these host commands don't work yet, perhaps some of the
# required data can be retrieved from service objects?
def describe_hosts(self, _context, **_kwargs):
"""Returns status info for all nodes. Includes:
* Disk Space

View File

@ -92,8 +92,8 @@ class APIRequest(object):
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
except AttributeError:
_error = ('Unsupported API request: controller = %s,'
'action = %s') % (self.controller, self.action)
_error = _('Unsupported API request: controller = %s,'
'action = %s') % (self.controller, self.action)
_log.warning(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.

View File

@ -114,7 +114,7 @@ class CloudController(object):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
# TODO(vish): Do this with M2Crypto instead
utils.runthis("Generating root CA: %s", "sh genrootca.sh")
utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh")
os.chdir(start)
def _get_mpi_data(self, context, project_id):
@ -318,11 +318,11 @@ class CloudController(object):
ip_protocol = str(ip_protocol)
if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
raise InvalidInputException('%s is not a valid ipProtocol' %
raise InvalidInputException(_('%s is not a valid ipProtocol') %
(ip_protocol,))
if ((min(from_port, to_port) < -1) or
(max(from_port, to_port) > 65535)):
raise InvalidInputException('Invalid port range')
raise InvalidInputException(_('Invalid port range'))
values['protocol'] = ip_protocol
values['from_port'] = from_port
@ -360,7 +360,8 @@ class CloudController(object):
criteria = self._revoke_rule_args_to_dict(context, **kwargs)
if criteria == None:
raise exception.ApiError("No rule for the specified parameters.")
raise exception.ApiError(_("No rule for the specified "
"parameters."))
for rule in security_group.rules:
match = True
@ -371,7 +372,7 @@ class CloudController(object):
db.security_group_rule_destroy(context, rule['id'])
self._trigger_refresh_security_group(context, security_group)
return True
raise exception.ApiError("No rule for the specified parameters.")
raise exception.ApiError(_("No rule for the specified parameters."))
# TODO(soren): This has only been tested with Boto as the client.
# Unfortunately, it seems Boto is using an old API
@ -387,8 +388,8 @@ class CloudController(object):
values['parent_group_id'] = security_group.id
if self._security_group_rule_exists(security_group, values):
raise exception.ApiError('This rule already exists in group %s' %
group_name)
raise exception.ApiError(_('This rule already exists in group %s')
% group_name)
security_group_rule = db.security_group_rule_create(context, values)
@ -416,7 +417,7 @@ class CloudController(object):
def create_security_group(self, context, group_name, group_description):
self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError('group %s already exists' % group_name)
raise exception.ApiError(_('group %s already exists') % group_name)
group = {'user_id': context.user.id,
'project_id': context.project_id,
@ -529,13 +530,13 @@ class CloudController(object):
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_ref = db.volume_get_by_ec2_id(context, volume_id)
if not re.match("^/dev/[a-z]d[a-z]+$", device):
raise exception.ApiError("Invalid device specified: %s. "
"Example device: /dev/vdb" % device)
raise exception.ApiError(_("Invalid device specified: %s. "
"Example device: /dev/vdb") % device)
# TODO(vish): abstract status checking?
if volume_ref['status'] != "available":
raise exception.ApiError("Volume status must be available")
raise exception.ApiError(_("Volume status must be available"))
if volume_ref['attach_status'] == "attached":
raise exception.ApiError("Volume is already attached")
raise exception.ApiError(_("Volume is already attached"))
internal_id = ec2_id_to_internal_id(instance_id)
instance_ref = self.compute_api.get_instance(context, internal_id)
host = instance_ref['host']
@ -557,10 +558,10 @@ class CloudController(object):
instance_ref = db.volume_get_instance(context.elevated(),
volume_ref['id'])
if not instance_ref:
raise exception.ApiError("Volume isn't attached to anything!")
raise exception.ApiError(_("Volume isn't attached to anything!"))
# TODO(vish): abstract status checking?
if volume_ref['status'] == "available":
raise exception.ApiError("Volume is already detached")
raise exception.ApiError(_("Volume is already detached"))
try:
host = instance_ref['host']
rpc.cast(context,
@ -689,10 +690,11 @@ class CloudController(object):
def allocate_address(self, context, **kwargs):
# check quota
if quota.allowed_floating_ips(context, 1) < 1:
logging.warn("Quota exceeeded for %s, tried to allocate address",
logging.warn(_("Quota exceeeded for %s, tried to allocate "
"address"),
context.project_id)
raise quota.QuotaError("Address quota exceeded. You cannot "
"allocate any more addresses")
raise quota.QuotaError(_("Address quota exceeded. You cannot "
"allocate any more addresses"))
network_topic = self._get_network_topic(context)
public_ip = rpc.call(context,
network_topic,
@ -751,7 +753,7 @@ class CloudController(object):
kwargs['image_id'],
min_count=int(kwargs.get('min_count', max_count)),
max_count=max_count,
kernel_id=kwargs.get('kernel_id'),
kernel_id=kwargs.get('kernel_id', None),
ramdisk_id=kwargs.get('ramdisk_id'),
display_name=kwargs.get('display_name'),
description=kwargs.get('display_description'),
@ -805,7 +807,7 @@ class CloudController(object):
# TODO: return error if not authorized
volume_ref = db.volume_get_by_ec2_id(context, volume_id)
if volume_ref['status'] != "available":
raise exception.ApiError("Volume status must be available")
raise exception.ApiError(_("Volume status must be available"))
now = datetime.datetime.utcnow()
db.volume_update(context, volume_ref['id'], {'status': 'deleting',
'terminated_at': now})
@ -836,11 +838,12 @@ class CloudController(object):
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
raise exception.ApiError(_('attribute not supported: %s')
% attribute)
try:
image = self.image_service.show(context, image_id)
except IndexError:
raise exception.ApiError('invalid id: %s' % image_id)
raise exception.ApiError(_('invalid id: %s') % image_id)
result = {'image_id': image_id, 'launchPermission': []}
if image['isPublic']:
result['launchPermission'].append({'group': 'all'})
@ -850,13 +853,14 @@ class CloudController(object):
operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
raise exception.ApiError(_('attribute not supported: %s')
% attribute)
if not 'user_group' in kwargs:
raise exception.ApiError('user or group not specified')
raise exception.ApiError(_('user or group not specified'))
if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
raise exception.ApiError('only group "all" is supported')
raise exception.ApiError(_('only group "all" is supported'))
if not operation_type in ['add', 'remove']:
raise exception.ApiError('operation_type must be add or remove')
raise exception.ApiError(_('operation_type must be add or remove'))
return self.image_service.modify(context, image_id, operation_type)
def update_image(self, context, image_id, **kwargs):

View File

@ -65,7 +65,7 @@ class MetadataRequestHandler(object):
cc = cloud.CloudController()
meta_data = cc.get_metadata(req.remote_addr)
if meta_data is None:
logging.error('Failed to get metadata for ip: %s' %
logging.error(_('Failed to get metadata for ip: %s') %
req.remote_addr)
raise webob.exc.HTTPNotFound()
data = self.lookup(req.path_info, meta_data)

View File

@ -66,7 +66,7 @@ class API(wsgi.Middleware):
try:
return req.get_response(self.application)
except Exception as ex:
logging.warn("Caught error: %s" % str(ex))
logging.warn(_("Caught error: %s") % str(ex))
logging.debug(traceback.format_exc())
exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
return faults.Fault(exc)
@ -133,7 +133,7 @@ class RateLimitingMiddleware(wsgi.Middleware):
if delay:
# TODO(gundlach): Get the retry-after format correct.
exc = webob.exc.HTTPRequestEntityTooLarge(
explanation='Too many requests.',
explanation=_('Too many requests.'),
headers={'Retry-After': time.time() + delay})
raise faults.Fault(exc)
return self.application
@ -170,9 +170,16 @@ class APIRouter(wsgi.Router):
def __init__(self):
mapper = routes.Mapper()
server_members = {'action': 'POST'}
if FLAGS.allow_admin_api:
logging.debug("Including admin operations in API.")
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
mapper.resource("server", "servers", controller=servers.Controller(),
collection={'detail': 'GET'},
member={'action': 'POST'})
member=server_members)
mapper.resource("backup_schedule", "backup_schedules",
controller=backup_schedules.Controller(),
@ -186,10 +193,6 @@ class APIRouter(wsgi.Router):
mapper.resource("sharedipgroup", "sharedipgroups",
controller=sharedipgroups.Controller())
if FLAGS.allow_admin_api:
logging.debug("Including admin operations in API.")
# TODO: Place routes for admin operations here.
super(APIRouter, self).__init__(mapper)

View File

@ -24,6 +24,7 @@ import nova.image.service
class Controller(wsgi.Controller):
def __init__(self):
pass

View File

@ -15,6 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import traceback
from webob import exc
from nova import exception
@ -27,6 +30,10 @@ from nova.compute import power_state
import nova.api.openstack
LOG = logging.getLogger('server')
LOG.setLevel(logging.DEBUG)
def _entity_list(entities):
""" Coerces a list of servers into proper dictionary format """
return dict(servers=entities)
@ -166,3 +173,25 @@ class Controller(wsgi.Controller):
except:
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
def pause(self, req, id):
    """Permit admins to pause the server.

    Casts the pause request to the compute service; returns 202 Accepted
    on success and a 422 fault if the pause could not be initiated.
    """
    ctxt = req.environ['nova.context']
    try:
        self.compute_api.pause(ctxt, id)
    except Exception:
        # NOTE(review): narrowed from a bare ``except:`` so SystemExit /
        # KeyboardInterrupt are not swallowed; log message wrapped in
        # _() for consistency with the rest of this commit's i18n work.
        readable = traceback.format_exc()
        logging.error(_("Compute.api::pause %s"), readable)
        return faults.Fault(exc.HTTPUnprocessableEntity())
    return exc.HTTPAccepted()
def unpause(self, req, id):
    """Permit admins to unpause the server.

    Casts the unpause request to the compute service; returns 202 Accepted
    on success and a 422 fault if the unpause could not be initiated.
    """
    ctxt = req.environ['nova.context']
    try:
        self.compute_api.unpause(ctxt, id)
    except Exception:
        # NOTE(review): narrowed from a bare ``except:`` so SystemExit /
        # KeyboardInterrupt are not swallowed; log message wrapped in
        # _() for consistency with the rest of this commit's i18n work.
        readable = traceback.format_exc()
        logging.error(_("Compute.api::unpause %s"), readable)
        return faults.Fault(exc.HTTPUnprocessableEntity())
    return exc.HTTPAccepted()

View File

@ -37,7 +37,6 @@ class DbDriver(object):
def __init__(self):
"""Imports the LDAP module"""
pass
db
def __enter__(self):
return self
@ -83,7 +82,7 @@ class DbDriver(object):
user_ref = db.user_create(context.get_admin_context(), values)
return self._db_user_to_auth_user(user_ref)
except exception.Duplicate, e:
raise exception.Duplicate('User %s already exists' % name)
raise exception.Duplicate(_('User %s already exists') % name)
def _db_user_to_auth_user(self, user_ref):
return {'id': user_ref['id'],
@ -105,8 +104,9 @@ class DbDriver(object):
"""Create a project"""
manager = db.user_get(context.get_admin_context(), manager_uid)
if not manager:
raise exception.NotFound("Project can't be created because "
"manager %s doesn't exist" % manager_uid)
raise exception.NotFound(_("Project can't be created because "
"manager %s doesn't exist")
% manager_uid)
# description is a required attribute
if description is None:
@ -133,8 +133,8 @@ class DbDriver(object):
try:
project = db.project_create(context.get_admin_context(), values)
except exception.Duplicate:
raise exception.Duplicate("Project can't be created because "
"project %s already exists" % name)
raise exception.Duplicate(_("Project can't be created because "
"project %s already exists") % name)
for member in members:
db.project_add_member(context.get_admin_context(),
@ -155,8 +155,8 @@ class DbDriver(object):
if manager_uid:
manager = db.user_get(context.get_admin_context(), manager_uid)
if not manager:
raise exception.NotFound("Project can't be modified because "
"manager %s doesn't exist" %
raise exception.NotFound(_("Project can't be modified because "
"manager %s doesn't exist") %
manager_uid)
values['project_manager'] = manager['id']
if description:
@ -243,8 +243,8 @@ class DbDriver(object):
def _validate_user_and_project(self, user_id, project_id):
user = db.user_get(context.get_admin_context(), user_id)
if not user:
raise exception.NotFound('User "%s" not found' % user_id)
raise exception.NotFound(_('User "%s" not found') % user_id)
project = db.project_get(context.get_admin_context(), project_id)
if not project:
raise exception.NotFound('Project "%s" not found' % project_id)
raise exception.NotFound(_('Project "%s" not found') % project_id)
return user, project

View File

@ -15,7 +15,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake LDAP server for test harness, backs to ReDIS.
"""Fake LDAP server for test harness.
This class does very little error checking, and knows nothing about ldap
class definitions. It implements the minimum emulation of the python ldap
@ -23,34 +23,65 @@ library to work with nova.
"""
import fnmatch
import json
import redis
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('redis_host', '127.0.0.1',
'Host that redis is running on.')
flags.DEFINE_integer('redis_port', 6379,
'Port that redis is running on.')
flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away')
class Redis(object):
class Store(object):
def __init__(self):
if hasattr(self.__class__, '_instance'):
raise Exception('Attempted to instantiate singleton')
raise Exception(_('Attempted to instantiate singleton'))
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
inst = redis.Redis(host=FLAGS.redis_host,
port=FLAGS.redis_port,
db=FLAGS.redis_db)
cls._instance = inst
cls._instance = _StorageDict()
return cls._instance
class _StorageDict(dict):
    """In-memory replacement for the small slice of the redis client API
    that FakeLDAP relies on (keys/delete/flushdb plus the hash commands).

    Hash values are stored as plain nested dicts keyed by field name.
    """

    def keys(self, pat=None):
        """Return all keys, optionally filtered by glob pattern *pat*."""
        all_keys = super(_StorageDict, self).keys()
        if pat is None:
            return all_keys
        return fnmatch.filter(all_keys, pat)

    def delete(self, key):
        """Remove *key* if present; silently ignore a missing key."""
        self.pop(key, None)

    def flushdb(self):
        """Drop every stored key."""
        self.clear()

    def hgetall(self, key):
        """Return the hash stored at *key*, creating an empty one if the
        key does not exist yet."""
        return self.setdefault(key, {})

    def hget(self, key, field):
        """Return *field* from the hash at *key*, creating an empty dict
        entry for a missing field (mirrors the original's behavior)."""
        return self.hgetall(key).setdefault(field, {})

    def hset(self, key, field, val):
        """Set a single *field* of the hash at *key* to *val*."""
        self.hgetall(key)[field] = val

    def hmset(self, key, value_dict):
        """Merge every field/value pair of *value_dict* into the hash at
        *key*."""
        self.hgetall(key).update(value_dict)
SCOPE_BASE = 0
SCOPE_ONELEVEL = 1 # Not implemented
SCOPE_SUBTREE = 2
@ -169,8 +200,6 @@ def _to_json(unencoded):
class FakeLDAP(object):
#TODO(vish): refactor this class to use a wrapper instead of accessing
# redis directly
"""Fake LDAP connection."""
def simple_bind_s(self, dn, password):
@ -183,14 +212,13 @@ class FakeLDAP(object):
def add_s(self, dn, attr):
"""Add an object with the specified attributes at dn."""
key = "%s%s" % (self.__redis_prefix, dn)
key = "%s%s" % (self.__prefix, dn)
value_dict = dict([(k, _to_json(v)) for k, v in attr])
Redis.instance().hmset(key, value_dict)
Store.instance().hmset(key, value_dict)
def delete_s(self, dn):
"""Remove the ldap object at specified dn."""
Redis.instance().delete("%s%s" % (self.__redis_prefix, dn))
Store.instance().delete("%s%s" % (self.__prefix, dn))
def modify_s(self, dn, attrs):
"""Modify the object at dn using the attribute list.
@ -201,18 +229,18 @@ class FakeLDAP(object):
([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)
"""
redis = Redis.instance()
key = "%s%s" % (self.__redis_prefix, dn)
store = Store.instance()
key = "%s%s" % (self.__prefix, dn)
for cmd, k, v in attrs:
values = _from_json(redis.hget(key, k))
values = _from_json(store.hget(key, k))
if cmd == MOD_ADD:
values.append(v)
elif cmd == MOD_REPLACE:
values = [v]
else:
values.remove(v)
values = redis.hset(key, k, _to_json(values))
values = store.hset(key, k, _to_json(values))
def search_s(self, dn, scope, query=None, fields=None):
"""Search for all matching objects under dn using the query.
@ -226,16 +254,17 @@ class FakeLDAP(object):
"""
if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
raise NotImplementedError(str(scope))
redis = Redis.instance()
store = Store.instance()
if scope == SCOPE_BASE:
keys = ["%s%s" % (self.__redis_prefix, dn)]
keys = ["%s%s" % (self.__prefix, dn)]
else:
keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
keys = store.keys("%s*%s" % (self.__prefix, dn))
objects = []
for key in keys:
# get the attributes from redis
attrs = redis.hgetall(key)
# turn the values from redis into lists
# get the attributes from the store
attrs = store.hgetall(key)
# turn the values from the store into lists
# pylint: disable-msg=E1103
attrs = dict([(k, _from_json(v))
for k, v in attrs.iteritems()])
@ -244,13 +273,13 @@ class FakeLDAP(object):
# filter the attributes by fields
attrs = dict([(k, v) for k, v in attrs.iteritems()
if not fields or k in fields])
objects.append((key[len(self.__redis_prefix):], attrs))
objects.append((key[len(self.__prefix):], attrs))
# pylint: enable-msg=E1103
if objects == []:
raise NO_SUCH_OBJECT()
return objects
@property
def __redis_prefix(self): # pylint: disable-msg=R0201
"""Get the prefix to use for all redis keys."""
def __prefix(self): # pylint: disable-msg=R0201
"""Get the prefix to use for all keys."""
return 'ldap:'

View File

@ -159,7 +159,7 @@ class LdapDriver(object):
self.conn.modify_s(self.__uid_to_dn(name), attr)
return self.get_user(name)
else:
raise exception.NotFound("LDAP object for %s doesn't exist"
raise exception.NotFound(_("LDAP object for %s doesn't exist")
% name)
else:
attr = [
@ -182,11 +182,12 @@ class LdapDriver(object):
description=None, member_uids=None):
"""Create a project"""
if self.__project_exists(name):
raise exception.Duplicate("Project can't be created because "
"project %s already exists" % name)
raise exception.Duplicate(_("Project can't be created because "
"project %s already exists") % name)
if not self.__user_exists(manager_uid):
raise exception.NotFound("Project can't be created because "
"manager %s doesn't exist" % manager_uid)
raise exception.NotFound(_("Project can't be created because "
"manager %s doesn't exist")
% manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
# description is a required attribute
if description is None:
@ -195,8 +196,8 @@ class LdapDriver(object):
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.NotFound("Project can't be created "
"because user %s doesn't exist"
raise exception.NotFound(_("Project can't be created "
"because user %s doesn't exist")
% member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
@ -218,9 +219,9 @@ class LdapDriver(object):
attr = []
if manager_uid:
if not self.__user_exists(manager_uid):
raise exception.NotFound("Project can't be modified because "
"manager %s doesn't exist" %
manager_uid)
raise exception.NotFound(_("Project can't be modified because "
"manager %s doesn't exist")
% manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn))
if description:
@ -416,8 +417,9 @@ class LdapDriver(object):
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.NotFound("Group can't be created "
"because user %s doesn't exist" % member_uid)
raise exception.NotFound(_("Group can't be created "
"because user %s doesn't exist")
% member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
@ -432,8 +434,9 @@ class LdapDriver(object):
def __is_in_group(self, uid, group_dn):
"""Check if user is in group"""
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be searched in group "
"becuase the user doesn't exist" % (uid,))
raise exception.NotFound(_("User %s can't be searched in group "
"because the user doesn't exist")
% uid)
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
@ -444,28 +447,30 @@ class LdapDriver(object):
def __add_to_group(self, uid, group_dn):
"""Add user to group"""
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be added to the group "
"becuase the user doesn't exist" % (uid,))
raise exception.NotFound(_("User %s can't be added to the group "
"because the user doesn't exist")
% uid)
if not self.__group_exists(group_dn):
raise exception.NotFound("The group at dn %s doesn't exist" %
(group_dn,))
raise exception.NotFound(_("The group at dn %s doesn't exist")
% group_dn)
if self.__is_in_group(uid, group_dn):
raise exception.Duplicate("User %s is already a member of "
"the group %s" % (uid, group_dn))
raise exception.Duplicate(_("User %s is already a member of "
"the group %s") % (uid, group_dn))
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)
def __remove_from_group(self, uid, group_dn):
"""Remove user from group"""
if not self.__group_exists(group_dn):
raise exception.NotFound("The group at dn %s doesn't exist" %
(group_dn,))
raise exception.NotFound(_("The group at dn %s doesn't exist")
% group_dn)
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be removed from the "
"group because the user doesn't exist" % (uid,))
raise exception.NotFound(_("User %s can't be removed from the "
"group because the user doesn't exist")
% uid)
if not self.__is_in_group(uid, group_dn):
raise exception.NotFound("User %s is not a member of the group" %
(uid,))
raise exception.NotFound(_("User %s is not a member of the group")
% uid)
# NOTE(vish): remove user from group and any sub_groups
sub_dns = self.__find_group_dns_with_member(
group_dn, uid)
@ -479,15 +484,16 @@ class LdapDriver(object):
try:
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
logging.debug("Attempted to remove the last member of a group. "
"Deleting the group at %s instead.", group_dn)
logging.debug(_("Attempted to remove the last member of a group. "
"Deleting the group at %s instead."), group_dn)
self.__delete_group(group_dn)
def __remove_from_all(self, uid):
"""Remove user from all roles and projects"""
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be removed from all "
"because the user doesn't exist" % (uid,))
raise exception.NotFound(_("User %s can't be removed from all "
"because the user doesn't exist")
% uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
@ -500,7 +506,8 @@ class LdapDriver(object):
def __delete_group(self, group_dn):
"""Delete Group"""
if not self.__group_exists(group_dn):
raise exception.NotFound("Group at dn %s doesn't exist" % group_dn)
raise exception.NotFound(_("Group at dn %s doesn't exist")
% group_dn)
self.conn.delete_s(group_dn)
def __delete_roles(self, project_dn):

View File

@ -257,12 +257,12 @@ class AuthManager(object):
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')
logging.info('Looking up user: %r', access_key)
logging.info(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
logging.info('user: %r', user)
if user == None:
raise exception.NotFound('No user found for access key %s' %
access_key)
raise exception.NotFound(_('No user found for access key %s')
% access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
@ -271,12 +271,12 @@ class AuthManager(object):
project = self.get_project(project_id)
if project == None:
raise exception.NotFound('No project called %s could be found' %
project_id)
raise exception.NotFound(_('No project called %s could be found')
% project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
raise exception.NotFound('User %s is not a member of project %s' %
(user.id, project.id))
raise exception.NotFound(_('User %s is not a member of project %s')
% (user.id, project.id))
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
@ -284,7 +284,7 @@ class AuthManager(object):
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
if signature != expected_signature:
raise exception.NotAuthorized('Signature does not match')
raise exception.NotAuthorized(_('Signature does not match'))
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
@ -294,7 +294,7 @@ class AuthManager(object):
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
if signature != expected_signature:
raise exception.NotAuthorized('Signature does not match')
raise exception.NotAuthorized(_('Signature does not match'))
return (user, project)
def get_access_key(self, user, project):
@ -364,7 +364,7 @@ class AuthManager(object):
with self.driver() as drv:
if role == 'projectmanager':
if not project:
raise exception.Error("Must specify project")
raise exception.Error(_("Must specify project"))
return self.is_project_manager(user, project)
global_role = drv.has_role(User.safe_id(user),
@ -398,9 +398,9 @@ class AuthManager(object):
@param project: Project in which to add local role.
"""
if role not in FLAGS.allowed_roles:
raise exception.NotFound("The %s role can not be found" % role)
raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound("The %s role is global only" % role)
raise exception.NotFound(_("The %s role is global only") % role)
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
@ -546,7 +546,8 @@ class AuthManager(object):
Project.safe_id(project))
if not network_ref['vpn_public_port']:
raise exception.NotFound('project network data has not been set')
raise exception.NotFound(_('project network data has not '
'been set'))
return (network_ref['vpn_public_address'],
network_ref['vpn_public_port'])
@ -659,8 +660,7 @@ class AuthManager(object):
port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
logging.warn("No vpn data for project %s" %
pid)
logging.warn(_("No vpn data for project %s"), pid)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
zippy.close()

View File

@ -49,7 +49,7 @@ class CloudPipe(object):
self.manager = manager.AuthManager()
def launch_vpn_instance(self, project_id):
logging.debug("Launching VPN for %s" % (project_id))
logging.debug(_("Launching VPN for %s") % (project_id))
project = self.manager.get_project(project_id)
# Make a payload.zip
tmpfolder = tempfile.mkdtemp()

View File

@ -73,14 +73,19 @@ class ComputeAPI(base.Base):
is_vpn = image_id == FLAGS.vpn_image_id
if not is_vpn:
image = self.image_service.show(context, image_id)
# If kernel_id/ramdisk_id isn't explicitly set in API call
# we take the defaults from the image's metadata
if kernel_id is None:
kernel_id = image.get('kernelId', FLAGS.default_kernel)
kernel_id = image.get('kernelId', None)
if ramdisk_id is None:
ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
ramdisk_id = image.get('ramdiskId', None)
# Make sure we have access to kernel and ramdisk
self.image_service.show(context, kernel_id)
self.image_service.show(context, ramdisk_id)
if kernel_id:
self.image_service.show(context, kernel_id)
if ramdisk_id:
self.image_service.show(context, ramdisk_id)
if security_group is None:
security_group = ['default']
@ -103,8 +108,8 @@ class ComputeAPI(base.Base):
base_options = {
'reservation_id': utils.generate_uid('r'),
'image_id': image_id,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'state_description': 'scheduling',
'user_id': context.user_id,
'project_id': context.project_id,
@ -120,7 +125,7 @@ class ComputeAPI(base.Base):
elevated = context.elevated()
instances = []
logging.debug("Going to run %s instances...", num_instances)
logging.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = dict(mac_address=utils.generate_mac(),
launch_index=num,
@ -157,7 +162,7 @@ class ComputeAPI(base.Base):
{"method": "setup_fixed_ip",
"args": {"address": address}})
logging.debug("Casting to scheduler for %s/%s's instance %s",
logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
context.project_id, context.user_id, instance_id)
rpc.cast(context,
FLAGS.scheduler_topic,
@ -204,12 +209,12 @@ class ComputeAPI(base.Base):
instance = self.db.instance_get_by_internal_id(context,
instance_id)
except exception.NotFound as e:
logging.warning("Instance %d was not found during terminate",
logging.warning(_("Instance %d was not found during terminate"),
instance_id)
raise e
if (instance['state_description'] == 'terminating'):
logging.warning("Instance %d is already being terminated",
logging.warning(_("Instance %d is already being terminated"),
instance_id)
return
@ -223,7 +228,7 @@ class ComputeAPI(base.Base):
address = self.db.instance_get_floating_address(context,
instance['id'])
if address:
logging.debug("Disassociating address %s" % address)
logging.debug(_("Disassociating address %s") % address)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later. Perhaps in the scheduler?
@ -234,7 +239,7 @@ class ComputeAPI(base.Base):
address = self.db.instance_get_fixed_address(context, instance['id'])
if address:
logging.debug("Deallocating address %s" % address)
logging.debug(_("Deallocating address %s") % address)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
@ -275,6 +280,24 @@ class ComputeAPI(base.Base):
{"method": "reboot_instance",
"args": {"instance_id": instance['id']}})
def pause(self, context, instance_id):
    """Cast a pause request to the compute host running the instance."""
    inst_ref = self.db.instance_get_by_internal_id(context, instance_id)
    # Route the message to the specific compute worker owning this VM.
    queue = self.db.queue_get_for(context,
                                  FLAGS.compute_topic,
                                  inst_ref['host'])
    message = {"method": "pause_instance",
               "args": {"instance_id": inst_ref['id']}}
    rpc.cast(context, queue, message)
def unpause(self, context, instance_id):
    """Cast an unpause request to the compute host running the instance."""
    inst_ref = self.db.instance_get_by_internal_id(context, instance_id)
    # Route the message to the specific compute worker owning this VM.
    queue = self.db.queue_get_for(context,
                                  FLAGS.compute_topic,
                                  inst_ref['host'])
    message = {"method": "unpause_instance",
               "args": {"instance_id": inst_ref['id']}}
    rpc.cast(context, queue, message)
def rescue(self, context, instance_id):
"""Rescue the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)

View File

@ -67,12 +67,12 @@ def partition(infile, outfile, local_bytes=0, resize=True,
execute('resize2fs %s' % infile)
file_size = FLAGS.minimum_root_size
elif file_size % sector_size != 0:
logging.warn("Input partition size not evenly divisible by"
" sector size: %d / %d", file_size, sector_size)
logging.warn(_("Input partition size not evenly divisible by"
" sector size: %d / %d"), file_size, sector_size)
primary_sectors = file_size / sector_size
if local_bytes % sector_size != 0:
logging.warn("Bytes for local storage not evenly divisible"
" by sector size: %d / %d", local_bytes, sector_size)
logging.warn(_("Bytes for local storage not evenly divisible"
" by sector size: %d / %d"), local_bytes, sector_size)
local_sectors = local_bytes / sector_size
mbr_last = 62 # a
@ -106,6 +106,13 @@ def partition(infile, outfile, local_bytes=0, resize=True,
% (outfile, local_type, local_first, local_last))
def extend(image, size, execute):
    """Extend an image file to at least ``size`` bytes.

    :param image: path to the image file on disk
    :param size: desired minimum size in bytes
    :param execute: callable used to run shell commands
    :returns: ``None`` when the file is already at least ``size`` bytes,
        otherwise the result of running ``truncate``.
    """
    file_size = os.path.getsize(image)
    if file_size >= size:
        return
    # Bug fix: the original interpolated only the path, producing the
    # literal command 'truncate -s size <image>' -- the requested byte
    # count must be substituted for truncate's --size argument.
    return execute('truncate -s %s %s' % (size, image))
def inject_data(image, key=None, net=None, partition=None, execute=None):
"""Injects a ssh key and optionally net data into a disk image.
@ -115,20 +122,30 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
If partition is not specified it mounts the image as a single partition.
"""
out, err = execute('sudo losetup -f --show %s' % image)
out, err = execute('sudo losetup --find --show %s' % image)
if err:
raise exception.Error('Could not attach image to loopback: %s' % err)
raise exception.Error(_('Could not attach image to loopback: %s')
% err)
device = out.strip()
try:
if not partition is None:
# create partition
out, err = execute('sudo kpartx -a %s' % device)
if err:
raise exception.Error('Failed to load partition: %s' % err)
raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
partition)
else:
mapped_device = device
# We can only loopback mount raw images. If the device isn't there,
# it's normally because it's a .vmdk or a .vdi etc
if not os.path.exists(mapped_device):
raise exception.Error('Mapped device was not found (we can'
' only inject raw disk images): %s' %
mapped_device)
# Configure ext2fs so that it doesn't auto-check every N boots
out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
tmpdir = tempfile.mkdtemp()
@ -137,7 +154,8 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
out, err = execute(
'sudo mount %s %s' % (mapped_device, tmpdir))
if err:
raise exception.Error('Failed to mount filesystem: %s' % err)
raise exception.Error(_('Failed to mount filesystem: %s')
% err)
try:
if key:
@ -156,7 +174,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
execute('sudo kpartx -d %s' % device)
finally:
# remove loopback
execute('sudo losetup -d %s' % device)
execute('sudo losetup --detach %s' % device)
def _inject_key_into_fs(key, fs, execute=None):
@ -165,7 +183,7 @@ def _inject_key_into_fs(key, fs, execute=None):
key is an ssh key string.
fs is the path to the base of the filesystem into which to inject the key.
"""
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
sshdir = os.path.join(fs, 'root', '.ssh')
execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
execute('sudo chown root %s' % sshdir)
execute('sudo chmod 700 %s' % sshdir)

View File

@ -38,7 +38,8 @@ def get_by_type(instance_type):
if instance_type is None:
return FLAGS.default_instance_type
if instance_type not in INSTANCE_TYPES:
raise exception.ApiError("Unknown instance type: %s" % instance_type)
raise exception.ApiError(_("Unknown instance type: %s"),
instance_type)
return instance_type

View File

@ -93,8 +93,8 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error("Instance has already been created")
logging.debug("instance %s: starting...", instance_id)
raise exception.Error(_("Instance has already been created"))
logging.debug(_("instance %s: starting..."), instance_id)
self.network_manager.setup_compute_network(context, instance_id)
self.db.instance_update(context,
instance_id,
@ -113,7 +113,7 @@ class ComputeManager(manager.Manager):
instance_id,
{'launched_at': now})
except Exception: # pylint: disable-msg=W0702
logging.exception("instance %s: Failed to spawn",
logging.exception(_("instance %s: Failed to spawn"),
instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
@ -125,7 +125,7 @@ class ComputeManager(manager.Manager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this machine."""
context = context.elevated()
logging.debug("instance %s: terminating", instance_id)
logging.debug(_("instance %s: terminating"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
volumes = instance_ref.get('volumes', []) or []
@ -133,8 +133,8 @@ class ComputeManager(manager.Manager):
self.detach_volume(context, instance_id, volume['id'])
if instance_ref['state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
raise exception.Error('trying to destroy already destroyed'
' instance: %s' % instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
self.driver.destroy(instance_ref)
# TODO(ja): should we keep it in a terminated state for a bit?
@ -148,13 +148,13 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
if instance_ref['state'] != power_state.RUNNING:
logging.warn('trying to reboot a non-running '
'instance: %s (state: %s excepted: %s)',
logging.warn(_('trying to reboot a non-running '
'instance: %s (state: %s excepted: %s)'),
instance_ref['internal_id'],
instance_ref['state'],
power_state.RUNNING)
logging.debug('instance %s: rebooting', instance_ref['name'])
logging.debug(_('instance %s: rebooting'), instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@ -168,7 +168,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
logging.debug('instance %s: rescuing',
logging.debug(_('instance %s: rescuing'),
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
@ -183,7 +183,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
logging.debug('instance %s: unrescuing',
logging.debug(_('instance %s: unrescuing'),
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
@ -192,11 +192,52 @@ class ComputeManager(manager.Manager):
self.driver.unrescue(instance_ref)
self._update_state(context, instance_id)
@staticmethod
def _update_state_callback(self, context, instance_id, result):
    """Update instance state when async task completes.

    NOTE(review): declared ``@staticmethod`` yet takes the manager as an
    explicit first argument -- this lets it be handed to the virt driver
    as a plain callback (see pause_instance/unpause_instance below, which
    pass ``self`` in manually).  ``result`` is accepted from the driver
    but ignored; only the refreshed power state is recorded.
    """
    self._update_state(context, instance_id)
@exception.wrap_exception
def pause_instance(self, context, instance_id):
    """Pause an instance on this server.

    Marks the instance state as 'pausing' and asks the virt driver to
    pause it; the driver invokes the callback when the asynchronous
    operation completes so the real power state can be recorded.
    """
    context = context.elevated()
    instance_ref = self.db.instance_get(context, instance_id)
    # Consistency fix: log messages in this module are marked for
    # translation with _(); this new method had been left unwrapped.
    logging.debug(_('instance %s: pausing'),
                  instance_ref['internal_id'])
    self.db.instance_set_state(context,
                               instance_id,
                               power_state.NOSTATE,
                               'pausing')
    self.driver.pause(instance_ref,
                      lambda result: self._update_state_callback(self,
                                                                 context,
                                                                 instance_id,
                                                                 result))
@exception.wrap_exception
def unpause_instance(self, context, instance_id):
    """Unpause a paused instance on this server.

    Marks the instance state as 'unpausing' and asks the virt driver to
    resume it; the driver invokes the callback when the asynchronous
    operation completes so the real power state can be recorded.
    """
    context = context.elevated()
    instance_ref = self.db.instance_get(context, instance_id)
    # Consistency fix: log messages in this module are marked for
    # translation with _(); this new method had been left unwrapped.
    logging.debug(_('instance %s: unpausing'),
                  instance_ref['internal_id'])
    self.db.instance_set_state(context,
                               instance_id,
                               power_state.NOSTATE,
                               'unpausing')
    self.driver.unpause(instance_ref,
                        lambda result: self._update_state_callback(self,
                                                                   context,
                                                                   instance_id,
                                                                   result))
@exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
context = context.elevated()
logging.debug("instance %s: getting console output", instance_id)
logging.debug(_("instance %s: getting console output"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_console_output(instance_ref)
@ -205,7 +246,7 @@ class ComputeManager(manager.Manager):
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
context = context.elevated()
logging.debug("instance %s: attaching volume %s to %s", instance_id,
logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
volume_id, mountpoint)
instance_ref = self.db.instance_get(context, instance_id)
dev_path = self.volume_manager.setup_compute_volume(context,
@ -222,7 +263,7 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# ecxception below.
logging.exception("instance %s: attach failed %s, removing",
logging.exception(_("instance %s: attach failed %s, removing"),
instance_id, mountpoint)
self.volume_manager.remove_compute_volume(context,
volume_id)
@ -234,13 +275,13 @@ class ComputeManager(manager.Manager):
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
context = context.elevated()
logging.debug("instance %s: detaching volume %s",
logging.debug(_("instance %s: detaching volume %s"),
instance_id,
volume_id)
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
if instance_ref['name'] not in self.driver.list_instances():
logging.warn("Detaching volume from unknown instance %s",
logging.warn(_("Detaching volume from unknown instance %s"),
instance_ref['name'])
else:
self.driver.detach_volume(instance_ref['name'],

View File

@ -255,7 +255,7 @@ class Instance(object):
Updates the instances statistics and stores the resulting graphs
in the internal object store on the cloud controller.
"""
logging.debug('updating %s...', self.instance_id)
logging.debug(_('updating %s...'), self.instance_id)
try:
data = self.fetch_cpu_stats()
@ -285,7 +285,7 @@ class Instance(object):
graph_disk(self, '1w')
graph_disk(self, '1m')
except Exception:
logging.exception('unexpected error during update')
logging.exception(_('unexpected error during update'))
self.last_updated = utcnow()
@ -351,7 +351,7 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
logging.error('Cannot get blockstats for "%s" on "%s"',
logging.error(_('Cannot get blockstats for "%s" on "%s"'),
disk, self.instance_id)
raise
@ -373,7 +373,7 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
logging.error('Cannot get ifstats for "%s" on "%s"',
logging.error(_('Cannot get ifstats for "%s" on "%s"'),
interface, self.instance_id)
raise
@ -408,7 +408,7 @@ class InstanceMonitor(object, service.Service):
try:
conn = virt_connection.get_connection(read_only=True)
except Exception, exn:
logging.exception('unexpected exception getting connection')
logging.exception(_('unexpected exception getting connection'))
time.sleep(FLAGS.monitoring_instances_delay)
return
@ -423,7 +423,7 @@ class InstanceMonitor(object, service.Service):
if not domain_id in self._instances:
instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
logging.debug('Found instance: %s', domain_id)
logging.debug(_('Found instance: %s'), domain_id)
for key in self._instances.keys():
instance = self._instances[key]

View File

@ -39,13 +39,13 @@ from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA'))
flags.DEFINE_string('keys_path', '$state_path/keys',
'Where we keep our keys')
_('Where we keep our keys'))
flags.DEFINE_string('ca_path', '$state_path/CA',
'Where we keep our root CA')
_('Where we keep our root CA'))
flags.DEFINE_boolean('use_intermediate_ca', False,
'Should we use intermediate CAs for each project?')
_('Should we use intermediate CAs for each project?'))
def ca_path(project_id):
@ -111,9 +111,9 @@ def generate_x509_cert(subject, bits=1024):
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
logging.debug("openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating private key: %s",
utils.runthis(_("Generating private key: %s"),
"openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating CSR: %s",
utils.runthis(_("Generating CSR: %s"),
"openssl req -new -key %s -out %s -batch -subj %s" %
(keyfile, csrfile, subject))
private_key = open(keyfile).read()
@ -131,7 +131,7 @@ def sign_csr(csr_text, intermediate=None):
if not os.path.exists(user_ca):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
utils.runthis("Generating intermediate CA: %s",
utils.runthis(_("Generating intermediate CA: %s"),
"sh geninter.sh %s" % (intermediate))
os.chdir(start)
return _sign_csr(csr_text, user_ca)
@ -142,11 +142,11 @@ def _sign_csr(csr_text, ca_folder):
csrfile = open("%s/inbound.csr" % (tmpfolder), "w")
csrfile.write(csr_text)
csrfile.close()
logging.debug("Flags path: %s" % ca_folder)
logging.debug(_("Flags path: %s") % ca_folder)
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
utils.runthis("Signing cert: %s",
utils.runthis(_("Signing cert: %s"),
"openssl ca -batch -out %s/outbound.crt "
"-config ./openssl.cnf -infiles %s/inbound.csr" %
(tmpfolder, tmpfolder))

View File

@ -334,6 +334,11 @@ def instance_add_security_group(context, instance_id, security_group_id):
security_group_id)
def instance_action_create(context, values):
    """Create an instance action from the values dictionary.

    Thin delegate to the configured database backend (``IMPL``).
    ``values`` presumably carries the columns of the instance_actions
    table (instance_id, action, error) -- TODO confirm against the
    InstanceActions model.
    """
    return IMPL.instance_action_create(context, values)
###################

View File

@ -41,7 +41,7 @@ FLAGS = flags.FLAGS
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
warnings.warn('Use of empty request context is deprecated',
warnings.warn(_('Use of empty request context is deprecated'),
DeprecationWarning)
raise Exception('die')
return context.is_admin
@ -130,7 +130,7 @@ def service_get(context, service_id, session=None):
first()
if not result:
raise exception.NotFound('No service for id %s' % service_id)
raise exception.NotFound(_('No service for id %s') % service_id)
return result
@ -227,7 +227,7 @@ def service_get_by_args(context, host, binary):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.NotFound('No service for %s, %s' % (host, binary))
raise exception.NotFound(_('No service for %s, %s') % (host, binary))
return result
@ -491,7 +491,7 @@ def fixed_ip_get_by_address(context, address, session=None):
options(joinedload('instance')).\
first()
if not result:
raise exception.NotFound('No floating ip for address %s' % address)
raise exception.NotFound(_('No floating ip for address %s') % address)
if is_user_context(context):
authorize_project_context(context, result.instance.project_id)
@ -528,6 +528,8 @@ def fixed_ip_update(context, address, values):
#TODO(gundlach): instance_create and volume_create are nearly identical
#and should be refactored. I expect there are other copy-and-paste
#functions between the two of them as well.
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
@ -591,7 +593,7 @@ def instance_get(context, instance_id, session=None):
filter_by(deleted=False).\
first()
if not result:
raise exception.NotFound('No instance for id %s' % instance_id)
raise exception.NotFound(_('No instance for id %s') % instance_id)
return result
@ -669,7 +671,7 @@ def instance_get_by_internal_id(context, internal_id):
filter_by(deleted=False).\
first()
if not result:
raise exception.NotFound('Instance %s not found' % (internal_id))
raise exception.NotFound(_('Instance %s not found') % (internal_id))
return result
@ -747,6 +749,18 @@ def instance_add_security_group(context, instance_id, security_group_id):
instance_ref.save(session=session)
@require_context
def instance_action_create(context, values):
    """Record an action performed on an instance.

    :param values: dictionary of InstanceActions column values
    :returns: the newly persisted InstanceActions model
    """
    session = get_session()
    action_ref = models.InstanceActions()
    action_ref.update(values)
    with session.begin():
        action_ref.save(session=session)
    return action_ref
###################
@ -790,7 +804,7 @@ def key_pair_get(context, user_id, name, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.NotFound('no keypair for user %s, name %s' %
raise exception.NotFound(_('no keypair for user %s, name %s') %
(user_id, name))
return result
@ -905,7 +919,7 @@ def network_get(context, network_id, session=None):
filter_by(deleted=False).\
first()
if not result:
raise exception.NotFound('No network for id %s' % network_id)
raise exception.NotFound(_('No network for id %s') % network_id)
return result
@ -913,6 +927,8 @@ def network_get(context, network_id, session=None):
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
# pylint: disable-msg=C0103
@require_admin_context
def network_get_associated_fixed_ips(context, network_id):
session = get_session()
@ -933,7 +949,7 @@ def network_get_by_bridge(context, bridge):
first()
if not result:
raise exception.NotFound('No network for bridge %s' % bridge)
raise exception.NotFound(_('No network for bridge %s') % bridge)
return result
@ -947,7 +963,7 @@ def network_get_by_instance(_context, instance_id):
filter_by(deleted=False).\
first()
if not rv:
raise exception.NotFound('No network for instance %s' % instance_id)
raise exception.NotFound(_('No network for instance %s') % instance_id)
return rv
@ -961,7 +977,7 @@ def network_set_host(context, network_id, host_id):
with_lockmode('update').\
first()
if not network_ref:
raise exception.NotFound('No network for id %s' % network_id)
raise exception.NotFound(_('No network for id %s') % network_id)
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
@ -1073,7 +1089,7 @@ def auth_get_token(_context, token_hash):
filter_by(token_hash=token_hash).\
first()
if not tk:
raise exception.NotFound('Token %s does not exist' % token_hash)
raise exception.NotFound(_('Token %s does not exist') % token_hash)
return tk
@ -1097,7 +1113,7 @@ def quota_get(context, project_id, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.NotFound('No quota for project_id %s' % project_id)
raise exception.NotFound(_('No quota for project_id %s') % project_id)
return result
@ -1252,7 +1268,7 @@ def volume_get(context, volume_id, session=None):
filter_by(deleted=False).\
first()
if not result:
raise exception.NotFound('No volume for id %s' % volume_id)
raise exception.NotFound(_('No volume for id %s') % volume_id)
return result
@ -1308,7 +1324,7 @@ def volume_get_by_ec2_id(context, ec2_id):
raise exception.NotAuthorized()
if not result:
raise exception.NotFound('Volume %s not found' % ec2_id)
raise exception.NotFound(_('Volume %s not found') % ec2_id)
return result
@ -1332,7 +1348,7 @@ def volume_get_instance(context, volume_id):
options(joinedload('instance')).\
first()
if not result:
raise exception.NotFound('Volume %s not found' % ec2_id)
raise exception.NotFound(_('Volume %s not found') % ec2_id)
return result.instance
@ -1344,7 +1360,7 @@ def volume_get_shelf_and_blade(context, volume_id):
filter_by(volume_id=volume_id).\
first()
if not result:
raise exception.NotFound('No export device found for volume %s' %
raise exception.NotFound(_('No export device found for volume %s') %
volume_id)
return (result.shelf_id, result.blade_id)
@ -1357,7 +1373,7 @@ def volume_get_iscsi_target_num(context, volume_id):
filter_by(volume_id=volume_id).\
first()
if not result:
raise exception.NotFound('No target id found for volume %s' %
raise exception.NotFound(_('No target id found for volume %s') %
volume_id)
return result.target_num
@ -1402,7 +1418,7 @@ def security_group_get(context, security_group_id, session=None):
options(joinedload_all('rules')).\
first()
if not result:
raise exception.NotFound("No secuity group with id %s" %
raise exception.NotFound(_("No security group with id %s") %
security_group_id)
return result
@ -1419,7 +1435,7 @@ def security_group_get_by_name(context, project_id, group_name):
first()
if not result:
raise exception.NotFound(
'No security group named %s for project: %s' \
_('No security group named %s for project: %s')
% (group_name, project_id))
return result
@ -1507,7 +1523,7 @@ def security_group_rule_get(context, security_group_rule_id, session=None):
filter_by(id=security_group_rule_id).\
first()
if not result:
raise exception.NotFound("No secuity group rule with id %s" %
raise exception.NotFound(_("No secuity group rule with id %s") %
security_group_rule_id)
return result
@ -1543,7 +1559,7 @@ def user_get(context, id, session=None):
first()
if not result:
raise exception.NotFound('No user for id %s' % id)
raise exception.NotFound(_('No user for id %s') % id)
return result
@ -1559,7 +1575,7 @@ def user_get_by_access_key(context, access_key, session=None):
first()
if not result:
raise exception.NotFound('No user for access key %s' % access_key)
raise exception.NotFound(_('No user for access key %s') % access_key)
return result
@ -1621,7 +1637,7 @@ def project_get(context, id, session=None):
first()
if not result:
raise exception.NotFound("No project with id %s" % id)
raise exception.NotFound(_("No project with id %s") % id)
return result

View File

@ -22,7 +22,7 @@ SQLAlchemy models for nova data.
import datetime
from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import Column, Integer, Float, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
@ -226,6 +226,31 @@ class Instance(BASE, NovaBase):
# 'shutdown', 'shutoff', 'crashed'])
class InstanceDiagnostics(BASE, NovaBase):
    """Represents a guest VM's diagnostics"""
    __tablename__ = "instance_diagnostics"
    id = Column(Integer, primary_key=True)
    # Guest VM these samples belong to.
    instance_id = Column(Integer, ForeignKey('instances.id'))
    # Hypervisor-reported counters.  Units are not established here --
    # presumably bytes (memory/disk/net) and a load ratio for cpu_load;
    # TODO(review): confirm against whatever code writes these rows.
    memory_available = Column(Float)
    memory_free = Column(Float)
    cpu_load = Column(Float)
    disk_read = Column(Float)
    disk_write = Column(Float)
    net_tx = Column(Float)
    net_rx = Column(Float)
class InstanceActions(BASE, NovaBase):
    """Represents a guest VM's actions and results"""
    __tablename__ = "instance_actions"
    id = Column(Integer, primary_key=True)
    # Guest VM the action was performed on.
    instance_id = Column(Integer, ForeignKey('instances.id'))
    # Name of the action (e.g. the compute API verb); free-form text
    # describing any error it produced, NULL/empty presumably on success
    # -- TODO(review): confirm with the code that creates these rows.
    action = Column(String(255))
    error = Column(Text)
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
@ -526,10 +551,11 @@ def register_models():
it will never need to be called explicitly elsewhere.
"""
from sqlalchemy import create_engine
models = (Service, Instance, Volume, ExportDevice, IscsiTarget, FixedIp,
FloatingIp, Network, SecurityGroup,
SecurityGroupIngressRule, SecurityGroupInstanceAssociation,
AuthToken, User, Project) # , Image, Host
models = (Service, Instance, InstanceDiagnostics, InstanceActions,
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project) # , Image, Host
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)

View File

@ -27,23 +27,26 @@ import traceback
class ProcessExecutionError(IOError):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
if description is None:
description = "Unexpected error while running command."
description = _("Unexpected error while running command.")
if exit_code is None:
exit_code = '-'
message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % (
description, cmd, exit_code, stdout, stderr)
message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\
% (description, cmd, exit_code, stdout, stderr)
IOError.__init__(self, message)
class Error(Exception):
def __init__(self, message=None):
super(Error, self).__init__(message)
class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
@ -81,7 +84,7 @@ def wrap_exception(f):
except Exception, e:
if not isinstance(e, Error):
#exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception('Uncaught exception')
logging.exception(_('Uncaught exception'))
#logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise

View File

@ -37,12 +37,12 @@ class Exchange(object):
self._routes = {}
def publish(self, message, routing_key=None):
logging.debug('(%s) publish (key: %s) %s',
logging.debug(_('(%s) publish (key: %s) %s'),
self.name, routing_key, message)
routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
logging.debug('Publishing to route %s', f)
logging.debug(_('Publishing to route %s'), f)
f(message, routing_key=routing_key)
def bind(self, callback, routing_key):
@ -82,16 +82,16 @@ class Backend(object):
def queue_declare(self, queue, **kwargs):
if queue not in self._queues:
logging.debug('Declaring queue %s', queue)
logging.debug(_('Declaring queue %s'), queue)
self._queues[queue] = Queue(queue)
def exchange_declare(self, exchange, type, *args, **kwargs):
if exchange not in self._exchanges:
logging.debug('Declaring exchange %s', exchange)
logging.debug(_('Declaring exchange %s'), exchange)
self._exchanges[exchange] = Exchange(exchange, type)
def queue_bind(self, queue, exchange, routing_key, **kwargs):
logging.debug('Binding %s to %s with key %s',
logging.debug(_('Binding %s to %s with key %s'),
queue, exchange, routing_key)
self._exchanges[exchange].bind(self._queues[queue].push,
routing_key)
@ -117,7 +117,7 @@ class Backend(object):
content_type=content_type,
content_encoding=content_encoding)
message.result = True
logging.debug('Getting from %s: %s', queue, message)
logging.debug(_('Getting from %s: %s'), queue, message)
return message
def prepare_message(self, message_data, delivery_mode,

View File

@ -235,12 +235,11 @@ DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
DEFINE_string('default_image', 'ami-11111',
'default image to use, testing only')
DEFINE_string('default_kernel', 'aki-11111',
'default kernel to use, testing only')
DEFINE_string('default_ramdisk', 'ari-11111',
'default ramdisk to use, testing only')
DEFINE_string('default_instance_type', 'm1.small',
'default instance type to use, testing only')
DEFINE_string('null_kernel', 'nokernel',
'kernel image that indicates not to use a kernel,'
' but to use a raw disk image instead')
DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
DEFINE_string('vpn_key_suffix',

View File

@ -77,8 +77,8 @@ class ParallaxClient(object):
data = json.loads(res.read())['images']
return data
else:
logging.warn("Parallax returned HTTP error %d from "
"request for /images", res.status_int)
logging.warn(_("Parallax returned HTTP error %d from "
"request for /images"), res.status_int)
return []
finally:
c.close()
@ -96,8 +96,8 @@ class ParallaxClient(object):
data = json.loads(res.read())['images']
return data
else:
logging.warn("Parallax returned HTTP error %d from "
"request for /images/detail", res.status_int)
logging.warn(_("Parallax returned HTTP error %d from "
"request for /images/detail"), res.status_int)
return []
finally:
c.close()

View File

@ -79,7 +79,8 @@ class S3ImageService(service.BaseImageService):
result = self.index(context)
result = [i for i in result if i['imageId'] == image_id]
if not result:
raise exception.NotFound('Image %s could not be found' % image_id)
raise exception.NotFound(_('Image %s could not be found')
% image_id)
image = result[0]
return image

View File

@ -135,7 +135,7 @@ def ensure_vlan(vlan_num):
"""Create a vlan unless it already exists"""
interface = "vlan%s" % vlan_num
if not _device_exists(interface):
logging.debug("Starting VLAN inteface %s", interface)
logging.debug(_("Starting VLAN inteface %s"), interface)
_execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
_execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
_execute("sudo ifconfig %s up" % interface)
@ -145,7 +145,7 @@ def ensure_vlan(vlan_num):
def ensure_bridge(bridge, interface, net_attrs=None):
"""Create a bridge unless it already exists"""
if not _device_exists(bridge):
logging.debug("Starting Bridge interface for %s", interface)
logging.debug(_("Starting Bridge interface for %s"), interface)
_execute("sudo brctl addbr %s" % bridge)
_execute("sudo brctl setfd %s 0" % bridge)
# _execute("sudo brctl setageing %s 10" % bridge)
@ -202,9 +202,9 @@ def update_dhcp(context, network_id):
_execute('sudo kill -HUP %d' % pid)
return
except Exception as exc: # pylint: disable-msg=W0703
logging.debug("Hupping dnsmasq threw %s", exc)
logging.debug(_("Hupping dnsmasq threw %s"), exc)
else:
logging.debug("Pid %d is stale, relaunching dnsmasq", pid)
logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@ -276,7 +276,7 @@ def _stop_dnsmasq(network):
try:
_execute('sudo kill -TERM %d' % pid)
except Exception as exc: # pylint: disable-msg=W0703
logging.debug("Killing dnsmasq threw %s", exc)
logging.debug(_("Killing dnsmasq threw %s"), exc)
def _dhcp_file(bridge, kind):

View File

@ -115,7 +115,7 @@ class NetworkManager(manager.Manager):
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
logging.debug("setting network host")
logging.debug(_("setting network host"))
host = self.db.network_set_host(context,
network_id,
self.host)
@ -174,10 +174,10 @@ class NetworkManager(manager.Manager):
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
raise exception.Error("IP %s leased that isn't associated" %
raise exception.Error(_("IP %s leased that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
raise exception.Error("IP %s leased to bad mac %s vs %s" %
raise exception.Error(_("IP %s leased to bad mac %s vs %s") %
(address, instance_ref['mac_address'], mac))
now = datetime.datetime.utcnow()
self.db.fixed_ip_update(context,
@ -185,7 +185,8 @@ class NetworkManager(manager.Manager):
{'leased': True,
'updated_at': now})
if not fixed_ip_ref['allocated']:
logging.warn("IP %s leased that was already deallocated", address)
logging.warn(_("IP %s leased that was already deallocated"),
address)
def release_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is released."""
@ -193,13 +194,13 @@ class NetworkManager(manager.Manager):
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
raise exception.Error("IP %s released that isn't associated" %
raise exception.Error(_("IP %s released that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
raise exception.Error("IP %s released from bad mac %s vs %s" %
raise exception.Error(_("IP %s released from bad mac %s vs %s") %
(address, instance_ref['mac_address'], mac))
if not fixed_ip_ref['leased']:
logging.warn("IP %s released that was not leased", address)
logging.warn(_("IP %s released that was not leased"), address)
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
{'leased': False})
@ -361,8 +362,7 @@ class FlatDHCPManager(FlatManager):
"""Sets up matching network for compute hosts."""
network_ref = db.network_get_by_instance(context, instance_id)
self.driver.ensure_bridge(network_ref['bridge'],
FLAGS.flat_interface,
network_ref)
FLAGS.flat_interface)
def setup_fixed_ip(self, context, address):
"""Setup dhcp for this network."""
@ -408,7 +408,7 @@ class VlanManager(NetworkManager):
self.host,
time)
if num:
logging.debug("Dissassociated %s stale fixed ip(s)", num)
logging.debug(_("Dissassociated %s stale fixed ip(s)"), num)
def init_host(self):
"""Do any initialization that needs to be run if this is a

View File

@ -102,7 +102,7 @@ def _render_parts(value, write_cb):
_render_parts(subsubvalue, write_cb)
write_cb('</' + utils.utf8(name) + '>')
else:
raise Exception("Unknown S3 value type %r", value)
raise Exception(_("Unknown S3 value type %r"), value)
def get_argument(request, key, default_value):
@ -134,7 +134,7 @@ def get_context(request):
check_type='s3')
return context.RequestContext(user, project)
except exception.Error as ex:
logging.debug("Authentication Failure: %s", ex)
logging.debug(_("Authentication Failure: %s"), ex)
raise exception.NotAuthorized()
@ -227,7 +227,7 @@ class BucketResource(ErrorHandlingResource):
def render_PUT(self, request):
"Creates the bucket resource"""
logging.debug("Creating bucket %s", self.name)
logging.debug(_("Creating bucket %s"), self.name)
logging.debug("calling bucket.Bucket.create(%r, %r)",
self.name,
request.context)
@ -237,7 +237,7 @@ class BucketResource(ErrorHandlingResource):
def render_DELETE(self, request):
"""Deletes the bucket resource"""
logging.debug("Deleting bucket %s", self.name)
logging.debug(_("Deleting bucket %s"), self.name)
bucket_object = bucket.Bucket(self.name)
if not bucket_object.is_authorized(request.context):
@ -261,7 +261,9 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
logging.debug("Getting object: %s / %s", self.bucket.name, self.name)
logging.debug(_("Getting object: %s / %s"),
self.bucket.name,
self.name)
if not self.bucket.is_authorized(request.context):
raise exception.NotAuthorized()
@ -279,7 +281,9 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
logging.debug("Putting object: %s / %s", self.bucket.name, self.name)
logging.debug(_("Putting object: %s / %s"),
self.bucket.name,
self.name)
if not self.bucket.is_authorized(request.context):
raise exception.NotAuthorized()
@ -298,7 +302,7 @@ class ObjectResource(ErrorHandlingResource):
authorized to delete the object.
"""
logging.debug("Deleting object: %s / %s",
logging.debug(_("Deleting object: %s / %s"),
self.bucket.name,
self.name)
@ -394,17 +398,17 @@ class ImagesResource(resource.Resource):
image_id = get_argument(request, 'image_id', u'')
image_object = image.Image(image_id)
if not image_object.is_authorized(request.context):
logging.debug("not authorized for render_POST in images")
logging.debug(_("not authorized for render_POST in images"))
raise exception.NotAuthorized()
operation = get_argument(request, 'operation', u'')
if operation:
# operation implies publicity toggle
logging.debug("handling publicity toggle")
logging.debug(_("handling publicity toggle"))
image_object.set_public(operation == 'add')
else:
# other attributes imply update
logging.debug("update user fields")
logging.debug(_("update user fields"))
clean_args = {}
for arg in request.args.keys():
clean_args[arg] = request.args[arg][0]

View File

@ -91,15 +91,15 @@ class Consumer(messaging.Consumer):
self.failed_connection = False
break
except: # Catching all because carrot sucks
logging.exception("AMQP server on %s:%d is unreachable." \
" Trying again in %d seconds." % (
logging.exception(_("AMQP server on %s:%d is unreachable."
" Trying again in %d seconds.") % (
FLAGS.rabbit_host,
FLAGS.rabbit_port,
FLAGS.rabbit_retry_interval))
self.failed_connection = True
if self.failed_connection:
logging.exception("Unable to connect to AMQP server" \
" after %d tries. Shutting down." % FLAGS.rabbit_max_retries)
logging.exception(_("Unable to connect to AMQP server"
" after %d tries. Shutting down.") % FLAGS.rabbit_max_retries)
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@ -116,14 +116,14 @@ class Consumer(messaging.Consumer):
self.declare()
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
if self.failed_connection:
logging.error("Reconnected to queue")
logging.error(_("Reconnected to queue"))
self.failed_connection = False
# NOTE(vish): This is catching all errors because we really don't
# exceptions to be logged 10 times a second if some
# persistent failure occurs.
except Exception: # pylint: disable-msg=W0703
if not self.failed_connection:
logging.exception("Failed to fetch message from queue")
logging.exception(_("Failed to fetch message from queue"))
self.failed_connection = True
def attach_to_eventlet(self):
@ -153,7 +153,7 @@ class TopicConsumer(Consumer):
class AdapterConsumer(TopicConsumer):
"""Calls methods on a proxy object based on method and args"""
def __init__(self, connection=None, topic="broadcast", proxy=None):
LOG.debug('Initing the Adapter Consumer for %s' % (topic))
LOG.debug(_('Initing the Adapter Consumer for %s') % (topic))
self.proxy = proxy
super(AdapterConsumer, self).__init__(connection=connection,
topic=topic)
@ -168,7 +168,7 @@ class AdapterConsumer(TopicConsumer):
Example: {'method': 'echo', 'args': {'value': 42}}
"""
LOG.debug('received %s' % (message_data))
LOG.debug(_('received %s') % (message_data))
msg_id = message_data.pop('_msg_id', None)
ctxt = _unpack_context(message_data)
@ -181,8 +181,8 @@ class AdapterConsumer(TopicConsumer):
# messages stay in the queue indefinitely, so for now
# we just log the message and send an error string
# back to the caller
LOG.warn('no method for message: %s' % (message_data))
msg_reply(msg_id, 'No method for message: %s' % message_data)
LOG.warn(_('no method for message: %s') % (message_data))
msg_reply(msg_id, _('No method for message: %s') % message_data)
return
node_func = getattr(self.proxy, str(method))
@ -242,7 +242,7 @@ def msg_reply(msg_id, reply=None, failure=None):
if failure:
message = str(failure[1])
tb = traceback.format_exception(*failure)
logging.error("Returning exception %s to caller", message)
logging.error(_("Returning exception %s to caller"), message)
logging.error(tb)
failure = (failure[0].__name__, str(failure[1]), tb)
conn = Connection.instance()
@ -283,7 +283,7 @@ def _unpack_context(msg):
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
LOG.debug('unpacked context: %s', context_dict)
LOG.debug(_('unpacked context: %s'), context_dict)
return context.RequestContext.from_dict(context_dict)
@ -302,10 +302,10 @@ def _pack_context(msg, context):
def call(context, topic, msg):
"""Sends a message on a topic and wait for a response"""
LOG.debug("Making asynchronous call...")
LOG.debug(_("Making asynchronous call..."))
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug("MSG_ID is %s" % (msg_id))
LOG.debug(_("MSG_ID is %s") % (msg_id))
_pack_context(msg, context)
class WaitMessage(object):
@ -353,7 +353,7 @@ def cast(context, topic, msg):
def generic_response(message_data, message):
"""Logs a result and exits"""
LOG.debug('response %s', message_data)
LOG.debug(_('response %s'), message_data)
message.ack()
sys.exit(0)
@ -362,8 +362,8 @@ def send_message(topic, message, wait=True):
"""Sends a message for testing"""
msg_id = uuid.uuid4().hex
message.update({'_msg_id': msg_id})
LOG.debug('topic is %s', topic)
LOG.debug('message %s', message)
LOG.debug(_('topic is %s'), topic)
LOG.debug(_('message %s'), message)
if wait:
consumer = messaging.Consumer(connection=Connection.instance(),

View File

@ -34,5 +34,5 @@ class ChanceScheduler(driver.Scheduler):
hosts = self.hosts_up(context, topic)
if not hosts:
raise driver.NoValidHost("No hosts found")
raise driver.NoValidHost(_("No hosts found"))
return hosts[int(random.random() * len(hosts))]

View File

@ -58,4 +58,4 @@ class Scheduler(object):
def schedule(self, context, topic, *_args, **_kwargs):
"""Must override at least this method for scheduler to work."""
raise NotImplementedError("Must implement a fallback schedule")
raise NotImplementedError(_("Must implement a fallback schedule"))

View File

@ -65,4 +65,4 @@ class SchedulerManager(manager.Manager):
db.queue_get_for(context, topic, host),
{"method": method,
"args": kwargs})
logging.debug("Casting to %s %s for %s", topic, host, method)
logging.debug(_("Casting to %s %s for %s"), topic, host, method)

View File

@ -47,7 +47,7 @@ class SimpleScheduler(chance.ChanceScheduler):
for result in results:
(service, instance_cores) = result
if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
raise driver.NoValidHost("All hosts have too many cores")
raise driver.NoValidHost(_("All hosts have too many cores"))
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
@ -57,7 +57,7 @@ class SimpleScheduler(chance.ChanceScheduler):
{'host': service['host'],
'scheduled_at': now})
return service['host']
raise driver.NoValidHost("No hosts found")
raise driver.NoValidHost(_("No hosts found"))
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
@ -66,7 +66,8 @@ class SimpleScheduler(chance.ChanceScheduler):
for result in results:
(service, volume_gigabytes) = result
if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
raise driver.NoValidHost("All hosts have too many gigabytes")
raise driver.NoValidHost(_("All hosts have too many "
"gigabytes"))
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
@ -76,7 +77,7 @@ class SimpleScheduler(chance.ChanceScheduler):
{'host': service['host'],
'scheduled_at': now})
return service['host']
raise driver.NoValidHost("No hosts found")
raise driver.NoValidHost(_("No hosts found"))
def schedule_set_network_host(self, context, *_args, **_kwargs):
"""Picks a host that is up and has the fewest networks."""
@ -85,7 +86,7 @@ class SimpleScheduler(chance.ChanceScheduler):
for result in results:
(service, instance_count) = result
if instance_count >= FLAGS.max_networks:
raise driver.NoValidHost("All hosts have too many networks")
raise driver.NoValidHost(_("All hosts have too many networks"))
if self.service_is_up(service):
return service['host']
raise driver.NoValidHost("No hosts found")
raise driver.NoValidHost(_("No hosts found"))

View File

@ -151,7 +151,7 @@ class Service(object):
report_interval = FLAGS.report_interval
if not periodic_interval:
periodic_interval = FLAGS.periodic_interval
logging.warn("Starting %s node", topic)
logging.warn(_("Starting %s node"), topic)
service_obj = cls(host, binary, topic, manager,
report_interval, periodic_interval)
@ -163,7 +163,7 @@ class Service(object):
try:
db.service_destroy(context.get_admin_context(), self.service_id)
except exception.NotFound:
logging.warn("Service killed that has no database entry")
logging.warn(_("Service killed that has no database entry"))
def stop(self):
for x in self.timers:
@ -184,8 +184,8 @@ class Service(object):
try:
service_ref = db.service_get(ctxt, self.service_id)
except exception.NotFound:
logging.debug("The service database object disappeared, "
"Recreating it.")
logging.debug(_("The service database object disappeared, "
"Recreating it."))
self._create_service_ref(ctxt)
service_ref = db.service_get(ctxt, self.service_id)
@ -196,13 +196,13 @@ class Service(object):
# TODO(termie): make this pattern be more elegant.
if getattr(self, "model_disconnected", False):
self.model_disconnected = False
logging.error("Recovered model server connection!")
logging.error(_("Recovered model server connection!"))
# TODO(vish): this should probably only catch connection errors
except Exception: # pylint: disable-msg=W0702
if not getattr(self, "model_disconnected", False):
self.model_disconnected = True
logging.exception("model server went away")
logging.exception(_("model server went away"))
def serve(*services):
@ -221,7 +221,7 @@ def serve(*services):
else:
logging.getLogger().setLevel(logging.WARNING)
logging.debug("Full set of FLAGS:")
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))

View File

@ -56,11 +56,16 @@ def instance_address(context, instance_id):
def stub_instance(id, user_id=1):
return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id,
return Instance(id=int(id) + 123456, state=0, image_id=10, user_id=user_id,
display_name='server%s' % id, internal_id=id)
def fake_compute_api(cls, req, id):
return True
class ServersTest(unittest.TestCase):
def setUp(self):
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
@ -82,9 +87,15 @@ class ServersTest(unittest.TestCase):
instance_address)
self.stubs.Set(nova.db.api, 'instance_get_floating_address',
instance_address)
self.stubs.Set(nova.compute.api.ComputeAPI, 'pause',
fake_compute_api)
self.stubs.Set(nova.compute.api.ComputeAPI, 'unpause',
fake_compute_api)
self.allow_admin = FLAGS.allow_admin_api
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
def test_get_server_by_id(self):
req = webob.Request.blank('/v1.0/servers/1')
@ -211,6 +222,30 @@ class ServersTest(unittest.TestCase):
self.assertEqual(s['imageId'], 10)
i += 1
def test_server_pause(self):
FLAGS.allow_admin_api = True
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
req = webob.Request.blank('/v1.0/servers/1/pause')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status_int, 202)
def test_server_unpause(self):
FLAGS.allow_admin_api = True
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
req = webob.Request.blank('/v1.0/servers/1/unpause')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(nova.api.API('os'))
self.assertEqual(res.status_int, 202)
def test_server_reboot(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},

View File

@ -333,14 +333,10 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase):
AuthManagerTestCase.__init__(self)
test.TestCase.__init__(self, *args, **kwargs)
import nova.auth.fakeldap as fakeldap
FLAGS.redis_db = 8
if FLAGS.flush_db:
logging.info("Flushing redis datastore")
try:
r = fakeldap.Redis.instance()
r.flushdb()
except:
self.skip = True
logging.info("Flushing datastore")
r = fakeldap.Store.instance()
r.flushdb()
class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase):

View File

@ -127,6 +127,14 @@ class ComputeTestCase(test.TestCase):
self.assert_(instance_ref['launched_at'] < terminate)
self.assert_(instance_ref['deleted_at'] > terminate)
def test_pause(self):
"""Ensure instance can be paused"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.pause_instance(self.context, instance_id)
self.compute.unpause_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_reboot(self):
"""Ensure instance can be rebooted"""
instance_id = self._create_instance()

View File

@ -40,19 +40,51 @@ class LibvirtConnTestCase(test.TestCase):
self.network = utils.import_object(FLAGS.network_manager)
FLAGS.instances_path = ''
def test_get_uri_and_template(self):
ip = '10.11.12.13'
test_ip = '10.11.12.13'
test_instance = {'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
'mac_address': '02:12:34:46:56:67',
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
'instance_type': 'm1.small'}
instance = {'internal_id': 1,
'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
'mac_address': '02:12:34:46:56:67',
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
'instance_type': 'm1.small'}
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self.do_test_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self.do_test_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self.do_test_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self.do_test_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self.do_test_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True,
rescue=True)
def do_test_xml_and_uri(self, instance,
expect_ramdisk, expect_kernel,
rescue=False):
user_context = context.RequestContext(project=self.project,
user=self.user)
instance_ref = db.instance_create(user_context, instance)
@ -60,13 +92,14 @@ class LibvirtConnTestCase(test.TestCase):
self.network.set_network_host(context.get_admin_context(),
network_ref['id'])
fixed_ip = {'address': ip,
fixed_ip = {'address': self.test_ip,
'network_id': network_ref['id']}
ctxt = context.get_admin_context()
fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
db.fixed_ip_update(ctxt, ip, {'allocated': True,
'instance_id': instance_ref['id']})
db.fixed_ip_update(ctxt, self.test_ip,
{'allocated': True,
'instance_id': instance_ref['id']})
type_uri_map = {'qemu': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'qemu'),
@ -78,23 +111,73 @@ class LibvirtConnTestCase(test.TestCase):
(lambda t: t.find('./devices/emulator'), None)]),
'uml': ('uml:///system',
[(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./os/type').text, 'uml')])}
(lambda t: t.find('./os/type').text, 'uml')]),
'xen': ('xen:///',
[(lambda t: t.find('.').get('type'), 'xen'),
(lambda t: t.find('./os/type').text, 'linux')]),
}
for hypervisor_type in ['qemu', 'kvm', 'xen']:
check_list = type_uri_map[hypervisor_type][1]
if rescue:
check = (lambda t: t.find('./os/kernel').text.split('/')[1],
'rescue-kernel')
check_list.append(check)
check = (lambda t: t.find('./os/initrd').text.split('/')[1],
'rescue-ramdisk')
check_list.append(check)
else:
if expect_kernel:
check = (lambda t: t.find('./os/kernel').text.split(
'/')[1], 'kernel')
else:
check = (lambda t: t.find('./os/kernel'), None)
check_list.append(check)
if expect_ramdisk:
check = (lambda t: t.find('./os/initrd').text.split(
'/')[1], 'ramdisk')
else:
check = (lambda t: t.find('./os/initrd'), None)
check_list.append(check)
common_checks = [
(lambda t: t.find('.').tag, 'domain'),
(lambda t: t.find('./devices/interface/filterref/parameter').\
get('name'), 'IP'),
(lambda t: t.find('./devices/interface/filterref/parameter').\
get('value'), '10.11.12.13')]
(lambda t: t.find(
'./devices/interface/filterref/parameter').get('name'), 'IP'),
(lambda t: t.find(
'./devices/interface/filterref/parameter').get(
'value'), '10.11.12.13'),
(lambda t: t.findall(
'./devices/interface/filterref/parameter')[1].get(
'name'), 'DHCPSERVER'),
(lambda t: t.findall(
'./devices/interface/filterref/parameter')[1].get(
'value'), '10.0.0.1'),
(lambda t: t.find('./devices/serial/source').get(
'path').split('/')[1], 'console.log'),
(lambda t: t.find('./memory').text, '2097152')]
if rescue:
common_checks += [
(lambda t: t.findall('./devices/disk/source')[0].get(
'file').split('/')[1], 'rescue-disk'),
(lambda t: t.findall('./devices/disk/source')[1].get(
'file').split('/')[1], 'disk')]
else:
common_checks += [(lambda t: t.findall(
'./devices/disk/source')[0].get('file').split('/')[1],
'disk')]
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, _template, _rescue = conn.get_uri_and_templates()
uri = conn.get_uri()
self.assertEquals(uri, expected_uri)
xml = conn.to_xml(instance_ref)
xml = conn.to_xml(instance_ref, rescue)
tree = xml_to_tree(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
@ -106,6 +189,9 @@ class LibvirtConnTestCase(test.TestCase):
expected_result,
'%s failed common check %d' % (xml, i))
# This test is supposed to make sure we don't override a specifically
# set uri
#
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
@ -114,7 +200,7 @@ class LibvirtConnTestCase(test.TestCase):
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, _template, _rescue = conn.get_uri_and_templates()
uri = conn.get_uri()
self.assertEquals(uri, testuri)
def tearDown(self):

View File

@ -43,7 +43,7 @@ else:
FLAGS = flags.FLAGS
flags.DEFINE_string('logdir', None, 'directory to keep log files in '
flags.DEFINE_string('logdir', None, 'directory to keep log files in '
'(will be prepended to $logfile)')
@ -208,7 +208,7 @@ def stop(pidfile):
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
message = _("pidfile %s does not exist. Daemon not running?\n")
sys.stderr.write(message % pidfile)
# Not an error in a restart
return
@ -229,7 +229,7 @@ def stop(pidfile):
def serve(filename):
logging.debug("Serving %s" % filename)
logging.debug(_("Serving %s") % filename)
name = os.path.basename(filename)
OptionsClass = WrapTwistedOptions(TwistdServerOptions)
options = OptionsClass()
@ -281,7 +281,7 @@ def serve(filename):
else:
logging.getLogger().setLevel(logging.WARNING)
logging.debug("Full set of FLAGS:")
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))

View File

@ -50,7 +50,7 @@ def import_class(import_str):
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError):
raise exception.NotFound('Class %s cannot be found' % class_str)
raise exception.NotFound(_('Class %s cannot be found') % class_str)
def import_object(import_str):
@ -64,7 +64,7 @@ def import_object(import_str):
def fetchfile(url, target):
logging.debug("Fetching %s" % url)
logging.debug(_("Fetching %s") % url)
# c = pycurl.Curl()
# fp = open(target, "wb")
# c.setopt(c.URL, url)
@ -76,7 +76,7 @@ def fetchfile(url, target):
def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
logging.debug("Running cmd (subprocess): %s", cmd)
logging.debug(_("Running cmd (subprocess): %s"), cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
@ -89,7 +89,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
logging.debug("Result was %s" % (obj.returncode))
logging.debug(_("Result was %s") % (obj.returncode))
if check_exit_code and obj.returncode != 0:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
@ -127,7 +127,7 @@ def debug(arg):
def runthis(prompt, cmd, check_exit_code=True):
logging.debug("Running %s" % (cmd))
logging.debug(_("Running %s") % (cmd))
rv, err = execute(cmd, check_exit_code=check_exit_code)
@ -160,7 +160,7 @@ def get_my_ip():
csock.close()
return addr
except socket.gaierror as ex:
logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex)
logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex)
return "127.0.0.1"
@ -204,7 +204,7 @@ class LazyPluggable(object):
if not self.__backend:
backend_name = self.__pivot.value
if backend_name not in self.__backends:
raise exception.Error('Invalid backend: %s' % backend_name)
raise exception.Error(_('Invalid backend: %s') % backend_name)
backend = self.__backends[backend_name]
if type(backend) == type(tuple()):

View File

@ -66,6 +66,6 @@ def get_connection(read_only=False):
raise Exception('Unknown connection type "%s"' % t)
if conn is None:
logging.error('Failed to open connection to the hypervisor')
logging.error(_('Failed to open connection to the hypervisor'))
sys.exit(1)
return conn

View File

@ -136,6 +136,18 @@ class FakeConnection(object):
"""
pass
def pause(self, instance, callback):
"""
Pause the specified instance.
"""
pass
def unpause(self, instance, callback):
"""
Unpause the specified instance.
"""
pass
def destroy(self, instance):
"""
Destroy (shutdown and delete) the specified instance.
@ -169,7 +181,8 @@ class FakeConnection(object):
knowledge of the instance
"""
if instance_name not in self.instances:
raise exception.NotFound("Instance %s Not Found" % instance_name)
raise exception.NotFound(_("Instance %s Not Found")
% instance_name)
i = self.instances[instance_name]
return {'state': i._state,
'max_mem': 0,
@ -249,5 +262,6 @@ class FakeConnection(object):
class FakeInstance(object):
def __init__(self):
self._state = power_state.NOSTATE

View File

@ -1,34 +0,0 @@
<domain type='%(type)s'>
<name>%(name)s</name>
<os>
<type>hvm</type>
<kernel>%(basepath)s/kernel</kernel>
<initrd>%(basepath)s/ramdisk</initrd>
<cmdline>root=/dev/vda1 console=ttyS0</cmdline>
</os>
<features>
<acpi/>
</features>
<memory>%(memory_kb)s</memory>
<vcpu>%(vcpus)s</vcpu>
<devices>
<disk type='file'>
<source file='%(basepath)s/disk'/>
<target dev='vda' bus='virtio'/>
</disk>
<interface type='bridge'>
<source bridge='%(bridge_name)s'/>
<mac address='%(mac_address)s'/>
<!-- <model type='virtio'/> CANT RUN virtio network right now -->
<filterref filter="nova-instance-%(name)s">
<parameter name="IP" value="%(ip_address)s" />
<parameter name="DHCPSERVER" value="%(dhcp_server)s" />
%(extra_params)s
</filterref>
</interface>
<serial type="file">
<source path='%(basepath)s/console.log'/>
<target port='1'/>
</serial>
</devices>
</domain>

View File

@ -1,34 +0,0 @@
<domain type='%(type)s'>
<name>%(name)s</name>
<os>
<type>linux</type>
<kernel>%(basepath)s/kernel</kernel>
<initrd>%(basepath)s/ramdisk</initrd>
<root>/dev/xvda1</root>
<cmdline>ro</cmdline>
</os>
<features>
<acpi/>
</features>
<memory>%(memory_kb)s</memory>
<vcpu>%(vcpus)s</vcpu>
<devices>
<disk type='file'>
<source file='%(basepath)s/rescue-disk'/>
<target dev='sda' />
</disk>
<disk type='file'>
<source file='%(basepath)s/disk'/>
<target dev='sdb' />
</disk>
<interface type='bridge'>
<source bridge='%(bridge_name)s'/>
<mac address='%(mac_address)s'/>
</interface>
<console type="file">
<source path='%(basepath)s/console.log'/>
<target port='1'/>
</console>
</devices>
</domain>

View File

@ -1,30 +0,0 @@
<domain type='%(type)s'>
<name>%(name)s</name>
<os>
<type>linux</type>
<kernel>%(basepath)s/kernel</kernel>
<initrd>%(basepath)s/ramdisk</initrd>
<root>/dev/xvda1</root>
<cmdline>ro</cmdline>
</os>
<features>
<acpi/>
</features>
<memory>%(memory_kb)s</memory>
<vcpu>%(vcpus)s</vcpu>
<devices>
<disk type='file'>
<source file='%(basepath)s/disk'/>
<target dev='sda' />
</disk>
<interface type='bridge'>
<source bridge='%(bridge_name)s'/>
<mac address='%(mac_address)s'/>
</interface>
<console type="file">
<source path='%(basepath)s/console.log'/>
<target port='1'/>
</console>
</devices>
</domain>

View File

@ -0,0 +1,79 @@
<domain type='${type}'>
<name>${name}</name>
<memory>${memory_kb}</memory>
<os>
#if $type == 'uml'
#set $disk_prefix = 'ubd'
#set $disk_bus = 'uml'
<type>uml</type>
<kernel>/usr/bin/linux</kernel>
<root>/dev/ubda1</root>
#else
#if $type == 'xen'
#set $disk_prefix = 'sd'
#set $disk_bus = 'scsi'
<type>linux</type>
<root>/dev/xvda1</root>
#else
#set $disk_prefix = 'vd'
#set $disk_bus = 'virtio'
<type>hvm</type>
#end if
#if $getVar('rescue', False)
<kernel>${basepath}/rescue-kernel</kernel>
<initrd>${basepath}/rescue-ramdisk</initrd>
#else
#if $getVar('kernel', None)
<kernel>${kernel}</kernel>
#if $type == 'xen'
<cmdline>ro</cmdline>
#else
<cmdline>root=/dev/vda1 console=ttyS0</cmdline>
#end if
#if $getVar('ramdisk', None)
<initrd>${ramdisk}</initrd>
#end if
#else
<boot dev="hd" />
#end if
#end if
#end if
</os>
<features>
<acpi/>
</features>
<vcpu>${vcpus}</vcpu>
<devices>
#if $getVar('rescue', False)
<disk type='file'>
<source file='${basepath}/rescue-disk'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
<disk type='file'>
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#else
<disk type='file'>
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
#end if
<interface type='bridge'>
<source bridge='${bridge_name}'/>
<mac address='${mac_address}'/>
<!-- <model type='virtio'/> CANT RUN virtio network right now -->
<filterref filter="nova-instance-${name}">
<parameter name="IP" value="${ip_address}" />
<parameter name="DHCPSERVER" value="${dhcp_server}" />
#if $getVar('extra_params', False)
${extra_params}
#end if
</filterref>
</interface>
<serial type="file">
<source path='${basepath}/console.log'/>
<target port='1'/>
</serial>
</devices>
</domain>

View File

@ -27,12 +27,7 @@ Supports KVM, QEMU, UML, and XEN.
:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
(default: kvm).
:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
:libvirt_xml_template: Libvirt XML Template (QEmu/KVM).
:libvirt_xen_xml_template: Libvirt XML Template (Xen).
:libvirt_uml_xml_template: Libvirt XML Template (User Mode Linux).
:libvirt_rescue_xml_template: XML template for rescue mode (KVM & QEMU).
:libvirt_rescue_xen_xml_template: XML templage for rescue mode (XEN).
:libvirt_rescue_uml_xml_template: XML template for rescue mode (UML).
:libvirt_xml_template: Libvirt XML Template.
:rescue_image_id: Rescue ami image (default: ami-rescue).
:rescue_kernel_id: Rescue aki image (default: aki-rescue).
:rescue_ramdisk_id: Rescue ari image (default: ari-rescue).
@ -62,36 +57,20 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import images
from Cheetah.Template import Template
libvirt = None
libxml2 = None
FLAGS = flags.FLAGS
flags.DEFINE_string('libvirt_rescue_xml_template',
utils.abspath('virt/libvirt.rescue.qemu.xml.template'),
'Libvirt RESCUE XML Template for QEmu/KVM')
flags.DEFINE_string('libvirt_rescue_xen_xml_template',
utils.abspath('virt/libvirt.rescue.xen.xml.template'),
'Libvirt RESCUE XML Template for xen')
flags.DEFINE_string('libvirt_rescue_uml_xml_template',
utils.abspath('virt/libvirt.rescue.uml.xml.template'),
'Libvirt RESCUE XML Template for user-mode-linux')
# TODO(vish): These flags should probably go into a shared location
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.qemu.xml.template'),
'Libvirt XML Template for QEmu/KVM')
flags.DEFINE_string('libvirt_xen_xml_template',
utils.abspath('virt/libvirt.xen.xml.template'),
'Libvirt XML Template for Xen')
flags.DEFINE_string('libvirt_uml_xml_template',
utils.abspath('virt/libvirt.uml.xml.template'),
'Libvirt XML Template for user-mode-linux')
flags.DEFINE_string('injected_network_template',
utils.abspath('virt/interfaces.template'),
'Template file for injected network')
utils.abspath('virt/libvirt.xml.template'),
'Libvirt XML Template')
flags.DEFINE_string('libvirt_type',
'kvm',
'Libvirt domain type (valid options are: '
@ -123,13 +102,11 @@ def _get_net_and_mask(cidr):
class LibvirtConnection(object):
def __init__(self, read_only):
(self.libvirt_uri,
template_file,
rescue_file) = self.get_uri_and_templates()
self.libvirt_xml = open(template_file).read()
self.rescue_xml = open(rescue_file).read()
def __init__(self, read_only):
self.libvirt_uri = self.get_uri()
self.libvirt_xml = open(FLAGS.libvirt_xml_template).read()
self._wrapped_conn = None
self.read_only = read_only
@ -139,7 +116,7 @@ class LibvirtConnection(object):
@property
def _conn(self):
if not self._wrapped_conn or not self._test_connection():
logging.debug('Connecting to libvirt: %s' % self.libvirt_uri)
logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri)
self._wrapped_conn = self._connect(self.libvirt_uri,
self.read_only)
return self._wrapped_conn
@ -151,24 +128,18 @@ class LibvirtConnection(object):
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \
e.get_error_domain() == libvirt.VIR_FROM_REMOTE:
logging.debug('Connection to libvirt broke')
logging.debug(_('Connection to libvirt broke'))
return False
raise
def get_uri_and_templates(self):
def get_uri(self):
if FLAGS.libvirt_type == 'uml':
uri = FLAGS.libvirt_uri or 'uml:///system'
template_file = FLAGS.libvirt_uml_xml_template
rescue_file = FLAGS.libvirt_rescue_uml_xml_template
elif FLAGS.libvirt_type == 'xen':
uri = FLAGS.libvirt_uri or 'xen:///'
template_file = FLAGS.libvirt_xen_xml_template
rescue_file = FLAGS.libvirt_rescue_xen_xml_template
else:
uri = FLAGS.libvirt_uri or 'qemu:///system'
template_file = FLAGS.libvirt_xml_template
rescue_file = FLAGS.libvirt_rescue_xml_template
return uri, template_file, rescue_file
return uri
def _connect(self, uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
@ -228,7 +199,7 @@ class LibvirtConnection(object):
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
logging.info('instance %s: deleting instance files %s',
logging.info(_('instance %s: deleting instance files %s'),
instance['name'], target)
if os.path.exists(target):
shutil.rmtree(target)
@ -270,7 +241,7 @@ class LibvirtConnection(object):
mount_device = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
if not xml:
raise exception.NotFound("No disk at %s" % mount_device)
raise exception.NotFound(_("No disk at %s") % mount_device)
virt_dom.detachDevice(xml)
@exception.wrap_exception
@ -286,10 +257,10 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
logging.debug('instance %s: rebooted', instance['name'])
logging.debug(_('instance %s: rebooted'), instance['name'])
timer.stop()
except Exception, exn:
logging.error('_wait_for_reboot failed: %s', exn)
logging.error(_('_wait_for_reboot failed: %s'), exn)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@ -298,6 +269,14 @@ class LibvirtConnection(object):
timer.f = _wait_for_reboot
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
def pause(self, instance, callback):
raise exception.APIError("pause not supported for libvirt.")
@exception.wrap_exception
def unpause(self, instance, callback):
raise exception.APIError("unpause not supported for libvirt.")
@exception.wrap_exception
def rescue(self, instance):
self.destroy(instance, False)
@ -316,10 +295,10 @@ class LibvirtConnection(object):
state = self.get_info(instance['name'])['state']
db.instance_set_state(None, instance['id'], state)
if state == power_state.RUNNING:
logging.debug('instance %s: rescued', instance['name'])
logging.debug(_('instance %s: rescued'), instance['name'])
timer.stop()
except Exception, exn:
logging.error('_wait_for_rescue failed: %s', exn)
logging.error(_('_wait_for_rescue failed: %s'), exn)
db.instance_set_state(None,
instance['id'],
power_state.SHUTDOWN)
@ -344,7 +323,7 @@ class LibvirtConnection(object):
NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
self._create_image(instance, xml)
self._conn.createXML(xml, 0)
logging.debug("instance %s: is running", instance['name'])
logging.debug(_("instance %s: is running"), instance['name'])
timer = utils.LoopingCall(f=None)
@ -354,10 +333,10 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
logging.debug('instance %s: booted', instance['name'])
logging.debug(_('instance %s: booted'), instance['name'])
timer.stop()
except:
logging.exception('instance %s: failed to boot',
logging.exception(_('instance %s: failed to boot'),
instance['name'])
db.instance_set_state(context.get_admin_context(),
instance['id'],
@ -372,7 +351,7 @@ class LibvirtConnection(object):
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
logging.info('cool, it\'s a device')
logging.info(_('cool, it\'s a device'))
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return out
@ -380,7 +359,7 @@ class LibvirtConnection(object):
return ''
def _append_to_file(self, data, fpath):
logging.info('data: %r, fpath: %r' % (data, fpath))
logging.info(_('data: %r, fpath: %r') % (data, fpath))
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@ -422,7 +401,7 @@ class LibvirtConnection(object):
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
logging.info('instance %s: Creating image', inst['name'])
logging.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
@ -441,18 +420,28 @@ class LibvirtConnection(object):
if not os.path.exists(basepath('disk')):
images.fetch(inst.image_id, basepath('disk-raw'), user,
project)
if not os.path.exists(basepath('kernel')):
images.fetch(inst.kernel_id, basepath('kernel'), user,
project)
if not os.path.exists(basepath('ramdisk')):
images.fetch(inst.ramdisk_id, basepath('ramdisk'), user,
project)
if inst['kernel_id']:
if not os.path.exists(basepath('kernel')):
images.fetch(inst['kernel_id'], basepath('kernel'),
user, project)
if inst['ramdisk_id']:
if not os.path.exists(basepath('ramdisk')):
images.fetch(inst['ramdisk_id'], basepath('ramdisk'),
user, project)
def execute(cmd, process_input=None, check_exit_code=True):
return utils.execute(cmd=cmd,
process_input=process_input,
check_exit_code=check_exit_code)
# For now, we assume that if we're not using a kernel, we're using a
# partitioned disk image where the target partition is the first
# partition
target_partition = None
if not inst['kernel_id']:
target_partition = "1"
key = str(inst['key_data'])
net = None
network_ref = db.network_get_by_instance(context.get_admin_context(),
@ -468,16 +457,24 @@ class LibvirtConnection(object):
'dns': network_ref['dns']}
if key or net:
if key:
logging.info('instance %s: injecting key into image %s',
logging.info(_('instance %s: injecting key into image %s'),
inst['name'], inst.image_id)
if net:
logging.info('instance %s: injecting net into image %s',
inst['name'], inst.image_id)
disk.inject_data(basepath('disk-raw'), key, net,
execute=execute)
logging.info(_('instance %s: injecting net into image %s'),
inst['name'], inst.image_id)
try:
disk.inject_data(basepath('disk-raw'), key, net,
partition=target_partition,
execute=execute)
except Exception as e:
# This could be a windows image, or a vmdk format disk
logging.warn(_('instance %s: ignoring error injecting data'
' into image %s (%s)'),
inst['name'], inst.image_id, e)
if os.path.exists(basepath('disk')):
utils.execute('rm -f %s' % basepath('disk'))
if inst['kernel_id']:
if os.path.exists(basepath('disk')):
utils.execute('rm -f %s' % basepath('disk'))
local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type]
['local_gb']
@ -486,15 +483,21 @@ class LibvirtConnection(object):
resize = True
if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
resize = False
disk.partition(basepath('disk-raw'), basepath('disk'),
local_bytes, resize, execute=execute)
if inst['kernel_id']:
disk.partition(basepath('disk-raw'), basepath('disk'),
local_bytes, resize, execute=execute)
else:
os.rename(basepath('disk-raw'), basepath('disk'))
disk.extend(basepath('disk'), local_bytes, execute=execute)
if FLAGS.libvirt_type == 'uml':
utils.execute('sudo chown root %s' % basepath('disk'))
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
logging.debug('instance %s: starting toXML method', instance['name'])
logging.debug(_('instance %s: starting toXML method'),
instance['name'])
network = db.project_get_network(context.get_admin_context(),
instance['project_id'])
# FIXME(vish): stick this in db
@ -523,20 +526,29 @@ class LibvirtConnection(object):
'mac_address': instance['mac_address'],
'ip_address': ip_address,
'dhcp_server': dhcp_server,
'extra_params': extra_params}
if rescue:
libvirt_xml = self.rescue_xml % xml_info
else:
libvirt_xml = self.libvirt_xml % xml_info
logging.debug('instance %s: finished toXML method', instance['name'])
'extra_params': extra_params,
'rescue': rescue}
if not rescue:
if instance['kernel_id']:
xml_info['kernel'] = xml_info['basepath'] + "/kernel"
return libvirt_xml
if instance['ramdisk_id']:
xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
xml_info['disk'] = xml_info['basepath'] + "/disk"
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
logging.debug(_('instance %s: finished toXML method'),
instance['name'])
return xml
def get_info(self, instance_name):
try:
virt_dom = self._conn.lookupByName(instance_name)
except:
raise exception.NotFound("Instance %s not found" % instance_name)
raise exception.NotFound(_("Instance %s not found")
% instance_name)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': state,
'max_mem': max_mem,

View File

@ -25,6 +25,7 @@ class NetworkHelper():
"""
The class that wraps the helper methods together.
"""
def __init__(self):
return

View File

@ -47,6 +47,7 @@ class VMHelper():
"""
The class that wraps the helper methods together.
"""
def __init__(self):
return
@ -228,11 +229,7 @@ class VMHelper():
try:
host = session.get_xenapi_host()
host_ip = session.get_xenapi().host.get_record(host)["address"]
metrics = session.get_xenapi().VM_guest_metrics.get_record(
record["guest_metrics"])
diags = {
"Kernel": metrics["os_version"]["uname"],
"Distro": metrics["os_version"]["name"]}
diags = {}
xml = get_rrd(host_ip, record["uuid"])
if xml:
rrd = minidom.parseString(xml)

View File

@ -34,6 +34,7 @@ class VMOps(object):
"""
Management class for VM-related tasks
"""
def __init__(self, session):
global XenAPI
if XenAPI is None:
@ -43,12 +44,16 @@ class VMOps(object):
VMHelper.late_import()
def list_instances(self):
""" List VM instances """
return [self._session.get_xenapi().VM.get_name_label(vm) \
for vm in self._session.get_xenapi().VM.get_all()]
"""List VM instances"""
vms = []
for vm in self._session.get_xenapi().VM.get_all():
rec = self._session.get_xenapi().VM.get_record(vm)
if not rec["is_a_template"] and not rec["is_control_domain"]:
vms.append(rec["name_label"])
return vms
def spawn(self, instance):
""" Create VM instance """
"""Create VM instance"""
vm = VMHelper.lookup(self._session, instance.name)
if vm is not None:
raise Exception('Attempted to create non-unique name %s' %
@ -80,16 +85,16 @@ class VMOps(object):
vm_ref)
def reboot(self, instance):
""" Reboot VM instance """
"""Reboot VM instance"""
instance_name = instance.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is None:
raise Exception('instance not present %s' % instance_name)
task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
self._session.wait_for_task(task)
self._session.wait_for_task(instance.id, task)
def destroy(self, instance):
""" Destroy VM instance """
"""Destroy VM instance"""
vm = VMHelper.lookup(self._session, instance.name)
if vm is None:
# Don't complain, just return. This lets us clean up instances
@ -100,7 +105,7 @@ class VMOps(object):
try:
task = self._session.call_xenapi('Async.VM.hard_shutdown',
vm)
self._session.wait_for_task(task)
self._session.wait_for_task(instance.id, task)
except XenAPI.Failure, exc:
logging.warn(exc)
# Disk clean-up
@ -108,17 +113,43 @@ class VMOps(object):
for vdi in vdis:
try:
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(task)
self._session.wait_for_task(instance.id, task)
except XenAPI.Failure, exc:
logging.warn(exc)
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(task)
self._session.wait_for_task(instance.id, task)
except XenAPI.Failure, exc:
logging.warn(exc)
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
ret = self._session.wait_for_task(instance_id, task)
except XenAPI.Failure, exc:
logging.warn(exc)
callback(ret)
def pause(self, instance, callback):
"""Pause VM instance"""
instance_name = instance.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is None:
raise Exception('instance not present %s' % instance_name)
task = self._session.call_xenapi('Async.VM.pause', vm)
self._wait_with_callback(instance.id, task, callback)
def unpause(self, instance, callback):
"""Unpause VM instance"""
instance_name = instance.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is None:
raise Exception('instance not present %s' % instance_name)
task = self._session.call_xenapi('Async.VM.unpause', vm)
self._wait_with_callback(instance.id, task, callback)
def get_info(self, instance_id):
""" Return data about VM instance """
"""Return data about VM instance"""
vm = VMHelper.lookup_blocking(self._session, instance_id)
if vm is None:
raise Exception('instance not present %s' % instance_id)
@ -134,6 +165,6 @@ class VMOps(object):
return VMHelper.compile_diagnostics(self._session, rec)
def get_console_output(self, instance):
""" Return snapshot of console """
"""Return snapshot of console"""
# TODO: implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'

View File

@ -20,6 +20,7 @@ Management class for Storage-related functions (attach, detach, etc).
class VolumeOps(object):
def __init__(self, session):
self._session = session

View File

@ -54,6 +54,8 @@ import xmlrpclib
from eventlet import event
from eventlet import tpool
from nova import context
from nova import db
from nova import utils
from nova import flags
from nova.virt.xenapi.vmops import VMOps
@ -93,38 +95,47 @@ def get_connection(_):
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
if not url or password is None:
raise Exception('Must specify xenapi_connection_url, '
'xenapi_connection_username (optionally), and '
'xenapi_connection_password to use '
'connection_type=xenapi')
raise Exception(_('Must specify xenapi_connection_url, '
'xenapi_connection_username (optionally), and '
'xenapi_connection_password to use '
'connection_type=xenapi'))
return XenAPIConnection(url, username, password)
class XenAPIConnection(object):
""" A connection to XenServer or Xen Cloud Platform """
"""A connection to XenServer or Xen Cloud Platform"""
def __init__(self, url, user, pw):
session = XenAPISession(url, user, pw)
self._vmops = VMOps(session)
self._volumeops = VolumeOps(session)
def list_instances(self):
""" List VM instances """
"""List VM instances"""
return self._vmops.list_instances()
def spawn(self, instance):
""" Create VM instance """
"""Create VM instance"""
self._vmops.spawn(instance)
def reboot(self, instance):
""" Reboot VM instance """
"""Reboot VM instance"""
self._vmops.reboot(instance)
def destroy(self, instance):
""" Destroy VM instance """
"""Destroy VM instance"""
self._vmops.destroy(instance)
def pause(self, instance, callback):
"""Pause VM instance"""
self._vmops.pause(instance, callback)
def unpause(self, instance, callback):
"""Unpause paused VM instance"""
self._vmops.unpause(instance, callback)
def get_info(self, instance_id):
""" Return data about VM instance """
"""Return data about VM instance"""
return self._vmops.get_info(instance_id)
def get_diagnostics(self, instance_id):
@ -132,32 +143,33 @@ class XenAPIConnection(object):
return self._vmops.get_diagnostics(instance_id)
def get_console_output(self, instance):
""" Return snapshot of console """
"""Return snapshot of console"""
return self._vmops.get_console_output(instance)
def attach_volume(self, instance_name, device_path, mountpoint):
""" Attach volume storage to VM instance """
"""Attach volume storage to VM instance"""
return self._volumeops.attach_volume(instance_name,
device_path,
mountpoint)
def detach_volume(self, instance_name, mountpoint):
""" Detach volume storage to VM instance """
"""Detach volume storage to VM instance"""
return self._volumeops.detach_volume(instance_name, mountpoint)
class XenAPISession(object):
""" The session to invoke XenAPI SDK calls """
"""The session to invoke XenAPI SDK calls"""
def __init__(self, url, user, pw):
self._session = XenAPI.Session(url)
self._session.login_with_password(user, pw)
def get_xenapi(self):
""" Return the xenapi object """
"""Return the xenapi object"""
return self._session.xenapi
def get_xenapi_host(self):
""" Return the xenapi host """
"""Return the xenapi host"""
return self._session.xenapi.session.get_this_host(self._session.handle)
def call_xenapi(self, method, *args):
@ -173,46 +185,57 @@ class XenAPISession(object):
self._session.xenapi.Async.host.call_plugin,
self.get_xenapi_host(), plugin, fn, args)
def wait_for_task(self, task):
def wait_for_task(self, instance_id, task):
"""Return a Deferred that will give the result of the given task.
The task is polled until it completes."""
done = event.Event()
loop = utils.LoopingCall(self._poll_task, task, done)
loop = utils.LoopingCall(self._poll_task, instance_id, task, done)
loop.start(FLAGS.xenapi_task_poll_interval, now=True)
rv = done.wait()
loop.stop()
return rv
def _poll_task(self, task, done):
def _poll_task(self, instance_id, task, done):
"""Poll the given XenAPI task, and fire the given Deferred if we
get a result."""
try:
#logging.debug('Polling task %s...', task)
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
if status == 'pending':
action = dict(
instance_id=int(instance_id),
action=name,
error=None)
if status == "pending":
return
elif status == 'success':
elif status == "success":
result = self._session.xenapi.task.get_result(task)
logging.info('Task %s status: success. %s', task, result)
logging.info(_("Task [%s] %s status: success %s") % (
name,
task,
result))
done.send(_parse_xmlrpc_value(result))
else:
error_info = self._session.xenapi.task.get_error_info(task)
logging.warn('Task %s status: %s. %s', task, status,
error_info)
action["error"] = str(error_info)
logging.warn(_("Task [%s] %s status: %s %s") % (
name,
task,
status,
error_info))
done.send_exception(XenAPI.Failure(error_info))
#logging.debug('Polling task %s done.', task)
db.instance_action_create(context.get_admin_context(), action)
except XenAPI.Failure, exc:
logging.warn(exc)
done.send_exception(*sys.exc_info())
def _unwrap_plugin_exceptions(func, *args, **kwargs):
""" Parse exception details """
"""Parse exception details"""
try:
return func(*args, **kwargs)
except XenAPI.Failure, exc:
logging.debug("Got exception: %s", exc)
logging.debug(_("Got exception: %s"), exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
@ -225,7 +248,7 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs):
else:
raise
except xmlrpclib.ProtocolError, exc:
logging.debug("Got exception: %s", exc)
logging.debug(_("Got exception: %s"), exc)
raise

View File

@ -73,14 +73,14 @@ class VolumeDriver(object):
tries = tries + 1
if tries >= FLAGS.num_shell_tries:
raise
logging.exception("Recovering from a failed execute."
"Try number %s", tries)
logging.exception(_("Recovering from a failed execute."
"Try number %s"), tries)
time.sleep(tries ** 2)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
if not os.path.isdir("/dev/%s" % FLAGS.volume_group):
raise exception.Error("volume group %s doesn't exist"
raise exception.Error(_("volume group %s doesn't exist")
% FLAGS.volume_group)
def create_volume(self, volume):
@ -205,7 +205,7 @@ class FakeAOEDriver(AOEDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
logging.debug("FAKE AOE: %s", cmd)
logging.debug(_("FAKE AOE: %s"), cmd)
return (None, None)
@ -310,5 +310,5 @@ class FakeISCSIDriver(ISCSIDriver):
@staticmethod
def fake_execute(cmd, *_args, **_kwargs):
"""Execute that simply logs the command."""
logging.debug("FAKE ISCSI: %s", cmd)
logging.debug(_("FAKE ISCSI: %s"), cmd)
return (None, None)

View File

@ -81,7 +81,7 @@ class VolumeManager(manager.Manager):
self.driver.check_for_setup_error()
ctxt = context.get_admin_context()
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
logging.debug("Re-exporting %s volumes", len(volumes))
logging.debug(_("Re-exporting %s volumes"), len(volumes))
for volume in volumes:
self.driver.ensure_export(ctxt, volume)
@ -89,7 +89,7 @@ class VolumeManager(manager.Manager):
"""Creates and exports the volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
logging.info("volume %s: creating", volume_ref['name'])
logging.info(_("volume %s: creating"), volume_ref['name'])
self.db.volume_update(context,
volume_id,
@ -98,18 +98,18 @@ class VolumeManager(manager.Manager):
# before passing it to the driver.
volume_ref['host'] = self.host
logging.debug("volume %s: creating lv of size %sG",
logging.debug(_("volume %s: creating lv of size %sG"),
volume_ref['name'], volume_ref['size'])
self.driver.create_volume(volume_ref)
logging.debug("volume %s: creating export", volume_ref['name'])
logging.debug(_("volume %s: creating export"), volume_ref['name'])
self.driver.create_export(context, volume_ref)
now = datetime.datetime.utcnow()
self.db.volume_update(context,
volume_ref['id'], {'status': 'available',
'launched_at': now})
logging.debug("volume %s: created successfully", volume_ref['name'])
logging.debug(_("volume %s: created successfully"), volume_ref['name'])
return volume_id
def delete_volume(self, context, volume_id):
@ -117,15 +117,15 @@ class VolumeManager(manager.Manager):
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
if volume_ref['attach_status'] == "attached":
raise exception.Error("Volume is still attached")
raise exception.Error(_("Volume is still attached"))
if volume_ref['host'] != self.host:
raise exception.Error("Volume is not local to this node")
logging.debug("volume %s: removing export", volume_ref['name'])
raise exception.Error(_("Volume is not local to this node"))
logging.debug(_("volume %s: removing export"), volume_ref['name'])
self.driver.remove_export(context, volume_ref)
logging.debug("volume %s: deleting", volume_ref['name'])
logging.debug(_("volume %s: deleting"), volume_ref['name'])
self.driver.delete_volume(volume_ref)
self.db.volume_destroy(context, volume_id)
logging.debug("volume %s: deleted successfully", volume_ref['name'])
logging.debug(_("volume %s: deleted successfully"), volume_ref['name'])
return True
def setup_compute_volume(self, context, volume_id):

View File

@ -2,6 +2,7 @@ SQLAlchemy==0.6.3
pep8==0.5.0
pylint==0.19
IPy==0.70
Cheetah==2.4.2.1
M2Crypto==0.20.2
amqplib==0.6.1
anyjson==0.2.4