merge trunk
@@ -19,6 +19,7 @@ Chiradeep Vittal <chiradeep@cloud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Chris Behrens <cbehrens@codestud.com>
Christian Berendt <berendt@b1-systems.de>
Christopher MacGown <chris@pistoncloud.com>
Chuck Short <zulcss@ubuntu.com>
Cory Wright <corywright@gmail.com>
Dan Prince <dan.prince@rackspace.com>
@@ -69,6 +70,7 @@ Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Lvov Maxim <usrleon@gmail.com>
Mandell Degerness <mdegerne@gmail.com>
Mark McLoughlin <markmc@redhat.com>
Mark Washenberger <mark.washenberger@rackspace.com>
Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
@@ -100,6 +102,7 @@ Scott Moser <smoser@ubuntu.com>
Soren Hansen <soren.hansen@rackspace.com>
Stephanie Reese <reese.sm@gmail.com>
Thierry Carrez <thierry@openstack.org>
Tim Simpson <tim.simpson@rackspace.com>
Todd Willey <todd@ansolabs.com>
Trey Morris <trey.morris@rackspace.com>
Troy Toman <troy.toman@rackspace.com>
@@ -45,6 +45,7 @@ if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    servers = []
    for api in flags.FLAGS.enabled_apis:
        servers.append(service.WSGIService(api))

@@ -41,6 +41,7 @@ if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    server = service.WSGIService('ec2')
    service.serve(server)
    service.wait()

@@ -41,6 +41,7 @@ if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    server = service.WSGIService('osapi')
    service.serve(server)
    service.wait()

@@ -43,6 +43,7 @@ if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    server = service.Service.create(binary='nova-compute')
    service.serve(server)
    service.wait()
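Every service entry point above gains the same one-line change: a utils.monkey_patch() call between flag parsing and server start. The helper's body is not part of this diff; as a rough sketch of the idea, consistent with the monkey_patch flags defined later in this commit (names and structure here are assumptions, not the actual nova.utils code), it rebinds each public function of the configured modules to a decorated version:

    # Hedged sketch only; 'flags' stands for nova's global FLAGS object.
    import inspect

    def _import_attr(dotted_path):
        """Resolve e.g. 'nova.notifier.api.notify_decorator' to an object."""
        module_name, _, attr = dotted_path.rpartition('.')
        return getattr(__import__(module_name, fromlist=[attr]), attr)

    def monkey_patch(flags):
        """Rebind module-level functions to decorated versions."""
        if not flags.monkey_patch:
            return
        for entry in flags.monkey_patch_modules:
            # Flag entries use the 'module.path:decorator.path' format.
            module_name, decorator_name = entry.split(':')
            module = __import__(module_name, fromlist=['*'])
            decorator = _import_attr(decorator_name)
            for name, func in inspect.getmembers(module, inspect.isfunction):
                setattr(module, name,
                        decorator('%s.%s' % (module_name, name), func))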
530 bin/nova-manage
@@ -53,6 +53,7 @@
CLI interface for nova management.
"""

import ast
import gettext
import glob
import json

@@ -85,11 +86,13 @@ from nova import quota
from nova import rpc
from nova import utils
from nova import version
from nova import vsa
from nova.api.ec2 import ec2utils
from nova.auth import manager
from nova.cloudpipe import pipelib
from nova.compute import instance_types
from nova.db import migration
from nova.volume import volume_types

FLAGS = flags.FLAGS
flags.DECLARE('fixed_range', 'nova.network.manager')
@@ -134,7 +137,7 @@ class VpnCommands(object):
          help='Project name')
    def list(self, project=None):
        """Print a listing of the VPN data for one or all projects."""

        print "WARNING: This method only works with deprecated auth"
        print "%-12s\t" % 'project',
        print "%-20s\t" % 'ip:port',
        print "%-20s\t" % 'private_ip',

@@ -170,17 +173,22 @@ class VpnCommands(object):

    def spawn(self):
        """Run all VPNs."""
        print "WARNING: This method only works with deprecated auth"
        for p in reversed(self.manager.get_projects()):
            if not self._vpn_for(p.id):
                print 'spawning %s' % p.id
                self.pipe.launch_vpn_instance(p.id)
                self.pipe.launch_vpn_instance(p.id, p.project_manager_id)
                time.sleep(10)

    @args('--project', dest="project_id", metavar='<Project name>',
          help='Project name')
    def run(self, project_id):
        """Start the VPN for a given project."""
        self.pipe.launch_vpn_instance(project_id)
    @args('--user', dest="user_id", metavar='<user name>', help='User name')
    def run(self, project_id, user_id):
        """Start the VPN for a given project and user."""
        if not user_id:
            print "WARNING: This method only works with deprecated auth"
            user_id = self.manager.get_project(project_id).project_manager_id
        self.pipe.launch_vpn_instance(project_id, user_id)

    @args('--project', dest="project_id", metavar='<Project name>',
          help='Project name')

@@ -195,10 +203,6 @@ class VpnCommands(object):
        """
        # TODO(tr3buchet): perhaps this shouldn't update all networks
        # associated with a project in the future
        project = self.manager.get_project(project_id)
        if not project:
            print 'No project %s' % (project_id)
            return
        admin_context = context.get_admin_context()
        networks = db.project_get_networks(admin_context, project_id)
        for network in networks:
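The VPN hunks above (and most of nova-manage below) lean on the @args decorator, which is defined outside this diff. A minimal sketch consistent with how it is used here (the committed implementation may differ) simply stashes the option tuples on the function for the command dispatcher to consume later:

    # Hedged sketch of nova-manage's @args decorator.
    def args(*posargs, **kwargs):
        def _decorator(func):
            # Prepend so options come out in declaration order.
            func.__dict__.setdefault('args', []).insert(0, (posargs, kwargs))
            return func
        return _decorator

The dispatcher can then turn each stored tuple into an optparse option, so `nova-manage vpn run --project p1 --user u1` reaches run(project_id='p1', user_id='u1').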
@@ -825,6 +829,39 @@ class NetworkCommands(object):
                        uuid=None)


    @args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>',
          help='Network to modify')
    @args('--project', dest="project", metavar='<project name>',
          help='Project name to associate')
    @args('--host', dest="host", metavar='<host>',
          help='Host to associate')
    @args('--disassociate-project', action="store_true", dest='dis_project',
          default=False, help='Disassociate Network from Project')
    @args('--disassociate-host', action="store_true", dest='dis_host',
          default=False, help='Disassociate Host from Project')
    def modify(self, fixed_range, project=None, host=None,
               dis_project=None, dis_host=None):
        """Associate/Disassociate Network with Project and/or Host
        arguments: network project host
        leave any field blank to ignore it
        """
        admin_context = context.get_admin_context()
        network = db.network_get_by_cidr(admin_context, fixed_range)
        net = {}
        # User can choose the following actions, each for project and host:
        # 1) Associate (set the value given by the project/host parameter)
        # 2) Disassociate (set None via the disassociate parameter)
        # 3) Keep unchanged (project/host key is not added to 'net')
        if project:
            net['project_id'] = project
        elif dis_project:
            net['project_id'] = None
        if host:
            net['host'] = host
        elif dis_host:
            net['host'] = None
        db.network_update(admin_context, network['id'], net)


class VmCommands(object):
    """Class for managing VM instances."""
@@ -1091,6 +1128,477 @@ class VersionCommands(object):
        self.list()


class VsaCommands(object):
    """Methods for dealing with VSAs"""

    def __init__(self, *args, **kwargs):
        self.manager = manager.AuthManager()
        self.vsa_api = vsa.API()
        self.context = context.get_admin_context()

        self._format_str_vsa = "%(id)-5s %(vsa_id)-15s %(name)-25s "\
                    "%(type)-10s %(vcs)-6s %(drives)-9s %(stat)-10s "\
                    "%(az)-10s %(time)-10s"
        self._format_str_volume = "\t%(id)-4s %(name)-15s %(size)-5s "\
                    "%(stat)-10s %(att)-20s %(time)s"
        self._format_str_drive = "\t%(id)-4s %(name)-15s %(size)-5s "\
                    "%(stat)-10s %(host)-20s %(type)-4s %(tname)-10s %(time)s"
        self._format_str_instance = "\t%(id)-4s %(name)-10s %(dname)-20s "\
                    "%(image)-12s %(type)-10s %(fl_ip)-15s %(fx_ip)-15s "\
                    "%(stat)-10s %(host)-15s %(time)s"

    def _print_vsa_header(self):
        print self._format_str_vsa %\
                dict(id=_('ID'),
                     vsa_id=_('vsa_id'),
                     name=_('displayName'),
                     type=_('vc_type'),
                     vcs=_('vc_cnt'),
                     drives=_('drive_cnt'),
                     stat=_('status'),
                     az=_('AZ'),
                     time=_('createTime'))

    def _print_vsa(self, vsa):
        print self._format_str_vsa %\
                dict(id=vsa['id'],
                     vsa_id=vsa['name'],
                     name=vsa['display_name'],
                     type=vsa['vsa_instance_type'].get('name', None),
                     vcs=vsa['vc_count'],
                     drives=vsa['vol_count'],
                     stat=vsa['status'],
                     az=vsa['availability_zone'],
                     time=str(vsa['created_at']))

    def _print_volume_header(self):
        print _(' === Volumes ===')
        print self._format_str_volume %\
                dict(id=_('ID'),
                     name=_('name'),
                     size=_('size'),
                     stat=_('status'),
                     att=_('attachment'),
                     time=_('createTime'))

    def _print_volume(self, vol):
        print self._format_str_volume %\
                dict(id=vol['id'],
                     name=vol['display_name'] or vol['name'],
                     size=vol['size'],
                     stat=vol['status'],
                     att=vol['attach_status'],
                     time=str(vol['created_at']))

    def _print_drive_header(self):
        print _(' === Drives ===')
        print self._format_str_drive %\
                dict(id=_('ID'),
                     name=_('name'),
                     size=_('size'),
                     stat=_('status'),
                     host=_('host'),
                     type=_('type'),
                     tname=_('typeName'),
                     time=_('createTime'))

    def _print_drive(self, drive):
        if drive['volume_type_id'] is not None and drive.get('volume_type'):
            drive_type_name = drive['volume_type'].get('name')
        else:
            drive_type_name = ''

        print self._format_str_drive %\
                dict(id=drive['id'],
                     name=drive['display_name'],
                     size=drive['size'],
                     stat=drive['status'],
                     host=drive['host'],
                     type=drive['volume_type_id'],
                     tname=drive_type_name,
                     time=str(drive['created_at']))

    def _print_instance_header(self):
        print _(' === Instances ===')
        print self._format_str_instance %\
                dict(id=_('ID'),
                     name=_('name'),
                     dname=_('disp_name'),
                     image=_('image'),
                     type=_('type'),
                     fl_ip=_('floating_IP'),
                     fx_ip=_('fixed_IP'),
                     stat=_('status'),
                     host=_('host'),
                     time=_('createTime'))

    def _print_instance(self, vc):

        fixed_addr = None
        floating_addr = None
        if vc['fixed_ips']:
            fixed = vc['fixed_ips'][0]
            fixed_addr = fixed['address']
            if fixed['floating_ips']:
                floating_addr = fixed['floating_ips'][0]['address']
        floating_addr = floating_addr or fixed_addr

        print self._format_str_instance %\
                dict(id=vc['id'],
                     name=ec2utils.id_to_ec2_id(vc['id']),
                     dname=vc['display_name'],
                     image=('ami-%08x' % int(vc['image_ref'])),
                     type=vc['instance_type']['name'],
                     fl_ip=floating_addr,
                     fx_ip=fixed_addr,
                     stat=vc['state_description'],
                     host=vc['host'],
                     time=str(vc['created_at']))

    def _list(self, context, vsas, print_drives=False,
              print_volumes=False, print_instances=False):
        if vsas:
            self._print_vsa_header()

        for vsa in vsas:
            self._print_vsa(vsa)
            vsa_id = vsa.get('id')

            if print_instances:
                instances = self.vsa_api.get_all_vsa_instances(context, vsa_id)
                if instances:
                    print
                    self._print_instance_header()
                    for instance in instances:
                        self._print_instance(instance)
                    print

            if print_drives:
                drives = self.vsa_api.get_all_vsa_drives(context, vsa_id)
                if drives:
                    self._print_drive_header()
                    for drive in drives:
                        self._print_drive(drive)
                    print

            if print_volumes:
                volumes = self.vsa_api.get_all_vsa_volumes(context, vsa_id)
                if volumes:
                    self._print_volume_header()
                    for volume in volumes:
                        self._print_volume(volume)
                    print
    @args('--storage', dest='storage',
          metavar="[{'drive_name': 'type', 'num_drives': N, 'size': M},..]",
          help='Initial storage allocation for VSA')
    @args('--name', dest='name', metavar="<name>", help='VSA name')
    @args('--description', dest='description', metavar="<description>",
          help='VSA description')
    @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs')
    @args('--instance_type', dest='instance_type_name', metavar="<name>",
          help='Instance type name')
    @args('--image', dest='image_name', metavar="<name>", help='Image name')
    @args('--shared', dest='shared', action="store_true", default=False,
          help='Use shared drives')
    @args('--az', dest='az', metavar="<zone:host>", help='Availability zone')
    @args('--user', dest="user_id", metavar='<User name>',
          help='User name')
    @args('--project', dest="project_id", metavar='<Project name>',
          help='Project name')
    def create(self, storage='[]', name=None, description=None, vc_count=1,
               instance_type_name=None, image_name=None, shared=None,
               az=None, user_id=None, project_id=None):
        """Create a VSA."""

        if project_id is None:
            try:
                project_id = os.getenv("EC2_ACCESS_KEY").split(':')[1]
            except Exception as exc:
                print _("Failed to retrieve project id: %(exc)s") % locals()
                raise

        if user_id is None:
            try:
                project = self.manager.get_project(project_id)
                user_id = project.project_manager_id
            except Exception as exc:
                print _("Failed to retrieve user info: %(exc)s") % locals()
                raise

        is_admin = self.manager.is_admin(user_id)
        ctxt = context.RequestContext(user_id, project_id, is_admin)
        if not is_admin and \
           not self.manager.is_project_member(user_id, project_id):
            msg = _("%(user_id)s must be an admin or a "
                    "member of %(project_id)s")
            LOG.warn(msg % locals())
            raise ValueError(msg % locals())

        # Sanity check for the storage string
        storage_list = []
        if storage is not None:
            try:
                storage_list = ast.literal_eval(storage)
            except:
                print _("Invalid string format %s") % storage
                raise

            for node in storage_list:
                if ('drive_name' not in node) or ('num_drives' not in node):
                    print _("Invalid string format for element %s. "
                            "Expecting keys 'drive_name' & 'num_drives'") % \
                            str(node)
                    raise KeyError

        if instance_type_name == '':
            instance_type_name = None
        instance_type = instance_types.get_instance_type_by_name(
                                instance_type_name)

        if image_name == '':
            image_name = None

        if shared in [None, False, "--full_drives"]:
            shared = False
        elif shared in [True, "--shared"]:
            shared = True
        else:
            raise ValueError(_('Shared parameter should be set either to '
                               '--shared or --full_drives'))

        values = {
            'display_name': name,
            'display_description': description,
            'vc_count': int(vc_count),
            'instance_type': instance_type,
            'image_name': image_name,
            'availability_zone': az,
            'storage': storage_list,
            'shared': shared,
            }

        result = self.vsa_api.create(ctxt, **values)
        self._list(ctxt, [result])
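create() takes the initial storage allocation as a Python-literal string and parses it with ast.literal_eval, which evaluates literals only and cannot execute code. A request for two drive groups might look like this (drive names and sizes are illustrative):

    import ast

    storage = ("[{'drive_name': 'SATA_500GB_7200RPM', 'num_drives': 3, 'size': 0},"
               " {'drive_name': 'SSD_100GB', 'num_drives': 2, 'size': 50}]")
    storage_list = ast.literal_eval(storage)
    # Mirrors the sanity check above: both keys must be present per element.
    assert all('drive_name' in node and 'num_drives' in node
               for node in storage_list)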
    @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID')
    @args('--name', dest='name', metavar="<name>", help='VSA name')
    @args('--description', dest='description', metavar="<description>",
          help='VSA description')
    @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs')
    def update(self, vsa_id, name=None, description=None, vc_count=None):
        """Updates name/description of a VSA and its number of VCs."""

        values = {}
        if name is not None:
            values['display_name'] = name
        if description is not None:
            values['display_description'] = description
        if vc_count is not None:
            values['vc_count'] = int(vc_count)

        vsa_id = ec2utils.ec2_id_to_id(vsa_id)
        result = self.vsa_api.update(self.context, vsa_id=vsa_id, **values)
        self._list(self.context, [result])

    @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID')
    def delete(self, vsa_id):
        """Delete a VSA."""
        vsa_id = ec2utils.ec2_id_to_id(vsa_id)
        self.vsa_api.delete(self.context, vsa_id)

    @args('--id', dest='vsa_id', metavar="<vsa_id>",
          help='VSA ID (optional)')
    @args('--all', dest='all', action="store_true", default=False,
          help='Show all available details')
    @args('--drives', dest='drives', action="store_true",
          help='Include drive-level details')
    @args('--volumes', dest='volumes', action="store_true",
          help='Include volume-level details')
    @args('--instances', dest='instances', action="store_true",
          help='Include instance-level details')
    def list(self, vsa_id=None, all=False,
             drives=False, volumes=False, instances=False):
        """Describe all available VSAs (or a particular one)."""

        vsas = []
        if vsa_id is not None:
            internal_id = ec2utils.ec2_id_to_id(vsa_id)
            vsa = self.vsa_api.get(self.context, internal_id)
            vsas.append(vsa)
        else:
            vsas = self.vsa_api.get_all(self.context)

        if all:
            drives = volumes = instances = True

        self._list(self.context, vsas, drives, volumes, instances)

    def update_capabilities(self):
        """Forces a capabilities update on all nova-volume nodes."""

        rpc.fanout_cast(context.get_admin_context(),
                        FLAGS.volume_topic,
                        {"method": "notification",
                         "args": {"event": "startup"}})
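update_capabilities() does not wait for replies: rpc.fanout_cast delivers the same message to every consumer bound to the volume topic. The payload is the plain method/args envelope used throughout nova:

    message = {"method": "notification",
               "args": {"event": "startup"}}
    # rpc.fanout_cast(ctxt, FLAGS.volume_topic, message) broadcasts this to
    # every nova-volume service; on receipt each node's notification handler
    # re-reports its drive capabilities. No reply is collected.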
class VsaDriveTypeCommands(object):
    """Methods for dealing with VSA drive types"""

    def __init__(self, *args, **kwargs):
        super(VsaDriveTypeCommands, self).__init__(*args, **kwargs)
        self.context = context.get_admin_context()
        self._drive_type_template = '%s_%sGB_%sRPM'

    def _list(self, drives):
        format_str = "%-5s %-30s %-10s %-10s %-10s %-20s %-10s %s"
        if len(drives):
            print format_str %\
                    (_('ID'),
                     _('name'),
                     _('type'),
                     _('size_gb'),
                     _('rpm'),
                     _('capabilities'),
                     _('visible'),
                     _('createTime'))

        for name, vol_type in drives.iteritems():
            drive = vol_type.get('extra_specs')
            print format_str %\
                    (str(vol_type['id']),
                     drive['drive_name'],
                     drive['drive_type'],
                     drive['drive_size'],
                     drive['drive_rpm'],
                     drive.get('capabilities', ''),
                     str(drive.get('visible', '')),
                     str(vol_type['created_at']))

    @args('--type', dest='type', metavar="<type>",
          help='Drive type (SATA, SAS, SSD, etc.)')
    @args('--size', dest='size_gb', metavar="<gb>", help='Drive size in GB')
    @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM')
    @args('--capabilities', dest='capabilities', default=None,
          metavar="<string>", help='Different capabilities')
    @args('--hide', dest='hide', action="store_true", default=False,
          help='Show or hide drive')
    @args('--name', dest='name', metavar="<name>", help='Drive name')
    def create(self, type, size_gb, rpm, capabilities=None,
               hide=False, name=None):
        """Create drive type."""

        hide = True if hide in [True, "True", "--hide", "hide"] else False

        if name is None:
            name = self._drive_type_template % (type, size_gb, rpm)

        extra_specs = {'type': 'vsa_drive',
                       'drive_name': name,
                       'drive_type': type,
                       'drive_size': size_gb,
                       'drive_rpm': rpm,
                       'visible': True,
                       }
        if hide:
            extra_specs['visible'] = False

        if capabilities is not None and capabilities != '':
            extra_specs['capabilities'] = capabilities

        volume_types.create(self.context, name, extra_specs)
        result = volume_types.get_volume_type_by_name(self.context, name)
        self._list({name: result})

    @args('--name', dest='name', metavar="<name>", help='Drive name')
    @args('--purge', action="store_true", dest='purge', default=False,
          help='purge record from database')
    def delete(self, name, purge):
        """Marks a drive type as deleted (or purges its record)."""
        try:
            if purge:
                volume_types.purge(self.context, name)
                verb = "purged"
            else:
                volume_types.destroy(self.context, name)
                verb = "deleted"
        except exception.ApiError:
            print "Valid volume type name is required"
            sys.exit(1)
        except exception.DBError, e:
            print "DB Error: %s" % e
            sys.exit(2)
        except:
            sys.exit(3)
        else:
            print "%s %s" % (name, verb)

    @args('--all', dest='all', action="store_true", default=False,
          help='Show all drives (including invisible)')
    @args('--name', dest='name', metavar="<name>",
          help='Show only specified drive')
    def list(self, all=False, name=None):
        """Describe all available VSA drive types (or a particular one)."""

        all = False if all in ["--all", False, "False"] else True

        search_opts = {'extra_specs': {'type': 'vsa_drive'}}
        if name is not None:
            search_opts['extra_specs']['name'] = name

        if all == False:
            search_opts['extra_specs']['visible'] = '1'

        drives = volume_types.get_all_types(self.context,
                                            search_opts=search_opts)
        self._list(drives)

    @args('--name', dest='name', metavar="<name>", help='Drive name')
    @args('--type', dest='type', metavar="<type>",
          help='Drive type (SATA, SAS, SSD, etc.)')
    @args('--size', dest='size_gb', metavar="<gb>", help='Drive size in GB')
    @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM')
    @args('--capabilities', dest='capabilities', default=None,
          metavar="<string>", help='Different capabilities')
    @args('--visible', dest='visible',
          metavar="<show|hide>", help='Show or hide drive')
    def update(self, name, type=None, size_gb=None, rpm=None,
               capabilities=None, visible=None):
        """Update drive type."""

        volume_type = volume_types.get_volume_type_by_name(self.context, name)

        extra_specs = {'type': 'vsa_drive'}

        if type:
            extra_specs['drive_type'] = type

        if size_gb:
            extra_specs['drive_size'] = size_gb

        if rpm:
            extra_specs['drive_rpm'] = rpm

        if capabilities:
            extra_specs['capabilities'] = capabilities

        if visible is not None:
            if visible in ["show", True, "True"]:
                extra_specs['visible'] = True
            elif visible in ["hide", False, "False"]:
                extra_specs['visible'] = False
            else:
                raise ValueError(_('visible parameter should be set to '
                                   'show or hide'))

        db.api.volume_type_extra_specs_update_or_create(self.context,
                                                        volume_type['id'],
                                                        extra_specs)
        result = volume_types.get_volume_type_by_name(self.context, name)
        self._list({name: result})
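When no explicit name is given, create() derives the drive type name from the '%s_%sGB_%sRPM' template, so type, size, and rotational speed stay recoverable from the name alone (values below are illustrative):

    template = '%s_%sGB_%sRPM'
    name = template % ('SATA', 500, 7200)
    assert name == 'SATA_500GB_7200RPM'
    # i.e. `nova-manage drive create --type SATA --size 500 --rpm 7200`
    # registers a volume type of that name with extra_specs
    # {'type': 'vsa_drive', 'drive_type': 'SATA', 'drive_size': 500, ...}.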
class VolumeCommands(object):
    """Methods for dealing with a cloud in an odd state"""
@@ -1477,6 +1985,7 @@ CATEGORIES = [
    ('agent', AgentBuildCommands),
    ('config', ConfigCommands),
    ('db', DbCommands),
    ('drive', VsaDriveTypeCommands),
    ('fixed', FixedIpCommands),
    ('flavor', InstanceTypeCommands),
    ('floating', FloatingIpCommands),

@@ -1492,7 +2001,8 @@ CATEGORIES = [
    ('version', VersionCommands),
    ('vm', VmCommands),
    ('volume', VolumeCommands),
    ('vpn', VpnCommands)]
    ('vpn', VpnCommands),
    ('vsa', VsaCommands)]


def lazy_match(name, key_value_tuples):
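The diff cuts off at lazy_match's signature. Its role in nova-manage is to match the typed category or action name against CATEGORIES; a sketch consistent with that use (not necessarily the committed body) is:

    def lazy_match(name, key_value_tuples):
        """Return the (key, value) pairs whose key matches name.

        An exact match wins outright; otherwise any key starting with
        the given prefix is a candidate, so 'nova-manage vs list' could
        still reach ('vsa', VsaCommands) as long as no other category
        starts with 'vs'.
        """
        result = [(k, v) for (k, v) in key_value_tuples if k == name]
        if not result:
            result = [(k, v) for (k, v) in key_value_tuples
                      if k.startswith(name)]
        return result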
@@ -43,6 +43,7 @@ if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    server = service.Service.create(binary='nova-network')
    service.serve(server)
    service.wait()

@@ -49,6 +49,7 @@ if __name__ == '__main__':
    utils.default_flagfile()
    FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    router = s3server.S3Application(FLAGS.buckets_path)
    server = wsgi.Server("S3 Objectstore",
                         router,

@@ -22,6 +22,7 @@
import eventlet
eventlet.monkey_patch()

import gettext
import os
import sys

@@ -33,6 +34,7 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

gettext.install('nova', unicode=1)

from nova import flags
from nova import log as logging

@@ -43,6 +45,7 @@ if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    server = service.Service.create(binary='nova-scheduler')
    service.serve(server)
    service.wait()

@@ -43,6 +43,7 @@ if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    server = service.Service.create(binary='nova-volume')
    service.serve(server)
    service.wait()
49 bin/nova-vsa (new executable file)
@@ -0,0 +1,49 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Starter script for Nova VSA."""

import eventlet
eventlet.monkey_patch()

import os
import sys

# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                   os.pardir,
                                   os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)


from nova import flags
from nova import log as logging
from nova import service
from nova import utils

if __name__ == '__main__':
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    server = service.Service.create(binary='nova-vsa')
    service.serve(server)
    service.wait()
@@ -17,6 +17,9 @@
# under the License.

"""
WARNING: This code is deprecated and will be removed.
Keystone is the recommended solution for auth management.

Nova authentication management
"""
@@ -38,10 +41,13 @@ from nova.auth import signer


FLAGS = flags.FLAGS
flags.DEFINE_bool('use_deprecated_auth',
                  False,
                  'This flag must be set to use old style auth')

flags.DEFINE_list('allowed_roles',
                  ['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'],
                  'Allowed roles for project')

# NOTE(vish): a user with one of these roles will be a superuser and
# have access to all api commands
flags.DEFINE_list('superuser_roles', ['cloudadmin'],
@@ -811,7 +817,13 @@ class AuthManager(object):
            s3_host = host
            ec2_host = host
        rc = open(FLAGS.credentials_template).read()
        rc = rc % {'access': user.access,
        # NOTE(vish): Deprecated auth uses an access key; no-auth uses
        # the user_id in its place.
        if FLAGS.use_deprecated_auth:
            access = user.access
        else:
            access = user.id
        rc = rc % {'access': access,
                   'project': pid,
                   'secret': user.secret,
                   'ec2': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
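The hunk swaps the user's access key for the plain user id when deprecated auth is off, then substitutes into the credentials template via %-formatting. With a template along these lines (illustrative; the shipped novarc template is not part of this diff):

    template = ('export EC2_ACCESS_KEY="%(access)s:%(project)s"\n'
                'export EC2_SECRET_KEY="%(secret)s"\n'
                'export EC2_URL="%(ec2)s"\n')
    rc = template % {'access': 'a-user-id', 'project': 'proj1',
                     'secret': 'secret-key', 'ec2': 'http://host:8773/'}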
@@ -292,6 +292,7 @@ DEFINE_string('ajax_console_proxy_url',
              in the form "http://127.0.0.1:8000"')
DEFINE_string('ajax_console_proxy_port',
              8000, 'port that ajax_console_proxy binds')
DEFINE_string('vsa_topic', 'vsa', 'the topic that nova-vsa service listens on')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False,

@@ -371,6 +372,17 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
              'Manager for volume')
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
              'Manager for scheduler')
DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager',
              'Manager for vsa')
DEFINE_string('vc_image_name', 'vc_image',
              'the VC image ID (for a VC image that exists in DB Glance)')
# VSA constants and enums
DEFINE_string('default_vsa_instance_type', 'm1.small',
              'default instance type for VSA instances')
DEFINE_integer('max_vcs_in_vsa', 32,
               'maximum VCs in a VSA')
DEFINE_integer('vsa_part_size_gb', 100,
               'default partition size for shared capacity')

# The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',

@@ -402,3 +414,14 @@ DEFINE_bool('resume_guests_state_on_host_boot', False,

DEFINE_string('root_helper', 'sudo',
              'Command prefix to use for running commands as root')

DEFINE_bool('use_ipv6', False, 'use ipv6')

DEFINE_bool('monkey_patch', False,
            'Whether to log monkey patching')

DEFINE_list('monkey_patch_modules',
            ['nova.api.ec2.cloud:nova.notifier.api.notify_decorator',
             'nova.compute.api:nova.notifier.api.notify_decorator'],
            'Module list representing monkey '
            'patched module and decorator')
@@ -32,6 +32,7 @@ import json
import logging
import logging.handlers
import os
import stat
import sys
import traceback

@@ -257,7 +258,10 @@ class NovaRootLogger(NovaLogger):
            self.filelog = WatchedFileHandler(logpath)
            self.addHandler(self.filelog)
            self.logpath = logpath
            os.chmod(self.logpath, FLAGS.logfile_mode)

            st = os.stat(self.logpath)
            if st.st_mode != (stat.S_IFREG | FLAGS.logfile_mode):
                os.chmod(self.logpath, FLAGS.logfile_mode)
        else:
            self.removeHandler(self.filelog)
            self.addHandler(self.streamlog)
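The added stat check avoids an unconditional chmod on every setup: os.stat().st_mode carries the file-type bits as well as the permission bits, so a regular file already at the desired mode compares equal to stat.S_IFREG | mode exactly. In isolation (path and mode are illustrative):

    import os
    import stat

    LOGFILE_MODE = 0644   # Python 2 octal literal, like FLAGS.logfile_mode
    path = '/var/log/nova/nova-manage.log'
    st = os.stat(path)
    if st.st_mode != (stat.S_IFREG | LOGFILE_MODE):
        # Only reached when the permissions (or file type) actually differ.
        os.chmod(path, LOGFILE_MODE)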
@@ -25,6 +25,9 @@ FLAGS = flags.FLAGS

flags.DEFINE_string('default_notification_level', 'INFO',
                    'Default notification level for outgoing notifications')
flags.DEFINE_string('default_publisher_id', FLAGS.host,
                    'Default publisher_id for outgoing notifications')


WARN = 'WARN'
INFO = 'INFO'

@@ -39,6 +42,30 @@ class BadPriorityException(Exception):
    pass


def notify_decorator(name, fn):
    """Decorator for notify, used from utils.monkey_patch().

    :param name: name of the function
    :param fn: object of the function
    :returns: function -- decorated function
    """
    def wrapped_func(*args, **kwarg):
        body = {}
        body['args'] = []
        body['kwarg'] = {}
        for arg in args:
            body['args'].append(arg)
        for key in kwarg:
            body['kwarg'][key] = kwarg[key]
        notify(FLAGS.default_publisher_id,
               name,
               FLAGS.default_notification_level,
               body)
        return fn(*args, **kwarg)
    return wrapped_func


def publisher_id(service, host=None):
    if not host:
        host = FLAGS.host
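notify_decorator is what the monkey_patch_modules flag wires in: every call to a decorated function first emits a notification capturing the positional and keyword arguments, then falls through to the original. Applied directly, outside monkey patching (the target function here is a made-up stand-in):

    def resize(instance_id, flavor=None):
        """Stand-in for a real API function; illustrative only."""
        return instance_id

    resize = notify_decorator('nova.compute.api.resize', resize)
    resize('i-0001', flavor='m1.large')
    # Before the original body runs, this emits:
    #   notify(FLAGS.default_publisher_id, 'nova.compute.api.resize',
    #          FLAGS.default_notification_level,  # 'INFO' by default
    #          {'args': ['i-0001'], 'kwarg': {'flavor': 'm1.large'}})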
68 nova/notifier/list_notifier.py (new file)
@@ -0,0 +1,68 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import flags
from nova import log as logging
from nova import utils
from nova.exception import ClassNotFound

flags.DEFINE_multistring('list_notifier_drivers',
                         ['nova.notifier.no_op_notifier'],
                         'List of drivers to send notifications')

FLAGS = flags.FLAGS

LOG = logging.getLogger('nova.notifier.list_notifier')

drivers = None


class ImportFailureNotifier(object):
    """Noisily re-raises some exception over-and-over when notify is called."""

    def __init__(self, exception):
        self.exception = exception

    def notify(self, message):
        raise self.exception


def _get_drivers():
    """Instantiates and returns drivers based on the flag values."""
    global drivers
    if not drivers:
        drivers = []
        for notification_driver in FLAGS.list_notifier_drivers:
            try:
                drivers.append(utils.import_object(notification_driver))
            except ClassNotFound as e:
                drivers.append(ImportFailureNotifier(e))
    return drivers


def notify(message):
    """Passes notification to multiple notifiers in a list."""
    for driver in _get_drivers():
        try:
            driver.notify(message)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to send to "
                            "notification driver %(driver)s." % locals()))


def _reset_drivers():
    """Used by unit tests to reset the drivers."""
    global drivers
    drivers = None
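Pointing the notification_driver flag (exercised by the tests below) at this module turns one logical notification into a fan-out over list_notifier_drivers, with per-driver error isolation: a failing driver is logged and the rest still run. Roughly:

    # Flag wiring (values illustrative; list_notifier_drivers is a
    # DEFINE_multistring, so the flag may be repeated):
    #   --notification_driver=nova.notifier.list_notifier
    #   --list_notifier_drivers=nova.notifier.no_op_notifier
    #   --list_notifier_drivers=nova.notifier.log_notifier
    from nova.notifier import api as notifier_api

    notifier_api.notify(notifier_api.publisher_id('compute'),
                        'compute.instance.create',
                        notifier_api.INFO,
                        dict(instance_id='i-0001'))
    # list_notifier.notify(message) then calls each configured driver in
    # order; a driver that raises is recorded via LOG.exception only.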
535 nova/scheduler/vsa.py (new file)
@@ -0,0 +1,535 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
VSA Simple Scheduler
"""

from nova import context
from nova import db
from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
from nova.scheduler import driver
from nova.scheduler import simple
from nova.vsa.api import VsaState
from nova.volume import volume_types

LOG = logging.getLogger('nova.scheduler.vsa')

FLAGS = flags.FLAGS
flags.DEFINE_integer('drive_type_approx_capacity_percent', 10,
                     'The percentage range for capacity comparison')
flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10,
                     'The number of unique hosts per storage allocation')
flags.DEFINE_boolean('vsa_select_unique_drives', True,
                     'Allow selection of same host for multiple drives')


def BYTES_TO_GB(bytes):
    return bytes >> 30


def GB_TO_BYTES(gb):
    return gb << 30
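The conversion helpers are plain bit shifts (1 GB here is 2**30 bytes), which also means BYTES_TO_GB truncates rather than rounds:

    assert GB_TO_BYTES(1) == 1073741824            # 1 << 30
    assert BYTES_TO_GB(2 * 1073741824) == 2
    assert BYTES_TO_GB(1073741824 + 1) == 1        # integer shift truncates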
class VsaScheduler(simple.SimpleScheduler):
    """Implements Scheduler for volume placement."""

    def __init__(self, *args, **kwargs):
        super(VsaScheduler, self).__init__(*args, **kwargs)
        self._notify_all_volume_hosts("startup")

    def _notify_all_volume_hosts(self, event):
        rpc.fanout_cast(context.get_admin_context(),
                        FLAGS.volume_topic,
                        {"method": "notification",
                         "args": {"event": event}})

    def _qosgrp_match(self, drive_type, qos_values):

        def _compare_names(str1, str2):
            return str1.lower() == str2.lower()

        def _compare_sizes_approxim(cap_capacity, size):
            cap_capacity = BYTES_TO_GB(int(cap_capacity))
            size = int(size)
            size_perc = size * \
                FLAGS.drive_type_approx_capacity_percent / 100

            return cap_capacity >= size - size_perc and \
                   cap_capacity <= size + size_perc

        # Add more entries for additional comparisons
        compare_list = [{'cap1': 'DriveType',
                         'cap2': 'type',
                         'cmp_func': _compare_names},
                        {'cap1': 'DriveCapacity',
                         'cap2': 'size',
                         'cmp_func': _compare_sizes_approxim}]

        for cap in compare_list:
            if cap['cap1'] in qos_values.keys() and \
               cap['cap2'] in drive_type.keys() and \
               cap['cmp_func'] is not None and \
               cap['cmp_func'](qos_values[cap['cap1']],
                               drive_type[cap['cap2']]):
                pass
            else:
                return False
        return True
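_qosgrp_match compares a requested drive_type against one advertised QoS group: names case-insensitively, and capacity within plus or minus drive_type_approx_capacity_percent. A pair that matches under the default 10% tolerance (values illustrative):

    drive_type = {'name': 'SATA_500GB_7200RPM', 'type': 'sata',
                  'size': 500, 'rpm': 7200}
    qos_values = {'DriveType': 'SATA',
                  'DriveCapacity': GB_TO_BYTES(520),   # within 500 +/- 50 GB
                  'AvailableCapacity': GB_TO_BYTES(2080)}
    # _compare_names('SATA', 'sata') -> True
    # _compare_sizes_approxim(GB_TO_BYTES(520), 500): 450 <= 520 <= 550 -> True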
    def _get_service_states(self):
        return self.zone_manager.service_states

    def _filter_hosts(self, topic, request_spec, host_list=None):

        LOG.debug(_("_filter_hosts: %(request_spec)s"), locals())

        drive_type = request_spec['drive_type']
        LOG.debug(_("Filter hosts for drive type %s"), drive_type['name'])

        if host_list is None:
            host_list = self._get_service_states().iteritems()

        filtered_hosts = []     # returns list of (hostname, capability_dict)
        for host, host_dict in host_list:
            for service_name, service_dict in host_dict.iteritems():
                if service_name != topic:
                    continue

                gos_info = service_dict.get('drive_qos_info', {})
                for qosgrp, qos_values in gos_info.iteritems():
                    if self._qosgrp_match(drive_type, qos_values):
                        if qos_values['AvailableCapacity'] > 0:
                            filtered_hosts.append((host, gos_info))
                        else:
                            LOG.debug(_("Host %s has no free capacity. Skip"),
                                      host)
                        break

        host_names = [item[0] for item in filtered_hosts]
        LOG.debug(_("Filter hosts: %s"), host_names)
        return filtered_hosts

    def _allowed_to_use_host(self, host, selected_hosts, unique):
        if unique == False or \
           host not in [item[0] for item in selected_hosts]:
            return True
        else:
            return False

    def _add_hostcap_to_list(self, selected_hosts, host, cap):
        if host not in [item[0] for item in selected_hosts]:
            selected_hosts.append((host, cap))

    def host_selection_algorithm(self, request_spec, all_hosts,
                                 selected_hosts, unique):
        """Must override this method for VSA scheduler to work."""
        raise NotImplementedError(_("Must implement host selection mechanism"))

    def _select_hosts(self, request_spec, all_hosts, selected_hosts=None):

        if selected_hosts is None:
            selected_hosts = []

        host = None
        if len(selected_hosts) >= FLAGS.vsa_unique_hosts_per_alloc:
            # try to select from already selected hosts only
            LOG.debug(_("Maximum number of hosts selected (%d)"),
                      len(selected_hosts))
            unique = False
            (host, qos_cap) = self.host_selection_algorithm(request_spec,
                                                            selected_hosts,
                                                            selected_hosts,
                                                            unique)

            LOG.debug(_("Selected excessive host %(host)s"), locals())
        else:
            unique = FLAGS.vsa_select_unique_drives

        if host is None:
            # if we've not tried yet (# of sel hosts < max) - unique=True
            # or failed to select from selected_hosts - unique=False
            # select from all hosts
            (host, qos_cap) = self.host_selection_algorithm(request_spec,
                                                            all_hosts,
                                                            selected_hosts,
                                                            unique)
        if host is None:
            raise driver.WillNotSchedule(_("No available hosts"))

        return (host, qos_cap)

    def _provision_volume(self, context, vol, vsa_id, availability_zone):

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        now = utils.utcnow()
        options = {
            'size': vol['size'],
            'user_id': context.user_id,
            'project_id': context.project_id,
            'snapshot_id': None,
            'availability_zone': availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': vol['name'],
            'display_description': vol['description'],
            'volume_type_id': vol['volume_type_id'],
            'metadata': dict(to_vsa_id=vsa_id),
            'host': vol['host'],
            'scheduled_at': now
            }

        size = vol['size']
        host = vol['host']
        name = vol['name']
        LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "
                    "host %(host)s"), locals())

        volume_ref = db.volume_create(context, options)
        rpc.cast(context,
                 db.queue_get_for(context, "volume", vol['host']),
                 {"method": "create_volume",
                  "args": {"volume_id": volume_ref['id'],
                           "snapshot_id": None}})

    def _check_host_enforcement(self, context, availability_zone):
        if (availability_zone
                and ':' in availability_zone
                and context.is_admin):
            zone, _x, host = availability_zone.partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s not available") % host)

            return host
        else:
            return None
    def _assign_hosts_to_volumes(self, context, volume_params, forced_host):

        prev_volume_type_id = None
        request_spec = {}
        selected_hosts = []

        LOG.debug(_("volume_params %(volume_params)s") % locals())

        i = 1
        for vol in volume_params:
            name = vol['name']
            LOG.debug(_("%(i)d: Volume %(name)s"), locals())
            i += 1

            if forced_host:
                vol['host'] = forced_host
                vol['capabilities'] = None
                continue

            volume_type_id = vol['volume_type_id']
            request_spec['size'] = vol['size']

            if prev_volume_type_id is None or\
               prev_volume_type_id != volume_type_id:
                # generate list of hosts for this drive type

                volume_type = volume_types.get_volume_type(context,
                                                           volume_type_id)
                drive_type = {
                    'name': volume_type['extra_specs'].get('drive_name'),
                    'type': volume_type['extra_specs'].get('drive_type'),
                    'size': int(volume_type['extra_specs'].get('drive_size')),
                    'rpm': volume_type['extra_specs'].get('drive_rpm'),
                    }
                request_spec['drive_type'] = drive_type

                all_hosts = self._filter_hosts("volume", request_spec)
                prev_volume_type_id = volume_type_id

            (host, qos_cap) = self._select_hosts(request_spec,
                                                 all_hosts, selected_hosts)
            vol['host'] = host
            vol['capabilities'] = qos_cap
            self._consume_resource(qos_cap, vol['size'], -1)

    def schedule_create_volumes(self, context, request_spec,
                                availability_zone=None, *_args, **_kwargs):
        """Picks hosts for hosting multiple volumes."""

        num_volumes = request_spec.get('num_volumes')
        LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
                  locals())

        vsa_id = request_spec.get('vsa_id')
        volume_params = request_spec.get('volumes')

        host = self._check_host_enforcement(context, availability_zone)

        try:
            self._print_capabilities_info()

            self._assign_hosts_to_volumes(context, volume_params, host)

            for vol in volume_params:
                self._provision_volume(context, vol, vsa_id, availability_zone)
        except:
            if vsa_id:
                db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED))

            for vol in volume_params:
                if 'capabilities' in vol:
                    self._consume_resource(vol['capabilities'],
                                           vol['size'], 1)
            raise

        return None
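The request_spec consumed by schedule_create_volumes bundles the VSA id with per-volume parameters; each volume dict must carry the keys that _assign_hosts_to_volumes and _provision_volume read. Its shape, with illustrative field values:

    request_spec = {
        'num_volumes': 2,
        'vsa_id': 123,
        'volumes': [
            {'name': 'vsa-vol-0', 'description': 'VSA drive 0',
             'size': 0,                  # 0 means claim a full drive
             'volume_type_id': 7},       # a 'vsa_drive' volume type
            {'name': 'vsa-vol-1', 'description': 'VSA drive 1',
             'size': 100,                # 100 GB partition
             'volume_type_id': 7},
        ],
    }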
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks the best host based on requested drive type capability."""
        volume_ref = db.volume_get(context, volume_id)

        host = self._check_host_enforcement(context,
                                            volume_ref['availability_zone'])
        if host:
            now = utils.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            return host

        volume_type_id = volume_ref['volume_type_id']
        if volume_type_id:
            volume_type = volume_types.get_volume_type(context, volume_type_id)

        if volume_type_id is None or\
           volume_types.is_vsa_volume(volume_type_id, volume_type):

            LOG.debug(_("Non-VSA volume %d"), volume_ref['id'])
            return super(VsaScheduler, self).schedule_create_volume(context,
                        volume_id, *_args, **_kwargs)

        self._print_capabilities_info()

        drive_type = {
            'name': volume_type['extra_specs'].get('drive_name'),
            'type': volume_type['extra_specs'].get('drive_type'),
            'size': int(volume_type['extra_specs'].get('drive_size')),
            'rpm': volume_type['extra_specs'].get('drive_rpm'),
            }

        LOG.debug(_("Spawning volume %(volume_id)s with drive type "
                    "%(drive_type)s"), locals())

        request_spec = {'size': volume_ref['size'],
                        'drive_type': drive_type}
        hosts = self._filter_hosts("volume", request_spec)

        try:
            (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
        except:
            if volume_ref['to_vsa_id']:
                db.vsa_update(context, volume_ref['to_vsa_id'],
                              dict(status=VsaState.FAILED))
            raise

        if host:
            now = utils.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            self._consume_resource(qos_cap, volume_ref['size'], -1)
        return host
    def _consume_full_drive(self, qos_values, direction):
        qos_values['FullDrive']['NumFreeDrives'] += direction
        qos_values['FullDrive']['NumOccupiedDrives'] -= direction

    def _consume_partition(self, qos_values, size, direction):

        if qos_values['PartitionDrive']['PartitionSize'] != 0:
            partition_size = qos_values['PartitionDrive']['PartitionSize']
        else:
            partition_size = size
        part_per_drive = qos_values['DriveCapacity'] / partition_size

        if direction == -1 and \
           qos_values['PartitionDrive']['NumFreePartitions'] == 0:

            self._consume_full_drive(qos_values, direction)
            qos_values['PartitionDrive']['NumFreePartitions'] += \
                part_per_drive

        qos_values['PartitionDrive']['NumFreePartitions'] += direction
        qos_values['PartitionDrive']['NumOccupiedPartitions'] -= direction

        if direction == 1 and \
           qos_values['PartitionDrive']['NumFreePartitions'] >= \
           part_per_drive:

            self._consume_full_drive(qos_values, direction)
            qos_values['PartitionDrive']['NumFreePartitions'] -= \
                part_per_drive

    def _consume_resource(self, qos_values, size, direction):
        if qos_values is None:
            LOG.debug(_("No capability selected for volume of size %(size)s"),
                      locals())
            return

        if size == 0:   # full drive match
            qos_values['AvailableCapacity'] += direction * \
                                               qos_values['DriveCapacity']
            self._consume_full_drive(qos_values, direction)
        else:
            qos_values['AvailableCapacity'] += direction * GB_TO_BYTES(size)
            self._consume_partition(qos_values, GB_TO_BYTES(size), direction)
        return

    def _print_capabilities_info(self):
        host_list = self._get_service_states().iteritems()
        for host, host_dict in host_list:
            for service_name, service_dict in host_dict.iteritems():
                if service_name != "volume":
                    continue

                LOG.info(_("Host %s:"), host)

                gos_info = service_dict.get('drive_qos_info', {})
                for qosgrp, qos_values in gos_info.iteritems():
                    total = qos_values['TotalDrives']
                    used = qos_values['FullDrive']['NumOccupiedDrives']
                    free = qos_values['FullDrive']['NumFreeDrives']
                    avail = BYTES_TO_GB(qos_values['AvailableCapacity'])

                    LOG.info(_("\tDrive %(qosgrp)-25s: total %(total)2s, "
                               "used %(used)2s, free %(free)2s. Available "
                               "capacity %(avail)-5s"), locals())
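_consume_resource applies the same delta in both directions: direction=-1 reserves capacity while scheduling, direction=1 gives it back when schedule_create_volumes fails and unwinds. A worked pass over one QoS group (numbers illustrative; 'sched' stands for any VsaScheduler instance, and the dict shape follows _print_capabilities_info above):

    qos = {'AvailableCapacity': GB_TO_BYTES(200),
           'DriveCapacity': GB_TO_BYTES(100),
           'FullDrive': {'NumFreeDrives': 2, 'NumOccupiedDrives': 0},
           'PartitionDrive': {'PartitionSize': 0,
                              'NumFreePartitions': 0,
                              'NumOccupiedPartitions': 0}}

    sched._consume_resource(qos, 0, -1)   # allocate one full drive
    # -> AvailableCapacity 100 GB, NumFreeDrives 1, NumOccupiedDrives 1
    sched._consume_resource(qos, 0, 1)    # roll the allocation back
    # -> AvailableCapacity 200 GB, NumFreeDrives 2, NumOccupiedDrives 0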
class VsaSchedulerLeastUsedHost(VsaScheduler):
    """
    Implements VSA scheduler to select the host with least used capacity
    of particular type.
    """

    def __init__(self, *args, **kwargs):
        super(VsaSchedulerLeastUsedHost, self).__init__(*args, **kwargs)

    def host_selection_algorithm(self, request_spec, all_hosts,
                                 selected_hosts, unique):
        size = request_spec['size']
        drive_type = request_spec['drive_type']
        best_host = None
        best_qoscap = None
        best_cap = None
        min_used = 0

        for (host, capabilities) in all_hosts:

            has_enough_capacity = False
            used_capacity = 0
            for qosgrp, qos_values in capabilities.iteritems():

                used_capacity = used_capacity + qos_values['TotalCapacity'] \
                                - qos_values['AvailableCapacity']

                if self._qosgrp_match(drive_type, qos_values):
                    # we found required qosgroup

                    if size == 0:   # full drive match
                        if qos_values['FullDrive']['NumFreeDrives'] > 0:
                            has_enough_capacity = True
                            matched_qos = qos_values
                        else:
                            break
                    else:
                        if qos_values['AvailableCapacity'] >= size and \
                           (qos_values['PartitionDrive'][
                                'NumFreePartitions'] > 0 or \
                            qos_values['FullDrive']['NumFreeDrives'] > 0):
                            has_enough_capacity = True
                            matched_qos = qos_values
                        else:
                            break

            if has_enough_capacity and \
               self._allowed_to_use_host(host,
                                         selected_hosts,
                                         unique) and \
               (best_host is None or used_capacity < min_used):

                min_used = used_capacity
                best_host = host
                best_qoscap = matched_qos
                best_cap = capabilities

        if best_host:
            self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
            min_used = BYTES_TO_GB(min_used)
            LOG.debug(_("\t LeastUsedHost: Best host: %(best_host)s. "
                        "(used capacity %(min_used)s)"), locals())
        return (best_host, best_qoscap)


class VsaSchedulerMostAvailCapacity(VsaScheduler):
    """
    Implements VSA scheduler to select the host with most available capacity
    of one particular type.
    """

    def __init__(self, *args, **kwargs):
        super(VsaSchedulerMostAvailCapacity, self).__init__(*args, **kwargs)

    def host_selection_algorithm(self, request_spec, all_hosts,
                                 selected_hosts, unique):
        size = request_spec['size']
        drive_type = request_spec['drive_type']
        best_host = None
        best_qoscap = None
        best_cap = None
        max_avail = 0

        for (host, capabilities) in all_hosts:
            for qosgrp, qos_values in capabilities.iteritems():
                if self._qosgrp_match(drive_type, qos_values):
                    # we found required qosgroup

                    if size == 0:   # full drive match
                        available = qos_values['FullDrive']['NumFreeDrives']
                    else:
                        available = qos_values['AvailableCapacity']

                    if available > max_avail and \
                       self._allowed_to_use_host(host,
                                                 selected_hosts,
                                                 unique):
                        max_avail = available
                        best_host = host
                        best_qoscap = qos_values
                        best_cap = capabilities
                    break   # go to the next host

        if best_host:
            self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
            type_str = "drives" if size == 0 else "bytes"
            LOG.debug(_("\t MostAvailCap: Best host: %(best_host)s. "
                        "(available %(max_avail)s %(type_str)s)"), locals())

        return (best_host, best_qoscap)
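The two concrete schedulers differ only in the ranking step: LeastUsedHost minimizes used capacity summed across all of a host's QoS groups, while MostAvailCapacity maximizes free drives (full-drive requests) or free bytes (partition requests) within the matching group alone. A sketch of the contrast, with made-up capability dicts following the drive_qos_info shape above:

    hosts = [('host1', caps1), ('host2', caps2)]   # as _filter_hosts returns
    # Suppose caps1's matching group has 5 free drives but host1 is 900 GB
    # used overall, while caps2's group has 3 free drives on a host that is
    # only 200 GB used. For a full-drive request (size == 0):
    #   VsaSchedulerLeastUsedHost     -> 'host2' (smallest total used capacity)
    #   VsaSchedulerMostAvailCapacity -> 'host1' (most free drives in the
    #                                    group, regardless of overall load)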
16 nova/tests/notifier/__init__.py (new file)
@@ -0,0 +1,16 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.tests import *
88 nova/tests/notifier/test_list_notifier.py (new file)
@@ -0,0 +1,88 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import stubout
import sys

import nova
from nova import log as logging
import nova.notifier.api
from nova.notifier.api import notify
from nova.notifier import log_notifier
from nova.notifier import no_op_notifier
from nova.notifier import list_notifier
from nova import test


class NotifierListTestCase(test.TestCase):
    """Test case for notifications"""

    def setUp(self):
        super(NotifierListTestCase, self).setUp()
        list_notifier._reset_drivers()
        self.stubs = stubout.StubOutForTesting()
        # Mock log to add one to exception_count when log.exception is called

        def mock_exception(cls, *args):
            self.exception_count += 1

        self.exception_count = 0
        list_notifier_log = logging.getLogger('nova.notifier.list_notifier')
        self.stubs.Set(list_notifier_log, "exception", mock_exception)
        # Mock no_op notifier to add one to notify_count when called.

        def mock_notify(cls, *args):
            self.notify_count += 1

        self.notify_count = 0
        self.stubs.Set(nova.notifier.no_op_notifier, 'notify', mock_notify)
        # Mock log_notifier to raise RuntimeError when called.

        def mock_notify2(cls, *args):
            raise RuntimeError("Bad notifier.")

        self.stubs.Set(nova.notifier.log_notifier, 'notify', mock_notify2)

    def tearDown(self):
        self.stubs.UnsetAll()
        list_notifier._reset_drivers()
        super(NotifierListTestCase, self).tearDown()

    def test_send_notifications_successfully(self):
        self.flags(notification_driver='nova.notifier.list_notifier',
                   list_notifier_drivers=['nova.notifier.no_op_notifier',
                                          'nova.notifier.no_op_notifier'])
        notify('publisher_id', 'event_type',
               nova.notifier.api.WARN, dict(a=3))
        self.assertEqual(self.notify_count, 2)
        self.assertEqual(self.exception_count, 0)

    def test_send_notifications_with_errors(self):

        self.flags(notification_driver='nova.notifier.list_notifier',
                   list_notifier_drivers=['nova.notifier.no_op_notifier',
                                          'nova.notifier.log_notifier'])
        notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3))
        self.assertEqual(self.notify_count, 1)
        self.assertEqual(self.exception_count, 1)

    def test_when_driver_fails_to_import(self):
        self.flags(notification_driver='nova.notifier.list_notifier',
                   list_notifier_drivers=['nova.notifier.no_op_notifier',
                                          'nova.notifier.logo_notifier',
                                          'fdsjgsdfhjkhgsfkj'])
        notify('publisher_id', 'event_type', nova.notifier.api.WARN, dict(a=3))
        self.assertEqual(self.exception_count, 2)
        self.assertEqual(self.notify_count, 1)
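The behaviour these tests pin down is a simple fan-out: list_notifier forwards each notification to every configured driver, and a failing driver is logged rather than allowed to block the rest. A minimal sketch of that contract, assuming only what the assertions above imply (the real module additionally resolves drivers lazily from the flag values):

# Sketch of the fan-out contract exercised above; not the actual
# nova.notifier.list_notifier implementation.
import logging

LOG = logging.getLogger('list_notifier_sketch')


def notify_all(drivers, message):
    """Send message to every driver; one bad driver must not
    prevent delivery to the remaining ones."""
    for driver in drivers:
        try:
            driver.notify(message)
        except Exception:
            # Matches the tests: one log.exception per failing driver,
            # while the healthy drivers still receive the message.
            LOG.exception('Problem sending notification via %s', driver)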
641 nova/tests/scheduler/test_vsa_scheduler.py Normal file
@@ -0,0 +1,641 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import stubout

import nova

from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.volume import volume_types

from nova.scheduler import vsa as vsa_sched
from nova.scheduler import driver

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.scheduler.vsa')

scheduled_volumes = []
scheduled_volume = {}
global_volume = {}


class FakeVsaLeastUsedScheduler(
        vsa_sched.VsaSchedulerLeastUsedHost):
    # No need to stub anything at the moment
    pass


class FakeVsaMostAvailCapacityScheduler(
        vsa_sched.VsaSchedulerMostAvailCapacity):
    # No need to stub anything at the moment
    pass


class VsaSchedulerTestCase(test.TestCase):

    def _get_vol_creation_request(self, num_vols, drive_ix, size=0):
        volume_params = []
        for i in range(num_vols):

            name = 'name_' + str(i)
            try:
                volume_types.create(self.context, name,
                            extra_specs={'type': 'vsa_drive',
                                         'drive_name': name,
                                         'drive_type': 'type_' + str(drive_ix),
                                         'drive_size': 1 + 100 * (drive_ix)})
                self.created_types_lst.append(name)
            except exception.ApiError:
                # type is already created
                pass

            volume_type = volume_types.get_volume_type_by_name(self.context,
                                                               name)
            volume = {'size': size,
                      'snapshot_id': None,
                      'name': 'vol_' + str(i),
                      'description': None,
                      'volume_type_id': volume_type['id']}
            volume_params.append(volume)

        return {'num_volumes': len(volume_params),
                'vsa_id': 123,
                'volumes': volume_params}

    def _generate_default_service_states(self):
        service_states = {}
        for i in range(self.host_num):
            host = {}
            hostname = 'host_' + str(i)
            if hostname in self.exclude_host_list:
                continue

            host['volume'] = {'timestamp': utils.utcnow(),
                              'drive_qos_info': {}}

            for j in range(self.drive_type_start_ix,
                           self.drive_type_start_ix + self.drive_type_num):
                dtype = {}
                dtype['Name'] = 'name_' + str(j)
                dtype['DriveType'] = 'type_' + str(j)
                dtype['TotalDrives'] = 2 * (self.init_num_drives + i)
                dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j)
                dtype['TotalCapacity'] = dtype['TotalDrives'] * \
                                         dtype['DriveCapacity']
                dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \
                                             dtype['DriveCapacity']
                dtype['DriveRpm'] = 7200
                dtype['DifCapable'] = 0
                dtype['SedCapable'] = 0
                dtype['PartitionDrive'] = {
                            'PartitionSize': 0,
                            'NumOccupiedPartitions': 0,
                            'NumFreePartitions': 0}
                dtype['FullDrive'] = {
                            'NumFreeDrives': dtype['TotalDrives'] - i,
                            'NumOccupiedDrives': i}
                host['volume']['drive_qos_info'][dtype['Name']] = dtype

            service_states[hostname] = host

        return service_states

    def _print_service_states(self):
        for host, host_val in self.service_states.iteritems():
            LOG.info(_("Host %s"), host)
            total_used = 0
            total_available = 0
            qos = host_val['volume']['drive_qos_info']

            for k, d in qos.iteritems():
                LOG.info("\t%s: type %s: drives (used %2d, total %2d) "\
                         "size %3d, total %4d, used %4d, avail %d",
                         k, d['DriveType'],
                         d['FullDrive']['NumOccupiedDrives'], d['TotalDrives'],
                         vsa_sched.BYTES_TO_GB(d['DriveCapacity']),
                         vsa_sched.BYTES_TO_GB(d['TotalCapacity']),
                         vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
                                               d['AvailableCapacity']),
                         vsa_sched.BYTES_TO_GB(d['AvailableCapacity']))

                total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
                                                    d['AvailableCapacity'])
                total_available += vsa_sched.BYTES_TO_GB(
                                                    d['AvailableCapacity'])
            LOG.info("Host %s: used %d, avail %d",
                     host, total_used, total_available)

    def _set_service_states(self, host_num,
                            drive_type_start_ix, drive_type_num,
                            init_num_drives=10,
                            exclude_host_list=[]):
        self.host_num = host_num
        self.drive_type_start_ix = drive_type_start_ix
        self.drive_type_num = drive_type_num
        self.exclude_host_list = exclude_host_list
        self.init_num_drives = init_num_drives
        self.service_states = self._generate_default_service_states()

    def _get_service_states(self):
        return self.service_states

    def _fake_get_service_states(self):
        return self._get_service_states()

    def _fake_provision_volume(self, context, vol, vsa_id, availability_zone):
        global scheduled_volumes
        scheduled_volumes.append(dict(vol=vol,
                                      vsa_id=vsa_id,
                                      az=availability_zone))
        name = vol['name']
        host = vol['host']
        LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
                    locals())
        LOG.debug(_("\t vol=%(vol)s"), locals())
        pass

    def _fake_vsa_update(self, context, vsa_id, values):
        LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
                    "values=%(values)s"), locals())
        pass

    def _fake_volume_create(self, context, options):
        LOG.debug(_("Test: Volume create: %s"), options)
        options['id'] = 123
        global global_volume
        global_volume = options
        return options

    def _fake_volume_get(self, context, volume_id):
        LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals())
        global global_volume
        global_volume['id'] = volume_id
        global_volume['availability_zone'] = None
        return global_volume

    def _fake_volume_update(self, context, volume_id, values):
        LOG.debug(_("Test: Volume update request: id=%(volume_id)s "\
                    "values=%(values)s"), locals())
        global scheduled_volume
        scheduled_volume = {'id': volume_id, 'host': values['host']}
        pass

    def _fake_service_get_by_args(self, context, host, binary):
        return "service"

    def _fake_service_is_up_True(self, service):
        return True

    def _fake_service_is_up_False(self, service):
        return False

    def setUp(self, sched_class=None):
        super(VsaSchedulerTestCase, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        self.context = context.get_admin_context()

        if sched_class is None:
            self.sched = FakeVsaLeastUsedScheduler()
        else:
            self.sched = sched_class

        self.host_num = 10
        self.drive_type_num = 5

        self.stubs.Set(self.sched,
                       '_get_service_states', self._fake_get_service_states)
        self.stubs.Set(self.sched,
                       '_provision_volume', self._fake_provision_volume)
        self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update)

        self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get)
        self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)

        self.created_types_lst = []

    def tearDown(self):
        for name in self.created_types_lst:
            volume_types.purge(self.context, name)

        self.stubs.UnsetAll()
        super(VsaSchedulerTestCase, self).tearDown()

    def test_vsa_sched_create_volumes_simple(self):
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_1', 'host_3'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')

        cur = self._get_service_states()
        for host in ['host_0', 'host_2', 'host_4']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)

    def test_vsa_sched_no_drive_type(self):
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1)
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
        self.assertRaises(driver.WillNotSchedule,
                          self.sched.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone=None)

    def test_vsa_sched_no_enough_drives(self):
        global scheduled_volumes
        scheduled_volumes = []

        self._set_service_states(host_num=3,
                                 drive_type_start_ix=0,
                                 drive_type_num=1,
                                 init_num_drives=0)
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)

        self.assertRaises(driver.WillNotSchedule,
                          self.sched.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone=None)

        # check that everything was returned back
        cur = self._get_service_states()
        for k, v in prev.iteritems():
            self.assertEqual(prev[k]['volume']['drive_qos_info'],
                             cur[k]['volume']['drive_qos_info'])

    def test_vsa_sched_wrong_topic(self):
        self._set_service_states(host_num=1,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1)
        states = self._get_service_states()
        new_states = {}
        new_states['host_0'] = {'compute': states['host_0']['volume']}
        self.service_states = new_states
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)

        self.assertRaises(driver.WillNotSchedule,
                          self.sched.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone=None)

    def test_vsa_sched_provision_volume(self):
        global global_volume
        global_volume = {}
        self._set_service_states(host_num=1,
                                 drive_type_start_ix=0,
                                 drive_type_num=1,
                                 init_num_drives=1)
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)

        self.stubs.UnsetAll()
        self.stubs.Set(self.sched,
                       '_get_service_states', self._fake_get_service_states)
        self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        self.assertEqual(request_spec['volumes'][0]['name'],
                         global_volume['display_name'])

    def test_vsa_sched_no_free_drives(self):
        self._set_service_states(host_num=1,
                                 drive_type_start_ix=0,
                                 drive_type_num=1,
                                 init_num_drives=1)
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        cur = self._get_service_states()
        cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0']
        self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1)

        new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0)

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)
        self._print_service_states()

        self.assertRaises(driver.WillNotSchedule,
                          self.sched.schedule_create_volumes,
                          self.context,
                          new_request,
                          availability_zone=None)

    def test_vsa_sched_forced_host(self):
        global scheduled_volumes
        scheduled_volumes = []

        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10)

        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)

        self.assertRaises(exception.HostBinaryNotFound,
                          self.sched.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone="nova:host_5")

        self.stubs.Set(nova.db,
                       'service_get_by_args', self._fake_service_get_by_args)
        self.stubs.Set(self.sched,
                       'service_is_up', self._fake_service_is_up_False)

        self.assertRaises(driver.WillNotSchedule,
                          self.sched.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone="nova:host_5")

        self.stubs.Set(self.sched,
                       'service_is_up', self._fake_service_is_up_True)

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone="nova:host_5")

        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5')

    def test_vsa_sched_create_volumes_partition(self):
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=5,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1,
                                 exclude_host_list=['host_0', 'host_2'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3,
                                                      drive_ix=3,
                                                      size=50)
        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')

        cur = self._get_service_states()
        for host in ['host_1', 'host_3', 'host_4']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']

            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)

            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumOccupiedPartitions'], 0)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumOccupiedPartitions'], 1)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumFreePartitions'], 5)

            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumFreePartitions'], 0)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['PartitionSize'], 0)

    def test_vsa_sched_create_single_volume_az(self):
        global scheduled_volume
        scheduled_volume = {}

        def _fake_volume_get_az(context, volume_id):
            LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals())
            return {'id': volume_id, 'availability_zone': 'nova:host_3'}

        self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az)
        self.stubs.Set(nova.db,
                       'service_get_by_args', self._fake_service_get_by_args)
        self.stubs.Set(self.sched,
                       'service_is_up', self._fake_service_is_up_True)

        host = self.sched.schedule_create_volume(self.context,
                                                 123, availability_zone=None)

        self.assertEqual(host, 'host_3')
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_3')

    def test_vsa_sched_create_single_non_vsa_volume(self):
        global scheduled_volume
        scheduled_volume = {}

        global global_volume
        global_volume = {}
        global_volume['volume_type_id'] = None

        self.assertRaises(driver.NoValidHost,
                          self.sched.schedule_create_volume,
                          self.context,
                          123,
                          availability_zone=None)

    def test_vsa_sched_create_single_volume(self):
        global scheduled_volume
        scheduled_volume = {}
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_0', 'host_1'])
        prev = self._generate_default_service_states()

        global global_volume
        global_volume = {}

        drive_ix = 2
        name = 'name_' + str(drive_ix)
        volume_types.create(self.context, name,
                            extra_specs={'type': 'vsa_drive',
                                         'drive_name': name,
                                         'drive_type': 'type_' + str(drive_ix),
                                         'drive_size': 1 + 100 * (drive_ix)})
        self.created_types_lst.append(name)
        volume_type = volume_types.get_volume_type_by_name(self.context, name)

        global_volume['volume_type_id'] = volume_type['id']
        global_volume['size'] = 0

        host = self.sched.schedule_create_volume(self.context,
                                                 123, availability_zone=None)

        self.assertEqual(host, 'host_2')
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_2')


class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):

    def setUp(self):
        super(VsaSchedulerTestCaseMostAvail, self).setUp(
                FakeVsaMostAvailCapacityScheduler())

    def tearDown(self):
        self.stubs.UnsetAll()
        super(VsaSchedulerTestCaseMostAvail, self).tearDown()

    def test_vsa_sched_create_single_volume(self):
        global scheduled_volume
        scheduled_volume = {}
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_0', 'host_1'])
        prev = self._generate_default_service_states()

        global global_volume
        global_volume = {}

        drive_ix = 2
        name = 'name_' + str(drive_ix)
        volume_types.create(self.context, name,
                            extra_specs={'type': 'vsa_drive',
                                         'drive_name': name,
                                         'drive_type': 'type_' + str(drive_ix),
                                         'drive_size': 1 + 100 * (drive_ix)})
        self.created_types_lst.append(name)
        volume_type = volume_types.get_volume_type_by_name(self.context, name)

        global_volume['volume_type_id'] = volume_type['id']
        global_volume['size'] = 0

        host = self.sched.schedule_create_volume(self.context,
                                                 123, availability_zone=None)

        self.assertEqual(host, 'host_9')
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_9')

    def test_vsa_sched_create_volumes_simple(self):
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_1', 'host_3'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)

        self._print_service_states()

        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7')

        cur = self._get_service_states()
        for host in ['host_9', 'host_8', 'host_7']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)

    def test_vsa_sched_create_volumes_partition(self):
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=5,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1,
                                 exclude_host_list=['host_0', 'host_2'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3,
                                                      drive_ix=3,
                                                      size=50)
        self.sched.schedule_create_volumes(self.context,
                                           request_spec,
                                           availability_zone=None)

        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1')

        cur = self._get_service_states()
        for host in ['host_1', 'host_3', 'host_4']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']

            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)

            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumOccupiedPartitions'], 0)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumOccupiedPartitions'], 1)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumFreePartitions'], 5)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumFreePartitions'], 0)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['PartitionSize'], 0)
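The service states above are built with vsa_sched.GB_TO_BYTES and read back with vsa_sched.BYTES_TO_GB. Assuming the usual binary-gigabyte convention (an assumption; the real definitions live in nova/scheduler/vsa.py), those helpers amount to:

# Assumed shape of the conversion helpers the tests lean on.
def GB_TO_BYTES(gb):
    return gb * 1024 ** 3


def BYTES_TO_GB(b):
    return b / (1024 ** 3)    # whole GB under Python 2 integer division

assert BYTES_TO_GB(GB_TO_BYTES(201)) == 201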
@@ -147,6 +147,7 @@ class _AuthManagerBaseTestCase(test.TestCase):
                                              '/services/Cloud'))

    def test_can_get_credentials(self):
        self.flags(use_deprecated_auth=True)
        st = {'access': 'access', 'secret': 'secret'}
        with user_and_project_generator(self.manager, user_state=st) as (u, p):
            credentials = self.manager.get_environment_rc(u, p)
@@ -2,6 +2,7 @@

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -159,9 +160,24 @@ class ComputeTestCase(test.TestCase):
        db.security_group_destroy(self.context, group['id'])
        db.instance_destroy(self.context, ref[0]['id'])

    def test_create_instance_associates_config_drive(self):
        """Make sure create associates a config drive."""

        instance_id = self._create_instance(params={'config_drive': True, })

        try:
            self.compute.run_instance(self.context, instance_id)
            instances = db.instance_get_all(context.get_admin_context())
            instance = instances[0]

            self.assertTrue(instance.config_drive)
        finally:
            db.instance_destroy(self.context, instance_id)

    def test_default_hostname_generator(self):
        cases = [(None, 'server_1'), ('Hello, Server!', 'hello_server'),
                 ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello')]
        cases = [(None, 'server-1'), ('Hello, Server!', 'hello-server'),
                 ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
                 ('hello_server', 'hello-server')]
        for display_name, hostname in cases:
            ref = self.compute_api.create(self.context,
                instance_types.get_default_instance_type(), None,
@@ -47,6 +47,29 @@ class InstanceTypeTestCase(test.TestCase):
        self.id = max_id["id"] + 1
        self.name = str(int(time.time()))

    def _nonexistent_flavor_name(self):
        """return an instance type name not in the DB"""
        nonexistent_flavor = "sdfsfsdf"
        flavors = instance_types.get_all_types()
        while nonexistent_flavor in flavors:
            nonexistent_flavor += "z"
        else:
            return nonexistent_flavor

    def _nonexistent_flavor_id(self):
        """return an instance type ID not in the DB"""
        nonexistent_flavor = 2700
        flavor_ids = [value["id"] for key, value in\
                      instance_types.get_all_types().iteritems()]
        while nonexistent_flavor in flavor_ids:
            nonexistent_flavor += 1
        else:
            return nonexistent_flavor

    def _existing_flavor(self):
        """return first instance type name"""
        return instance_types.get_all_types().keys()[0]

    def test_instance_type_create_then_delete(self):
        """Ensure instance types can be created"""
        starting_inst_list = instance_types.get_all_types()
@@ -84,10 +107,11 @@ class InstanceTypeTestCase(test.TestCase):
            exception.InvalidInput,
            instance_types.create, self.name, 256, 1, "aa", self.flavorid)

    def test_non_existant_inst_type_shouldnt_delete(self):
    def test_non_existent_inst_type_shouldnt_delete(self):
        """Ensures that instance type creation fails with invalid args"""
        self.assertRaises(exception.ApiError,
                          instance_types.destroy, "sfsfsdfdfs")
                          instance_types.destroy,
                          self._nonexistent_flavor_name())

    def test_repeated_inst_types_should_raise_api_error(self):
        """Ensures that instance duplicates raises ApiError"""
@@ -97,3 +121,43 @@ class InstanceTypeTestCase(test.TestCase):
        self.assertRaises(
            exception.ApiError,
            instance_types.create, new_name, 256, 1, 120, self.flavorid)

    def test_will_not_destroy_with_no_name(self):
        """Ensure destroy sad path of no name raises error"""
        self.assertRaises(exception.ApiError,
                          instance_types.destroy,
                          self._nonexistent_flavor_name())

    def test_will_not_purge_without_name(self):
        """Ensure purge without a name raises error"""
        self.assertRaises(exception.InvalidInstanceType,
                          instance_types.purge, None)

    def test_will_not_purge_with_wrong_name(self):
        """Ensure purge without correct name raises error"""
        self.assertRaises(exception.ApiError,
                          instance_types.purge,
                          self._nonexistent_flavor_name())

    def test_will_not_get_bad_default_instance_type(self):
        """ensures error raised on bad default instance type"""
        FLAGS.default_instance_type = self._nonexistent_flavor_name()
        self.assertRaises(exception.InstanceTypeNotFoundByName,
                          instance_types.get_default_instance_type)

    def test_will_not_get_instance_type_by_name_with_no_name(self):
        """Ensure get by name returns default flavor with no name"""
        self.assertEqual(instance_types.get_default_instance_type(),
                         instance_types.get_instance_type_by_name(None))

    def test_will_not_get_instance_type_with_bad_name(self):
        """Ensure get by name returns default flavor with bad name"""
        self.assertRaises(exception.InstanceTypeNotFound,
                          instance_types.get_instance_type,
                          self._nonexistent_flavor_name())

    def test_will_not_get_flavor_by_bad_flavor_id(self):
        """Ensure get by flavor raises error with wrong flavorid"""
        self.assertRaises(exception.InstanceTypeNotFound,
                          instance_types.get_instance_type_by_name,
                          self._nonexistent_flavor_id())
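The while/else pattern in the _nonexistent_flavor helpers above is easy to misread: the else suite runs whenever the loop exits without break, so with no break in the body it always executes and the construct behaves like a plain return after the loop. A quick illustration:

# while/else: the else suite runs iff the loop was not left via break.
# The helpers above contain no break, so else always executes.
def first_free(taken, start):
    candidate = start
    while candidate in taken:
        candidate += 1
    else:
        return candidate

print(first_free({2700, 2701}, 2700))   # 2702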
@@ -134,3 +134,24 @@ class NotifierTestCase(test.TestCase):
        self.assertEqual(msg['event_type'], 'error_notification')
        self.assertEqual(msg['priority'], 'ERROR')
        self.assertEqual(msg['payload']['error'], 'foo')

    def test_send_notification_by_decorator(self):
        self.notify_called = False

        def example_api(arg1, arg2):
            return arg1 + arg2

        example_api = nova.notifier.api.notify_decorator(
                            'example_api',
                            example_api)

        def mock_notify(cls, *args):
            self.notify_called = True

        self.stubs.Set(nova.notifier.no_op_notifier, 'notify',
                mock_notify)

        class Mock(object):
            pass
        self.assertEqual(3, example_api(1, 2))
        self.assertEqual(self.notify_called, True)
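The decorator this test exercises wraps an API function so every call also emits a notification before delegating. A minimal sketch consistent with the test's expectations (the notify stand-in and argument handling here are assumptions, not the exact nova.notifier.api implementation):

# Sketch of the wrap-and-notify pattern the decorator test relies on.
def notify(publisher_id, priority, payload):
    print('notification from %s: %s' % (publisher_id, payload))


def notify_decorator(name, fn):
    """Wrap fn so each call emits a notification, then delegates."""
    def wrapped(*args, **kwargs):
        notify(name, 'INFO', {'args': args, 'kwargs': kwargs})
        return fn(*args, **kwargs)
    return wrapped

add = notify_decorator('example_api', lambda a, b: a + b)
assert add(1, 2) == 3   # original result preserved, notification sent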
489 nova/tests/test_virt_drivers.py Normal file
@@ -0,0 +1,489 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import netaddr
import sys
import traceback

from nova import exception
from nova import flags
from nova import image
from nova import log as logging
from nova import test
from nova.tests import utils as test_utils

libvirt = None
FLAGS = flags.FLAGS

LOG = logging.getLogger('nova.tests.test_virt_drivers')


def catch_notimplementederror(f):
    """Decorator to simplify catching drivers raising NotImplementedError

    If a particular call makes a driver raise NotImplementedError, we
    log it so that we can extract this information afterwards to
    automatically generate a hypervisor/feature support matrix."""
    def wrapped_func(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except NotImplementedError:
            frame = traceback.extract_tb(sys.exc_info()[2])[-1]
            LOG.error('%(driver)s does not implement %(method)s' % {
                'driver': type(self.connection),
                'method': frame[2]})

    wrapped_func.__name__ = f.__name__
    wrapped_func.__doc__ = f.__doc__
    return wrapped_func


class _VirtDriverTestCase(test.TestCase):
    def setUp(self):
        super(_VirtDriverTestCase, self).setUp()
        self.connection = self.driver_module.get_connection('')
        self.ctxt = test_utils.get_test_admin_context()
        self.image_service = image.get_default_image_service()

    @catch_notimplementederror
    def test_init_host(self):
        self.connection.init_host('myhostname')

    @catch_notimplementederror
    def test_list_instances(self):
        self.connection.list_instances()

    @catch_notimplementederror
    def test_list_instances_detail(self):
        self.connection.list_instances_detail()

    @catch_notimplementederror
    def test_spawn(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)

        domains = self.connection.list_instances()
        self.assertIn(instance_ref['name'], domains)

        domains_details = self.connection.list_instances_detail()
        self.assertIn(instance_ref['name'], [i.name for i in domains_details])

    @catch_notimplementederror
    def test_snapshot_not_running(self):
        instance_ref = test_utils.get_test_instance()
        img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
        self.assertRaises(exception.InstanceNotRunning,
                          self.connection.snapshot,
                          self.ctxt, instance_ref, img_ref['id'])

    @catch_notimplementederror
    def test_snapshot_running(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'])

    @catch_notimplementederror
    def test_reboot(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.reboot(instance_ref, network_info)

    @catch_notimplementederror
    def test_get_host_ip_addr(self):
        host_ip = self.connection.get_host_ip_addr()

        # Will raise an exception if it's not a valid IP at all
        ip = netaddr.IPAddress(host_ip)

        # For now, assume IPv4.
        self.assertEquals(ip.version, 4)

    @catch_notimplementederror
    def test_resize_running(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.resize(instance_ref, 7)

    @catch_notimplementederror
    def test_set_admin_password(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.set_admin_password(instance_ref, 'p4ssw0rd')

    @catch_notimplementederror
    def test_inject_file(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.inject_file(instance_ref,
                                    base64.b64encode('/testfile'),
                                    base64.b64encode('testcontents'))

    @catch_notimplementederror
    def test_agent_update(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.agent_update(instance_ref, 'http://www.openstack.org/',
                                     'd41d8cd98f00b204e9800998ecf8427e')

    @catch_notimplementederror
    def test_rescue(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.rescue(self.ctxt, instance_ref,
                               lambda x: None, network_info)

    @catch_notimplementederror
    def test_unrescue_unrescued_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.unrescue(instance_ref, lambda x: None, network_info)

    @catch_notimplementederror
    def test_unrescue_rescued_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.rescue(self.ctxt, instance_ref,
                               lambda x: None, network_info)
        self.connection.unrescue(instance_ref, lambda x: None, network_info)

    @catch_notimplementederror
    def test_poll_rescued_instances(self):
        self.connection.poll_rescued_instances(10)

    @catch_notimplementederror
    def test_migrate_disk_and_power_off(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.migrate_disk_and_power_off(instance_ref, 'dest_host')

    @catch_notimplementederror
    def test_pause(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.pause(instance_ref, None)

    @catch_notimplementederror
    def test_unpause_unpaused_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.unpause(instance_ref, None)

    @catch_notimplementederror
    def test_unpause_paused_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.pause(instance_ref, None)
        self.connection.unpause(instance_ref, None)

    @catch_notimplementederror
    def test_suspend(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.suspend(instance_ref, None)

    @catch_notimplementederror
    def test_resume_unsuspended_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.resume(instance_ref, None)

    @catch_notimplementederror
    def test_resume_suspended_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.suspend(instance_ref, None)
        self.connection.resume(instance_ref, None)

    @catch_notimplementederror
    def test_destroy_instance_nonexistant(self):
        fake_instance = {'id': 42, 'name': 'I just made this up!'}
        network_info = test_utils.get_test_network_info()
        self.connection.destroy(fake_instance, network_info)

    @catch_notimplementederror
    def test_destroy_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.assertIn(instance_ref['name'],
                      self.connection.list_instances())
        self.connection.destroy(instance_ref, network_info)
        self.assertNotIn(instance_ref['name'],
                         self.connection.list_instances())

    @catch_notimplementederror
    def test_attach_detach_volume(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.attach_volume(instance_ref['name'],
                                      '/dev/null', '/mnt/nova/something')
        self.connection.detach_volume(instance_ref['name'],
                                      '/mnt/nova/something')

    @catch_notimplementederror
    def test_get_info(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        info = self.connection.get_info(instance_ref['name'])
        self.assertIn('state', info)
        self.assertIn('max_mem', info)
        self.assertIn('mem', info)
        self.assertIn('num_cpu', info)
        self.assertIn('cpu_time', info)

    @catch_notimplementederror
    def test_get_info_for_unknown_instance(self):
        self.assertRaises(exception.NotFound,
                          self.connection.get_info, 'I just made this name up')

    @catch_notimplementederror
    def test_get_diagnostics(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.get_diagnostics(instance_ref['name'])

    @catch_notimplementederror
    def test_list_disks(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.list_disks(instance_ref['name'])

    @catch_notimplementederror
    def test_list_interfaces(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.list_interfaces(instance_ref['name'])

    @catch_notimplementederror
    def test_block_stats(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        stats = self.connection.block_stats(instance_ref['name'], 'someid')
        self.assertEquals(len(stats), 5)

    @catch_notimplementederror
    def test_interface_stats(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        stats = self.connection.interface_stats(instance_ref['name'], 'someid')
        self.assertEquals(len(stats), 8)

    @catch_notimplementederror
    def test_get_console_output(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        console_output = self.connection.get_console_output(instance_ref)
        self.assertTrue(isinstance(console_output, basestring))

    @catch_notimplementederror
    def test_get_ajax_console(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        ajax_console = self.connection.get_ajax_console(instance_ref)
        self.assertIn('token', ajax_console)
        self.assertIn('host', ajax_console)
        self.assertIn('port', ajax_console)

    @catch_notimplementederror
    def test_get_vnc_console(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        vnc_console = self.connection.get_vnc_console(instance_ref)
        self.assertIn('token', vnc_console)
        self.assertIn('host', vnc_console)
        self.assertIn('port', vnc_console)

    @catch_notimplementederror
    def test_get_console_pool_info(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        console_pool = self.connection.get_console_pool_info(instance_ref)
        self.assertIn('address', console_pool)
        self.assertIn('username', console_pool)
        self.assertIn('password', console_pool)

    @catch_notimplementederror
    def test_refresh_security_group_rules(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        # FIXME: Create security group and add the instance to it
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.refresh_security_group_rules(1)

    @catch_notimplementederror
    def test_refresh_security_group_members(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        # FIXME: Create security group and add the instance to it
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.refresh_security_group_members(1)

    @catch_notimplementederror
    def test_refresh_provider_fw_rules(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.refresh_provider_fw_rules()

    @catch_notimplementederror
    def test_update_available_resource(self):
        self.compute = self.start_service('compute', host='dummy')
        self.connection.update_available_resource(self.ctxt, 'dummy')

    @catch_notimplementederror
    def test_compare_cpu(self):
        cpu_info = '''{ "topology": {
                            "sockets": 1,
                            "cores": 2,
                            "threads": 1 },
                        "features": [
                            "xtpr",
                            "tm2",
                            "est",
                            "vmx",
                            "ds_cpl",
                            "monitor",
                            "pbe",
                            "tm",
                            "ht",
                            "ss",
                            "acpi",
                            "ds",
                            "vme"],
                        "arch": "x86_64",
                        "model": "Penryn",
                        "vendor": "Intel" }'''

        self.connection.compare_cpu(cpu_info)

    @catch_notimplementederror
    def test_ensure_filtering_for_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.ensure_filtering_rules_for_instance(instance_ref,
                                                            network_info)

    @catch_notimplementederror
    def test_unfilter_instance(self):
        instance_ref = test_utils.get_test_instance()
        network_info = test_utils.get_test_network_info()
        self.connection.unfilter_instance(instance_ref, network_info)

    @catch_notimplementederror
    def test_live_migration(self):
        network_info = test_utils.get_test_network_info()
        instance_ref = test_utils.get_test_instance()
        self.connection.spawn(self.ctxt, instance_ref, network_info)
        self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
                                       None, None)

    @catch_notimplementederror
    def _check_host_status_fields(self, host_status):
        self.assertIn('host_name-description', host_status)
        self.assertIn('host_hostname', host_status)
        self.assertIn('host_memory_total', host_status)
        self.assertIn('host_memory_overhead', host_status)
        self.assertIn('host_memory_free', host_status)
        self.assertIn('host_memory_free_computed', host_status)
        self.assertIn('host_other_config', host_status)
        self.assertIn('host_ip_address', host_status)
        self.assertIn('host_cpu_info', host_status)
        self.assertIn('disk_available', host_status)
        self.assertIn('disk_total', host_status)
        self.assertIn('disk_used', host_status)
        self.assertIn('host_uuid', host_status)
        self.assertIn('host_name_label', host_status)

    @catch_notimplementederror
    def test_update_host_status(self):
        host_status = self.connection.update_host_status()
        self._check_host_status_fields(host_status)

    @catch_notimplementederror
    def test_get_host_stats(self):
        host_status = self.connection.get_host_stats()
        self._check_host_status_fields(host_status)

    @catch_notimplementederror
    def test_set_host_enabled(self):
        self.connection.set_host_enabled('a useless argument?', True)

    @catch_notimplementederror
    def test_host_power_action_reboot(self):
        self.connection.host_power_action('a useless argument?', 'reboot')

    @catch_notimplementederror
    def test_host_power_action_shutdown(self):
        self.connection.host_power_action('a useless argument?', 'shutdown')

    @catch_notimplementederror
    def test_host_power_action_startup(self):
        self.connection.host_power_action('a useless argument?', 'startup')


class AbstractDriverTestCase(_VirtDriverTestCase):
    def setUp(self):
        import nova.virt.driver

        self.driver_module = nova.virt.driver

        def get_driver_connection(_):
            return nova.virt.driver.ComputeDriver()

        self.driver_module.get_connection = get_driver_connection
        super(AbstractDriverTestCase, self).setUp()


class FakeConnectionTestCase(_VirtDriverTestCase):
    def setUp(self):
        import nova.virt.fake
        self.driver_module = nova.virt.fake
        super(FakeConnectionTestCase, self).setUp()

# Before long, we'll add the real hypervisor drivers here as well
# with whatever instrumentation they need to work independently of
# their hypervisor. This way, we can verify that they all act the
# same.
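catch_notimplementederror above is an ordinary wrapping decorator. The same log-and-swallow pattern, reduced to a self-contained example outside the test class (print stands in for the LOG call):

# Wrap a function, swallow NotImplementedError, record the gap.
def swallow_notimplemented(f):
    def wrapped(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except NotImplementedError:
            print('%s is not implemented here' % f.__name__)
    wrapped.__name__ = f.__name__
    wrapped.__doc__ = f.__doc__
    return wrapped


@swallow_notimplemented
def not_done_yet():
    raise NotImplementedError

not_done_yet()   # reports instead of raising, so a test run continues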
207 nova/tests/test_volume_types.py Normal file
@@ -0,0 +1,207 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for volume types code
"""
import time

from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.volume import volume_types
from nova.db.sqlalchemy.session import get_session
from nova.db.sqlalchemy import models

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.test_volume_types')


class VolumeTypeTestCase(test.TestCase):
    """Test cases for volume type code"""
    def setUp(self):
        super(VolumeTypeTestCase, self).setUp()

        self.ctxt = context.get_admin_context()
        self.vol_type1_name = str(int(time.time()))
        self.vol_type1_specs = dict(
                    type="physical drive",
                    drive_type="SAS",
                    size="300",
                    rpm="7200",
                    visible="True")
        self.vol_type1 = dict(name=self.vol_type1_name,
                              extra_specs=self.vol_type1_specs)

    def test_volume_type_create_then_destroy(self):
        """Ensure volume types can be created and deleted"""
        prev_all_vtypes = volume_types.get_all_types(self.ctxt)

        volume_types.create(self.ctxt,
                            self.vol_type1_name,
                            self.vol_type1_specs)
        new = volume_types.get_volume_type_by_name(self.ctxt,
                                                   self.vol_type1_name)

        LOG.info(_("Given data: %s"), self.vol_type1_specs)
        LOG.info(_("Result data: %s"), new)

        for k, v in self.vol_type1_specs.iteritems():
            self.assertEqual(v, new['extra_specs'][k],
                             'one of fields doesnt match')

        new_all_vtypes = volume_types.get_all_types(self.ctxt)
        self.assertEqual(len(prev_all_vtypes) + 1,
                         len(new_all_vtypes),
                         'drive type was not created')

        volume_types.destroy(self.ctxt, self.vol_type1_name)
        new_all_vtypes = volume_types.get_all_types(self.ctxt)
        self.assertEqual(prev_all_vtypes,
                         new_all_vtypes,
                         'drive type was not deleted')

    def test_volume_type_create_then_purge(self):
        """Ensure volume types can be created and deleted"""
        prev_all_vtypes = volume_types.get_all_types(self.ctxt, inactive=1)

        volume_types.create(self.ctxt,
                            self.vol_type1_name,
                            self.vol_type1_specs)
        new = volume_types.get_volume_type_by_name(self.ctxt,
                                                   self.vol_type1_name)

        for k, v in self.vol_type1_specs.iteritems():
            self.assertEqual(v, new['extra_specs'][k],
                             'one of fields doesnt match')

        new_all_vtypes = volume_types.get_all_types(self.ctxt, inactive=1)
        self.assertEqual(len(prev_all_vtypes) + 1,
                         len(new_all_vtypes),
                         'drive type was not created')

        volume_types.destroy(self.ctxt, self.vol_type1_name)
        new_all_vtypes2 = volume_types.get_all_types(self.ctxt, inactive=1)
        self.assertEqual(len(new_all_vtypes),
                         len(new_all_vtypes2),
                         'drive type was incorrectly deleted')

        volume_types.purge(self.ctxt, self.vol_type1_name)
        new_all_vtypes2 = volume_types.get_all_types(self.ctxt, inactive=1)
        self.assertEqual(len(new_all_vtypes) - 1,
                         len(new_all_vtypes2),
                         'drive type was not purged')

    def test_get_all_volume_types(self):
        """Ensures that all volume types can be retrieved"""
        session = get_session()
        total_volume_types = session.query(models.VolumeTypes).\
                                     count()
        vol_types = volume_types.get_all_types(self.ctxt)
        self.assertEqual(total_volume_types, len(vol_types))

    def test_non_existant_inst_type_shouldnt_delete(self):
        """Ensures that volume type creation fails with invalid args"""
        self.assertRaises(exception.ApiError,
                          volume_types.destroy, self.ctxt, "sfsfsdfdfs")

    def test_repeated_vol_types_should_raise_api_error(self):
        """Ensures that volume duplicates raises ApiError"""
        new_name = self.vol_type1_name + "dup"
        volume_types.create(self.ctxt, new_name)
        volume_types.destroy(self.ctxt, new_name)
        self.assertRaises(
                exception.ApiError,
                volume_types.create, self.ctxt, new_name)

    def test_invalid_volume_types_params(self):
        """Ensures that volume type creation fails with invalid args"""
        self.assertRaises(exception.InvalidVolumeType,
                          volume_types.destroy, self.ctxt, None)
        self.assertRaises(exception.InvalidVolumeType,
                          volume_types.purge, self.ctxt, None)
        self.assertRaises(exception.InvalidVolumeType,
                          volume_types.get_volume_type, self.ctxt, None)
        self.assertRaises(exception.InvalidVolumeType,
                          volume_types.get_volume_type_by_name,
                          self.ctxt, None)

    def test_volume_type_get_by_id_and_name(self):
        """Ensure volume types get returns same entry"""
        volume_types.create(self.ctxt,
                            self.vol_type1_name,
                            self.vol_type1_specs)
        new = volume_types.get_volume_type_by_name(self.ctxt,
                                                   self.vol_type1_name)

        new2 = volume_types.get_volume_type(self.ctxt, new['id'])
        self.assertEqual(new, new2)

    def test_volume_type_search_by_extra_spec(self):
        """Ensure volume types get by extra spec returns correct type"""
        volume_types.create(self.ctxt, "type1", {"key1": "val1",
                                                 "key2": "val2"})
        volume_types.create(self.ctxt, "type2", {"key2": "val2",
                                                 "key3": "val3"})
        volume_types.create(self.ctxt, "type3", {"key3": "another_value",
                                                 "key4": "val4"})

        vol_types = volume_types.get_all_types(self.ctxt,
                        search_opts={'extra_specs': {"key1": "val1"}})
        LOG.info("vol_types: %s" % vol_types)
        self.assertEqual(len(vol_types), 1)
        self.assertTrue("type1" in vol_types.keys())
        self.assertEqual(vol_types['type1']['extra_specs'],
                         {"key1": "val1", "key2": "val2"})

        vol_types = volume_types.get_all_types(self.ctxt,
                        search_opts={'extra_specs': {"key2": "val2"}})
        LOG.info("vol_types: %s" % vol_types)
        self.assertEqual(len(vol_types), 2)
        self.assertTrue("type1" in vol_types.keys())
        self.assertTrue("type2" in vol_types.keys())

        vol_types = volume_types.get_all_types(self.ctxt,
                        search_opts={'extra_specs': {"key3": "val3"}})
        LOG.info("vol_types: %s" % vol_types)
        self.assertEqual(len(vol_types), 1)
        self.assertTrue("type2" in vol_types.keys())

    def test_volume_type_search_by_extra_spec_multiple(self):
        """Ensure volume types get by extra spec returns correct type"""
        volume_types.create(self.ctxt, "type1", {"key1": "val1",
                                                 "key2": "val2",
                                                 "key3": "val3"})
        volume_types.create(self.ctxt, "type2", {"key2": "val2",
                                                 "key3": "val3"})
        volume_types.create(self.ctxt, "type3", {"key1": "val1",
                                                 "key3": "val3",
                                                 "key4": "val4"})

        vol_types = volume_types.get_all_types(self.ctxt,
                        search_opts={'extra_specs': {"key1": "val1",
                                                     "key3": "val3"}})
|
||||
LOG.info("vol_types: %s" % vol_types)
|
||||
self.assertEqual(len(vol_types), 2)
|
||||
self.assertTrue("type1" in vol_types.keys())
|
||||
self.assertTrue("type3" in vol_types.keys())
|
||||
self.assertEqual(vol_types['type1']['extra_specs'],
|
||||
{"key1": "val1", "key2": "val2", "key3": "val3"})
|
||||
self.assertEqual(vol_types['type3']['extra_specs'],
|
||||
{"key1": "val1", "key3": "val3", "key4": "val4"})
|
||||
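The search tests above imply that extra-spec filtering is a dict-subset match: a type matches only when every searched key is present with exactly the searched value. A minimal sketch of that predicate — `spec_matches` is a hypothetical name for illustration; the real filtering lives inside volume_types.get_all_types:

    def spec_matches(vol_type_extra_specs, searched_specs):
        """Return True when every searched key/value pair is present
        in the volume type's extra_specs (dict-subset semantics)."""
        for key, value in searched_specs.iteritems():
            if vol_type_extra_specs.get(key) != value:
                return False
        return True

    # type1 matches {"key1": "val1"}; type3 does not ("key3" differs).
    assert spec_matches({"key1": "val1", "key2": "val2"}, {"key1": "val1"})
    assert not spec_matches({"key3": "another_value"}, {"key3": "val3"})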
132
nova/tests/test_volume_types_extra_specs.py
Normal file
@@ -0,0 +1,132 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
# Copyright 2011 University of Southern California
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for volume types extra specs code
"""

from nova import context
from nova import db
from nova import test
from nova.db.sqlalchemy.session import get_session
from nova.db.sqlalchemy import models


class VolumeTypeExtraSpecsTestCase(test.TestCase):

    def setUp(self):
        super(VolumeTypeExtraSpecsTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.vol_type1 = dict(name="TEST: Regular volume test")
        self.vol_type1_specs = dict(vol_extra1="value1",
                                    vol_extra2="value2",
                                    vol_extra3=3)
        self.vol_type1['extra_specs'] = self.vol_type1_specs
        ref = db.api.volume_type_create(self.context, self.vol_type1)
        self.volume_type1_id = ref.id
        for k, v in self.vol_type1_specs.iteritems():
            self.vol_type1_specs[k] = str(v)

        self.vol_type2_noextra = dict(name="TEST: Volume type without extra")
        ref = db.api.volume_type_create(self.context, self.vol_type2_noextra)
        self.vol_type2_id = ref.id

    def tearDown(self):
        # Remove the volume types from the database
        db.api.volume_type_purge(context.get_admin_context(),
                                 self.vol_type1['name'])
        db.api.volume_type_purge(context.get_admin_context(),
                                 self.vol_type2_noextra['name'])
        super(VolumeTypeExtraSpecsTestCase, self).tearDown()

    def test_volume_type_specs_get(self):
        expected_specs = self.vol_type1_specs.copy()
        actual_specs = db.api.volume_type_extra_specs_get(
                              context.get_admin_context(),
                              self.volume_type1_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_volume_type_extra_specs_delete(self):
        expected_specs = self.vol_type1_specs.copy()
        del expected_specs['vol_extra2']
        db.api.volume_type_extra_specs_delete(context.get_admin_context(),
                                              self.volume_type1_id,
                                              'vol_extra2')
        actual_specs = db.api.volume_type_extra_specs_get(
                              context.get_admin_context(),
                              self.volume_type1_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_volume_type_extra_specs_update(self):
        expected_specs = self.vol_type1_specs.copy()
        expected_specs['vol_extra3'] = "4"
        db.api.volume_type_extra_specs_update_or_create(
                              context.get_admin_context(),
                              self.volume_type1_id,
                              dict(vol_extra3=4))
        actual_specs = db.api.volume_type_extra_specs_get(
                              context.get_admin_context(),
                              self.volume_type1_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_volume_type_extra_specs_create(self):
        expected_specs = self.vol_type1_specs.copy()
        expected_specs['vol_extra4'] = 'value4'
        expected_specs['vol_extra5'] = 'value5'
        db.api.volume_type_extra_specs_update_or_create(
                              context.get_admin_context(),
                              self.volume_type1_id,
                              dict(vol_extra4="value4",
                                   vol_extra5="value5"))
        actual_specs = db.api.volume_type_extra_specs_get(
                              context.get_admin_context(),
                              self.volume_type1_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_volume_type_get_with_extra_specs(self):
        volume_type = db.api.volume_type_get(
                            context.get_admin_context(),
                            self.volume_type1_id)
        self.assertEquals(volume_type['extra_specs'],
                          self.vol_type1_specs)

        volume_type = db.api.volume_type_get(
                            context.get_admin_context(),
                            self.vol_type2_id)
        self.assertEquals(volume_type['extra_specs'], {})

    def test_volume_type_get_by_name_with_extra_specs(self):
        volume_type = db.api.volume_type_get_by_name(
                            context.get_admin_context(),
                            self.vol_type1['name'])
        self.assertEquals(volume_type['extra_specs'],
                          self.vol_type1_specs)

        volume_type = db.api.volume_type_get_by_name(
                            context.get_admin_context(),
                            self.vol_type2_noextra['name'])
        self.assertEquals(volume_type['extra_specs'], {})

    def test_volume_type_get_all(self):
        expected_specs = self.vol_type1_specs.copy()

        types = db.api.volume_type_get_all(context.get_admin_context())

        self.assertEquals(
            types[self.vol_type1['name']]['extra_specs'], expected_specs)

        self.assertEquals(
            types[self.vol_type2_noextra['name']]['extra_specs'], {})
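The update and create tests above exercise the same DB call, whose "upsert" semantics they pin down: existing keys are overwritten, unknown keys create new rows, and values are stringified on write. A short usage sketch under those assumptions (`type_id` stands for an existing volume type id, as in the fixtures above):

    ctxt = context.get_admin_context()

    # Existing key: value is overwritten (and stored as a string).
    db.api.volume_type_extra_specs_update_or_create(ctxt, type_id,
                                                    dict(vol_extra3=4))

    # New keys: rows are created on the fly by the same call.
    db.api.volume_type_extra_specs_update_or_create(ctxt, type_id,
                                                    dict(vol_extra4="value4"))

    specs = db.api.volume_type_extra_specs_get(ctxt, type_id)
    assert specs['vol_extra3'] == "4"    # stringified on the way in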
182
nova/tests/test_vsa.py
Normal file
@@ -0,0 +1,182 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import stubout

from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement

from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import vsa
from nova import volume
from nova.volume import volume_types
from nova.vsa import utils as vsa_utils

import nova.image.fake

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.vsa')


class VsaTestCase(test.TestCase):

    def setUp(self):
        super(VsaTestCase, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        self.vsa_api = vsa.API()
        self.volume_api = volume.API()

        FLAGS.quota_volumes = 100
        FLAGS.quota_gigabytes = 10000

        self.context = context.get_admin_context()

        volume_types.create(self.context,
                            'SATA_500_7200',
                            extra_specs={'type': 'vsa_drive',
                                         'drive_name': 'SATA_500_7200',
                                         'drive_type': 'SATA',
                                         'drive_size': '500',
                                         'drive_rpm': '7200'})

        def fake_show_by_name(meh, context, name):
            if name == 'wrong_image_name':
                LOG.debug(_("Test: Emulate wrong VSA name. Raise"))
                raise exception.ImageNotFound
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

        self.stubs.Set(nova.image.fake._FakeImageService,
                       'show_by_name',
                       fake_show_by_name)

    def tearDown(self):
        self.stubs.UnsetAll()
        super(VsaTestCase, self).tearDown()

    def test_vsa_create_delete_defaults(self):
        param = {'display_name': 'VSA name test'}
        vsa_ref = self.vsa_api.create(self.context, **param)
        self.assertEqual(vsa_ref['display_name'], param['display_name'])
        self.vsa_api.delete(self.context, vsa_ref['id'])

    def test_vsa_create_delete_check_in_db(self):
        vsa_list1 = self.vsa_api.get_all(self.context)
        vsa_ref = self.vsa_api.create(self.context)
        vsa_list2 = self.vsa_api.get_all(self.context)
        self.assertEqual(len(vsa_list2), len(vsa_list1) + 1)

        self.vsa_api.delete(self.context, vsa_ref['id'])
        vsa_list3 = self.vsa_api.get_all(self.context)
        self.assertEqual(len(vsa_list3), len(vsa_list2) - 1)

    def test_vsa_create_delete_high_vc_count(self):
        param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
        vsa_ref = self.vsa_api.create(self.context, **param)
        self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)
        self.vsa_api.delete(self.context, vsa_ref['id'])

    def test_vsa_create_wrong_image_name(self):
        param = {'image_name': 'wrong_image_name'}
        self.assertRaises(exception.ApiError,
                          self.vsa_api.create, self.context, **param)

    def test_vsa_create_db_error(self):

        def fake_vsa_create(context, options):
            LOG.debug(_("Test: Emulate DB error. Raise"))
            raise exception.Error

        self.stubs.Set(nova.db.api, 'vsa_create', fake_vsa_create)
        self.assertRaises(exception.ApiError,
                          self.vsa_api.create, self.context)

    def test_vsa_create_wrong_storage_params(self):
        vsa_list1 = self.vsa_api.get_all(self.context)
        param = {'storage': [{'stub': 1}]}
        self.assertRaises(exception.ApiError,
                          self.vsa_api.create, self.context, **param)
        vsa_list2 = self.vsa_api.get_all(self.context)
        self.assertEqual(len(vsa_list2), len(vsa_list1))

        param = {'storage': [{'drive_name': 'wrong name'}]}
        self.assertRaises(exception.ApiError,
                          self.vsa_api.create, self.context, **param)

    def test_vsa_create_with_storage(self, multi_vol_creation=True):
        """Test creation of VSA with BE storage"""

        FLAGS.vsa_multi_vol_creation = multi_vol_creation

        param = {'storage': [{'drive_name': 'SATA_500_7200',
                              'num_drives': 3}]}
        vsa_ref = self.vsa_api.create(self.context, **param)
        self.assertEqual(vsa_ref['vol_count'], 3)
        self.vsa_api.delete(self.context, vsa_ref['id'])

        param = {'storage': [{'drive_name': 'SATA_500_7200',
                              'num_drives': 3}],
                 'shared': True}
        vsa_ref = self.vsa_api.create(self.context, **param)
        self.assertEqual(vsa_ref['vol_count'], 15)
        self.vsa_api.delete(self.context, vsa_ref['id'])

    def test_vsa_create_with_storage_single_volumes(self):
        self.test_vsa_create_with_storage(multi_vol_creation=False)

    def test_vsa_update(self):
        vsa_ref = self.vsa_api.create(self.context)

        param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
        vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
        self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)

        param = {'vc_count': 2}
        vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
        self.assertEqual(vsa_ref['vc_count'], 2)

        self.vsa_api.delete(self.context, vsa_ref['id'])

    def test_vsa_generate_user_data(self):

        FLAGS.vsa_multi_vol_creation = False
        param = {'display_name': 'VSA name test',
                 'display_description': 'VSA desc test',
                 'vc_count': 2,
                 'storage': [{'drive_name': 'SATA_500_7200',
                              'num_drives': 3}]}
        vsa_ref = self.vsa_api.create(self.context, **param)
        volumes = self.vsa_api.get_all_vsa_drives(self.context,
                                                  vsa_ref['id'])

        user_data = vsa_utils.generate_user_data(vsa_ref, volumes)
        user_data = base64.b64decode(user_data)

        LOG.debug(_("Test: user_data = %s"), user_data)

        elem = ElementTree.fromstring(user_data)
        self.assertEqual(elem.findtext('name'),
                         param['display_name'])
        self.assertEqual(elem.findtext('description'),
                         param['display_description'])
        self.assertEqual(elem.findtext('vc_count'),
                         str(param['vc_count']))

        self.vsa_api.delete(self.context, vsa_ref['id'])
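The 3-versus-15 volume counts asserted in test_vsa_create_with_storage follow directly from the shared-storage partitioning rule in nova/vsa/api.py (shown later in this diff): total capacity divided by the partition size. A minimal check of the arithmetic, assuming the test environment's FLAGS.vsa_part_size_gb is 100 (the flag value itself is not shown in this diff):

    num_drives = 3
    drive_size_gb = 500    # from the SATA_500_7200 volume type above
    part_size_gb = 100     # assumed FLAGS.vsa_part_size_gb in the test env

    # Dedicated storage: one BE volume per physical drive -> 3 volumes.
    # Shared storage: drives are split into fixed-size partitions.
    num_volumes = (num_drives * drive_size_gb) / part_size_gb
    assert num_volumes == 15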
136
nova/tests/test_vsa_volumes.py
Normal file
@@ -0,0 +1,136 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import stubout

from nova import exception
from nova import flags
from nova import vsa
from nova import volume
from nova import db
from nova import context
from nova import test
from nova import log as logging
import nova.image.fake

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.vsa.volumes')


class VsaVolumesTestCase(test.TestCase):

    def setUp(self):
        super(VsaVolumesTestCase, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        self.vsa_api = vsa.API()
        self.volume_api = volume.API()
        self.context = context.get_admin_context()

        self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context)

        def fake_show_by_name(meh, context, name):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

        self.stubs.Set(nova.image.fake._FakeImageService,
                       'show_by_name',
                       fake_show_by_name)

        param = {'display_name': 'VSA name test'}
        vsa_ref = self.vsa_api.create(self.context, **param)
        self.vsa_id = vsa_ref['id']

    def tearDown(self):
        if self.vsa_id:
            self.vsa_api.delete(self.context, self.vsa_id)
        self.stubs.UnsetAll()
        super(VsaVolumesTestCase, self).tearDown()

    def _default_volume_param(self):
        return {
            'size': 1,
            'snapshot_id': None,
            'name': 'Test volume name',
            'description': 'Test volume desc name',
            'volume_type': self.default_vol_type,
            'metadata': {'from_vsa_id': self.vsa_id}
            }

    def _get_all_volumes_by_vsa(self):
        return self.volume_api.get_all(self.context,
                search_opts={'metadata': {"from_vsa_id": str(self.vsa_id)}})

    def test_vsa_volume_create_delete(self):
        """Check that a volume is properly created and deleted."""
        volume_param = self._default_volume_param()
        volume_ref = self.volume_api.create(self.context, **volume_param)

        self.assertEqual(volume_ref['display_name'],
                         volume_param['name'])
        self.assertEqual(volume_ref['display_description'],
                         volume_param['description'])
        self.assertEqual(volume_ref['size'],
                         volume_param['size'])
        self.assertEqual(volume_ref['status'],
                         'creating')

        vols2 = self._get_all_volumes_by_vsa()
        self.assertEqual(1, len(vols2))
        volume_ref = vols2[0]

        self.assertEqual(volume_ref['display_name'],
                         volume_param['name'])
        self.assertEqual(volume_ref['display_description'],
                         volume_param['description'])
        self.assertEqual(volume_ref['size'],
                         volume_param['size'])
        self.assertEqual(volume_ref['status'],
                         'creating')

        self.volume_api.update(self.context,
                               volume_ref['id'], {'status': 'available'})
        self.volume_api.delete(self.context, volume_ref['id'])

        vols3 = self._get_all_volumes_by_vsa()
        self.assertEqual(1, len(vols3))
        volume_ref = vols3[0]
        self.assertEqual(volume_ref['status'],
                         'deleting')

    def test_vsa_volume_delete_nonavail_volume(self):
        """Check volume deletion in different states."""
        volume_param = self._default_volume_param()
        volume_ref = self.volume_api.create(self.context, **volume_param)

        self.volume_api.update(self.context,
                               volume_ref['id'], {'status': 'in-use'})
        self.assertRaises(exception.ApiError,
                          self.volume_api.delete,
                          self.context, volume_ref['id'])

    def test_vsa_volume_delete_vsa_with_volumes(self):
        """Check that deleting a VSA deletes its volumes."""

        vols1 = self._get_all_volumes_by_vsa()
        for i in range(3):
            volume_param = self._default_volume_param()
            volume_ref = self.volume_api.create(self.context, **volume_param)

        vols2 = self._get_all_volumes_by_vsa()
        self.assertEqual(len(vols1) + 3, len(vols2))

        self.vsa_api.delete(self.context, self.vsa_id)

        vols3 = self._get_all_volumes_by_vsa()
        self.assertEqual(len(vols1), len(vols3))
18
nova/vsa/__init__.py
Normal file
@@ -0,0 +1,18 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.vsa.api import API
411
nova/vsa/api.py
Normal file
@@ -0,0 +1,411 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Handles all requests relating to Virtual Storage Arrays (VSAs).

Experimental code. Requires special VSA image.
For assistance and guidelines please contact
Zadara Storage Inc and the OpenStack community.
"""

import sys

from nova import compute
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova import volume
from nova.compute import instance_types
from nova.db import base
from nova.volume import volume_types


class VsaState:
    CREATING = 'creating'        # VSA creating (not ready yet)
    LAUNCHING = 'launching'      # Launching VCs (all BE volumes were created)
    CREATED = 'created'          # VSA fully created and ready for use
    PARTIAL = 'partial'          # Some BE drives were allocated
    FAILED = 'failed'            # Some BE storage allocations failed
    DELETING = 'deleting'        # VSA started the deletion procedure


FLAGS = flags.FLAGS
flags.DEFINE_string('vsa_ec2_access_key', None,
                    'EC2 access key used by VSA for accessing nova')
flags.DEFINE_string('vsa_ec2_user_id', None,
                    'User ID used by VSA for accessing nova')
flags.DEFINE_boolean('vsa_multi_vol_creation', True,
                     'Ask scheduler to create multiple volumes in one call')
flags.DEFINE_string('vsa_volume_type_name', 'VSA volume type',
                    'Name of volume type associated with FE VSA volumes')

LOG = logging.getLogger('nova.vsa')


class API(base.Base):
    """API for interacting with the VSA manager."""

    def __init__(self, compute_api=None, volume_api=None, **kwargs):
        self.compute_api = compute_api or compute.API()
        self.volume_api = volume_api or volume.API()
        super(API, self).__init__(**kwargs)

    def _check_volume_type_correctness(self, vol_type):
        if vol_type.get('extra_specs') is None or\
           vol_type['extra_specs'].get('type') != 'vsa_drive' or\
           vol_type['extra_specs'].get('drive_type') is None or\
           vol_type['extra_specs'].get('drive_size') is None:

            raise exception.ApiError(_("Invalid drive type %s")
                                     % vol_type['name'])

    def _get_default_vsa_instance_type(self):
        return instance_types.get_instance_type_by_name(
                                FLAGS.default_vsa_instance_type)

    def _check_storage_parameters(self, context, vsa_name, storage,
                                  shared, first_index=0):
        """
        Translates a storage array of disks to a list of volumes

        :param storage: List of dictionaries with following keys:
                        drive_name, num_drives, size
        :param shared: Specifies if storage is dedicated or shared.
                       For shared storage, disks are split into partitions
        """
        volume_params = []
        for node in storage:

            name = node.get('drive_name', None)
            num_disks = node.get('num_drives', 1)

            if name is None:
                raise exception.ApiError(_("No drive_name param found in %s")
                                         % node)
            try:
                vol_type = volume_types.get_volume_type_by_name(context, name)
            except exception.NotFound:
                raise exception.ApiError(_("Invalid drive type name %s")
                                         % name)

            self._check_volume_type_correctness(vol_type)

            # if size field present - override disk size specified in DB
            size = int(node.get('size',
                                vol_type['extra_specs'].get('drive_size')))

            if shared:
                part_size = FLAGS.vsa_part_size_gb
                total_capacity = num_disks * size
                num_volumes = total_capacity / part_size
                size = part_size
            else:
                num_volumes = num_disks
                size = 0    # special handling for full drives

            for i in range(num_volumes):
                volume_name = "drive-%03d" % first_index
                first_index += 1
                volume_desc = 'BE volume for VSA %s type %s' % \
                              (vsa_name, name)
                volume = {
                    'size': size,
                    'name': volume_name,
                    'description': volume_desc,
                    'volume_type_id': vol_type['id'],
                    }
                volume_params.append(volume)

        return volume_params

    def create(self, context, display_name='', display_description='',
               vc_count=1, instance_type=None, image_name=None,
               availability_zone=None, storage=None, shared=None):
        """
        Provision a VSA instance with the corresponding compute instances
        and associated volumes

        :param storage: List of dictionaries with following keys:
                        drive_name, num_drives, size
        :param shared: Specifies if storage is dedicated or shared.
                       For shared storage, disks are split into partitions
        """

        LOG.info(_("*** Experimental VSA code ***"))

        if vc_count > FLAGS.max_vcs_in_vsa:
            LOG.warning(_("Requested number of VCs (%d) is too high."\
                          " Setting to default"), vc_count)
            vc_count = FLAGS.max_vcs_in_vsa

        if instance_type is None:
            instance_type = self._get_default_vsa_instance_type()

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        if storage is None:
            storage = []

        if shared is None or shared == 'False' or shared is False:
            shared = False
        else:
            shared = True

        # check if image is ready before starting any work
        if image_name is None:
            image_name = FLAGS.vc_image_name
        try:
            image_service = self.compute_api.image_service
            vc_image = image_service.show_by_name(context, image_name)
            vc_image_href = vc_image['id']
        except exception.ImageNotFound:
            raise exception.ApiError(_("Failed to find configured image %s")
                                     % image_name)

        options = {
            'display_name': display_name,
            'display_description': display_description,
            'project_id': context.project_id,
            'availability_zone': availability_zone,
            'instance_type_id': instance_type['id'],
            'image_ref': vc_image_href,
            'vc_count': vc_count,
            'status': VsaState.CREATING,
        }
        LOG.info(_("Creating VSA: %s") % options)

        # create DB entry for VSA instance
        try:
            vsa_ref = self.db.vsa_create(context, options)
        except exception.Error:
            raise exception.ApiError(_(sys.exc_info()[1]))
        vsa_id = vsa_ref['id']
        vsa_name = vsa_ref['name']

        # check storage parameters
        try:
            volume_params = self._check_storage_parameters(context, vsa_name,
                                                           storage, shared)
        except exception.ApiError:
            self.db.vsa_destroy(context, vsa_id)
            raise exception.ApiError(_("Error in storage parameters: %s")
                                     % storage)

        # after creating DB entry, re-check and set some defaults
        updates = {}
        if (not hasattr(vsa_ref, 'display_name') or
                vsa_ref.display_name is None or
                vsa_ref.display_name == ''):
            updates['display_name'] = display_name = vsa_name
        updates['vol_count'] = len(volume_params)
        vsa_ref = self.update(context, vsa_id, **updates)

        # create volumes
        if FLAGS.vsa_multi_vol_creation:
            if len(volume_params) > 0:
                request_spec = {
                    'num_volumes': len(volume_params),
                    'vsa_id': str(vsa_id),
                    'volumes': volume_params,
                }

                rpc.cast(context,
                         FLAGS.scheduler_topic,
                         {"method": "create_volumes",
                          "args": {"topic": FLAGS.volume_topic,
                                   "request_spec": request_spec,
                                   "availability_zone": availability_zone}})
        else:
            # create BE volumes one-by-one
            for vol in volume_params:
                try:
                    vol_name = vol['name']
                    vol_size = vol['size']
                    vol_type_id = vol['volume_type_id']
                    LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "\
                                "volume %(vol_name)s, %(vol_size)d GB, "\
                                "type %(vol_type_id)s"), locals())

                    vol_type = volume_types.get_volume_type(context,
                                                vol['volume_type_id'])

                    vol_ref = self.volume_api.create(context,
                                    vol_size,
                                    None,
                                    vol_name,
                                    vol['description'],
                                    volume_type=vol_type,
                                    metadata=dict(to_vsa_id=str(vsa_id)),
                                    availability_zone=availability_zone)
                except Exception:
                    self.update_vsa_status(context, vsa_id,
                                           status=VsaState.PARTIAL)
                    raise

        if len(volume_params) == 0:
            # No BE volumes - ask VSA manager to start VCs
            rpc.cast(context,
                     FLAGS.vsa_topic,
                     {"method": "create_vsa",
                      "args": {"vsa_id": str(vsa_id)}})

        return vsa_ref

    def update_vsa_status(self, context, vsa_id, status):
        updates = dict(status=status)
        LOG.info(_("VSA ID %(vsa_id)d: Update VSA status to %(status)s"),
                 locals())
        return self.update(context, vsa_id, **updates)

    def update(self, context, vsa_id, **kwargs):
        """Updates the VSA instance in the datastore.

        :param context: The security context
        :param vsa_id: ID of the VSA instance to update
        :param kwargs: All additional keyword args are treated
                       as data fields of the instance to be
                       updated

        :returns: None
        """
        LOG.info(_("VSA ID %(vsa_id)d: Update VSA call"), locals())

        updatable_fields = ['status', 'vc_count', 'vol_count',
                            'display_name', 'display_description']
        changes = {}
        for field in updatable_fields:
            if field in kwargs:
                changes[field] = kwargs[field]

        vc_count = kwargs.get('vc_count', None)
        if vc_count is not None:
            # VP-TODO: This request may want to update number of VCs
            # Get number of current VCs and add/delete VCs appropriately
            vsa = self.get(context, vsa_id)
            vc_count = int(vc_count)
            if vc_count > FLAGS.max_vcs_in_vsa:
                LOG.warning(_("Requested number of VCs (%d) is too high."\
                              " Setting to default"), vc_count)
                vc_count = FLAGS.max_vcs_in_vsa

            if vsa['vc_count'] != vc_count:
                self.update_num_vcs(context, vsa, vc_count)
                changes['vc_count'] = vc_count

        return self.db.vsa_update(context, vsa_id, changes)

    def update_num_vcs(self, context, vsa, vc_count):
        vsa_name = vsa['name']
        old_vc_count = int(vsa['vc_count'])
        if vc_count > old_vc_count:
            add_cnt = vc_count - old_vc_count
            LOG.debug(_("Adding %(add_cnt)s VCs to VSA %(vsa_name)s."),
                      locals())
            # VP-TODO: actual code for adding new VCs

        elif vc_count < old_vc_count:
            del_cnt = old_vc_count - vc_count
            LOG.debug(_("Deleting %(del_cnt)s VCs from VSA %(vsa_name)s."),
                      locals())
            # VP-TODO: actual code for deleting extra VCs

    def _force_volume_delete(self, ctxt, volume):
        """Delete a volume, bypassing the check that it must be available."""
        host = volume['host']
        if not host:
            # Deleting volume from database and skipping rpc.
            self.db.volume_destroy(ctxt, volume['id'])
            return

        rpc.cast(ctxt,
                 self.db.queue_get_for(ctxt, FLAGS.volume_topic, host),
                 {"method": "delete_volume",
                  "args": {"volume_id": volume['id']}})

    def delete_vsa_volumes(self, context, vsa_id, direction,
                           force_delete=True):
        if direction == "FE":
            volumes = self.get_all_vsa_volumes(context, vsa_id)
        else:
            volumes = self.get_all_vsa_drives(context, vsa_id)

        for volume in volumes:
            try:
                vol_name = volume['name']
                LOG.info(_("VSA ID %(vsa_id)s: Deleting %(direction)s "\
                           "volume %(vol_name)s"), locals())
                self.volume_api.delete(context, volume['id'])
            except exception.ApiError:
                LOG.info(_("Unable to delete volume %s"), volume['name'])
                if force_delete:
                    LOG.info(_("VSA ID %(vsa_id)s: Forced delete. "\
                               "%(direction)s volume %(vol_name)s"), locals())
                    self._force_volume_delete(context, volume)

    def delete(self, context, vsa_id):
        """Terminate a VSA instance."""
        LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id)

        # Delete all FrontEnd and BackEnd volumes
        self.delete_vsa_volumes(context, vsa_id, "FE", force_delete=True)
        self.delete_vsa_volumes(context, vsa_id, "BE", force_delete=True)

        # Delete all VC instances
        instances = self.compute_api.get_all(context,
                search_opts={'metadata': dict(vsa_id=str(vsa_id))})
        for instance in instances:
            name = instance['name']
            LOG.debug(_("VSA ID %(vsa_id)s: Delete instance %(name)s"),
                      locals())
            self.compute_api.delete(context, instance['id'])

        # Delete VSA instance
        self.db.vsa_destroy(context, vsa_id)

    def get(self, context, vsa_id):
        rv = self.db.vsa_get(context, vsa_id)
        return rv

    def get_all(self, context):
        if context.is_admin:
            return self.db.vsa_get_all(context)
        return self.db.vsa_get_all_by_project(context, context.project_id)

    def get_vsa_volume_type(self, context):
        name = FLAGS.vsa_volume_type_name
        try:
            vol_type = volume_types.get_volume_type_by_name(context, name)
        except exception.NotFound:
            volume_types.create(context, name,
                                extra_specs=dict(type='vsa_volume'))
            vol_type = volume_types.get_volume_type_by_name(context, name)

        return vol_type

    def get_all_vsa_instances(self, context, vsa_id):
        return self.compute_api.get_all(context,
                search_opts={'metadata': dict(vsa_id=str(vsa_id))})

    def get_all_vsa_volumes(self, context, vsa_id):
        return self.volume_api.get_all(context,
                search_opts={'metadata': dict(from_vsa_id=str(vsa_id))})

    def get_all_vsa_drives(self, context, vsa_id):
        return self.volume_api.get_all(context,
                search_opts={'metadata': dict(to_vsa_id=str(vsa_id))})
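A sketch of how a caller drives this API, pieced together from the unit tests earlier in this diff. It assumes a running deployment (both create() paths ultimately cast to the scheduler or the VSA manager over rpc) and a SATA_500_7200 volume type already registered as in the test fixtures:

    from nova import context
    from nova import vsa

    ctxt = context.get_admin_context()
    api = vsa.API()

    # Dedicated storage: one BE volume per drive (vol_count == 3).
    vsa_ref = api.create(ctxt,
                         display_name='vsa-example',
                         vc_count=2,
                         storage=[{'drive_name': 'SATA_500_7200',
                                   'num_drives': 3}])

    # Cleanup removes FE/BE volumes, VC instances and the DB record.
    api.delete(ctxt, vsa_ref['id'])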
25
nova/vsa/connection.py
Normal file
@@ -0,0 +1,25 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Abstraction of the underlying connection to VC."""

from nova.vsa import fake


def get_connection():
    # Return an object that is able to talk to VCs
    return fake.FakeVcConnection()
22
nova/vsa/fake.py
Normal file
@@ -0,0 +1,22 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class FakeVcConnection(object):

    def init_host(self, host):
        pass
179
nova/vsa/manager.py
Normal file
@@ -0,0 +1,179 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Handles all processes relating to Virtual Storage Arrays (VSAs).

**Related Flags**

"""

from nova import compute
from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
from nova import volume
from nova import vsa
from nova import utils
from nova.compute import instance_types
from nova.vsa import utils as vsa_utils
from nova.vsa.api import VsaState

FLAGS = flags.FLAGS
flags.DEFINE_string('vsa_driver', 'nova.vsa.connection.get_connection',
                    'Driver to use for controlling VSAs')

LOG = logging.getLogger('nova.vsa.manager')


class VsaManager(manager.SchedulerDependentManager):
    """Manages Virtual Storage Arrays (VSAs)."""

    def __init__(self, vsa_driver=None, *args, **kwargs):
        if not vsa_driver:
            vsa_driver = FLAGS.vsa_driver
        self.driver = utils.import_object(vsa_driver)
        self.compute_manager = utils.import_object(FLAGS.compute_manager)

        self.compute_api = compute.API()
        self.volume_api = volume.API()
        self.vsa_api = vsa.API()

        if FLAGS.vsa_ec2_user_id is None or \
           FLAGS.vsa_ec2_access_key is None:
            raise exception.VSANovaAccessParamNotFound()

        super(VsaManager, self).__init__(*args, **kwargs)

    def init_host(self):
        self.driver.init_host(host=self.host)
        super(VsaManager, self).init_host()

    @exception.wrap_exception()
    def create_vsa(self, context, vsa_id):
        """Called by API if there were no BE volumes assigned"""
        LOG.debug(_("Create call received for VSA %s"), vsa_id)

        vsa_id = int(vsa_id)    # just in case

        try:
            vsa = self.vsa_api.get(context, vsa_id)
        except Exception as ex:
            msg = _("Failed to find VSA %(vsa_id)d") % locals()
            LOG.exception(msg)
            return

        return self._start_vcs(context, vsa)

    @exception.wrap_exception()
    def vsa_volume_created(self, context, vol_id, vsa_id, status):
        """Callback for volume creations"""
        LOG.debug(_("VSA ID %(vsa_id)s: Drive %(vol_id)s created. "\
                    "Status %(status)s"), locals())
        vsa_id = int(vsa_id)    # just in case

        # Get all volumes for this VSA and
        # check if any of them are still in the creating phase
        drives = self.vsa_api.get_all_vsa_drives(context, vsa_id)
        for drive in drives:
            if drive['status'] == 'creating':
                vol_name = drive['name']
                vol_disp_name = drive['display_name']
                LOG.debug(_("Drive %(vol_name)s (%(vol_disp_name)s) still "\
                            "in creating phase - wait"), locals())
                return

        try:
            vsa = self.vsa_api.get(context, vsa_id)
        except Exception as ex:
            msg = _("Failed to find VSA %(vsa_id)d") % locals()
            LOG.exception(msg)
            return

        if len(drives) != vsa['vol_count']:
            cvol_real = len(drives)
            cvol_exp = vsa['vol_count']
            LOG.debug(_("VSA ID %(vsa_id)d: Not all volumes are created "\
                        "(%(cvol_real)d of %(cvol_exp)d)"), locals())
            return

        # all volumes created (successfully or not)
        return self._start_vcs(context, vsa, drives)

    def _start_vcs(self, context, vsa, drives=None):
        """Start VCs for a VSA."""
        if drives is None:
            drives = []

        vsa_id = vsa['id']
        if vsa['status'] == VsaState.CREATING:
            self.vsa_api.update_vsa_status(context, vsa_id,
                                           VsaState.LAUNCHING)
        else:
            return

        # in _separate_ loop go over all volumes and mark as "attached"
        has_failed_volumes = False
        for drive in drives:
            vol_name = drive['name']
            vol_disp_name = drive['display_name']
            status = drive['status']
            LOG.info(_("VSA ID %(vsa_id)d: Drive %(vol_name)s "\
                       "(%(vol_disp_name)s) is in %(status)s state"),
                     locals())
            if status == 'available':
                try:
                    # self.volume_api.update(context, volume['id'],
                    #                        dict(attach_status="attached"))
                    pass
                except Exception as ex:
                    msg = _("Failed to update attach status for volume "
                            "%(vol_name)s. %(ex)s") % locals()
                    LOG.exception(msg)
            else:
                has_failed_volumes = True

        if has_failed_volumes:
            LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals())
            self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True)
            self.vsa_api.update_vsa_status(context, vsa_id,
                                           VsaState.FAILED)
            return

        # create user-data record for VC
        storage_data = vsa_utils.generate_user_data(vsa, drives)

        instance_type = instance_types.get_instance_type(
                                            vsa['instance_type_id'])

        # now start the VC instance

        vc_count = vsa['vc_count']
        LOG.info(_("VSA ID %(vsa_id)d: Start %(vc_count)d instances"),
                 locals())
        vc_instances = self.compute_api.create(context,
                instance_type,      # vsa['vsa_instance_type'],
                vsa['image_ref'],
                min_count=1,
                max_count=vc_count,
                display_name='vc-' + vsa['display_name'],
                display_description='VC for VSA ' + vsa['display_name'],
                availability_zone=vsa['availability_zone'],
                user_data=storage_data,
                metadata=dict(vsa_id=str(vsa_id)))

        self.vsa_api.update_vsa_status(context, vsa_id,
                                       VsaState.CREATED)
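The manager is callback-driven: a VSA only advances out of CREATING once every BE drive has left the 'creating' state and the expected drive count is reached. Pieced together from the code above, the lifecycle is small; a reading aid, not shipped code (VsaState as defined in nova/vsa/api.py; DELETING appears in the enum but is not exercised in this diff):

    from nova.vsa.api import VsaState

    # Observed transitions in this changeset:
    VSA_TRANSITIONS = {
        VsaState.CREATING: (VsaState.LAUNCHING,  # all BE drives created
                            VsaState.PARTIAL),   # one-by-one create failed
        VsaState.LAUNCHING: (VsaState.CREATED,   # VC instances started
                             VsaState.FAILED),   # some drive not available
    }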
80
nova/vsa/utils.py
Normal file
@@ -0,0 +1,80 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
from xml.etree import ElementTree

from nova import flags

FLAGS = flags.FLAGS


def generate_user_data(vsa, volumes):
    """Build the base64-encoded XML user-data document passed to VCs."""
    SubElement = ElementTree.SubElement

    e_vsa = ElementTree.Element("vsa")

    e_vsa_detail = SubElement(e_vsa, "id")
    e_vsa_detail.text = str(vsa['id'])
    e_vsa_detail = SubElement(e_vsa, "name")
    e_vsa_detail.text = vsa['display_name']
    e_vsa_detail = SubElement(e_vsa, "description")
    e_vsa_detail.text = vsa['display_description']
    e_vsa_detail = SubElement(e_vsa, "vc_count")
    e_vsa_detail.text = str(vsa['vc_count'])

    e_vsa_detail = SubElement(e_vsa, "auth_user")
    e_vsa_detail.text = FLAGS.vsa_ec2_user_id
    e_vsa_detail = SubElement(e_vsa, "auth_access_key")
    e_vsa_detail.text = FLAGS.vsa_ec2_access_key

    e_volumes = SubElement(e_vsa, "volumes")
    for volume in volumes:

        loc = volume['provider_location']
        if loc is None:
            ip = ''
            iscsi_iqn = ''
            iscsi_portal = ''
        else:
            (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ")
            (ip, iscsi_portal) = iscsi_target.split(":", 1)

        e_vol = SubElement(e_volumes, "volume")
        e_vol_detail = SubElement(e_vol, "id")
        e_vol_detail.text = str(volume['id'])
        e_vol_detail = SubElement(e_vol, "name")
        e_vol_detail.text = volume['name']
        e_vol_detail = SubElement(e_vol, "display_name")
        e_vol_detail.text = volume['display_name']
        e_vol_detail = SubElement(e_vol, "size_gb")
        e_vol_detail.text = str(volume['size'])
        e_vol_detail = SubElement(e_vol, "status")
        e_vol_detail.text = volume['status']
        e_vol_detail = SubElement(e_vol, "ip")
        e_vol_detail.text = ip
        e_vol_detail = SubElement(e_vol, "iscsi_iqn")
        e_vol_detail.text = iscsi_iqn
        e_vol_detail = SubElement(e_vol, "iscsi_portal")
        e_vol_detail.text = iscsi_portal
        e_vol_detail = SubElement(e_vol, "lun")
        e_vol_detail.text = '0'
        e_vol_detail = SubElement(e_vol, "sn_host")
        e_vol_detail.text = volume['host']

    _xml = ElementTree.tostring(e_vsa)
    return base64.b64encode(_xml)
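The consuming side (the VC image) is not part of this diff, but a minimal decode-and-parse sketch mirroring the unit test shows the document shape; `user_data` is assumed to come from a prior generate_user_data(vsa, volumes) call:

    import base64
    from xml.etree import ElementTree

    doc = ElementTree.fromstring(base64.b64decode(user_data))
    assert doc.tag == 'vsa'
    vc_count = int(doc.findtext('vc_count'))
    for vol in doc.find('volumes'):
        # each <volume> carries the iSCSI coordinates a VC needs
        name = vol.findtext('name')
        ip = vol.findtext('ip')
        iqn = vol.findtext('iscsi_iqn')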
@@ -14,8 +14,8 @@ msgstr ""
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Launchpad-Export-Date: 2011-07-23 05:11+0000\n"
|
||||
"X-Generator: Launchpad (build 13405)\n"
|
||||
"X-Launchpad-Export-Date: 2011-08-03 04:43+0000\n"
|
||||
"X-Generator: Launchpad (build 13573)\n"
|
||||
|
||||
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
|
||||
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
|
||||
|
||||
4
po/cs.po
4
po/cs.po
@@ -14,8 +14,8 @@ msgstr ""
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Launchpad-Export-Date: 2011-07-23 05:11+0000\n"
|
||||
"X-Generator: Launchpad (build 13405)\n"
|
||||
"X-Launchpad-Export-Date: 2011-08-03 04:43+0000\n"
|
||||
"X-Generator: Launchpad (build 13573)\n"
|
||||
|
||||
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
|
||||
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
|
||||
|
||||
4
po/da.po
4
po/da.po
@@ -14,8 +14,8 @@ msgstr ""
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
|
||||
"X-Generator: Launchpad (build 13405)\n"
|
||||
"X-Launchpad-Export-Date: 2011-08-03 04:43+0000\n"
|
||||
"X-Generator: Launchpad (build 13573)\n"
|
||||
|
||||
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
|
||||
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
|
||||
|
||||
22
po/de.po
22
po/de.po
@@ -14,8 +14,8 @@ msgstr ""
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
|
||||
"X-Generator: Launchpad (build 13405)\n"
|
||||
"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
|
||||
"X-Generator: Launchpad (build 13573)\n"
|
||||
|
||||
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
|
||||
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
|
||||
@@ -2833,3 +2833,21 @@ msgstr ""
|
||||
#~ msgid "Data store %s is unreachable. Trying again in %d seconds."
|
||||
#~ msgstr ""
|
||||
#~ "Datastore %s ist nicht erreichbar. Versuche es erneut in %d Sekunden."
|
||||
|
||||
#~ msgid "Full set of FLAGS:"
|
||||
#~ msgstr "Alle vorhandenen FLAGS:"
|
||||
|
||||
#, python-format
|
||||
#~ msgid "pidfile %s does not exist. Daemon not running?\n"
|
||||
#~ msgstr "PID-Datei %s existiert nicht. Läuft der Daemon nicht?\n"
|
||||
|
||||
#, python-format
|
||||
#~ msgid "Starting %s"
|
||||
#~ msgstr "%s wird gestartet"
|
||||
|
||||
#~ msgid "No such process"
|
||||
#~ msgstr "Kein passender Prozess gefunden"
|
||||
|
||||
#, python-format
|
||||
#~ msgid "Serving %s"
|
||||
#~ msgstr "Bedient %s"
|
||||
|
||||
@@ -14,8 +14,8 @@ msgstr ""
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
|
||||
"X-Generator: Launchpad (build 13405)\n"
|
||||
"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
|
||||
"X-Generator: Launchpad (build 13573)\n"
|
||||
|
||||
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
|
||||
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
|
||||
|
||||
25
po/en_GB.po
25
po/en_GB.po
@@ -14,8 +14,8 @@ msgstr ""
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
|
||||
"X-Generator: Launchpad (build 13405)\n"
|
||||
"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
|
||||
"X-Generator: Launchpad (build 13573)\n"
|
||||
|
||||
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
|
||||
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
|
||||
@@ -2812,3 +2812,24 @@ msgstr ""
|
||||
#, python-format
|
||||
msgid "Removing user %(user)s from project %(project)s"
|
||||
msgstr ""
|
||||
|
||||
#~ msgid "Wrong number of arguments."
|
||||
#~ msgstr "Wrong number of arguments."
|
||||
|
||||
#~ msgid "No such process"
|
||||
#~ msgstr "No such process"
|
||||
|
||||
#~ msgid "Full set of FLAGS:"
|
||||
#~ msgstr "Full set of FLAGS:"
|
||||
|
||||
#, python-format
|
||||
#~ msgid "pidfile %s does not exist. Daemon not running?\n"
|
||||
#~ msgstr "pidfile %s does not exist. Daemon not running?\n"
|
||||
|
||||
#, python-format
|
||||
#~ msgid "Starting %s"
|
||||
#~ msgstr "Starting %s"
|
||||
|
||||
#, python-format
|
||||
#~ msgid "Serving %s"
|
||||
#~ msgstr "Serving %s"
|
||||
|
||||
69
po/es.po
69
po/es.po
@@ -8,20 +8,20 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
"PO-Revision-Date: 2011-06-30 16:42+0000\n"
"Last-Translator: David Caro <Unknown>\n"
"PO-Revision-Date: 2011-08-01 03:23+0000\n"
"Last-Translator: Juan Alfredo Salas Santillana <Unknown>\n"
"Language-Team: Spanish <es@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
"X-Generator: Launchpad (build 13573)\n"

#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
#: ../nova/scheduler/simple.py:122
msgid "No hosts found"
msgstr "No se han encontrado hosts"
msgstr "No se encontro anfitriones."

#: ../nova/exception.py:33
msgid "Unexpected error while running command."
@@ -2566,7 +2566,7 @@ msgstr ""
#: ../nova/auth/manager.py:289
#, python-format
msgid "User %(uid)s is not a member of project %(pjid)s"
msgstr ""
msgstr "El usuario %(uid)s no es miembro del proyecto %(pjid)s"

#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309
#, python-format
@@ -2584,7 +2584,7 @@ msgstr "Debes especificar un proyecto"
#: ../nova/auth/manager.py:414
#, python-format
msgid "The %s role can not be found"
msgstr "El rol %s no se ha podido encontrar"
msgstr ""

#: ../nova/auth/manager.py:416
#, python-format
@@ -2614,27 +2614,27 @@ msgstr ""
#: ../nova/auth/manager.py:515
#, python-format
msgid "Created project %(name)s with manager %(manager_user)s"
msgstr ""
msgstr "Creado el proyecto %(name)s con administrador %(manager_user)s"

#: ../nova/auth/manager.py:533
#, python-format
msgid "modifying project %s"
msgstr "modificando proyecto %s"
msgstr "Modificando proyecto %s"

#: ../nova/auth/manager.py:545
#, python-format
msgid "Adding user %(uid)s to project %(pid)s"
msgstr ""
msgstr "Agregando usuario %(uid)s para el proyecto %(pid)s"

#: ../nova/auth/manager.py:566
#, python-format
msgid "Remove user %(uid)s from project %(pid)s"
msgstr ""
msgstr "Borrar usuario %(uid)s del proyecto %(pid)s"

#: ../nova/auth/manager.py:592
#, python-format
msgid "Deleting project %s"
msgstr "Eliminando proyecto %s"
msgstr "Borrando proyecto %s"

#: ../nova/auth/manager.py:650
#, python-format
@@ -2644,7 +2644,7 @@ msgstr ""
#: ../nova/auth/manager.py:659
#, python-format
msgid "Deleting user %s"
msgstr "Eliminando usuario %s"
msgstr "Borrando usuario %s"

#: ../nova/auth/manager.py:669
#, python-format
@@ -2710,7 +2710,7 @@ msgstr ""
#: ../nova/auth/ldapdriver.py:478
#, python-format
msgid "Group can't be created because user %s doesn't exist"
msgstr ""
msgstr "El grupo no se puede crear porque el usuario %s no existe"

#: ../nova/auth/ldapdriver.py:495
#, python-format
@@ -2730,18 +2730,20 @@ msgstr ""
#: ../nova/auth/ldapdriver.py:513
#, python-format
msgid "User %(uid)s is already a member of the group %(group_dn)s"
msgstr ""
msgstr "El usuario %(uid)s es actualmente miembro del grupo %(group_dn)s"

#: ../nova/auth/ldapdriver.py:524
#, python-format
msgid ""
"User %s can't be removed from the group because the user doesn't exist"
msgstr ""
"El usuario %s no se pudo borrar de el grupo a causa de que el usuario no "
"existe"

#: ../nova/auth/ldapdriver.py:528
#, python-format
msgid "User %s is not a member of the group"
msgstr ""
msgstr "El usuario %s no es miembro de el grupo"

#: ../nova/auth/ldapdriver.py:542
#, python-format
@@ -2877,6 +2879,10 @@ msgstr "Eliminando el usuario %(user)s del proyecto %(project)s"
#~ msgstr ""
#~ "El almacen de datos %s es inalcanzable. Reintentandolo en %d segundos."

#, python-format
#~ msgid "Serving %s"
#~ msgstr "Sirviendo %s"

#, python-format
#~ msgid "Couldn't get IP, using 127.0.0.1 %s"
#~ msgstr "No puedo obtener IP, usando 127.0.0.1 %s"
@@ -3037,10 +3043,24 @@ msgstr "Eliminando el usuario %(user)s del proyecto %(project)s"
#~ msgid "Detach volume %s from mountpoint %s on instance %s"
#~ msgstr "Desvinculando volumen %s del punto de montaje %s en la instancia %s"

#~ msgid "unexpected exception getting connection"
#~ msgstr "excepción inexperada al obtener la conexión"

#~ msgid "unexpected error during update"
#~ msgstr "error inesperado durante la actualización"

#, python-format
#~ msgid "Cannot get blockstats for \"%s\" on \"%s\""
#~ msgstr "No puedo obtener estadísticas del bloque para \"%s\" en \"%s\""

#, python-format
#~ msgid "updating %s..."
#~ msgstr "actualizando %s..."

#, python-format
#~ msgid "Found instance: %s"
#~ msgstr "Encontrada interfaz: %s"

#, python-format
#~ msgid "Cannot get ifstats for \"%s\" on \"%s\""
#~ msgstr "No puedo obtener estadísticas de la interfaz para \"%s\" en \"%s\""
@@ -3319,3 +3339,20 @@ msgstr "Eliminando el usuario %(user)s del proyecto %(project)s"
#, python-format
#~ msgid "Spawning VM %s created %s."
#~ msgstr "Iniciando VM %s creado %s."

#~ msgid "No such process"
#~ msgstr "No existe el proceso"

#~ msgid "Full set of FLAGS:"
#~ msgstr "Conjunto completo de opciones (FLAGS):"

#~ msgid "Wrong number of arguments."
#~ msgstr "Cantidad de argumentos incorrecta"

#, python-format
#~ msgid "pidfile %s does not exist. Daemon not running?\n"
#~ msgstr "El \"pidfile\" %s no existe. Quizás el servicio no este corriendo.\n"

#, python-format
#~ msgid "Starting %s"
#~ msgstr "Iniciando %s"

52
po/fr.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
"X-Launchpad-Export-Date: 2011-08-03 04:43+0000\n"
"X-Generator: Launchpad (build 13573)\n"

#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2929,3 +2929,51 @@ msgstr "Ajout de l'utilisateur %(user)s au projet %(project)s"
#, python-format
msgid "Removing user %(user)s from project %(project)s"
msgstr "Suppression de l'utilisateur %(user)s du projet %(project)s"

#~ msgid "Wrong number of arguments."
#~ msgstr "Nombre d'arguments incorrect."

#~ msgid "No such process"
#~ msgstr "Aucun processus de ce type"

#, python-format
#~ msgid "Starting %s"
#~ msgstr "Démarrage de %s"

#~ msgid "Full set of FLAGS:"
#~ msgstr "Ensemble de propriétés complet :"

#, python-format
#~ msgid "pidfile %s does not exist. Daemon not running?\n"
#~ msgstr ""
#~ "Le fichier pid %s n'existe pas. Est-ce que le processus est en cours "
#~ "d'exécution ?\n"

#, python-format
#~ msgid "Serving %s"
#~ msgstr "En train de servir %s"

#, python-format
#~ msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
#~ msgstr "Ne peut pas récupérer blockstats pour \"%(disk)s\" sur \"%(iid)s\""

#, python-format
#~ msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
#~ msgstr "Ne peut pas récupérer ifstats pour \"%(interface)s\" sur \"%(iid)s\""

#~ msgid "unexpected error during update"
#~ msgstr "erreur inopinée pendant la ise à jour"

#, python-format
#~ msgid "updating %s..."
#~ msgstr "mise à jour %s..."

#, python-format
#~ msgid "Found instance: %s"
#~ msgstr "Instance trouvée : %s"

#~ msgid "unexpected exception getting connection"
#~ msgstr "erreur inopinée pendant la connexion"

#~ msgid "Starting instance monitor"
#~ msgstr "Démarrage du superviseur d'instance"

87
po/it.po
@@ -8,14 +8,14 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
"PO-Revision-Date: 2011-02-22 19:34+0000\n"
"Last-Translator: Armando Migliaccio <Unknown>\n"
"PO-Revision-Date: 2011-08-21 22:50+0000\n"
"Last-Translator: Guido Davide Dall'Olio <Unknown>\n"
"Language-Team: Italian <it@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
"X-Launchpad-Export-Date: 2011-08-22 04:48+0000\n"
"X-Generator: Launchpad (build 13697)\n"

#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -449,24 +449,24 @@ msgstr ""
#: ../nova/scheduler/simple.py:53
#, python-format
msgid "Host %s is not alive"
msgstr ""
msgstr "L'host %s non è attivo"

#: ../nova/scheduler/simple.py:65
msgid "All hosts have too many cores"
msgstr ""
msgstr "Gli host hanno troppi core"

#: ../nova/scheduler/simple.py:87
#, python-format
msgid "Host %s not available"
msgstr ""
msgstr "Host %s non disponibile"

#: ../nova/scheduler/simple.py:99
msgid "All hosts have too many gigabytes"
msgstr ""
msgstr "Gli Host hanno troppy gigabyte"

#: ../nova/scheduler/simple.py:119
msgid "All hosts have too many networks"
msgstr ""
msgstr "Gli host hanno troppe reti"

#: ../nova/volume/manager.py:85
#, python-format
@@ -496,7 +496,7 @@ msgstr ""
#: ../nova/volume/manager.py:123
#, python-format
msgid "volume %s: created successfully"
msgstr ""
msgstr "volume %s: creato con successo"

#: ../nova/volume/manager.py:131
msgid "Volume is still attached"
@@ -514,12 +514,12 @@ msgstr ""
#: ../nova/volume/manager.py:138
#, python-format
msgid "volume %s: deleting"
msgstr ""
msgstr "volume %s: rimuovendo"

#: ../nova/volume/manager.py:147
#, python-format
msgid "volume %s: deleted successfully"
msgstr ""
msgstr "volume %s: rimosso con successo"

#: ../nova/virt/xenapi/fake.py:74
#, python-format
@@ -529,7 +529,7 @@ msgstr ""
#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404
#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478
msgid "Raising NotImplemented"
msgstr ""
msgstr "Sollevando NotImplemented"

#: ../nova/virt/xenapi/fake.py:306
#, python-format
@@ -539,7 +539,7 @@ msgstr ""
#: ../nova/virt/xenapi/fake.py:341
#, python-format
msgid "Calling %(localname)s %(impl)s"
msgstr ""
msgstr "Chiamando %(localname)s %(impl)s"

#: ../nova/virt/xenapi/fake.py:346
#, python-format
@@ -564,17 +564,17 @@ msgstr ""

#: ../nova/virt/connection.py:73
msgid "Failed to open connection to the hypervisor"
msgstr ""
msgstr "Fallita l'apertura della connessione verso l'hypervisor"

#: ../nova/network/linux_net.py:187
#, python-format
msgid "Starting VLAN inteface %s"
msgstr ""
msgstr "Avviando l'interfaccia VLAN %s"

#: ../nova/network/linux_net.py:208
#, python-format
msgid "Starting Bridge interface for %s"
msgstr ""
msgstr "Avviando l'interfaccia Bridge per %s"

#. pylint: disable=W0703
#: ../nova/network/linux_net.py:314
@@ -632,7 +632,7 @@ msgstr "Il risultato é %s"
#: ../nova/utils.py:159
#, python-format
msgid "Running cmd (SSH): %s"
msgstr ""
msgstr "Eseguendo cmd (SSH): %s"

#: ../nova/utils.py:217
#, python-format
@@ -642,7 +642,7 @@ msgstr "debug in callback: %s"
#: ../nova/utils.py:222
#, python-format
msgid "Running %s"
msgstr ""
msgstr "Eseguendo %s"

#: ../nova/utils.py:262
#, python-format
@@ -697,12 +697,12 @@ msgstr ""
#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171
#, python-format
msgid "Created VM %s..."
msgstr ""
msgstr "Creata VM %s.."

#: ../nova/virt/xenapi/vm_utils.py:138
#, python-format
msgid "Created VM %(instance_name)s as %(vm_ref)s."
msgstr ""
msgstr "Creata VM %(instance_name)s come %(vm_ref)s"

#: ../nova/virt/xenapi/vm_utils.py:168
#, python-format
@@ -771,7 +771,7 @@ msgstr ""
#: ../nova/virt/xenapi/vm_utils.py:332
#, python-format
msgid "Glance image %s"
msgstr ""
msgstr "Immagine Glance %s"

#. we need to invoke a plugin for copying VDI's
#. content into proper path
@@ -783,7 +783,7 @@ msgstr ""
#: ../nova/virt/xenapi/vm_utils.py:352
#, python-format
msgid "Kernel/Ramdisk VDI %s destroyed"
msgstr ""
msgstr "Kernel/Ramdisk VDI %s distrutti"

#: ../nova/virt/xenapi/vm_utils.py:361
#, python-format
@@ -793,7 +793,7 @@ msgstr ""
#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402
#, python-format
msgid "Looking up vdi %s for PV kernel"
msgstr ""
msgstr "Cercando vdi %s per kernel PV"

#: ../nova/virt/xenapi/vm_utils.py:397
#, python-format
@@ -2802,37 +2802,24 @@ msgstr ""
msgid "Removing user %(user)s from project %(project)s"
msgstr ""

#~ msgid "Full set of FLAGS:"
#~ msgstr "Insieme di FLAGS:"

#, python-format
#~ msgid ""
#~ "%s\n"
#~ "Command: %s\n"
#~ "Exit code: %s\n"
#~ "Stdout: %r\n"
#~ "Stderr: %r"
#~ msgid "pidfile %s does not exist. Daemon not running?\n"
#~ msgstr ""
#~ "%s\n"
#~ "Comando: %s\n"
#~ "Exit code: %s\n"
#~ "Stdout: %r\n"
#~ "Stderr: %r"
#~ "Il pidfile %s non esiste. Assicurarsi che il demone é in esecuzione.\n"

#, python-format
#~ msgid "(%s) publish (key: %s) %s"
#~ msgstr "(%s) pubblica (chiave: %s) %s"
#~ msgid "Starting %s"
#~ msgstr "Avvio di %s"

#, python-format
#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds."
#~ msgstr ""
#~ "Il server AMQP su %s:%d non é raggiungibile. Riprovare in %d secondi."
#~ msgid "Serving %s"
#~ msgstr "Servire %s"

#, python-format
#~ msgid "Binding %s to %s with key %s"
#~ msgstr "Collegando %s a %s con la chiave %s"
#~ msgid "Wrong number of arguments."
#~ msgstr "Numero errato di argomenti"

#, python-format
#~ msgid "Starting %s node"
#~ msgstr "Avviando il nodo %s"

#, python-format
#~ msgid "Data store %s is unreachable. Trying again in %d seconds."
#~ msgstr "Datastore %s é irrangiungibile. Riprovare in %d seconds."
#~ msgid "No such process"
#~ msgstr "Nessun processo trovato"

50
po/ja.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
"X-Generator: Launchpad (build 13573)\n"

#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2878,6 +2878,17 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s から削除しま
#~ msgid "Data store %s is unreachable. Trying again in %d seconds."
#~ msgstr "データストア %s に接続できません。 %d 秒後に再接続します。"

#, python-format
#~ msgid "Serving %s"
#~ msgstr "%s サービスの開始"

#~ msgid "Full set of FLAGS:"
#~ msgstr "FLAGSの一覧:"

#, python-format
#~ msgid "pidfile %s does not exist. Daemon not running?\n"
#~ msgstr "pidfile %s が存在しません。デーモンは実行中ですか?\n"

#, python-format
#~ msgid "Couldn't get IP, using 127.0.0.1 %s"
#~ msgstr "IPを取得できません。127.0.0.1 を %s として使います。"
@@ -3038,6 +3049,13 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s から削除しま
#~ msgid "Detach volume %s from mountpoint %s on instance %s"
#~ msgstr "Detach volume: ボリューム %s をマウントポイント %s (インスタンス%s)からデタッチします。"

#, python-format
#~ msgid "updating %s..."
#~ msgstr "%s の情報の更新…"

#~ msgid "unexpected error during update"
#~ msgstr "更新の最中に予期しないエラーが発生しました。"

#, python-format
#~ msgid "Cannot get blockstats for \"%s\" on \"%s\""
#~ msgstr "ブロックデバイス \"%s\" の統計を \"%s\" について取得できません。"
@@ -3046,6 +3064,13 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s から削除しま
#~ msgid "Cannot get ifstats for \"%s\" on \"%s\""
#~ msgstr "インタフェース \"%s\" の統計を \"%s\" について取得できません。"

#~ msgid "unexpected exception getting connection"
#~ msgstr "接続に際し予期しないエラーが発生しました。"

#, python-format
#~ msgid "Found instance: %s"
#~ msgstr "インスタンス %s が見つかりました。"

#, python-format
#~ msgid "No service for %s, %s"
#~ msgstr "%s, %s のserviceが存在しません。"
@@ -3318,3 +3343,24 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s から削除しま
#, python-format
#~ msgid "volume %s: creating lv of size %sG"
#~ msgstr "ボリューム%sの%sGのlv (論理ボリューム) を作成します。"

#~ msgid "Wrong number of arguments."
#~ msgstr "引数の数が異なります。"

#~ msgid "No such process"
#~ msgstr "そのようなプロセスはありません"

#, python-format
#~ msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
#~ msgstr "\"%(iid)s\" 上の \"%(disk)s\" 用のブロック統計(blockstats)が取得できません"

#, python-format
#~ msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
#~ msgstr "\"%(iid)s\" 上の %(interface)s\" 用インターフェース統計(ifstats)が取得できません"

#~ msgid "Starting instance monitor"
#~ msgstr "インスタンスモニタを開始しています"

#, python-format
#~ msgid "Starting %s"
#~ msgstr "%s を起動中"

52
po/pt_BR.po
@@ -8,14 +8,14 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
"PO-Revision-Date: 2011-03-24 14:51+0000\n"
"PO-Revision-Date: 2011-07-25 17:40+0000\n"
"Last-Translator: msinhore <msinhore@gmail.com>\n"
"Language-Team: Brazilian Portuguese <pt_BR@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
"X-Generator: Launchpad (build 13573)\n"

#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -36,6 +36,11 @@ msgid ""
"Stdout: %(stdout)r\n"
"Stderr: %(stderr)r"
msgstr ""
"%(description)s\n"
"Comando: %(cmd)s\n"
"Código de saída: %(exit_code)s\n"
"Saída padrão: %(stdout)r\n"
"Erro: %(stderr)r"

#: ../nova/exception.py:107
msgid "DB exception wrapped"
@@ -392,7 +397,7 @@ msgstr "instância %s: suspendendo"
#: ../nova/compute/manager.py:472
#, python-format
msgid "instance %s: resuming"
msgstr ""
msgstr "instância %s: resumindo"

#: ../nova/compute/manager.py:491
#, python-format
@@ -407,12 +412,12 @@ msgstr "instância %s: desbloqueando"
#: ../nova/compute/manager.py:513
#, python-format
msgid "instance %s: getting locked state"
msgstr ""
msgstr "instância %s: obtendo estado de bloqueio"

#: ../nova/compute/manager.py:526
#, python-format
msgid "instance %s: reset network"
msgstr ""
msgstr "instância %s: reset da rede"

#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515
#, python-format
@@ -429,6 +434,7 @@ msgstr "instância %s: obtendo console ajax"
msgid ""
"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
"instância %(instance_id)s: atachando volume %(volume_id)s para %(mountpoint)s"

#. pylint: disable=W0702
#. NOTE(vish): The inline callback eats the exception info so we
@@ -438,6 +444,8 @@ msgstr ""
#, python-format
msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing"
msgstr ""
"instância %(instance_id)s: falha ao atachar ponto de montagem "
"%(mountpoint)s, removendo"

#: ../nova/compute/manager.py:585
#, python-format
@@ -458,7 +466,7 @@ msgstr "Host %s não está ativo"

#: ../nova/scheduler/simple.py:65
msgid "All hosts have too many cores"
msgstr ""
msgstr "Todos os hosts tem muitos núcleos de CPU"

#: ../nova/scheduler/simple.py:87
#, python-format
@@ -783,7 +791,7 @@ msgstr "Tamanho da imagem %(image)s:%(virtual_size)d"
#: ../nova/virt/xenapi/vm_utils.py:332
#, python-format
msgid "Glance image %s"
msgstr ""
msgstr "Visão geral da imagem %s"

#. we need to invoke a plugin for copying VDI's
#. content into proper path
@@ -815,7 +823,7 @@ msgstr "Kernel PV no VDI: %s"
#: ../nova/virt/xenapi/vm_utils.py:405
#, python-format
msgid "Running pygrub against %s"
msgstr ""
msgstr "Rodando pygrub novamente %s"

#: ../nova/virt/xenapi/vm_utils.py:411
#, python-format
@@ -849,12 +857,12 @@ msgstr "(VM_UTILS) xenapi power_state -> |%s|"
#: ../nova/virt/xenapi/vm_utils.py:525
#, python-format
msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s"
msgstr ""
msgstr "O VHD %(vdi_uuid)s tem pai %(parent_ref)s"

#: ../nova/virt/xenapi/vm_utils.py:542
#, python-format
msgid "Re-scanning SR %s"
msgstr ""
msgstr "Re-escaneando SR %s"

#: ../nova/virt/xenapi/vm_utils.py:567
#, python-format
@@ -2857,6 +2865,17 @@ msgstr ""
#~ "Repositório de dados %s não pode ser atingido. Tentando novamente em %d "
#~ "segundos."

#~ msgid "Full set of FLAGS:"
#~ msgstr "Conjunto completo de FLAGS:"

#, python-format
#~ msgid "Starting %s"
#~ msgstr "Iniciando %s"

#, python-format
#~ msgid "Serving %s"
#~ msgstr "Servindo %s"

#, python-format
#~ msgid "Couldn't get IP, using 127.0.0.1 %s"
#~ msgstr "Não foi possível obter IP, usando 127.0.0.1 %s"
@@ -2965,3 +2984,14 @@ msgstr ""
#, python-format
#~ msgid "Created user %s (admin: %r)"
#~ msgstr "Criado usuário %s (administrador: %r)"

#~ msgid "No such process"
#~ msgstr "Processo inexistente"

#, python-format
#~ msgid "pidfile %s does not exist. Daemon not running?\n"
#~ msgstr ""
#~ "Arquivo do id do processo (pidfile) %s não existe. O Daemon está parado?\n"

#~ msgid "Wrong number of arguments."
#~ msgstr "Número errado de argumentos."

22
po/ru.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
"X-Generator: Launchpad (build 13573)\n"

#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2789,6 +2789,10 @@ msgstr ""
msgid "Removing user %(user)s from project %(project)s"
msgstr ""

#, python-format
#~ msgid "Starting %s"
#~ msgstr "Запускается %s"

#, python-format
#~ msgid "arg: %s\t\tval: %s"
#~ msgstr "arg: %s\t\tval: %s"
@@ -2841,6 +2845,13 @@ msgstr ""
#~ msgid "Adding role %s to user %s in project %s"
#~ msgstr "Добавление роли %s для пользователя %s в проект %s"

#~ msgid "unexpected error during update"
#~ msgstr "неожиданная ошибка во время обновления"

#, python-format
#~ msgid "updating %s..."
#~ msgstr "обновление %s..."

#, python-format
#~ msgid "Getting object: %s / %s"
#~ msgstr "Получение объекта: %s / %s"
@@ -2891,6 +2902,10 @@ msgstr ""
#~ msgid "Couldn't get IP, using 127.0.0.1 %s"
#~ msgstr "Не удалось получить IP, используем 127.0.0.1 %s"

#, python-format
#~ msgid "pidfile %s does not exist. Daemon not running?\n"
#~ msgstr "pidfile %s не обнаружен. Демон не запущен?\n"

#, python-format
#~ msgid "Getting from %s: %s"
#~ msgstr "Получение из %s: %s"
@@ -2906,3 +2921,6 @@ msgstr ""
#, python-format
#~ msgid "Authenticated Request For %s:%s)"
#~ msgstr "Запрос аутентификации для %s:%s)"

#~ msgid "Wrong number of arguments."
#~ msgstr "Неверное число аргументов."

4
po/tl.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
"X-Generator: Launchpad (build 13573)\n"

#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110

12
po/uk.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
"X-Generator: Launchpad (build 13573)\n"

#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2792,6 +2792,14 @@ msgstr ""
#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds."
#~ msgstr "AMQP сервер %s:%d недоступний. Спроба під'єднання через %d секунд."

#, python-format
#~ msgid "Starting %s"
#~ msgstr "Запускається %s"

#, python-format
#~ msgid "Serving %s"
#~ msgstr "Обслуговування %s"

#, python-format
#~ msgid "Couldn't get IP, using 127.0.0.1 %s"
#~ msgstr "Не вдалось отримати IP, використовуючи 127.0.0.1 %s"

171
po/zh_CN.po
@@ -8,14 +8,18 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
"PO-Revision-Date: 2011-06-14 14:44+0000\n"
"Last-Translator: chong <Unknown>\n"
"PO-Revision-Date: 2011-08-19 09:26+0000\n"
"Last-Translator: zhangjunfeng <Unknown>\n"
"Language-Team: Chinese (Simplified) <zh_CN@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
"X-Launchpad-Export-Date: 2011-08-20 05:06+0000\n"
"X-Generator: Launchpad (build 13697)\n"

#, python-format
#~ msgid "Starting %s"
#~ msgstr "启动 %s 中"

#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -44,7 +48,7 @@ msgstr ""

#: ../nova/exception.py:107
msgid "DB exception wrapped"
msgstr ""
msgstr "数据库异常"

#. exc_type, exc_value, exc_traceback = sys.exc_info()
#: ../nova/exception.py:120
@@ -84,7 +88,7 @@ msgstr "获取外网IP失败"
#: ../nova/api/openstack/servers.py:152
#, python-format
msgid "%(param)s property not found for image %(_image_id)s"
msgstr ""
msgstr "没有找到镜像文件%(_image_id)s 的属性 %(param)s"

#: ../nova/api/openstack/servers.py:168
msgid "No keypairs defined"
@@ -93,55 +97,55 @@ msgstr "未定义密钥对"
#: ../nova/api/openstack/servers.py:238
#, python-format
msgid "Compute.api::lock %s"
msgstr ""
msgstr "compute.api::加锁 %s"

#: ../nova/api/openstack/servers.py:253
#, python-format
msgid "Compute.api::unlock %s"
msgstr ""
msgstr "compute.api::解锁 %s"

#: ../nova/api/openstack/servers.py:267
#, python-format
msgid "Compute.api::get_lock %s"
msgstr ""
msgstr "Compute.api::得到锁 %s"

#: ../nova/api/openstack/servers.py:281
#, python-format
msgid "Compute.api::reset_network %s"
msgstr ""
msgstr "Compute.api::重置网络 %s"

#: ../nova/api/openstack/servers.py:292
#, python-format
msgid "Compute.api::pause %s"
msgstr ""
msgstr "Compute.api::暂停 %s"

#: ../nova/api/openstack/servers.py:303
#, python-format
msgid "Compute.api::unpause %s"
msgstr ""
msgstr "Compute.api::继续 %s"

#: ../nova/api/openstack/servers.py:314
#, python-format
msgid "compute.api::suspend %s"
msgstr ""
msgstr "compute.api::挂起 %s"

#: ../nova/api/openstack/servers.py:325
#, python-format
msgid "compute.api::resume %s"
msgstr ""
msgstr "compute.api::回复 %s"

#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
#, python-format
msgid "Instance %s not found"
msgstr ""
msgstr "实例 %s 没有找到"

#. NOTE: No Resource Pool concept so far
#: ../nova/virt/xenapi/volumeops.py:51
#, python-format
msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s"
msgstr ""
msgstr "挂载卷:%(instance_name)s, %(device_path)s, %(mountpoint)s"

#: ../nova/virt/xenapi/volumeops.py:69
#, python-format
@@ -2666,12 +2670,12 @@ msgstr "用户 %s 不存在"
#: ../nova/auth/ldapdriver.py:472
#, python-format
msgid "Group can't be created because group %s already exists"
msgstr ""
msgstr "组不能被创建,因为组 %s 已经存在"

#: ../nova/auth/ldapdriver.py:478
#, python-format
msgid "Group can't be created because user %s doesn't exist"
msgstr ""
msgstr "组不能被创建,因为用户 %s 不存在"

#: ../nova/auth/ldapdriver.py:495
#, python-format
@@ -2686,50 +2690,50 @@ msgstr ""
#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521
#, python-format
msgid "The group at dn %s doesn't exist"
msgstr ""
msgstr "识别名为 %s 的组不存在"

#: ../nova/auth/ldapdriver.py:513
#, python-format
msgid "User %(uid)s is already a member of the group %(group_dn)s"
msgstr ""
msgstr "用户 %(uid)s 已经是 组 %(group_dn)s 中的成员"

#: ../nova/auth/ldapdriver.py:524
#, python-format
msgid ""
"User %s can't be removed from the group because the user doesn't exist"
msgstr ""
msgstr "用户 %s 不能从组中删除,因为这个用户不存在"

#: ../nova/auth/ldapdriver.py:528
#, python-format
msgid "User %s is not a member of the group"
msgstr ""
msgstr "用户 %s 不是这个组的成员"

#: ../nova/auth/ldapdriver.py:542
#, python-format
msgid ""
"Attempted to remove the last member of a group. Deleting the group at %s "
"instead."
msgstr ""
msgstr "尝试删除组中最后一个成员,用删除组 %s 来代替。"

#: ../nova/auth/ldapdriver.py:549
#, python-format
msgid "User %s can't be removed from all because the user doesn't exist"
msgstr ""
msgstr "用户 %s 不能从系统中删除,因为这个用户不存在"

#: ../nova/auth/ldapdriver.py:564
#, python-format
msgid "Group at dn %s doesn't exist"
msgstr ""
msgstr "可识别名为 %s 的组不存在"

#: ../nova/virt/xenapi/network_utils.py:40
#, python-format
msgid "Found non-unique network for bridge %s"
msgstr ""
msgstr "发现网桥 %s 的网络不唯一"

#: ../nova/virt/xenapi/network_utils.py:43
#, python-format
msgid "Found no network for bridge %s"
msgstr ""
msgstr "发现网桥 %s 没有网络"

#: ../nova/api/ec2/admin.py:97
#, python-format
@@ -2744,22 +2748,22 @@ msgstr "删除用户: %s"
#: ../nova/api/ec2/admin.py:127
#, python-format
msgid "Adding role %(role)s to user %(user)s for project %(project)s"
msgstr ""
msgstr "添加角色 %(role)s 给项目 %(project)s 中的用户 %(user)s"

#: ../nova/api/ec2/admin.py:131
#, python-format
msgid "Adding sitewide role %(role)s to user %(user)s"
msgstr ""
msgstr "给用户 %(user)s 添加站点角色 %(role)s"

#: ../nova/api/ec2/admin.py:137
#, python-format
msgid "Removing role %(role)s from user %(user)s for project %(project)s"
msgstr ""
msgstr "删除项目 %(project)s中用户 %(user)s的角色 %(role)s"

#: ../nova/api/ec2/admin.py:141
#, python-format
msgid "Removing sitewide role %(role)s from user %(user)s"
msgstr ""
msgstr "删除用户 %(user)s 的站点角色 %(role)s"

#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223
msgid "operation must be add or remove"
@@ -2768,22 +2772,22 @@ msgstr "操作必须为添加或删除"
#: ../nova/api/ec2/admin.py:159
#, python-format
msgid "Getting x509 for user: %(name)s on project: %(project)s"
msgstr ""
msgstr "获得用户: %(name)s 在项目 :%(project)s中的x509"

#: ../nova/api/ec2/admin.py:177
#, python-format
msgid "Create project %(name)s managed by %(manager_user)s"
msgstr ""
msgstr "创建被%(manager_user)s 管理的项目 %(name)s"

#: ../nova/api/ec2/admin.py:190
#, python-format
msgid "Modify project: %(name)s managed by %(manager_user)s"
msgstr ""
msgstr "更改被 %(manager_user)s 管理的项目: %(name)s"

#: ../nova/api/ec2/admin.py:200
#, python-format
msgid "Delete project: %s"
msgstr "删除工程 %s"
msgstr ""

#: ../nova/api/ec2/admin.py:214
#, python-format
@@ -2795,94 +2799,19 @@ msgstr "添加用户 %(user)s 到项目 %(project)s 中"
msgid "Removing user %(user)s from project %(project)s"
msgstr "从项目 %(project)s 中移除用户 %(user)s"

#, python-format
#~ msgid ""
#~ "%s\n"
#~ "Command: %s\n"
#~ "Exit code: %s\n"
#~ "Stdout: %r\n"
#~ "Stderr: %r"
#~ msgstr ""
#~ "%s\n"
#~ "命令:%s\n"
#~ "退出代码:%s\n"
#~ "标准输出(stdout):%r\n"
#~ "标准错误(stderr):%r"
#~ msgid "Full set of FLAGS:"
#~ msgstr "FLAGS全集:"

#~ msgid "No such process"
#~ msgstr "没有该进程"

#, python-format
#~ msgid "Binding %s to %s with key %s"
#~ msgstr "将%s绑定到%s(以%s键值)"
#~ msgid "Serving %s"
#~ msgstr "正在为 %s 服务"

#, python-format
#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds."
#~ msgstr "位于%s:%d的AMQP服务器不可用。%d秒后重试。"
#~ msgid "pidfile %s does not exist. Daemon not running?\n"
#~ msgstr "pidfile %s 不存在,守护进程是否运行?\n"

#, python-format
#~ msgid "Getting from %s: %s"
#~ msgstr "从%s获得如下内容:%s"

#, python-format
#~ msgid "Starting %s node"
#~ msgstr "启动%s节点"

#, python-format
#~ msgid "Data store %s is unreachable. Trying again in %d seconds."
#~ msgstr "数据储存服务%s不可用。%d秒之后继续尝试。"

#, python-format
#~ msgid "(%s) publish (key: %s) %s"
#~ msgstr "(%s)发布(键值:%s)%s"

#, python-format
#~ msgid "Couldn't get IP, using 127.0.0.1 %s"
#~ msgstr "不能获取IP,将使用 127.0.0.1 %s"

#, python-format
#~ msgid ""
#~ "Access key %s has had %d failed authentications and will be locked out for "
#~ "%d minutes."
#~ msgstr "访问键 %s时,存在%d个失败的认证,将于%d分钟后解锁"

#, python-format
#~ msgid "Authenticated Request For %s:%s)"
#~ msgstr "为%s:%s申请认证"

#, python-format
#~ msgid "arg: %s\t\tval: %s"
#~ msgstr "键为: %s\t\t值为: %s"

#, python-format
#~ msgid "Getting x509 for user: %s on project: %s"
#~ msgstr "为用户 %s从工程%s中获取 x509"

#, python-format
#~ msgid "Create project %s managed by %s"
#~ msgstr "创建工程%s,此工程由%s管理"

#, python-format
#~ msgid "Unsupported API request: controller = %s,action = %s"
#~ msgstr "不支持的API请求: 控制器 = %s,执行 = %s"

#, python-format
#~ msgid "Adding sitewide role %s to user %s"
#~ msgstr "增加站点范围的 %s角色给用户 %s"

#, python-format
#~ msgid "Adding user %s to project %s"
#~ msgstr "增加用户%s到%s工程"

#, python-format
#~ msgid "Unauthorized request for controller=%s and action=%s"
#~ msgstr "对控制器=%s及动作=%s未经授权"

#, python-format
#~ msgid "Removing user %s from project %s"
#~ msgstr "正将用户%s从工程%s中移除"

#, python-format
#~ msgid "Adding role %s to user %s for project %s"
#~ msgstr "正将%s角色赋予用户%s(在工程%s中)"

#, python-format
#~ msgid "Removing role %s from user %s for project %s"
#~ msgstr "正将角色%s从用户%s在工程%s中移除"
#~ msgid "Wrong number of arguments."
#~ msgstr "错误参数个数。"

15
po/zh_TW.po
@@ -14,8 +14,8 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n"
"X-Generator: Launchpad (build 13573)\n"

#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -2787,3 +2787,14 @@ msgstr ""
#, python-format
msgid "Removing user %(user)s from project %(project)s"
msgstr ""

#~ msgid "No such process"
#~ msgstr "沒有此一程序"

#, python-format
#~ msgid "pidfile %s does not exist. Daemon not running?\n"
#~ msgstr "pidfile %s 不存在. Daemon未啟動?\n"

#, python-format
#~ msgid "Starting %s"
#~ msgstr "正在啟動 %s"