Resolved trunk merge conflicts
@@ -40,6 +40,7 @@ Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail
 Paul Voccio <paul@openstack.org>
 Rick Clark <rick@openstack.org>
 Rick Harris <rconradharris@gmail.com>
 Rob Kost <kost@isi.edu>
 Ryan Lane <rlane@wikimedia.org>
 Ryan Lucio <rlucio@internap.com>
 Salvatore Orlando <salvatore.orlando@eu.citrix.com>
@@ -5,6 +5,7 @@ graft CA
 graft doc
 graft smoketests
 graft tools
 graft etc
 include nova/api/openstack/notes.txt
 include nova/auth/novarc.template
 include nova/auth/slap.sh
@@ -36,6 +36,7 @@ gettext.install('nova', unicode=1)

 from nova import flags
 from nova import log as logging
+from nova import version
 from nova import wsgi

 logging.basicConfig()
@@ -79,6 +80,8 @@ def run_app(paste_config_file):

 if __name__ == '__main__':
     FLAGS(sys.argv)
+    LOG.audit(_("Starting nova-api node (version %s)"),
+              version.version_string_with_vcs())
     conf = wsgi.paste_config_file('nova-api.conf')
     if conf:
         run_app(conf)
@@ -49,7 +49,7 @@ if __name__ == '__main__':
     utils.default_flagfile()
     FLAGS(sys.argv)

-    direct.register_service('compute', compute_api.ComputeAPI())
+    direct.register_service('compute', compute_api.API())
     direct.register_service('reflect', direct.Reflection())
     router = direct.Router()
     with_json = direct.JsonParamsMiddleware(router)
@@ -79,7 +79,9 @@ from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import quota
+from nova import rpc
 from nova import utils
+from nova.api.ec2.cloud import ec2_id_to_id
 from nova.auth import manager
 from nova.cloudpipe import pipelib
 from nova.db import migration
@@ -95,6 +97,16 @@ flags.DECLARE('vpn_start', 'nova.network.manager')
 flags.DECLARE('fixed_range_v6', 'nova.network.manager')


+def param2id(object_id):
+    """Helper function to convert various id types to internal id.
+    args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
+    """
+    if '-' in object_id:
+        return ec2_id_to_id(object_id)
+    else:
+        return int(object_id)
+
+
 class VpnCommands(object):
     """Class for managing VPNs."""
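A minimal sketch (not part of the commit) of how param2id behaves, assuming the helper ec2_id_to_id parses the hex suffix of EC2-style ids such as 'vol-0000000a'::

    def ec2_id_to_id(ec2_id):
        # hypothetical stand-in: 'vol-0000000a' -> 10 (hex suffix)
        return int(ec2_id.split('-')[-1], 16)

    def param2id(object_id):
        # ids containing a dash are EC2-style; bare digits are internal ids
        if '-' in object_id:
            return ec2_id_to_id(object_id)
        return int(object_id)

    assert param2id('vol-0000000a') == 10
    assert param2id('10') == 10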
@@ -535,6 +547,46 @@ class DbCommands(object):
         print migration.db_version()


+class VolumeCommands(object):
+    """Methods for dealing with a cloud in an odd state"""
+
+    def delete(self, volume_id):
+        """Delete a volume, bypassing the check that it
+        must be available.
+        args: volume_id_id"""
+        ctxt = context.get_admin_context()
+        volume = db.volume_get(ctxt, param2id(volume_id))
+        host = volume['host']
+        if volume['status'] == 'in-use':
+            print "Volume is in-use."
+            print "Detach volume from instance and then try again."
+            return
+
+        rpc.cast(ctxt,
+                 db.queue_get_for(ctxt, FLAGS.volume_topic, host),
+                 {"method": "delete_volume",
+                  "args": {"volume_id": volume['id']}})
+
+    def reattach(self, volume_id):
+        """Re-attach a volume that has previously been attached
+        to an instance. Typically called after a compute host
+        has been rebooted.
+        args: volume_id_id"""
+        ctxt = context.get_admin_context()
+        volume = db.volume_get(ctxt, param2id(volume_id))
+        if not volume['instance_id']:
+            print "volume is not attached to an instance"
+            return
+        instance = db.instance_get(ctxt, volume['instance_id'])
+        host = instance['host']
+        rpc.cast(ctxt,
+                 db.queue_get_for(ctxt, FLAGS.compute_topic, host),
+                 {"method": "attach_volume",
+                  "args": {"instance_id": instance['id'],
+                           "volume_id": volume['id'],
+                           "mountpoint": volume['mountpoint']}})
+
+
 CATEGORIES = [
     ('user', UserCommands),
     ('project', ProjectCommands),
@@ -545,7 +597,8 @@ CATEGORIES = [
     ('network', NetworkCommands),
     ('service', ServiceCommands),
     ('log', LogCommands),
-    ('db', DbCommands)]
+    ('db', DbCommands),
+    ('volume', VolumeCommands)]


 def lazy_match(name, key_value_tuples):
@@ -22,6 +22,7 @@
 import eventlet
 eventlet.monkey_patch()

 import json
 import os
 import pprint
 import sys
@@ -38,7 +39,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)

 import gflags
 from nova import utils


 FLAGS = gflags.FLAGS
@@ -106,8 +106,12 @@ def do_request(controller, method, params=None):
                'X-OpenStack-Project': FLAGS.project}

     req = urllib2.Request(url, data, headers)
-    resp = urllib2.urlopen(req)
-    return json.loads(resp.read())
+    try:
+        resp = urllib2.urlopen(req)
+        return utils.loads(resp.read())
+    except urllib2.HTTPError, e:
+        print e.read()
+        sys.exit(1)


 if __name__ == '__main__':
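For context, a self-contained sketch of the error-handling pattern this hunk introduces (Python 2, as in the tool itself; the URL is a placeholder)::

    import sys
    import urllib2

    def fetch(url):
        req = urllib2.Request(url)
        try:
            resp = urllib2.urlopen(req)
            return resp.read()
        except urllib2.HTTPError, e:
            # non-2xx responses raise HTTPError; the body usually carries
            # the server's fault message, so show it and exit cleanly
            print e.read()
            sys.exit(1)

    print fetch('http://localhost:8001/')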
@@ -87,6 +87,7 @@ if [ "$CMD" == "install" ]; then
     sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot
     sudo apt-get install -y python-daemon python-eventlet python-gflags python-ipy
     sudo apt-get install -y python-libvirt python-libxml2 python-routes python-cheetah
+    sudo apt-get install -y python-paste python-pastedeploy
     #For IPV6
     sudo apt-get install -y python-netaddr
     sudo apt-get install -y radvd
@@ -31,7 +31,7 @@ If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gfl

 ::

-    sudo add-get install python-software-properties
+    sudo apt-get install python-software-properties
     sudo add-apt-repository ppa:nova-core/trunk
     sudo apt-get update
     sudo apt-get install python-twisted python-gflags
@@ -60,12 +60,13 @@ For background on the core objects referenced in this section, see :doc:`../obje
 Deployment
 ----------

-.. todo:: talk about deployment scenarios
+For a starting multi-node architecture, you would start with two nodes - a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the Nova database. The compute node installs all the nova- services but then refers to the database installation, which is hosted by the cloud controller node. Ensure that the nova.conf file is identical on each node. If you find performance issues not related to database reads or writes, but due to the messaging queue backing up, you could add additional messaging services (rabbitmq).

 .. toctree::
    :maxdepth: 1

    multi.node.install
+   dbsync


 Networking
@@ -1,20 +1,3 @@
-..
-      Copyright 2010-2011 United States Government as represented by the
-      Administrator of the National Aeronautics and Space Administration.
-
-      All Rights Reserved.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
 Installing Nova on Multiple Servers
 ===================================
@@ -26,13 +9,14 @@ through that process.

 You can install multiple nodes to increase performance and availability of the OpenStack Compute installation.

-This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved in the installation and configuration scripts as of October 18th 2010. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward.
+This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved either in packaging or bug-fixing. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward.
+
+For a starting architecture, these instructions describe installing a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the database. The compute node installs all the nova- services but then refers to the database installation, which is hosted by the cloud controller node.

 Requirements for a multi-node installation
 ------------------------------------------

-* You need a real database, compatible with SQLAlchemy (mysql, postgresql) There's not a specific reason to choose one over another, it basically depends what you know. MySQL is easier to do High Availability (HA) with, but people may already know Postgres. We should document both configurations, though.
+* You need a real database, compatible with SQLAlchemy (mysql, postgresql) There's not a specific reason to choose one over another, it basically depends what you know. MySQL is easier to do High Availability (HA) with, but people may already know PostgreSQL. We should document both configurations, though.
 * For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies.
 * For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL.
@@ -42,6 +26,44 @@ Assumptions
 * Networking is configured between/through the physical machines on a single subnet.
 * Installation and execution are both performed by ROOT user.

+Scripted Installation
+---------------------
+A script is available to get your OpenStack cloud running quickly. You can copy the file to the server where you want to install OpenStack Compute services - typically you would install a compute node and a cloud controller node.
+
+You must run these scripts with root permissions.
+
+From a server you intend to use as a cloud controller node, use this command to get the cloud controller script. This script is a work-in-progress and the maintainer plans to keep it up, but it is offered "as-is." Feel free to collaborate on it in GitHub - https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/.
+
+::
+
+    wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/Nova_CC_Installer_v0.1
+
+Ensure you can execute the script by modifying the permissions on the script file.
+
+::
+
+    sudo chmod 755 Nova_CC_Installer_v0.1
+
+::
+
+    sudo ./Nova_CC_Installer_v0.1
+
+Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. Copy the nova.conf from the cloud controller node to the compute node.
+
+Restart related services::
+
+    libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
+
+You can go to the `Configuration section`_ for next steps.
+
+Manual Installation - Step-by-Step
+----------------------------------
+The following sections show you how to install Nova manually with a cloud controller node and a separate compute node. The cloud controller node contains the database plus all nova- services, and the compute node runs nova- services only.
+
+Cloud Controller Installation
+`````````````````````````````
+On the cloud controller node, you install nova services and the related helper applications, and then configure with the nova.conf file. You will then copy the nova.conf file to the compute node, which you install as a second node in the `Compute Installation`_.
+
 Step 1 - Use apt-get to get the latest code
 -------------------------------------------
@@ -59,17 +81,16 @@ Step 1 - Use apt-get to get the latest code

     sudo apt-get update

-3. Install nova-pkgs (dependencies should be automatically installed).
+3. Install python required packages, nova-packages, and helper apps.

 ::

-    sudo apt-get install python-greenlet
-    sudo apt-get install nova-common nova-doc python-nova nova-api nova-network nova-objectstore nova-scheduler
+    sudo apt-get install python-greenlet python-mysqldb python-nova nova-common nova-doc nova-api nova-network nova-objectstore nova-scheduler nova-compute euca2ools unzip

 It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1!

 Step 2 Set up configuration file (installed in /etc/nova)
---------------------------------------------------------
+---------------------------------------------------------

 1. Nova development has consolidated all config files to nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf:
@@ -154,9 +175,29 @@ Step 3 - Setup the SQL DB (MySQL for this setup)

     sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
     service mysql restart

-3. Network Configuration
+4. MySQL DB configuration:

-If you use FlatManager (as opposed to VlanManager that we set) as your network manager, there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically.
+Create NOVA database::
+
+    mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
+
+Update the DB to include user 'root'@'%' with super user privileges::
+
+    mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;"
+
+Set mySQL root password::
+
+    mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
+
+Compute Node Installation
+`````````````````````````
+
+Repeat steps 1 and 2 from the Cloud Controller Installation section above, then configure the network for your Compute instances on the Compute node. Copy the nova.conf file from the Cloud Controller node to this node.
+
+Network Configuration
+---------------------
+
+If you use FlatManager as your network manager (as opposed to VlanManager that is shown in the nova.conf example above), there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically.

 Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
@@ -179,31 +220,24 @@ Next, restart networking to apply the changes::

     sudo /etc/init.d/networking restart

-4. MySQL DB configuration:
+Configuration
+`````````````

-Create NOVA database::
+On the Compute node, you should continue with these configuration steps.

-    mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
+Step 1 - Set up the Nova environment
+------------------------------------

-Update the DB to include user 'root'@'%' with super user privileges::
-
-    mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;"
-
-Set mySQL root password::
-
-    mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
-
-Step 4 - Setup Nova environment
--------------------------------
-
-These are the commands you run to set up a user and project::
+These are the commands you run to update the database if needed, and then set up a user and project::

+    /usr/bin/python /usr/bin/nova-manage db sync
     /usr/bin/python /usr/bin/nova-manage user admin <user_name>
     /usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
     /usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project>

 Here is an example of what this looks like with real data::

+    /usr/bin/python /usr/bin/nova-manage db sync
     /usr/bin/python /usr/bin/nova-manage user admin dub
     /usr/bin/python /usr/bin/nova-manage project create dubproject dub
     /usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255
@@ -215,7 +249,7 @@ Note: The nova-manage service assumes that the first IP address is your network

 On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. This is ONLY necessary if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device.


-Step 5 - Create Nova certifications
+Step 2 - Create Nova certifications
 -----------------------------------

 1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and all the other assorted api functions.
@@ -233,14 +267,14 @@ Step 5 - Create Nova certifications

     cat /root/creds/novarc >> ~/.bashrc
     source ~/.bashrc

-Step 6 - Restart all relevant services
+Step 3 - Restart all relevant services
 --------------------------------------

 Restart all six services in total, just to cover the entire spectrum::

     libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart

-Step 7 - Closing steps, and cleaning up
+Step 4 - Closing steps, and cleaning up
 ---------------------------------------

 One of the most commonly missed configuration areas is not allowing the proper access to VMs. Use the 'euca-authorize' command to enable access. Below, you will find the commands to allow 'ping' and 'ssh' to your VMs::
@@ -253,8 +287,8 @@ Another common issue is you cannot ping or SSH your instances after issuing the

     killall dnsmasq
     service nova-network restart

-Step 8 – Testing the installation
----------------------------------
+Testing the Installation
+````````````````````````

 You can then use `euca2ools` to test some items::
@@ -267,13 +301,15 @@ If you have issues with the API key, you may need to re-source your creds file::

 If you don’t get any immediate errors, you’re successfully making calls to your cloud!

-Step 9 - Spinning up a VM for testing
--------------------------------------
+Spinning up a VM for Testing
+````````````````````````````

 (This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.)

 The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM.

 UPDATE: Due to `bug 661159 <https://bugs.launchpad.net/nova/+bug/661159>`_, we can’t use images without ramdisks yet, so we can’t use the classic Ubuntu cloud images from http://uec-images.ubuntu.com/releases/ yet. For the sake of this tutorial, we’ll use the `ttylinux images from Scott Moser instead <http://smoser.brickies.net/ubuntu/ttylinux-uec/>`_.

 Download the image, and publish to your bucket:

 ::
@@ -325,4 +361,3 @@ You can determine the instance-id with `euca-describe-instances`, and the format

 For more information on creating your own custom (production ready) instance images, please visit http://wiki.openstack.org/GettingImages.

 Enjoy your new private cloud, and play responsibly!
@@ -159,7 +159,7 @@ To make things easier, we've provided a small image on the Rackspace CDN. Use th
     Resolving cblah2.cdn.cloudfiles.rackspacecloud.com... 208.111.196.6, 208.111.196.7
     Connecting to cblah2.cdn.cloudfiles.rackspacecloud.com|208.111.196.6|:80... connected.
     HTTP request sent, awaiting response... 200 OK
-    Length: 58520278 (56M) [appication/x-gzip]
+    Length: 58520278 (56M) [application/x-gzip]
     Saving to: `images.tgz'

     100%[======================================>] 58,520,278 14.1M/s   in 3.9s
@@ -11,7 +11,14 @@ use = egg:Paste#urlmap
 /services/Cloud: ec2cloud
 /services/Admin: ec2admin
 /latest: ec2metadata
-/20: ec2metadata
+/2007-01-19: ec2metadata
+/2007-03-01: ec2metadata
+/2007-08-29: ec2metadata
+/2007-10-10: ec2metadata
+/2007-12-15: ec2metadata
+/2008-02-01: ec2metadata
+/2008-09-01: ec2metadata
+/2009-04-04: ec2metadata
 /1.0: ec2metadata

 [pipeline:ec2cloud]
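A toy illustration of the urlmap idea (plain Python, not the Paste API): each mount point maps a path prefix to an app, so every dated metadata path above lands on the same ec2metadata application::

    ROUTES = {
        '/services/Cloud': 'ec2cloud',
        '/latest': 'ec2metadata',
        '/2009-04-04': 'ec2metadata',
        '/1.0': 'ec2metadata',
    }

    def dispatch(path):
        # longest prefix wins, mirroring urlmap-style dispatch
        for prefix in sorted(ROUTES, key=len, reverse=True):
            if path == prefix or path.startswith(prefix + '/'):
                return ROUTES[prefix]
        return None

    assert dispatch('/2009-04-04/meta-data/') == 'ec2metadata'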
@@ -142,9 +142,15 @@ class Reflection(object):
             if argspec[2]:
                 args_out.insert(0, ('**%s' % argspec[2],))

+            if f.__doc__:
+                short_doc = f.__doc__.split('\n')[0]
+                doc = f.__doc__
+            else:
+                short_doc = doc = _('not available')
+
             methods['/%s/%s' % (route, k)] = {
-                    'short_doc': f.__doc__.split('\n')[0],
-                    'doc': f.__doc__,
+                    'short_doc': short_doc,
+                    'doc': doc,
                     'name': k,
                     'args': list(reversed(args_out))}
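Why the guard matters, as a standalone sketch: __doc__ is None for an undocumented method, and None.split() raises AttributeError, so the old code broke reflection for any handler without a docstring::

    def documented():
        """First line.

        Detail."""

    def undocumented():
        pass

    for f in (documented, undocumented):
        # mirror the guarded pattern from the hunk above
        if f.__doc__:
            short_doc = f.__doc__.split('\n')[0]
            doc = f.__doc__
        else:
            short_doc = doc = 'not available'
        print(short_doc)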
@@ -196,6 +202,8 @@ class ServiceWrapper(wsgi.Controller):
         # TODO(termie): do some basic normalization on methods
         method = getattr(self.service_handle, action)

+        # NOTE(vish): make sure we have no unicode keys for py2.6.
+        params = dict([(str(k), v) for (k, v) in params.iteritems()])
         result = method(context, **params)
         if type(result) is dict or type(result) is list:
             return self._serialize(result, req)
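A quick sketch of the Python 2.6 issue the NOTE refers to: JSON-decoded parameter names arrive as unicode, and 2.6 rejects unicode keyword names in **kwargs, so the keys are coerced to str first (Python 2, matching the code above)::

    def method(context, **kwargs):
        return kwargs

    params = {u'instance_id': 42}
    # coerce unicode keys to str before splatting into **kwargs
    params = dict([(str(k), v) for (k, v) in params.iteritems()])
    print method(None, **params)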
@@ -59,7 +59,7 @@ def _gen_key(context, user_id, key_name):
     # creation before creating key_pair
     try:
         db.key_pair_get(context, user_id, key_name)
-        raise exception.Duplicate("The key_pair %s already exists"
+        raise exception.Duplicate(_("The key_pair %s already exists")
                                   % key_name)
     except exception.NotFound:
         pass
@@ -133,7 +133,7 @@ class CloudController(object):
         return result

     def _get_availability_zone_by_host(self, context, host):
-        services = db.service_get_all_by_host(context, host)
+        services = db.service_get_all_by_host(context.elevated(), host)
         if len(services) > 0:
             return services[0]['availability_zone']
         return 'unknown zone'
@@ -119,8 +119,8 @@ class DbDriver(object):
         for member_uid in member_uids:
             member = db.user_get(context.get_admin_context(), member_uid)
             if not member:
-                raise exception.NotFound("Project can't be created "
-                                         "because user %s doesn't exist"
+                raise exception.NotFound(_("Project can't be created "
+                                           "because user %s doesn't exist")
                                          % member_uid)
             members.add(member)
@@ -146,7 +146,7 @@ class LdapDriver(object):
     def create_user(self, name, access_key, secret_key, is_admin):
         """Create a user"""
         if self.__user_exists(name):
-            raise exception.Duplicate("LDAP user %s already exists" % name)
+            raise exception.Duplicate(_("LDAP user %s already exists") % name)
         if FLAGS.ldap_user_modify_only:
             if self.__ldap_user_exists(name):
                 # Retrieve user by name
@@ -310,7 +310,7 @@ class LdapDriver(object):
     def delete_user(self, uid):
         """Delete a user"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s doesn't exist" % uid)
+            raise exception.NotFound(_("User %s doesn't exist") % uid)
         self.__remove_from_all(uid)
         if FLAGS.ldap_user_modify_only:
             # Delete attributes
@@ -432,15 +432,15 @@ class LdapDriver(object):
                        description, member_uids=None):
         """Create a group"""
         if self.__group_exists(group_dn):
-            raise exception.Duplicate("Group can't be created because "
-                                      "group %s already exists" % name)
+            raise exception.Duplicate(_("Group can't be created because "
+                                        "group %s already exists") % name)
         members = []
         if member_uids is not None:
             for member_uid in member_uids:
                 if not self.__user_exists(member_uid):
-                    raise exception.NotFound("Group can't be created "
-                                             "because user %s doesn't exist" %
-                                             member_uid)
+                    raise exception.NotFound(_("Group can't be created "
+                                               "because user %s doesn't exist")
+                                             % member_uid)
                 members.append(self.__uid_to_dn(member_uid))
         dn = self.__uid_to_dn(uid)
         if not dn in members:
@@ -455,8 +455,8 @@ class LdapDriver(object):
     def __is_in_group(self, uid, group_dn):
         """Check if user is in group"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be searched in group "
-                                     "because the user doesn't exist" % uid)
+            raise exception.NotFound(_("User %s can't be searched in group "
+                                       "because the user doesn't exist") % uid)
         if not self.__group_exists(group_dn):
             return False
         res = self.__find_object(group_dn,
@@ -467,10 +467,10 @@ class LdapDriver(object):
     def __add_to_group(self, uid, group_dn):
         """Add user to group"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be added to the group "
-                                     "because the user doesn't exist" % uid)
+            raise exception.NotFound(_("User %s can't be added to the group "
+                                       "because the user doesn't exist") % uid)
         if not self.__group_exists(group_dn):
-            raise exception.NotFound("The group at dn %s doesn't exist" %
+            raise exception.NotFound(_("The group at dn %s doesn't exist") %
                                      group_dn)
         if self.__is_in_group(uid, group_dn):
             raise exception.Duplicate(_("User %(uid)s is already a member of "
@@ -481,15 +481,15 @@ class LdapDriver(object):
     def __remove_from_group(self, uid, group_dn):
         """Remove user from group"""
         if not self.__group_exists(group_dn):
-            raise exception.NotFound("The group at dn %s doesn't exist" %
-                                     group_dn)
+            raise exception.NotFound(_("The group at dn %s doesn't exist")
+                                     % group_dn)
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be removed from the "
-                                     "group because the user doesn't exist" %
-                                     uid)
+            raise exception.NotFound(_("User %s can't be removed from the "
+                                       "group because the user doesn't exist")
+                                     % uid)
         if not self.__is_in_group(uid, group_dn):
-            raise exception.NotFound("User %s is not a member of the group" %
-                                     uid)
+            raise exception.NotFound(_("User %s is not a member of the group")
+                                     % uid)
         # NOTE(vish): remove user from group and any sub_groups
         sub_dns = self.__find_group_dns_with_member(group_dn, uid)
         for sub_dn in sub_dns:
@@ -509,8 +509,9 @@ class LdapDriver(object):
     def __remove_from_all(self, uid):
         """Remove user from all roles and projects"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be removed from all "
-                                     "because the user doesn't exist" % uid)
+            raise exception.NotFound(_("User %s can't be removed from all "
+                                       "because the user doesn't exist")
+                                     % uid)
         role_dns = self.__find_group_dns_with_member(
                 FLAGS.role_project_subtree, uid)
         for role_dn in role_dns:
@@ -249,13 +249,16 @@ class API(base.Base):
         # ..then we distill the security groups to which they belong..
         security_groups = set()
         for rule in security_group_rules:
-            security_groups.add(rule['parent_group_id'])
+            security_group = self.db.security_group_get(
+                                                    context,
+                                                    rule['parent_group_id'])
+            security_groups.add(security_group)

         # ..then we find the instances that are members of these groups..
         instances = set()
         for security_group in security_groups:
             for instance in security_group['instances']:
-                instances.add(instance['id'])
+                instances.add(instance)

         # ...then we find the hosts where they live...
         hosts = set()
@@ -67,7 +67,7 @@ class ConsoleProxyManager(manager.Manager):
                                                       pool['id'],
                                                       instance_id)
         except exception.NotFound:
-            logging.debug("Adding console")
+            logging.debug(_("Adding console"))
             if not password:
                 password = self.driver.generate_password()
             if not port:
@@ -96,7 +96,7 @@ class XVPConsoleProxy(object):
         return os.urandom(length * 2).encode('base64')[:length]

     def _rebuild_xvp_conf(self, context):
-        logging.debug("Rebuilding xvp conf")
+        logging.debug(_("Rebuilding xvp conf"))
         pools = [pool for pool in
                  db.console_pool_get_all_by_host_type(context, self.host,
                                                       self.console_type)
@@ -113,12 +113,12 @@ class XVPConsoleProxy(object):
         self._xvp_restart()

     def _write_conf(self, config):
-        logging.debug('Re-wrote %s' % FLAGS.console_xvp_conf)
+        logging.debug(_('Re-wrote %s') % FLAGS.console_xvp_conf)
         with open(FLAGS.console_xvp_conf, 'w') as cfile:
             cfile.write(config)

     def _xvp_stop(self):
-        logging.debug("Stopping xvp")
+        logging.debug(_("Stopping xvp"))
         pid = self._xvp_pid()
         if not pid:
             return
@@ -131,19 +131,19 @@ class XVPConsoleProxy(object):
     def _xvp_start(self):
         if self._xvp_check_running():
             return
-        logging.debug("Starting xvp")
+        logging.debug(_("Starting xvp"))
         try:
             utils.execute('xvp -p %s -c %s -l %s' %
                           (FLAGS.console_xvp_pid,
                            FLAGS.console_xvp_conf,
                            FLAGS.console_xvp_log))
         except exception.ProcessExecutionError, err:
-            logging.error("Error starting xvp: %s" % err)
+            logging.error(_("Error starting xvp: %s") % err)

     def _xvp_restart(self):
-        logging.debug("Restarting xvp")
+        logging.debug(_("Restarting xvp"))
         if not self._xvp_check_running():
-            logging.debug("xvp not running...")
+            logging.debug(_("xvp not running..."))
             self._xvp_start()
         else:
             pid = self._xvp_pid()
@@ -778,7 +778,7 @@ def instance_get_by_id(context, instance_id):
         result = session.query(models.Instance).\
-                         options(joinedload_all('fixed_ip.floating_ips')).\
                          options(joinedload('security_groups')).\
                          options(joinedload_all('fixed_ip.floating_ips')).\
+                         options(joinedload_all('fixed_ip.network')).\
                          filter_by(id=instance_id).\
                          filter_by(deleted=can_read_deleted(context)).\
                          first()
@@ -786,6 +786,7 @@ def instance_get_by_id(context, instance_id):
         result = session.query(models.Instance).\
                          options(joinedload('security_groups')).\
                          options(joinedload_all('fixed_ip.floating_ips')).\
+                         options(joinedload_all('fixed_ip.network')).\
                          filter_by(project_id=context.project_id).\
                          filter_by(id=instance_id).\
                          filter_by(deleted=False).\
@@ -40,15 +40,15 @@ from nova import version
 FLAGS = flags.FLAGS

 flags.DEFINE_string('logging_context_format_string',
-                    '(%(name)s %(nova_version)s): %(levelname)s '
+                    '%(asctime)s %(levelname)s %(name)s '
                     '[%(request_id)s %(user)s '
                     '%(project)s] %(message)s',
-                    'format string to use for log messages')
+                    'format string to use for log messages with context')

 flags.DEFINE_string('logging_default_format_string',
-                    '(%(name)s %(nova_version)s): %(levelname)s [N/A] '
+                    '%(asctime)s %(levelname)s %(name)s [-] '
                     '%(message)s',
-                    'format string to use for log messages')
+                    'format string to use for log messages without context')

 flags.DEFINE_string('logging_debug_format_suffix',
                     'from %(processName)s (pid=%(process)d) %(funcName)s'
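A hedged sketch of how a context format string like the new one is consumed: the named placeholders are filled from extra fields attached to the LogRecord (the logger name and field values here are illustrative, not from the commit)::

    import logging

    fmt = ('%(asctime)s %(levelname)s %(name)s '
           '[%(request_id)s %(user)s %(project)s] %(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(fmt))
    log = logging.getLogger('nova.demo')
    log.addHandler(handler)
    # the 'extra' dict supplies request_id/user/project for the record
    log.error('unable to reach queue',
              extra={'request_id': 'req-1', 'user': 'admin',
                     'project': 'proj'})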
@@ -212,7 +212,7 @@ class NetworkManager(manager.Manager):

     def release_fixed_ip(self, context, mac, address):
         """Called by dhcp-bridge when ip is released."""
-        LOG.debug("Releasing IP %s", address, context=context)
+        LOG.debug(_("Releasing IP %s"), address, context=context)
         fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
         instance_ref = fixed_ip_ref['instance']
         if not instance_ref:
@@ -391,8 +391,8 @@ class ImagesResource(resource.Resource):
         image_location = get_argument(request, 'image_location', u'')

         image_path = os.path.join(FLAGS.images_path, image_id)
-        if not image_path.startswith(FLAGS.images_path) or \
-           os.path.exists(image_path):
+        if ((not image_path.startswith(FLAGS.images_path)) or
+                os.path.exists(image_path)):
             LOG.audit(_("Not authorized to upload image: invalid directory "
                         "%s"),
                       image_path, context=request.context)
@@ -259,22 +259,25 @@ class Image(object):
                                   process_input=encrypted_key,
                                   check_exit_code=False)
         if err:
-            raise exception.Error("Failed to decrypt private key: %s" % err)
+            raise exception.Error(_("Failed to decrypt private key: %s")
+                                  % err)
         iv, err = utils.execute(
                 'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
                 process_input=encrypted_iv,
                 check_exit_code=False)
         if err:
-            raise exception.Error("Failed to decrypt initialization "
-                                  "vector: %s" % err)
+            raise exception.Error(_("Failed to decrypt initialization "
+                                    "vector: %s") % err)

         _out, err = utils.execute(
                 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
                 % (encrypted_filename, key, iv, decrypted_filename),
                 check_exit_code=False)
         if err:
-            raise exception.Error("Failed to decrypt image file %s : %s" %
-                                  (encrypted_filename, err))
+            raise exception.Error(_("Failed to decrypt image file "
+                                    "%(image_file)s: %(err)s") %
+                                  {'image_file': encrypted_filename,
+                                   'err': err})

     @staticmethod
     def untarzip_image(path, filename):
@@ -344,7 +344,7 @@ def call(context, topic, msg):

 def cast(context, topic, msg):
     """Sends a message on a topic without waiting for a response"""
-    LOG.debug("Making asynchronous cast...")
+    LOG.debug(_("Making asynchronous cast..."))
     _pack_context(msg, context)
     conn = Connection.instance()
     publisher = TopicPublisher(connection=conn, topic=topic)
@@ -48,7 +48,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             service = db.service_get_by_args(context.elevated(), host,
                                              'nova-compute')
             if not self.service_is_up(service):
-                raise driver.WillNotSchedule("Host %s is not alive" % host)
+                raise driver.WillNotSchedule(_("Host %s is not alive") % host)

             # TODO(vish): this probably belongs in the manager, if we
             #             can generalize this somehow
@@ -80,7 +80,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             service = db.service_get_by_args(context.elevated(), host,
                                              'nova-volume')
             if not self.service_is_up(service):
-                raise driver.WillNotSchedule("Host %s not available" % host)
+                raise driver.WillNotSchedule(_("Host %s not available") % host)

             # TODO(vish): this probably belongs in the manager, if we
             #             can generalize this somehow
@@ -38,6 +38,7 @@ from nova import log as logging
 from nova import flags
 from nova import rpc
 from nova import utils
+from nova import version


 FLAGS = flags.FLAGS
@@ -156,7 +157,8 @@ class Service(object):
             report_interval = FLAGS.report_interval
         if not periodic_interval:
             periodic_interval = FLAGS.periodic_interval
-        logging.audit(_("Starting %s node"), topic)
+        logging.audit(_("Starting %s node (version %s)"), topic,
+                      version.version_string_with_vcs())
         service_obj = cls(host, binary, topic, manager,
                           report_interval, periodic_interval)
@@ -156,7 +156,7 @@ def WrapTwistedOptions(wrapped):
             try:
                 self.parseArgs(*argv)
             except TypeError:
-                raise usage.UsageError("Wrong number of arguments.")
+                raise usage.UsageError(_("Wrong number of arguments."))

             self.postOptions()
             return args
@@ -220,7 +220,7 @@ def stop(pidfile):
             time.sleep(0.1)
     except OSError, err:
         err = str(err)
-        if err.find("No such process") > 0:
+        if err.find(_("No such process")) > 0:
             if os.path.exists(pidfile):
                 os.remove(pidfile)
         else:
@@ -511,7 +511,6 @@ class LibvirtConnection(object):
         base_dir = os.path.join(FLAGS.instances_path, '_base')
         if not os.path.exists(base_dir):
             os.mkdir(base_dir)
-            os.chmod(base_dir, 0777)
         base = os.path.join(base_dir, fname)
         if not os.path.exists(base):
             fn(target=base, *args, **kwargs)
@@ -542,7 +541,6 @@ class LibvirtConnection(object):

         # ensure directories exist and are writable
         utils.execute('mkdir -p %s' % basepath(suffix=''))
-        utils.execute('chmod 0777 %s' % basepath(suffix=''))

         LOG.info(_('instance %s: Creating image'), inst['name'])
         f = open(basepath('libvirt.xml'), 'w')
@@ -734,7 +732,8 @@ class LibvirtConnection(object):
                 'cpu_time': cpu_time}

     def get_diagnostics(self, instance_name):
-        raise exception.APIError("diagnostics are not supported for libvirt")
+        raise exception.APIError(_("diagnostics are not supported "
+                                   "for libvirt"))

     def get_disks(self, instance_name):
         """
@@ -100,6 +100,14 @@ class VolumeDriver(object):

     def delete_volume(self, volume):
         """Deletes a logical volume."""
+        try:
+            self._try_execute("sudo lvdisplay %s/%s" %
+                              (FLAGS.volume_group,
+                               volume['name']))
+        except Exception as e:
+            # If the volume isn't present, then don't attempt to delete
+            return True
+
         self._try_execute("sudo lvremove -f %s/%s" %
                           (FLAGS.volume_group,
                            volume['name']))
@@ -218,8 +226,14 @@ class ISCSIDriver(VolumeDriver):

     def ensure_export(self, context, volume):
         """Synchronously recreates an export for a logical volume."""
-        iscsi_target = self.db.volume_get_iscsi_target_num(context,
-                                                           volume['id'])
+        try:
+            iscsi_target = self.db.volume_get_iscsi_target_num(context,
+                                                               volume['id'])
+        except exception.NotFound:
+            LOG.info(_("Skipping ensure_export. No iscsi_target " +
+                       "provisioned for volume: %d"), volume['id'])
+            return
+
         iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
         volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
         self._sync_exec("sudo ietadm --op new "
@@ -258,8 +272,23 @@ class ISCSIDriver(VolumeDriver):

     def remove_export(self, context, volume):
         """Removes an export for a logical volume."""
-        iscsi_target = self.db.volume_get_iscsi_target_num(context,
-                                                           volume['id'])
+        try:
+            iscsi_target = self.db.volume_get_iscsi_target_num(context,
+                                                               volume['id'])
+        except exception.NotFound:
+            LOG.info(_("Skipping remove_export. No iscsi_target " +
+                       "provisioned for volume: %d"), volume['id'])
+            return
+
+        try:
+            # ietadm show will exit with an error
+            # this export has already been removed
+            self._execute("sudo ietadm --op show --tid=%s " % iscsi_target)
+        except Exception as e:
+            LOG.info(_("Skipping remove_export. No iscsi_target " +
+                       "is presently exported for volume: %d"), volume['id'])
+            return
+
         self._execute("sudo ietadm --op delete --tid=%s "
                       "--lun=0" % iscsi_target)
         self._execute("sudo ietadm --op delete --tid=%s" %
@@ -84,7 +84,10 @@ class VolumeManager(manager.Manager):
         volumes = self.db.volume_get_all_by_host(ctxt, self.host)
         LOG.debug(_("Re-exporting %s volumes"), len(volumes))
         for volume in volumes:
-            self.driver.ensure_export(ctxt, volume)
+            if volume['status'] in ['available', 'in-use']:
+                self.driver.ensure_export(ctxt, volume)
+            else:
+                LOG.info(_("volume %s: skipping export"), volume_ref['name'])

     def create_volume(self, context, volume_id):
         """Creates and exports the volume."""
@@ -99,14 +102,19 @@ class VolumeManager(manager.Manager):
         # before passing it to the driver.
         volume_ref['host'] = self.host

+        try:
             vol_name = volume_ref['name']
             vol_size = volume_ref['size']
-            LOG.debug(_("volume %(vol_name)s: creating lv of size %(vol_size)sG")
-                      % locals())
+            LOG.debug(_("volume %(vol_name)s: creating lv of"
+                        " size %(vol_size)sG") % locals())
             self.driver.create_volume(volume_ref)

             LOG.debug(_("volume %s: creating export"), volume_ref['name'])
             self.driver.create_export(context, volume_ref)
+        except Exception as e:
+            self.db.volume_update(context,
+                                  volume_ref['id'], {'status': 'error'})
+            raise e

         now = datetime.datetime.utcnow()
         self.db.volume_update(context,
@@ -123,10 +131,18 @@ class VolumeManager(manager.Manager):
             raise exception.Error(_("Volume is still attached"))
         if volume_ref['host'] != self.host:
             raise exception.Error(_("Volume is not local to this node"))

+        try:
             LOG.debug(_("volume %s: removing export"), volume_ref['name'])
             self.driver.remove_export(context, volume_ref)
             LOG.debug(_("volume %s: deleting"), volume_ref['name'])
             self.driver.delete_volume(volume_ref)
+        except Exception as e:
+            self.db.volume_update(context,
+                                  volume_ref['id'],
+                                  {'status': 'error_deleting'})
+            raise e

         self.db.volume_destroy(context, volume_id)
         LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
         return True
@@ -144,7 +144,7 @@ class Application(object):
         See the end of http://pythonpaste.org/webob/modules/dec.html
         for more info.
         """
-        raise NotImplementedError("You must implement __call__")
+        raise NotImplementedError(_("You must implement __call__"))


 class Middleware(Application):
@@ -19,6 +19,8 @@
 # that we need.
 #

+import gettext
+gettext.install('nova', unicode=1)
 import httplib
 import logging
 import logging.handlers
@@ -60,7 +62,7 @@ def ignore_failure(func, *args, **kwargs):
     try:
         return func(*args, **kwargs)
     except XenAPI.Failure, e:
-        logging.error('Ignoring XenAPI.Failure %s', e)
+        logging.error(_('Ignoring XenAPI.Failure %s'), e)
         return None
@@ -78,19 +80,25 @@ def validate_exists(args, key, default=None):
     """
     if key in args:
         if len(args[key]) == 0:
-            raise ArgumentError('Argument %r value %r is too short.' %
-                                (key, args[key]))
+            raise ArgumentError(_('Argument %(key)s value %(value)s is too '
+                                  'short.') %
+                                {'key': key,
+                                 'value': args[key]})
         if not ARGUMENT_PATTERN.match(args[key]):
-            raise ArgumentError('Argument %r value %r contains invalid '
-                                'characters.' % (key, args[key]))
+            raise ArgumentError(_('Argument %(key)s value %(value)s contains '
+                                  'invalid characters.') %
+                                {'key': key,
+                                 'value': args[key]})
         if args[key][0] == '-':
-            raise ArgumentError('Argument %r value %r starts with a hyphen.'
-                                % (key, args[key]))
+            raise ArgumentError(_('Argument %(key)s value %(value)s starts '
+                                  'with a hyphen.') %
+                                {'key': key,
+                                 'value': args[key]})
         return args[key]
     elif default is not None:
         return default
     else:
-        raise ArgumentError('Argument %s is required.' % key)
+        raise ArgumentError(_('Argument %s is required.') % key)


 def validate_bool(args, key, default=None):
@@ -105,8 +113,10 @@ def validate_bool(args, key, default=None):
     elif value.lower() == 'false':
         return False
     else:
-        raise ArgumentError("Argument %s may not take value %r. "
-                            "Valid values are ['true', 'false']." % (key, value))
+        raise ArgumentError(_("Argument %(key)s may not take value %(value)s. "
+                              "Valid values are ['true', 'false'].")
+                            % {'key': key,
+                               'value': value})


 def exists(args, key):
@@ -116,7 +126,7 @@ def exists(args, key):
     if key in args:
         return args[key]
     else:
-        raise ArgumentError('Argument %s is required.' % key)
+        raise ArgumentError(_('Argument %s is required.') % key)


 def optional(args, key):
@@ -149,8 +159,13 @@ def create_vdi(session, sr_ref, name_label, virtual_size, read_only):
                'other_config': {},
                'sm_config': {},
                'tags': []})
-    logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label,
-                  virtual_size, read_only, sr_ref)
+    logging.debug(_('Created VDI %(vdi_ref)s (%(label)s, %(size)s, '
+                    '%(read_only)s) on %(sr_ref)s.') %
+                  {'vdi_ref': vdi_ref,
+                   'label': name_label,
+                   'size': virtual_size,
+                   'read_only': read_only,
+                   'sr_ref': sr_ref})
     return vdi_ref
@@ -169,19 +184,19 @@ def with_vdi_in_dom0(session, vdi, read_only, f):
     vbd_rec['qos_algorithm_type'] = ''
     vbd_rec['qos_algorithm_params'] = {}
     vbd_rec['qos_supported_algorithms'] = []
-    logging.debug('Creating VBD for VDI %s ... ', vdi)
+    logging.debug(_('Creating VBD for VDI %s ... '), vdi)
     vbd = session.xenapi.VBD.create(vbd_rec)
-    logging.debug('Creating VBD for VDI %s done.', vdi)
+    logging.debug(_('Creating VBD for VDI %s done.'), vdi)
     try:
-        logging.debug('Plugging VBD %s ... ', vbd)
+        logging.debug(_('Plugging VBD %s ... '), vbd)
         session.xenapi.VBD.plug(vbd)
-        logging.debug('Plugging VBD %s done.', vbd)
+        logging.debug(_('Plugging VBD %s done.'), vbd)
         return f(session.xenapi.VBD.get_device(vbd))
     finally:
-        logging.debug('Destroying VBD for VDI %s ... ', vdi)
+        logging.debug(_('Destroying VBD for VDI %s ... '), vdi)
         vbd_unplug_with_retry(session, vbd)
         ignore_failure(session.xenapi.VBD.destroy, vbd)
-        logging.debug('Destroying VBD for VDI %s done.', vdi)
+        logging.debug(_('Destroying VBD for VDI %s done.'), vdi)
@@ -192,19 +207,20 @@ def vbd_unplug_with_retry(session, vbd):
     while True:
         try:
             session.xenapi.VBD.unplug(vbd)
-            logging.debug('VBD.unplug successful first time.')
+            logging.debug(_('VBD.unplug successful first time.'))
             return
         except XenAPI.Failure, e:
             if (len(e.details) > 0 and
                 e.details[0] == 'DEVICE_DETACH_REJECTED'):
-                logging.debug('VBD.unplug rejected: retrying...')
+                logging.debug(_('VBD.unplug rejected: retrying...'))
                 time.sleep(1)
             elif (len(e.details) > 0 and
                   e.details[0] == 'DEVICE_ALREADY_DETACHED'):
-                logging.debug('VBD.unplug successful eventually.')
+                logging.debug(_('VBD.unplug successful eventually.'))
                 return
             else:
-                logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e)
+                logging.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'),
+                              e)
                 return
@@ -60,7 +60,8 @@ class NovaTestRunner(core.TextTestRunner):
 if __name__ == '__main__':
     c = config.Config(stream=sys.stdout,
                       env=os.environ,
-                      verbosity=3)
+                      verbosity=3,
+                      plugins=core.DefaultPluginManager())

     runner = NovaTestRunner(stream=c.stream,
                             verbosity=c.verbosity,
run_tests.sh (32 lines changed)
@@ -31,46 +31,44 @@ always_venv=0
 never_venv=0
 force=0
 noseargs=

+wrapper=""
+
 for arg in "$@"; do
   process_option $arg
 done

+function run_tests {
+  # Just run the test suites in current environment
+  ${wrapper} rm -f nova.sqlite
+  ${wrapper} $NOSETESTS 2> run_tests.err.log
+}
+
 NOSETESTS="python run_tests.py $noseargs"

-if [ $never_venv -eq 1 ]; then
-  # Just run the test suites in current environment
-  rm -f nova.sqlite
-  $NOSETESTS 2> run_tests.err.log
-  exit
-fi
-
+if [ $never_venv -eq 0 ]
+then
   # Remove the virtual environment if --force used
   if [ $force -eq 1 ]; then
     echo "Cleaning virtualenv..."
     rm -rf ${venv}
   fi

   if [ -e ${venv} ]; then
-    ${with_venv} rm -f nova.sqlite
-    ${with_venv} $NOSETESTS 2> run_tests.err.log
+    wrapper="${with_venv}"
   else
     if [ $always_venv -eq 1 ]; then
       # Automatically install the virtualenv
       python tools/install_venv.py
+      wrapper="${with_venv}"
     else
       echo -e "No virtual environment found...create one? (Y/n) \c"
       read use_ve
       if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
         # Install the virtualenv and run the test suite in it
         python tools/install_venv.py
-      else
-        rm -f nova.sqlite
-        $NOSETESTS 2> run_tests.err.log
-        exit
+        wrapper=${with_venv}
       fi
     fi
-    ${with_venv} rm -f nova.sqlite
-    ${with_venv} $NOSETESTS 2> run_tests.err.log
   fi
 fi

 run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py bin/* nova setup.py || exit 1