Resolved trunk merge conflicts

Ed Leafe
2011-01-21 16:10:26 -05:00
37 changed files with 388 additions and 204 deletions

View File

@@ -40,6 +40,7 @@ Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail
 Paul Voccio <paul@openstack.org>
 Rick Clark <rick@openstack.org>
 Rick Harris <rconradharris@gmail.com>
+Rob Kost <kost@isi.edu>
 Ryan Lane <rlane@wikimedia.org>
 Ryan Lucio <rlucio@internap.com>
 Salvatore Orlando <salvatore.orlando@eu.citrix.com>

View File

@@ -5,6 +5,7 @@ graft CA
 graft doc
 graft smoketests
 graft tools
+graft etc
 include nova/api/openstack/notes.txt
 include nova/auth/novarc.template
 include nova/auth/slap.sh

View File

@@ -36,6 +36,7 @@ gettext.install('nova', unicode=1)
 from nova import flags
 from nova import log as logging
+from nova import version
 from nova import wsgi

 logging.basicConfig()
@@ -79,6 +80,8 @@ def run_app(paste_config_file):
 if __name__ == '__main__':
     FLAGS(sys.argv)
+    LOG.audit(_("Starting nova-api node (version %s)"),
+              version.version_string_with_vcs())
     conf = wsgi.paste_config_file('nova-api.conf')
     if conf:
         run_app(conf)

View File

@@ -49,7 +49,7 @@ if __name__ == '__main__':
     utils.default_flagfile()
     FLAGS(sys.argv)
-    direct.register_service('compute', compute_api.ComputeAPI())
+    direct.register_service('compute', compute_api.API())
     direct.register_service('reflect', direct.Reflection())
     router = direct.Router()
     with_json = direct.JsonParamsMiddleware(router)

View File

@@ -79,7 +79,9 @@ from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import quota
+from nova import rpc
 from nova import utils
+from nova.api.ec2.cloud import ec2_id_to_id
 from nova.auth import manager
 from nova.cloudpipe import pipelib
 from nova.db import migration
@@ -95,6 +97,16 @@ flags.DECLARE('vpn_start', 'nova.network.manager')
 flags.DECLARE('fixed_range_v6', 'nova.network.manager')
+
+
+def param2id(object_id):
+    """Helper function to convert various id types to internal id.
+    args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
+    """
+    if '-' in object_id:
+        return ec2_id_to_id(object_id)
+    else:
+        return int(object_id)
+
+
 class VpnCommands(object):
     """Class for managing VPNs."""
@@ -535,6 +547,46 @@ class DbCommands(object):
         print migration.db_version()
+
+
+class VolumeCommands(object):
+    """Methods for dealing with a cloud in an odd state"""
+
+    def delete(self, volume_id):
+        """Delete a volume, bypassing the check that it
+        must be available.
+        args: volume_id_id"""
+        ctxt = context.get_admin_context()
+        volume = db.volume_get(ctxt, param2id(volume_id))
+        host = volume['host']
+
+        if volume['status'] == 'in-use':
+            print "Volume is in-use."
+            print "Detach volume from instance and then try again."
+            return
+
+        rpc.cast(ctxt,
+                 db.queue_get_for(ctxt, FLAGS.volume_topic, host),
+                 {"method": "delete_volume",
+                  "args": {"volume_id": volume['id']}})
+
+    def reattach(self, volume_id):
+        """Re-attach a volume that has previously been attached
+        to an instance. Typically called after a compute host
+        has been rebooted.
+        args: volume_id_id"""
+        ctxt = context.get_admin_context()
+        volume = db.volume_get(ctxt, param2id(volume_id))
+        if not volume['instance_id']:
+            print "volume is not attached to an instance"
+            return
+        instance = db.instance_get(ctxt, volume['instance_id'])
+        host = instance['host']
+        rpc.cast(ctxt,
+                 db.queue_get_for(ctxt, FLAGS.compute_topic, host),
+                 {"method": "attach_volume",
+                  "args": {"instance_id": instance['id'],
+                           "volume_id": volume['id'],
+                           "mountpoint": volume['mountpoint']}})
+
+
 CATEGORIES = [
     ('user', UserCommands),
     ('project', ProjectCommands),
@@ -545,7 +597,8 @@ CATEGORIES = [
     ('network', NetworkCommands),
     ('service', ServiceCommands),
     ('log', LogCommands),
-    ('db', DbCommands)]
+    ('db', DbCommands),
+    ('volume', VolumeCommands)]


 def lazy_match(name, key_value_tuples):
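
The new 'volume' category wires VolumeCommands into the nova-manage dispatcher, so the delete and reattach helpers above become admin commands. A rough sketch of how they would be invoked (the volume id is illustrative, not taken from this change)::

   # accepts an ec2-style id (vol-0000000a) or a bare integer id,
   # converted by the param2id() helper added above
   nova-manage volume delete vol-0000000a
   nova-manage volume reattach vol-0000000a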

View File

@@ -22,6 +22,7 @@
 import eventlet
 eventlet.monkey_patch()

+import json
 import os
 import pprint
 import sys
@@ -38,7 +39,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)

 import gflags
-from nova import utils

 FLAGS = gflags.FLAGS
@@ -106,8 +106,12 @@ def do_request(controller, method, params=None):
                'X-OpenStack-Project': FLAGS.project}

     req = urllib2.Request(url, data, headers)
-    resp = urllib2.urlopen(req)
-    return utils.loads(resp.read())
+    try:
+        resp = urllib2.urlopen(req)
+    except urllib2.HTTPError, e:
+        print e.read()
+        sys.exit(1)
+    return json.loads(resp.read())


 if __name__ == '__main__':

View File

@@ -87,6 +87,7 @@ if [ "$CMD" == "install" ]; then
     sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot
     sudo apt-get install -y python-daemon python-eventlet python-gflags python-ipy
     sudo apt-get install -y python-libvirt python-libxml2 python-routes python-cheetah
+    sudo apt-get install -y python-paste python-pastedeploy
 #For IPV6
     sudo apt-get install -y python-netaddr
     sudo apt-get install -y radvd

View File

@@ -31,7 +31,7 @@ If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gfl
 ::

-  sudo add-get install python-software-properties
+  sudo apt-get install python-software-properties
   sudo add-apt-repository ppa:nova-core/trunk
   sudo apt-get update
   sudo apt-get install python-twisted python-gflags

View File

@@ -60,12 +60,13 @@ For background on the core objects referenced in this section, see :doc:`../obje
 Deployment
 ----------

-.. todo:: talk about deployment scenarios
+For a starting multi-node architecture, you would start with two nodes - a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the Nova database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. Ensure that the nova.conf file is identical on each node. If you find performance issues not related to database reads or writes, but due to the messaging queue backing up, you could add additional messaging services (rabbitmq).

 .. toctree::
    :maxdepth: 1

    multi.node.install
+   dbsync

 Networking
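
The paragraph above assumes one nova.conf shared verbatim by the controller and the compute node. As a rough sketch only (placeholder values; flag names are the ones shown in the install guide later in this commit), such a file might contain::

   --sql_connection=mysql://root:<password>@<cloud-controller-ip>/nova
   --logdir=/var/log/nova
   --state_path=/var/lib/nova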

View File

@@ -1,20 +1,3 @@
-..
-      Copyright 2010-2011 United States Government as represented by the
-      Administrator of the National Aeronautics and Space Administration.
-      All Rights Reserved.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
 Installing Nova on Multiple Servers
 ===================================
@@ -26,13 +9,14 @@ through that process.
 You can install multiple nodes to increase performance and availability of the OpenStack Compute installation.

-This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved in the installation and configuration scripts as of October 18th 2010. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward.
+This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved either in packaging or bug-fixing. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward.
+
+For a starting architecture, these instructions describe installing a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node.

 Requirements for a multi-node installation
 ------------------------------------------

-* You need a real database, compatible with SQLAlchemy (mysql, postgresql). There's not a specific reason to choose one over another, it basically depends what you know. MySQL is easier to do High Availability (HA) with, but people may already know Postgres. We should document both configurations, though.
+* You need a real database, compatible with SQLAlchemy (mysql, postgresql). There's not a specific reason to choose one over another, it basically depends what you know. MySQL is easier to do High Availability (HA) with, but people may already know PostgreSQL. We should document both configurations, though.
 * For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies.
 * For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL.
@@ -41,7 +25,45 @@ Assumptions
 * Networking is configured between/through the physical machines on a single subnet.
 * Installation and execution are both performed by ROOT user.

+
+Scripted Installation
+---------------------
+
+A script is available to get your OpenStack cloud running quickly. You can copy the file to the server where you want to install OpenStack Compute services - typically you would install a compute node and a cloud controller node.
+
+You must run these scripts with root permissions.
+
+From a server you intend to use as a cloud controller node, use this command to get the cloud controller script. This script is a work-in-progress and the maintainer plans to keep it up, but it is offered "as-is." Feel free to collaborate on it in GitHub - https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/.
+
+::
+
+   wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/Nova_CC_Installer_v0.1
+
+Ensure you can execute the script by modifying the permissions on the script file.
+
+::
+
+   sudo chmod 755 Nova_CC_Installer_v0.1
+
+::
+
+   sudo ./Nova_CC_Installer_v0.1
+
+Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. Copy the nova.conf from the cloud controller node to the compute node.
+
+Restart related services::
+
+   libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
+
+You can go to the `Configuration section`_ for next steps.
+
+Manual Installation - Step-by-Step
+----------------------------------
+
+The following sections show you how to install Nova manually with a cloud controller node and a separate compute node. The cloud controller node contains the database plus all nova- services, and the compute node runs nova- services only.
+
+Cloud Controller Installation
+`````````````````````````````
+
+On the cloud controller node, you install nova services and the related helper applications, and then configure with the nova.conf file. You will then copy the nova.conf file to the compute node, which you install as a second node in the `Compute Installation`_.
+
 Step 1 - Use apt-get to get the latest code
 -------------------------------------------
@@ -59,19 +81,18 @@ Step 1 - Use apt-get to get the latest code
   sudo apt-get update

-3. Install nova-pkgs (dependencies should be automatically installed).
+3. Install python required packages, nova-packages, and helper apps.

 ::

-  sudo apt-get install python-greenlet
-  sudo apt-get install nova-common nova-doc python-nova nova-api nova-network nova-objectstore nova-scheduler
+  sudo apt-get install python-greenlet python-mysqldb python-nova nova-common nova-doc nova-api nova-network nova-objectstore nova-scheduler nova-compute euca2ools unzip

 It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1!

-Step 2 Setup configuration file (installed in /etc/nova)
---------------------------------------------------------
+Step 2 Set up configuration file (installed in /etc/nova)
+---------------------------------------------------------

 1. Nova development has consolidated all config files to nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf:

 ::
@@ -81,7 +102,7 @@ Step 2 Setup configuration file (installed in /etc/nova)
   --logdir=/var/log/nova
   --state_path=/var/lib/nova

 The following items ALSO need to be defined in /etc/nova/nova.conf. I've added some explanation of the variables, as comments CANNOT be in nova.conf. There seems to be an issue with nova-manage not processing the comments/whitespace correctly:

 --sql_connection ### Location of Nova SQL DB
@@ -130,7 +151,7 @@ Detailed explanation of the following example is available above.
 The Nova config file should have its owner set to root:nova, and mode set to 0644, since they contain your MySQL server's root password. ::

   chown -R root:nova /etc/nova
   chmod 644 /etc/nova/nova.conf

 Step 3 - Setup the SQL DB (MySQL for this setup)
 ------------------------------------------------
@@ -153,10 +174,30 @@ Step 3 - Setup the SQL DB (MySQL for this setup)
   sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
   service mysql restart

-3. Network Configuration
-
-If you use FlatManager (as opposed to VlanManager that we set) as your network manager, there are some additional networking changes you'll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it's set up for you automatically.
+4. MySQL DB configuration:
+
+Create NOVA database::
+
+  mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
+
+Update the DB to include user 'root'@'%' with super user privileges::
+
+  mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;"
+
+Set MySQL root password::
+
+  mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
+
+Compute Node Installation
+`````````````````````````
+
+Repeat steps 1 and 2 from the Cloud Controller Installation section above, then configure the network for your Compute instances on the Compute node. Copy the nova.conf file from the Cloud Controller node to this node.
+
+Network Configuration
+---------------------
+
+If you use FlatManager as your network manager (as opposed to VlanManager that is shown in the nova.conf example above), there are some additional networking changes you'll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it's set up for you automatically.

 Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
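
The interfaces example itself falls outside this hunk. For orientation only, a br100 stanza on Ubuntu typically looks roughly like the following (addresses are illustrative and not taken from the committed file)::

   auto br100
   iface br100 inet static
           bridge_ports eth0
           bridge_stp off
           address 192.168.0.10
           netmask 255.255.255.0
           gateway 192.168.0.1
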
@@ -179,31 +220,24 @@ Next, restart networking to apply the changes::

   sudo /etc/init.d/networking restart

-4. MySQL DB configuration:
-
-Create NOVA database::
-
-  mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
-
-Update the DB to include user 'root'@'%' with super user privileges::
-
-  mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;"
-
-Set mySQL root password::
-
-  mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
-
-Step 4 - Setup Nova environment
--------------------------------
-
-These are the commands you run to set up a user and project::
+Configuration
+`````````````
+
+On the Compute node, you should continue with these configuration steps.
+
+Step 1 - Set up the Nova environment
+------------------------------------
+
+These are the commands you run to update the database if needed, and then set up a user and project::

+  /usr/bin/python /usr/bin/nova-manage db sync
   /usr/bin/python /usr/bin/nova-manage user admin <user_name>
   /usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
   /usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project>

 Here is an example of what this looks like with real data::

+  /usr/bin/python /usr/bin/nova-manage db sync
   /usr/bin/python /usr/bin/nova-manage user admin dub
   /usr/bin/python /usr/bin/nova-manage project create dubproject dub
   /usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255
@@ -215,7 +249,7 @@ Note: The nova-manage service assumes that the first IP address is your network

 On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. This is ONLY necessary if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device.

-Step 5 - Create Nova certifications
+Step 2 - Create Nova certifications
 -----------------------------------

 1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and all the other assorted api functions.
@@ -229,18 +263,18 @@ Step 5 - Create Nova certifications
 ::

   unzip /root/creds/novacreds.zip -d /root/creds/
   cat /root/creds/novarc >> ~/.bashrc
   source ~/.bashrc

-Step 6 - Restart all relevant services
+Step 3 - Restart all relevant services
 --------------------------------------

 Restart all six services in total, just to cover the entire spectrum::

   libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart

-Step 7 - Closing steps, and cleaning up
+Step 4 - Closing steps, and cleaning up
 ---------------------------------------

 One of the most commonly missed configuration areas is not allowing the proper access to VMs. Use the 'euca-authorize' command to enable access. Below, you will find the commands to allow 'ping' and 'ssh' to your VMs::
@@ -253,8 +287,8 @@ Another common issue is you cannot ping or SSH your instances after issusing the
   killall dnsmasq
   service nova-network restart

-Step 8 Testing the installation
----------------------------------
+Testing the Installation
+````````````````````````

 You can then use `euca2ools` to test some items::
@@ -267,13 +301,15 @@ If you have issues with the API key, you may need to re-source your creds file::

 If you don't get any immediate errors, you're successfully making calls to your cloud!

-Step 9 - Spinning up a VM for testing
--------------------------------------
+Spinning up a VM for Testing
+````````````````````````````

 (This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.)

 The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM.

+UPDATE: Due to `bug 661159 <https://bugs.launchpad.net/nova/+bug/661159>`_, we can't use images without ramdisks yet, so we can't use the classic Ubuntu cloud images from http://uec-images.ubuntu.com/releases/ yet. For the sake of this tutorial, we'll use the `ttylinux images from Scott Moser instead <http://smoser.brickies.net/ubuntu/ttylinux-uec/>`_.
+
 Download the image, and publish to your bucket:

 ::
@@ -324,5 +360,4 @@ You can determine the instance-id with `euca-describe-instances`, and the format

 For more information in creating your own custom (production ready) instance images, please visit http://wiki.openstack.org/GettingImages for more information!

 Enjoy your new private cloud, and play responsibly!

View File

@@ -159,7 +159,7 @@ To make things easier, we've provided a small image on the Rackspace CDN. Use th
   Resolving cblah2.cdn.cloudfiles.rackspacecloud.com... 208.111.196.6, 208.111.196.7
   Connecting to cblah2.cdn.cloudfiles.rackspacecloud.com|208.111.196.6|:80... connected.
   HTTP request sent, awaiting response... 200 OK
-  Length: 58520278 (56M) [appication/x-gzip]
+  Length: 58520278 (56M) [application/x-gzip]
   Saving to: `images.tgz'

   100%[======================================>] 58,520,278  14.1M/s   in 3.9s

View File

@@ -20,7 +20,7 @@ Welcome to Nova's documentation!
 Nova is a cloud computing fabric controller, the main part of an IaaS system.
 Individuals and organizations can use Nova to host and manage their own cloud
 computing systems. Nova originated as a project out of NASA Ames Research Laboratory.

 Nova is written with the following design guidelines in mind:
@@ -32,7 +32,7 @@ Nova is written with the following design guidelines in mind:
 * **API Compatibility**: Nova strives to provide API-compatible with popular systems like Amazon EC2

 This documentation is generated by the Sphinx toolkit and lives in the source
 tree. Additional documentation on Nova and other components of OpenStack can
 be found on the `OpenStack wiki`_. Also see the :doc:`community` page for
 other ways to interact with the community.

View File

@@ -11,7 +11,14 @@ use = egg:Paste#urlmap
 /services/Cloud: ec2cloud
 /services/Admin: ec2admin
 /latest: ec2metadata
-/20: ec2metadata
+/2007-01-19: ec2metadata
+/2007-03-01: ec2metadata
+/2007-08-29: ec2metadata
+/2007-10-10: ec2metadata
+/2007-12-15: ec2metadata
+/2008-02-01: ec2metadata
+/2008-09-01: ec2metadata
+/2009-04-04: ec2metadata
 /1.0: ec2metadata

 [pipeline:ec2cloud]
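
The date-stamped paths are the API version strings an EC2-style guest may put in its metadata requests, and each one is routed to the same ec2metadata app. For illustration only (not part of this change), a guest could query any of them::

   curl http://169.254.169.254/2009-04-04/meta-data/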

View File

@@ -142,9 +142,15 @@ class Reflection(object):
                 if argspec[2]:
                     args_out.insert(0, ('**%s' % argspec[2],))

+                if f.__doc__:
+                    short_doc = f.__doc__.split('\n')[0]
+                    doc = f.__doc__
+                else:
+                    short_doc = doc = _('not available')
+
                 methods['/%s/%s' % (route, k)] = {
-                        'short_doc': f.__doc__.split('\n')[0],
-                        'doc': f.__doc__,
+                        'short_doc': short_doc,
+                        'doc': doc,
                         'name': k,
                         'args': list(reversed(args_out))}
@@ -196,6 +202,8 @@ class ServiceWrapper(wsgi.Controller):
         # TODO(termie): do some basic normalization on methods
         method = getattr(self.service_handle, action)

+        # NOTE(vish): make sure we have no unicode keys for py2.6.
+        params = dict([(str(k), v) for (k, v) in params.iteritems()])
         result = method(context, **params)
         if type(result) is dict or type(result) is list:
             return self._serialize(result, req)

View File

@@ -59,7 +59,7 @@ def _gen_key(context, user_id, key_name):
     # creation before creating key_pair
     try:
         db.key_pair_get(context, user_id, key_name)
-        raise exception.Duplicate("The key_pair %s already exists"
+        raise exception.Duplicate(_("The key_pair %s already exists")
                                   % key_name)
     except exception.NotFound:
         pass
@@ -133,7 +133,7 @@
         return result

     def _get_availability_zone_by_host(self, context, host):
-        services = db.service_get_all_by_host(context, host)
+        services = db.service_get_all_by_host(context.elevated(), host)
         if len(services) > 0:
             return services[0]['availability_zone']
         return 'unknown zone'

View File

@@ -119,8 +119,8 @@ class DbDriver(object):
         for member_uid in member_uids:
             member = db.user_get(context.get_admin_context(), member_uid)
             if not member:
-                raise exception.NotFound("Project can't be created "
-                                         "because user %s doesn't exist"
+                raise exception.NotFound(_("Project can't be created "
+                                           "because user %s doesn't exist")
                                          % member_uid)
             members.add(member)

View File

@@ -146,7 +146,7 @@ class LdapDriver(object):
     def create_user(self, name, access_key, secret_key, is_admin):
         """Create a user"""
         if self.__user_exists(name):
-            raise exception.Duplicate("LDAP user %s already exists" % name)
+            raise exception.Duplicate(_("LDAP user %s already exists") % name)
         if FLAGS.ldap_user_modify_only:
             if self.__ldap_user_exists(name):
                 # Retrieve user by name
@@ -310,7 +310,7 @@ class LdapDriver(object):
     def delete_user(self, uid):
         """Delete a user"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s doesn't exist" % uid)
+            raise exception.NotFound(_("User %s doesn't exist") % uid)
         self.__remove_from_all(uid)
         if FLAGS.ldap_user_modify_only:
             # Delete attributes
@@ -432,15 +432,15 @@ class LdapDriver(object):
                        description, member_uids=None):
         """Create a group"""
         if self.__group_exists(group_dn):
-            raise exception.Duplicate("Group can't be created because "
-                                      "group %s already exists" % name)
+            raise exception.Duplicate(_("Group can't be created because "
+                                        "group %s already exists") % name)
         members = []
         if member_uids is not None:
             for member_uid in member_uids:
                 if not self.__user_exists(member_uid):
-                    raise exception.NotFound("Group can't be created "
-                                             "because user %s doesn't exist" %
-                                             member_uid)
+                    raise exception.NotFound(_("Group can't be created "
+                                               "because user %s doesn't exist")
+                                               % member_uid)
                 members.append(self.__uid_to_dn(member_uid))
         dn = self.__uid_to_dn(uid)
         if not dn in members:
@@ -455,8 +455,8 @@ class LdapDriver(object):
     def __is_in_group(self, uid, group_dn):
         """Check if user is in group"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be searched in group "
-                                     "because the user doesn't exist" % uid)
+            raise exception.NotFound(_("User %s can't be searched in group "
+                                       "because the user doesn't exist") % uid)
         if not self.__group_exists(group_dn):
             return False
         res = self.__find_object(group_dn,
@@ -467,10 +467,10 @@ class LdapDriver(object):
     def __add_to_group(self, uid, group_dn):
         """Add user to group"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be added to the group "
-                                     "because the user doesn't exist" % uid)
+            raise exception.NotFound(_("User %s can't be added to the group "
+                                       "because the user doesn't exist") % uid)
         if not self.__group_exists(group_dn):
-            raise exception.NotFound("The group at dn %s doesn't exist" %
+            raise exception.NotFound(_("The group at dn %s doesn't exist") %
                                      group_dn)
         if self.__is_in_group(uid, group_dn):
             raise exception.Duplicate(_("User %(uid)s is already a member of "
@@ -481,15 +481,15 @@ class LdapDriver(object):
     def __remove_from_group(self, uid, group_dn):
         """Remove user from group"""
         if not self.__group_exists(group_dn):
-            raise exception.NotFound("The group at dn %s doesn't exist" %
-                                     group_dn)
+            raise exception.NotFound(_("The group at dn %s doesn't exist")
+                                     % group_dn)
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be removed from the "
-                                     "group because the user doesn't exist" %
-                                     uid)
+            raise exception.NotFound(_("User %s can't be removed from the "
+                                       "group because the user doesn't exist")
+                                       % uid)
         if not self.__is_in_group(uid, group_dn):
-            raise exception.NotFound("User %s is not a member of the group" %
-                                     uid)
+            raise exception.NotFound(_("User %s is not a member of the group")
+                                     % uid)
         # NOTE(vish): remove user from group and any sub_groups
         sub_dns = self.__find_group_dns_with_member(group_dn, uid)
         for sub_dn in sub_dns:
@@ -509,8 +509,9 @@ class LdapDriver(object):
     def __remove_from_all(self, uid):
         """Remove user from all roles and projects"""
         if not self.__user_exists(uid):
-            raise exception.NotFound("User %s can't be removed from all "
-                                     "because the user doesn't exist" % uid)
+            raise exception.NotFound(_("User %s can't be removed from all "
+                                       "because the user doesn't exist")
+                                       % uid)
         role_dns = self.__find_group_dns_with_member(
                 FLAGS.role_project_subtree, uid)
         for role_dn in role_dns:

View File

@@ -249,13 +249,16 @@ class API(base.Base):
         # ..then we distill the security groups to which they belong..
         security_groups = set()
         for rule in security_group_rules:
-            security_groups.add(rule['parent_group_id'])
+            security_group = self.db.security_group_get(
+                                                    context,
+                                                    rule['parent_group_id'])
+            security_groups.add(security_group)

         # ..then we find the instances that are members of these groups..
         instances = set()
         for security_group in security_groups:
             for instance in security_group['instances']:
-                instances.add(instance['id'])
+                instances.add(instance)

         # ...then we find the hosts where they live...
         hosts = set()

View File

@@ -67,7 +67,7 @@ class ConsoleProxyManager(manager.Manager):
                                                       pool['id'],
                                                       instance_id)
         except exception.NotFound:
-            logging.debug("Adding console")
+            logging.debug(_("Adding console"))
             if not password:
                 password = self.driver.generate_password()
             if not port:

View File

@@ -96,7 +96,7 @@ class XVPConsoleProxy(object):
         return os.urandom(length * 2).encode('base64')[:length]

     def _rebuild_xvp_conf(self, context):
-        logging.debug("Rebuilding xvp conf")
+        logging.debug(_("Rebuilding xvp conf"))
         pools = [pool for pool in
             db.console_pool_get_all_by_host_type(context, self.host,
                                                   self.console_type)
@@ -113,12 +113,12 @@ class XVPConsoleProxy(object):
         self._xvp_restart()

     def _write_conf(self, config):
-        logging.debug('Re-wrote %s' % FLAGS.console_xvp_conf)
+        logging.debug(_('Re-wrote %s') % FLAGS.console_xvp_conf)
         with open(FLAGS.console_xvp_conf, 'w') as cfile:
             cfile.write(config)

     def _xvp_stop(self):
-        logging.debug("Stopping xvp")
+        logging.debug(_("Stopping xvp"))
         pid = self._xvp_pid()
         if not pid:
             return
@@ -131,19 +131,19 @@ class XVPConsoleProxy(object):
     def _xvp_start(self):
         if self._xvp_check_running():
             return
-        logging.debug("Starting xvp")
+        logging.debug(_("Starting xvp"))
        try:
             utils.execute('xvp -p %s -c %s -l %s' %
                           (FLAGS.console_xvp_pid,
                            FLAGS.console_xvp_conf,
                            FLAGS.console_xvp_log))
        except exception.ProcessExecutionError, err:
-            logging.error("Error starting xvp: %s" % err)
+            logging.error(_("Error starting xvp: %s") % err)

     def _xvp_restart(self):
-        logging.debug("Restarting xvp")
+        logging.debug(_("Restarting xvp"))
         if not self._xvp_check_running():
-            logging.debug("xvp not running...")
+            logging.debug(_("xvp not running..."))
             self._xvp_start()
         else:
             pid = self._xvp_pid()

View File

@@ -778,7 +778,7 @@ def instance_get_by_id(context, instance_id):
         result = session.query(models.Instance).\
                          options(joinedload_all('fixed_ip.floating_ips')).\
                          options(joinedload('security_groups')).\
-                         options(joinedload_all('fixed_ip.floating_ips')).\
+                         options(joinedload_all('fixed_ip.network')).\
                          filter_by(id=instance_id).\
                          filter_by(deleted=can_read_deleted(context)).\
                          first()
@@ -786,6 +786,7 @@ def instance_get_by_id(context, instance_id):
         result = session.query(models.Instance).\
                          options(joinedload('security_groups')).\
                          options(joinedload_all('fixed_ip.floating_ips')).\
+                         options(joinedload_all('fixed_ip.network')).\
                          filter_by(project_id=context.project_id).\
                          filter_by(id=instance_id).\
                          filter_by(deleted=False).\

View File

@@ -40,15 +40,15 @@ from nova import version
 FLAGS = flags.FLAGS

 flags.DEFINE_string('logging_context_format_string',
-                    '(%(name)s %(nova_version)s): %(levelname)s '
+                    '%(asctime)s %(levelname)s %(name)s '
                     '[%(request_id)s %(user)s '
                     '%(project)s] %(message)s',
-                    'format string to use for log messages')
+                    'format string to use for log messages with context')

 flags.DEFINE_string('logging_default_format_string',
-                    '(%(name)s %(nova_version)s): %(levelname)s [N/A] '
+                    '%(asctime)s %(levelname)s %(name)s [-] '
                     '%(message)s',
-                    'format string to use for log messages')
+                    'format string to use for log messages without context')

 flags.DEFINE_string('logging_debug_format_suffix',
                     'from %(processName)s (pid=%(process)d) %(funcName)s'
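
Illustrative only: roughly how the new default (no-context) format string renders with the stdlib logging module. Nova's own logger also injects request_id/user/project for the context format, which is omitted in this sketch::

   import logging

   handler = logging.StreamHandler()
   handler.setFormatter(logging.Formatter(
       '%(asctime)s %(levelname)s %(name)s [-] %(message)s'))
   log = logging.getLogger('nova.service')
   log.addHandler(handler)
   log.setLevel(logging.INFO)
   log.info('Starting compute node')
   # e.g. 2011-01-21 16:10:26,000 INFO nova.service [-] Starting compute node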

View File

@@ -212,7 +212,7 @@ class NetworkManager(manager.Manager):
     def release_fixed_ip(self, context, mac, address):
         """Called by dhcp-bridge when ip is released."""
-        LOG.debug("Releasing IP %s", address, context=context)
+        LOG.debug(_("Releasing IP %s"), address, context=context)
         fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
         instance_ref = fixed_ip_ref['instance']
         if not instance_ref:

View File

@@ -391,8 +391,8 @@ class ImagesResource(resource.Resource):
         image_location = get_argument(request, 'image_location', u'')

         image_path = os.path.join(FLAGS.images_path, image_id)
-        if not image_path.startswith(FLAGS.images_path) or \
-           os.path.exists(image_path):
+        if ((not image_path.startswith(FLAGS.images_path)) or
+                os.path.exists(image_path)):
             LOG.audit(_("Not authorized to upload image: invalid directory "
                         "%s"),
                       image_path, context=request.context)

View File

@@ -259,22 +259,25 @@ class Image(object):
                                   process_input=encrypted_key,
                                   check_exit_code=False)
         if err:
-            raise exception.Error("Failed to decrypt private key: %s" % err)
+            raise exception.Error(_("Failed to decrypt private key: %s")
+                                  % err)
         iv, err = utils.execute(
             'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
             process_input=encrypted_iv,
             check_exit_code=False)
         if err:
-            raise exception.Error("Failed to decrypt initialization "
-                                  "vector: %s" % err)
+            raise exception.Error(_("Failed to decrypt initialization "
+                                    "vector: %s") % err)

         _out, err = utils.execute(
             'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
             % (encrypted_filename, key, iv, decrypted_filename),
             check_exit_code=False)
         if err:
-            raise exception.Error("Failed to decrypt image file %s : %s" %
-                                  (encrypted_filename, err))
+            raise exception.Error(_("Failed to decrypt image file "
+                                    "%(image_file)s: %(err)s") %
+                                    {'image_file': encrypted_filename,
+                                     'err': err})

     @staticmethod
     def untarzip_image(path, filename):

View File

@@ -344,7 +344,7 @@ def call(context, topic, msg):
 def cast(context, topic, msg):
     """Sends a message on a topic without waiting for a response"""
-    LOG.debug("Making asynchronous cast...")
+    LOG.debug(_("Making asynchronous cast..."))
     _pack_context(msg, context)
     conn = Connection.instance()
     publisher = TopicPublisher(connection=conn, topic=topic)

View File

@@ -48,7 +48,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             service = db.service_get_by_args(context.elevated(), host,
                                              'nova-compute')
             if not self.service_is_up(service):
-                raise driver.WillNotSchedule("Host %s is not alive" % host)
+                raise driver.WillNotSchedule(_("Host %s is not alive") % host)

         # TODO(vish): this probably belongs in the manager, if we
         #             can generalize this somehow
@@ -80,7 +80,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             service = db.service_get_by_args(context.elevated(), host,
                                              'nova-volume')
             if not self.service_is_up(service):
-                raise driver.WillNotSchedule("Host %s not available" % host)
+                raise driver.WillNotSchedule(_("Host %s not available") % host)

         # TODO(vish): this probably belongs in the manager, if we
         #             can generalize this somehow

View File

@@ -38,6 +38,7 @@ from nova import log as logging
 from nova import flags
 from nova import rpc
 from nova import utils
+from nova import version

 FLAGS = flags.FLAGS
@@ -156,7 +157,8 @@ class Service(object):
             report_interval = FLAGS.report_interval
         if not periodic_interval:
             periodic_interval = FLAGS.periodic_interval
-        logging.audit(_("Starting %s node"), topic)
+        logging.audit(_("Starting %s node (version %s)"), topic,
+                      version.version_string_with_vcs())
         service_obj = cls(host, binary, topic, manager,
                           report_interval, periodic_interval)

View File

@@ -156,7 +156,7 @@ def WrapTwistedOptions(wrapped):
         try:
             self.parseArgs(*argv)
         except TypeError:
-            raise usage.UsageError("Wrong number of arguments.")
+            raise usage.UsageError(_("Wrong number of arguments."))
         self.postOptions()
         return args
@@ -220,7 +220,7 @@ def stop(pidfile):
             time.sleep(0.1)
     except OSError, err:
         err = str(err)
-        if err.find("No such process") > 0:
+        if err.find(_("No such process")) > 0:
             if os.path.exists(pidfile):
                 os.remove(pidfile)
         else:

View File

@@ -511,7 +511,6 @@ class LibvirtConnection(object):
         base_dir = os.path.join(FLAGS.instances_path, '_base')
         if not os.path.exists(base_dir):
             os.mkdir(base_dir)
-            os.chmod(base_dir, 0777)
         base = os.path.join(base_dir, fname)
         if not os.path.exists(base):
             fn(target=base, *args, **kwargs)
@@ -542,7 +541,6 @@ class LibvirtConnection(object):
         # ensure directories exist and are writable
         utils.execute('mkdir -p %s' % basepath(suffix=''))
-        utils.execute('chmod 0777 %s' % basepath(suffix=''))

         LOG.info(_('instance %s: Creating image'), inst['name'])
         f = open(basepath('libvirt.xml'), 'w')
@@ -734,7 +732,8 @@ class LibvirtConnection(object):
                 'cpu_time': cpu_time}

     def get_diagnostics(self, instance_name):
-        raise exception.APIError("diagnostics are not supported for libvirt")
+        raise exception.APIError(_("diagnostics are not supported "
+                                   "for libvirt"))

     def get_disks(self, instance_name):
         """

View File

@@ -100,6 +100,14 @@ class VolumeDriver(object):
     def delete_volume(self, volume):
         """Deletes a logical volume."""
+        try:
+            self._try_execute("sudo lvdisplay %s/%s" %
+                              (FLAGS.volume_group,
+                               volume['name']))
+        except Exception as e:
+            # If the volume isn't present, then don't attempt to delete
+            return True
+
         self._try_execute("sudo lvremove -f %s/%s" %
                           (FLAGS.volume_group,
                            volume['name']))
@@ -218,8 +226,14 @@ class ISCSIDriver(VolumeDriver):
     def ensure_export(self, context, volume):
         """Synchronously recreates an export for a logical volume."""
-        iscsi_target = self.db.volume_get_iscsi_target_num(context,
-                                                           volume['id'])
+        try:
+            iscsi_target = self.db.volume_get_iscsi_target_num(context,
+                                                               volume['id'])
+        except exception.NotFound:
+            LOG.info(_("Skipping ensure_export. No iscsi_target " +
+                       "provisioned for volume: %d"), volume['id'])
+            return
+
         iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
         volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
         self._sync_exec("sudo ietadm --op new "
@@ -258,8 +272,23 @@ class ISCSIDriver(VolumeDriver):
     def remove_export(self, context, volume):
         """Removes an export for a logical volume."""
-        iscsi_target = self.db.volume_get_iscsi_target_num(context,
-                                                           volume['id'])
+        try:
+            iscsi_target = self.db.volume_get_iscsi_target_num(context,
+                                                               volume['id'])
+        except exception.NotFound:
+            LOG.info(_("Skipping remove_export. No iscsi_target " +
+                       "provisioned for volume: %d"), volume['id'])
+            return
+
+        try:
+            # ietadm show will exit with an error
+            # this export has already been removed
+            self._execute("sudo ietadm --op show --tid=%s " % iscsi_target)
+        except Exception as e:
+            LOG.info(_("Skipping remove_export. No iscsi_target " +
+                       "is presently exported for volume: %d"), volume['id'])
+            return
+
         self._execute("sudo ietadm --op delete --tid=%s "
                       "--lun=0" % iscsi_target)
         self._execute("sudo ietadm --op delete --tid=%s" %

View File

@@ -84,7 +84,10 @@ class VolumeManager(manager.Manager):
         volumes = self.db.volume_get_all_by_host(ctxt, self.host)
         LOG.debug(_("Re-exporting %s volumes"), len(volumes))
         for volume in volumes:
-            self.driver.ensure_export(ctxt, volume)
+            if volume['status'] in ['available', 'in-use']:
+                self.driver.ensure_export(ctxt, volume)
+            else:
+                LOG.info(_("volume %s: skipping export"), volume_ref['name'])

     def create_volume(self, context, volume_id):
         """Creates and exports the volume."""
@@ -99,14 +102,19 @@ class VolumeManager(manager.Manager):
         # before passing it to the driver.
         volume_ref['host'] = self.host

-        vol_name = volume_ref['name']
-        vol_size = volume_ref['size']
-        LOG.debug(_("volume %(vol_name)s: creating lv of size %(vol_size)sG")
-                  % locals())
-        self.driver.create_volume(volume_ref)
-
-        LOG.debug(_("volume %s: creating export"), volume_ref['name'])
-        self.driver.create_export(context, volume_ref)
+        try:
+            vol_name = volume_ref['name']
+            vol_size = volume_ref['size']
+            LOG.debug(_("volume %(vol_name)s: creating lv of"
+                        " size %(vol_size)sG") % locals())
+            self.driver.create_volume(volume_ref)
+
+            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
+            self.driver.create_export(context, volume_ref)
+        except Exception as e:
+            self.db.volume_update(context,
+                                  volume_ref['id'], {'status': 'error'})
+            raise e

         now = datetime.datetime.utcnow()
         self.db.volume_update(context,
@@ -123,10 +131,18 @@ class VolumeManager(manager.Manager):
             raise exception.Error(_("Volume is still attached"))
         if volume_ref['host'] != self.host:
             raise exception.Error(_("Volume is not local to this node"))
-        LOG.debug(_("volume %s: removing export"), volume_ref['name'])
-        self.driver.remove_export(context, volume_ref)
-        LOG.debug(_("volume %s: deleting"), volume_ref['name'])
-        self.driver.delete_volume(volume_ref)
+
+        try:
+            LOG.debug(_("volume %s: removing export"), volume_ref['name'])
+            self.driver.remove_export(context, volume_ref)
+            LOG.debug(_("volume %s: deleting"), volume_ref['name'])
+            self.driver.delete_volume(volume_ref)
+        except Exception as e:
+            self.db.volume_update(context,
+                                  volume_ref['id'],
+                                  {'status': 'error_deleting'})
+            raise e
+
         self.db.volume_destroy(context, volume_id)
         LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
         return True
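
The two try/except blocks added above share one intent: if a driver call fails, the record keeps a visible 'error' or 'error_deleting' status rather than staying stuck in its in-progress state. A minimal sketch of the pattern, using the same names as the hunk::

   try:
       driver.create_volume(volume_ref)
       driver.create_export(context, volume_ref)
   except Exception:
       # surface the failure on the volume record, then re-raise
       db.volume_update(context, volume_ref['id'], {'status': 'error'})
       raise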

View File

@@ -144,7 +144,7 @@ class Application(object):
         See the end of http://pythonpaste.org/webob/modules/dec.html
         for more info.
         """
-        raise NotImplementedError("You must implement __call__")
+        raise NotImplementedError(_("You must implement __call__"))


 class Middleware(Application):

View File

@@ -19,6 +19,8 @@
# that we need. # that we need.
# #
import gettext
gettext.install('nova', unicode=1)
import httplib import httplib
import logging import logging
import logging.handlers import logging.handlers
@@ -60,7 +62,7 @@ def ignore_failure(func, *args, **kwargs):
     try:
         return func(*args, **kwargs)
     except XenAPI.Failure, e:
-        logging.error('Ignoring XenAPI.Failure %s', e)
+        logging.error(_('Ignoring XenAPI.Failure %s'), e)
         return None
@@ -78,19 +80,25 @@ def validate_exists(args, key, default=None):
""" """
if key in args: if key in args:
if len(args[key]) == 0: if len(args[key]) == 0:
raise ArgumentError('Argument %r value %r is too short.' % raise ArgumentError(_('Argument %(key)s value %(value)s is too '
(key, args[key])) 'short.') %
{'key': key,
'value': args[key]})
if not ARGUMENT_PATTERN.match(args[key]): if not ARGUMENT_PATTERN.match(args[key]):
raise ArgumentError('Argument %r value %r contains invalid ' raise ArgumentError(_('Argument %(key)s value %(value)s contains '
'characters.' % (key, args[key])) 'invalid characters.') %
{'key': key,
'value': args[key]})
if args[key][0] == '-': if args[key][0] == '-':
raise ArgumentError('Argument %r value %r starts with a hyphen.' raise ArgumentError(_('Argument %(key)s value %(value)s starts '
% (key, args[key])) 'with a hyphen.') %
{'key': key,
'value': args[key]})
return args[key] return args[key]
elif default is not None: elif default is not None:
return default return default
else: else:
raise ArgumentError('Argument %s is required.' % key) raise ArgumentError(_('Argument %s is required.') % key)
def validate_bool(args, key, default=None): def validate_bool(args, key, default=None):
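Throughout this hunk the messages also move from positional %r formatting to named %(key)s/%(value)s placeholders filled from a dict. Positional arguments fix the word order in the code; named placeholders let a translated string reorder or repeat the values freely. A quick illustration, where the "translated" string is purely hypothetical:

params = {'key': 'instance_id', 'value': 'x'}

english = 'Argument %(key)s value %(value)s is too short.'
translated = 'Value %(value)s for argument %(key)s is too short.'  # hypothetical

print(english % params)
print(translated % params)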
@@ -105,8 +113,10 @@ def validate_bool(args, key, default=None):
     elif value.lower() == 'false':
         return False
     else:
-        raise ArgumentError("Argument %s may not take value %r. "
-                            "Valid values are ['true', 'false']." % (key, value))
+        raise ArgumentError(_("Argument %(key)s may not take value %(value)s. "
+                              "Valid values are ['true', 'false'].")
+                            % {'key': key,
+                               'value': value})


 def exists(args, key):
@@ -116,7 +126,7 @@ def exists(args, key):
     if key in args:
         return args[key]
     else:
-        raise ArgumentError('Argument %s is required.' % key)
+        raise ArgumentError(_('Argument %s is required.') % key)


 def optional(args, key):
@@ -149,8 +159,13 @@ def create_vdi(session, sr_ref, name_label, virtual_size, read_only):
                              'other_config': {},
                              'sm_config': {},
                              'tags': []})
-    logging.debug('Created VDI %s (%s, %s, %s) on %s.', vdi_ref, name_label,
-                  virtual_size, read_only, sr_ref)
+    logging.debug(_('Created VDI %(vdi_ref)s (%(label)s, %(size)s, '
+                    '%(read_only)s) on %(sr_ref)s.') %
+                  {'vdi_ref': vdi_ref,
+                   'label': name_label,
+                   'size': virtual_size,
+                   'read_only': read_only,
+                   'sr_ref': sr_ref})
     return vdi_ref
@@ -169,19 +184,19 @@ def with_vdi_in_dom0(session, vdi, read_only, f):
     vbd_rec['qos_algorithm_type'] = ''
     vbd_rec['qos_algorithm_params'] = {}
     vbd_rec['qos_supported_algorithms'] = []
-    logging.debug('Creating VBD for VDI %s ... ', vdi)
+    logging.debug(_('Creating VBD for VDI %s ... '), vdi)
     vbd = session.xenapi.VBD.create(vbd_rec)
-    logging.debug('Creating VBD for VDI %s done.', vdi)
+    logging.debug(_('Creating VBD for VDI %s done.'), vdi)
     try:
-        logging.debug('Plugging VBD %s ... ', vbd)
+        logging.debug(_('Plugging VBD %s ... '), vbd)
         session.xenapi.VBD.plug(vbd)
-        logging.debug('Plugging VBD %s done.', vbd)
+        logging.debug(_('Plugging VBD %s done.'), vbd)
         return f(session.xenapi.VBD.get_device(vbd))
     finally:
-        logging.debug('Destroying VBD for VDI %s ... ', vdi)
+        logging.debug(_('Destroying VBD for VDI %s ... '), vdi)
         vbd_unplug_with_retry(session, vbd)
         ignore_failure(session.xenapi.VBD.destroy, vbd)
-        logging.debug('Destroying VBD for VDI %s done.', vdi)
+        logging.debug(_('Destroying VBD for VDI %s done.'), vdi)


 def vbd_unplug_with_retry(session, vbd):
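with_vdi_in_dom0() above is essentially a hand-rolled context manager: plug a VBD, hand the device to the callback, and let the finally block unplug and destroy the VBD whether or not the callback raised. The same shape reduced to generic callables (illustrative names, not XenAPI calls):

def with_device(plug, unplug, func):
    device = plug()
    try:
        return func(device)
    finally:
        # runs on the success path and on the exception path alike
        unplug(device)


if __name__ == '__main__':
    print(with_device(lambda: '/dev/xvdb',
                      lambda dev: None,
                      lambda dev: 'read from %s' % dev))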
@@ -192,19 +207,20 @@ def vbd_unplug_with_retry(session, vbd):
     while True:
         try:
             session.xenapi.VBD.unplug(vbd)
-            logging.debug('VBD.unplug successful first time.')
+            logging.debug(_('VBD.unplug successful first time.'))
             return
         except XenAPI.Failure, e:
             if (len(e.details) > 0 and
                 e.details[0] == 'DEVICE_DETACH_REJECTED'):
-                logging.debug('VBD.unplug rejected: retrying...')
+                logging.debug(_('VBD.unplug rejected: retrying...'))
                 time.sleep(1)
             elif (len(e.details) > 0 and
                   e.details[0] == 'DEVICE_ALREADY_DETACHED'):
-                logging.debug('VBD.unplug successful eventually.')
+                logging.debug(_('VBD.unplug successful eventually.'))
                 return
             else:
-                logging.error('Ignoring XenAPI.Failure in VBD.unplug: %s', e)
+                logging.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'),
+                              e)
                 return
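The retry loop above keeps trying only while XenAPI reports DEVICE_DETACH_REJECTED (the device is still busy), treats DEVICE_ALREADY_DETACHED as success, and gives up on anything else. A stripped-down sketch of that policy, with XenFailure standing in for XenAPI.Failure:

import time


class XenFailure(Exception):
    def __init__(self, details):
        super(XenFailure, self).__init__(details)
        self.details = details


def unplug_with_retry(unplug, delay=1):
    while True:
        try:
            unplug()
            return
        except XenFailure as e:
            code = e.details[0] if e.details else None
            if code == 'DEVICE_DETACH_REJECTED':
                time.sleep(delay)   # still busy: try again
            elif code == 'DEVICE_ALREADY_DETACHED':
                return              # someone else already detached it
            else:
                return              # any other failure is ignored, as above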
View File
@@ -60,7 +60,8 @@ class NovaTestRunner(core.TextTestRunner):
 if __name__ == '__main__':
     c = config.Config(stream=sys.stdout,
                       env=os.environ,
-                      verbosity=3)
+                      verbosity=3,
+                      plugins=core.DefaultPluginManager())
     runner = NovaTestRunner(stream=c.stream,
                             verbosity=c.verbosity,
View File
@@ -31,46 +31,44 @@ always_venv=0
 never_venv=0
 force=0
 noseargs=
+wrapper=""

 for arg in "$@"; do
   process_option $arg
 done

+function run_tests {
+  # Just run the test suites in current environment
+  ${wrapper} rm -f nova.sqlite
+  ${wrapper} $NOSETESTS 2> run_tests.err.log
+}
+
 NOSETESTS="python run_tests.py $noseargs"

-if [ $never_venv -eq 1 ]; then
-  # Just run the test suites in current environment
-  rm -f nova.sqlite
-  $NOSETESTS 2> run_tests.err.log
-  exit
-fi
-
-# Remove the virtual environment if --force used
-if [ $force -eq 1 ]; then
-  echo "Cleaning virtualenv..."
-  rm -rf ${venv}
-fi
-
-if [ -e ${venv} ]; then
-  ${with_venv} rm -f nova.sqlite
-  ${with_venv} $NOSETESTS 2> run_tests.err.log
-else
-  if [ $always_venv -eq 1 ]; then
-    # Automatically install the virtualenv
-    python tools/install_venv.py
-  else
-    echo -e "No virtual environment found...create one? (Y/n) \c"
-    read use_ve
-    if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
-      # Install the virtualenv and run the test suite in it
-      python tools/install_venv.py
-    else
-      rm -f nova.sqlite
-      $NOSETESTS 2> run_tests.err.log
-      exit
-    fi
-  fi
-  ${with_venv} rm -f nova.sqlite
-  ${with_venv} $NOSETESTS 2> run_tests.err.log
-fi
+if [ $never_venv -eq 0 ]
+then
+  # Remove the virtual environment if --force used
+  if [ $force -eq 1 ]; then
+    echo "Cleaning virtualenv..."
+    rm -rf ${venv}
+  fi
+  if [ -e ${venv} ]; then
+    wrapper="${with_venv}"
+  else
+    if [ $always_venv -eq 1 ]; then
+      # Automatically install the virtualenv
+      python tools/install_venv.py
+      wrapper="${with_venv}"
+    else
+      echo -e "No virtual environment found...create one? (Y/n) \c"
+      read use_ve
+      if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
+        # Install the virtualenv and run the test suite in it
+        python tools/install_venv.py
+        wrapper=${with_venv}
+      fi
+    fi
+  fi
+fi
+
+run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py bin/* nova setup.py || exit 1
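The rewritten script collapses the duplicated nosetests invocations into one run_tests function plus a wrapper prefix: the branches only decide whether wrapper stays empty or becomes the with_venv helper, and a single call site does the work (followed by the new pep8 check). The same idea in Python terms, with the with_venv path shown only as a commented, hypothetical example:

import subprocess


def run_tests(noseargs='', wrapper=None):
    prefix = wrapper or []          # empty prefix = current environment
    subprocess.call(prefix + ['rm', '-f', 'nova.sqlite'])
    return subprocess.call(prefix + ['python', 'run_tests.py'] + noseargs.split())


# run_tests()                                 # run directly
# run_tests(wrapper=['tools/with_venv.sh'])   # hypothetical virtualenv wrapper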
View File
@@ -41,6 +41,7 @@ class local_BuildDoc(BuildDoc):
         self.finalize_options()
         BuildDoc.run(self)

+
 class local_sdist(sdist):
     """Customized sdist hook - builds the ChangeLog file from VC first"""
@@ -57,17 +58,17 @@ class local_sdist(sdist):
             changelog_file.write(str_dict_replace(changelog, mailmap))
         sdist.run(self)

-nova_cmdclass= { 'sdist': local_sdist,
-        'build_sphinx' : local_BuildDoc }
+nova_cmdclass = {'sdist': local_sdist,
+                 'build_sphinx': local_BuildDoc}
 try:
     from babel.messages import frontend as babel
     nova_cmdclass['compile_catalog'] = babel.compile_catalog
     nova_cmdclass['extract_messages'] = babel.extract_messages
     nova_cmdclass['init_catalog'] = babel.init_catalog
     nova_cmdclass['update_catalog'] = babel.update_catalog
 except:
     pass

 setup(name='nova',
       version=version.canonical_version_string(),
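The cmdclass mapping starts with the local sdist and Sphinx hooks and only picks up Babel's catalog commands when python-babel can be imported, so an install without Babel still works. A condensed sketch of that optional-dependency pattern (catching ImportError explicitly is a slightly tighter variant of the bare except used above):

cmdclass = {}

try:
    from babel.messages import frontend as babel
except ImportError:
    babel = None

if babel is not None:
    cmdclass['compile_catalog'] = babel.compile_catalog
    cmdclass['extract_messages'] = babel.extract_messages
    cmdclass['init_catalog'] = babel.init_catalog
    cmdclass['update_catalog'] = babel.update_catalog

print(sorted(cmdclass))  # [] when Babel is not installed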