merged from trunk

Andy Smith
2011-01-04 12:57:18 -08:00
49 changed files with 1018 additions and 772 deletions

View File

@@ -6,6 +6,7 @@ keys
networks
nova.sqlite
CA/cacert.pem
+CA/crl.pem
CA/index.txt*
CA/openssl.cnf
CA/serial*

View File

@@ -24,7 +24,9 @@
<todd@ansolabs.com> <todd@rubidine.com>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
-# These are from people who failed to set a proper committer
-. <root@tonbuntu>
-. <laner@controller>
-. <root@ubuntu>
+<vishvananda@gmail.com> <root@ubuntu>
+<sleepsonthefloor@gmail.com> <root@tonbuntu>
+<rlane@wikimedia.org> <laner@controller>
+<rconradharris@gmail.com> <rick.harris@rackspace.com>
+<corywright@gmail.com> <cory.wright@rackspace.com>
+<ant@openstack.org> <amesserl@rackspace.com>

View File

@@ -1,9 +1,12 @@
Andy Smith <code@term.ie>
Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
+Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Chris Behrens <cbehrens@codestud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
+Cory Wright <corywright@gmail.com>
+David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
@@ -24,7 +27,10 @@ Michael Gundlach <michael.gundlach@rackspace.com>
Monty Taylor <mordred@inaugust.com>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
+Rick Harris <rconradharris@gmail.com>
+Ryan Lane <rlane@wikimedia.org>
Ryan Lucio <rlucio@internap.com>
+Salvatore Orlando <salvatore.orlando@eu.citrix.com>
Sandy Walsh <sandy.walsh@rackspace.com>
Soren Hansen <soren.hansen@rackspace.com>
Thierry Carrez <thierry@openstack.org>
@@ -33,3 +39,4 @@ Trey Morris <trey.morris@rackspace.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>

bin/nova-api-paste Executable file
View File

@@ -0,0 +1,109 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova API."""
import gettext
import logging
import os
import sys
from paste import deploy
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import flags
from nova import wsgi
LOG = logging.getLogger('nova.api')
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
FLAGS = flags.FLAGS
API_ENDPOINTS = ['ec2', 'openstack']
def load_configuration(paste_config):
"""Load the paste configuration from the config file and return it."""
config = None
# Try each known name to get the global DEFAULTS, which will give ports
for name in API_ENDPOINTS:
try:
config = deploy.appconfig("config:%s" % paste_config, name=name)
except LookupError:
pass
if config:
verbose = config.get('verbose', None)
if verbose:
FLAGS.verbose = int(verbose) == 1
if FLAGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)
return config
LOG.debug(_("Paste config at %s has no secion for known apis"),
paste_config)
print _("Paste config at %s has no secion for any known apis") % \
paste_config
os.exit(1)
def launch_api(paste_config_file, section, server, port, host):
"""Launch an api server from the specified port and IP."""
LOG.debug(_("Launching %s api on %s:%s"), section, host, port)
app = deploy.loadapp('config:%s' % paste_config_file, name=section)
server.start(app, int(port), host)
def run_app(paste_config_file):
LOG.debug(_("Using paste.deploy config at: %s"), configfile)
config = load_configuration(paste_config_file)
LOG.debug(_("Configuration: %r"), config)
server = wsgi.Server()
ip = config.get('host', '0.0.0.0')
for api in API_ENDPOINTS:
port = config.get("%s_port" % api, None)
if not port:
continue
host = config.get("%s_host" % api, ip)
        launch_api(paste_config_file, api, server, port, host)
LOG.debug(_("All api servers launched, now waiting"))
server.wait()
if __name__ == '__main__':
FLAGS(sys.argv)
configfiles = ['/etc/nova/nova-api.conf']
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
configfiles.insert(0,
os.path.join(possible_topdir, 'etc', 'nova-api.conf'))
for configfile in configfiles:
if os.path.exists(configfile):
run_app(configfile)
break
else:
LOG.debug(_("Skipping missing configuration: %s"), configfile)

View File

@@ -22,6 +22,7 @@
import eventlet
eventlet.monkey_patch()
+import gettext
import os
import sys
@@ -33,6 +34,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)
+gettext.install('nova', unicode=1)
from nova import api
from nova import flags
from nova import service

View File

@@ -110,7 +110,6 @@ def main():
    FLAGS.num_networks = 5
    path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                        '..',
-                                       '_trial_temp',
                                        'nova.sqlite'))
    FLAGS.sql_connection = 'sqlite:///%s' % path
    action = argv[1]

View File

@@ -53,6 +53,7 @@
CLI interface for nova management.
"""

+import datetime
import gettext
import logging
import os
@@ -452,6 +453,52 @@ class NetworkCommands(object):
                              int(network_size), int(vlan_start),
                              int(vpn_start))

+
+class ServiceCommands(object):
+    """Enable and disable running services"""
+
+    def list(self, host=None, service=None):
+        """Show a list of all running services. Filter by host & service name.
+        args: [host] [service]"""
+        ctxt = context.get_admin_context()
+        now = datetime.datetime.utcnow()
+        services = db.service_get_all(ctxt)
+        if host:
+            services = [s for s in services if s['host'] == host]
+        if service:
+            services = [s for s in services if s['binary'] == service]
+        for svc in services:
+            delta = now - (svc['updated_at'] or svc['created_at'])
+            alive = (delta.seconds <= 15)
+            art = (alive and ":-)") or "XXX"
+            active = 'enabled'
+            if svc['disabled']:
+                active = 'disabled'
+            print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'],
+                                              active, art,
+                                              svc['updated_at'])
+
+    def enable(self, host, service):
+        """Enable scheduling for a service
+        args: host service"""
+        ctxt = context.get_admin_context()
+        svc = db.service_get_by_args(ctxt, host, service)
+        if not svc:
+            print "Unable to find service"
+            return
+        db.service_update(ctxt, svc['id'], {'disabled': False})
+
+    def disable(self, host, service):
+        """Disable scheduling for a service
+        args: host service"""
+        ctxt = context.get_admin_context()
+        svc = db.service_get_by_args(ctxt, host, service)
+        if not svc:
+            print "Unable to find service"
+            return
+        db.service_update(ctxt, svc['id'], {'disabled': True})
+
+
CATEGORIES = [
    ('user', UserCommands),
    ('project', ProjectCommands),
@@ -459,7 +506,8 @@ CATEGORIES = [
    ('shell', ShellCommands),
    ('vpn', VpnCommands),
    ('floating', FloatingIpCommands),
-   ('network', NetworkCommands)]
+   ('network', NetworkCommands),
+   ('service', ServiceCommands)]


def lazy_match(name, key_value_tuples):
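
The ":-)"/"XXX" column above is a simple heartbeat check; a standalone sketch of that heuristic (the 15-second threshold and field names are taken from the diff, the rest is illustrative)::

    import datetime

    def service_alive(svc, now=None, timeout=15):
        # A service counts as alive if its last heartbeat (updated_at, falling
        # back to created_at) happened within `timeout` seconds.
        now = now or datetime.datetime.utcnow()
        delta = now - (svc['updated_at'] or svc['created_at'])
        return delta.seconds <= timeout

    svc = {'updated_at': datetime.datetime.utcnow() -
                         datetime.timedelta(seconds=10),
           'created_at': None}
    print ":-)" if service_alive(svc) else "XXX"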

View File

@@ -16,13 +16,13 @@ Here's a script you can use to install (and then run) Nova on Ubuntu or Debian (
Step 2: Install dependencies
----------------------------

-Nova requires rabbitmq for messaging and optionally you can use redis for storing state, so install these first.
+Nova requires rabbitmq for messaging, so install that first.

*Note:* You must have sudo installed to run these commands as shown here.

::

-    sudo apt-get install rabbitmq-server redis-server
+    sudo apt-get install rabbitmq-server

You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.

@@ -31,11 +31,10 @@ If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gfl

::

-    sudo apt-get install python-twisted
-    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 95C71FE2
-    sudo sh -c 'echo "deb http://ppa.launchpad.net/openstack/openstack-ppa/ubuntu lucid main" > /etc/apt/sources.list.d/openstackppa.list'
-    sudo apt-get update && sudo apt-get install python-gflags
+    sudo apt-get install python-software-properties
+    sudo add-apt-repository ppa:nova-core/trunk
+    sudo apt-get update
+    sudo apt-get install python-twisted python-gflags

Once you've done this, continue at Step 3 here: :doc:`../single.node.install`

View File

@@ -76,11 +76,11 @@ External unix tools that are required:
* aoetools and vblade-persist (if you use aoe-volumes)

Nova uses cutting-edge versions of many packages. There are ubuntu packages in
-the nova-core ppa. You can use add this ppa to your sources list on an ubuntu
-machine with the following commands::
+the nova-core trunk ppa. You can use add this ppa to your sources list on an
+ubuntu machine with the following commands::

    sudo apt-get install -y python-software-properties
-   sudo add-apt-repository ppa:nova-core/ppa
+   sudo add-apt-repository ppa:nova-core/trunk

Recommended
-----------

View File

@@ -46,12 +46,12 @@ Assumptions
Step 1 Use apt-get to get the latest code
-----------------------------------------

-1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/ppa.
+1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/trunk.

::

    sudo apt-get install python-software-properties
-   sudo add-apt-repository ppa:nova-core/ppa
+   sudo add-apt-repository ppa:nova-core/trunk

2. Run update.

@@ -77,21 +77,20 @@ Nova development has consolidated all .conf files to nova.conf as of November 20

#. These need to be defined in the nova.conf configuration file::

    --sql_connection=mysql://root:nova@$CC_ADDR/nova  # location of nova sql db
-   --s3_host=$CC_ADDR          # This is where nova is hosting the objectstore service, which
+   --s3_host=$CC_ADDR          # This is where Nova is hosting the objectstore service, which
                                # will contain the VM images and buckets
    --rabbit_host=$CC_ADDR      # This is where the rabbit AMQP messaging service is hosted
    --cc_host=$CC_ADDR          # This is where the the nova-api service lives
    --verbose                   # Optional but very helpful during initial setup
    --ec2_url=http://$CC_ADDR:8773/services/Cloud
    --network_manager=nova.network.manager.FlatManager  # simple, no-vlan networking type
    --fixed_range=<network/prefix>   # ip network to use for VM guests, ex 192.168.2.64/26
    --network_size=<# of addrs>      # number of ip addrs to use for VM guests, ex 64

#. Create a nova group::

    sudo addgroup nova

The Nova config file should have its owner set to root:nova, and mode set to 0640, since they contain your MySQL server's root password.
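
These settings are ordinary python-gflags flags, so they can be set on the command line as well as in nova.conf (which nova services read via --flagfile); a minimal sketch, with only the two flag names taken from the guide above::

    import gflags

    FLAGS = gflags.FLAGS
    gflags.DEFINE_string('sql_connection', 'sqlite:///nova.sqlite',
                         'connection string for sql database')
    gflags.DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
                         'ec2 api endpoint')

    # In a real deployment the same values would come from
    # --flagfile=/etc/nova/nova.conf instead of the literal argv below.
    FLAGS(['nova', '--sql_connection=mysql://root:nova@192.168.0.1/nova'])
    print FLAGS.sql_connection, FLAGS.ec2_url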

View File

@@ -24,7 +24,7 @@ Routing
To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information.

-URLs are mapped to "action" methods on "controller" classes in nova/api/openstack/__init__/ApiRouter.__init__ .
+URLs are mapped to "action" methods on "controller" classes in `nova/api/openstack/__init__/ApiRouter.__init__` .

See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two:
- mapper.connect() lets you map a single URL to a single action on a controller.
@@ -33,9 +33,9 @@ See http://routes.groovie.org/manual.html for all syntax, but you'll probably ju
Controllers and actions
-----------------------

-Controllers live in nova/api/openstack, and inherit from nova.wsgi.Controller.
+Controllers live in `nova/api/openstack`, and inherit from nova.wsgi.Controller.

-See nova/api/openstack/servers.py for an example.
+See `nova/api/openstack/servers.py` for an example.

Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc.
@@ -46,7 +46,7 @@ Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML
If you define a new controller, you'll need to define a _serialization_metadata attribute on the class, to tell wsgi.Controller how to convert your dictionary to XML. It needs to know the singular form of any list tag (e.g. <servers> list contains <server> tags) and which dictionary keys are to be XML attributes as opposed to subtags (e.g. <server id="4"/> instead of <server><id>4</id></server>).

-See nova/api/openstack/servers.py for an example.
+See `nova/api/openstack/servers.py` for an example.

Faults
------
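
A self-contained sketch of the two Routes calls mentioned above (the stub controller stands in for a nova.wsgi.Controller subclass and is not part of the commit)::

    from routes import Mapper

    class ServersStub(object):
        # Stand-in controller with one action method; the real ones inherit
        # from nova.wsgi.Controller and define _serialization_metadata.
        def show(self, req, id):
            return {'server': {'id': id}}

    mapper = Mapper()
    # Map a single URL to a single action on a controller.
    mapper.connect('/servers/{id}', controller=ServersStub(), action='show')
    # .resource() wires up the usual index/show/create/update/delete routes.
    mapper.resource('server', 'servers', controller=ServersStub())

    print mapper.match('/servers/4')  # {'controller': ..., 'action': 'show', 'id': '4'}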

View File

@@ -71,8 +71,8 @@ RPC Casts
The diagram below shows the message flow during an rpc.cast operation:

-1. a Topic Publisher is instantiated to send the message request to the queuing system.
-2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task.
+1. A Topic Publisher is instantiated to send the message request to the queuing system.
+2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task.

.. image:: /images/rabbit/flow2.png
   :width: 60%
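
From the caller's side a cast is just a topic name plus a serialized method call; a sketch using nova's rpc module as it existed around this time (treat the exact call as illustrative)::

    from nova import context
    from nova import rpc

    # Fire-and-forget: push a method invocation onto the "compute" topic.
    # A worker consuming that topic runs the task; no result travels back,
    # which is what distinguishes a cast from a call.
    ctxt = context.get_admin_context()
    rpc.cast(ctxt,
             'compute',
             {'method': 'run_instance',
              'args': {'instance_id': 42}})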

View File

@@ -75,7 +75,7 @@ Nova is built on a shared-nothing, messaging-based architecture. All of the majo
To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)

-.. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>_`.
+.. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>`_.

Concept: Storage
----------------
@@ -129,12 +129,12 @@ The simplest networking mode. Each instance receives a fixed ip from the pool.
Flat DHCP Mode
~~~~~~~~~~~~~~

-This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.
+This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode Nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.

VLAN DHCP Mode
~~~~~~~~~~~~~~

-This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.
+This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, Nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.

The following diagram illustrates how the communication that occurs between the vlan (the dashed box) and the public internet (represented by the two clouds)
@@ -154,16 +154,16 @@ Concept: nova-manage
--------------------

The nova-manage command is used to perform many essential functions for
-administration and ongoing maintenance of nova, such as user creation,
+administration and ongoing maintenance of Nova, such as user creation,
vpn management, and much more.

-See doc:`nova.manage` in the Administration Guide for more details.
+See :doc:`nova.manage` in the Administration Guide for more details.

Concept: Flags
--------------

-Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
+Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.

Concept: Plugins
@@ -181,7 +181,7 @@ Concept: Plugins
Concept: IPC/RPC
----------------

-Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/.
+Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various Nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/.

Concept: Fakes
--------------

View File

@@ -59,38 +59,21 @@ different configurations (though for more complex setups you should see
* HOST_IP
  * Default: address of first interface from the ifconfig command
  * Values: 127.0.0.1, or any other valid address
-TEST
-~~~~
-**Default**: 0
-**Values**: 1, run tests after checkout and initial setup
-USE_MYSQL
-~~~~~~~~~
-**Default**: 0, use sqlite3
-**Values**: 1, use mysql instead of sqlite3
-MYSQL_PASS
-~~~~~~~~~~
-Only useful if $USE_MYSQL=1.
-**Default**: nova
-**Values**: value of root password for mysql
-USE_LDAP
-~~~~~~~~
-**Default**: 0, use :mod:`nova.auth.dbdriver`
-**Values**: 1, use :mod:`nova.auth.ldapdriver`
-LIBVIRT_TYPE
-~~~~~~~~~~~~
-**Default**: qemu
-**Values**: uml, kvm
+* TEST
+  * Default: 0
+  * Values: 1, run tests after checkout and initial setup
+* USE_MYSQL
+  * Default: 0, use sqlite3
+  * Values: 1, use mysql instead of sqlite3
+* MYSQL_PASS (Only useful if $USE_MYSQL=1)
+  * Default: nova
+  * Values: value of root password for mysql
+* USE_LDAP
+  * Default: 0, use :mod:`nova.auth.dbdriver`
+  * Values: 1, use :mod:`nova.auth.ldapdriver`
+* LIBVIRT_TYPE
+  * Default: qemu
+  * Values: uml, kvm

Usage
-----
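
A hypothetical Python mirror of how a setup script might consume these variables (names and defaults come from the list above; the fallback for HOST_IP is simplified)::

    import os

    HOST_IP = os.environ.get('HOST_IP', '127.0.0.1')
    TEST = int(os.environ.get('TEST', 0))
    USE_MYSQL = int(os.environ.get('USE_MYSQL', 0))
    MYSQL_PASS = os.environ.get('MYSQL_PASS', 'nova')
    USE_LDAP = int(os.environ.get('USE_LDAP', 0))
    LIBVIRT_TYPE = os.environ.get('LIBVIRT_TYPE', 'qemu')

    sql = ('mysql://root:%s@localhost/nova' % MYSQL_PASS if USE_MYSQL
           else 'sqlite:///nova.sqlite')
    print HOST_IP, LIBVIRT_TYPE, sql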

View File

@@ -150,6 +150,9 @@ def _match(key, value, attrs):
"""Match a given key and value against an attribute list.""" """Match a given key and value against an attribute list."""
if key not in attrs: if key not in attrs:
return False return False
# This is a wild card search. Implemented as all or nothing for now.
if value == "*":
return True
if key != "objectclass": if key != "objectclass":
return value in attrs[key] return value in attrs[key]
# it is an objectclass check, so check subclasses # it is an objectclass check, so check subclasses
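
The new branch makes "*" an all-or-nothing wildcard; a standalone re-implementation of just that behaviour (the objectclass/subclass branch is omitted here)::

    def _match(key, value, attrs):
        """Match a key/value pair against a dict of attribute lists."""
        if key not in attrs:
            return False
        # "*" matches any present key, regardless of its values.
        if value == "*":
            return True
        return value in attrs[key]

    attrs = {'uid': ['fake'], 'objectclass': ['novaUser']}
    print _match('uid', '*', attrs)      # True  (wildcard)
    print _match('uid', 'fake', attrs)   # True  (exact value)
    print _match('cn', '*', attrs)       # False (key absent)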

View File

@@ -32,11 +32,16 @@ from nova import flags
FLAGS = flags.FLAGS

+flags.DEFINE_integer('ldap_schema_version', 2,
+                     'Current version of the LDAP schema')
flags.DEFINE_string('ldap_url', 'ldap://localhost',
                    'Point this at your ldap server')
flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
                    'DN of admin user')
+flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id')
+flags.DEFINE_string('ldap_user_name_attribute', 'cn',
+                    'Attribute to use as name')
flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
                    'OU for Users')
@@ -73,10 +78,20 @@ class LdapDriver(object):
    Defines enter and exit and therefore supports the with/as syntax.
    """

+   project_pattern = '(owner=*)'
+   isadmin_attribute = 'isNovaAdmin'
+   project_attribute = 'owner'
+   project_objectclass = 'groupOfNames'
+
    def __init__(self):
        """Imports the LDAP module"""
        self.ldap = __import__('ldap')
        self.conn = None
+       if FLAGS.ldap_schema_version == 1:
+           LdapDriver.project_pattern = '(objectclass=novaProject)'
+           LdapDriver.isadmin_attribute = 'isAdmin'
+           LdapDriver.project_attribute = 'projectManager'
+           LdapDriver.project_objectclass = 'novaProject'

    def __enter__(self):
        """Creates the connection to LDAP"""
@@ -104,13 +119,13 @@ class LdapDriver(object):
        """Retrieve project by id"""
        dn = 'cn=%s,%s' % (pid,
                           FLAGS.ldap_project_subtree)
-       attr = self.__find_object(dn, '(objectclass=novaProject)')
+       attr = self.__find_object(dn, LdapDriver.project_pattern)
        return self.__to_project(attr)

    def get_users(self):
        """Retrieve list of users"""
        attrs = self.__find_objects(FLAGS.ldap_user_subtree,
                                    '(objectclass=novaUser)')
        users = []
        for attr in attrs:
            user = self.__to_user(attr)
@@ -120,7 +135,7 @@ class LdapDriver(object):
    def get_projects(self, uid=None):
        """Retrieve list of projects"""
-       pattern = '(objectclass=novaProject)'
+       pattern = LdapDriver.project_pattern
        if uid:
            pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid))
        attrs = self.__find_objects(FLAGS.ldap_project_subtree,
@@ -139,23 +154,25 @@ class LdapDriver(object):
            # Malformed entries are useless, replace attributes found.
            attr = []
            if 'secretKey' in user.keys():
-               attr.append((self.ldap.MOD_REPLACE, 'secretKey', \
+               attr.append((self.ldap.MOD_REPLACE, 'secretKey',
                            [secret_key]))
            else:
-               attr.append((self.ldap.MOD_ADD, 'secretKey', \
+               attr.append((self.ldap.MOD_ADD, 'secretKey',
                            [secret_key]))
            if 'accessKey' in user.keys():
-               attr.append((self.ldap.MOD_REPLACE, 'accessKey', \
+               attr.append((self.ldap.MOD_REPLACE, 'accessKey',
                            [access_key]))
            else:
-               attr.append((self.ldap.MOD_ADD, 'accessKey', \
+               attr.append((self.ldap.MOD_ADD, 'accessKey',
                            [access_key]))
-           if 'isAdmin' in user.keys():
-               attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \
-                           [str(is_admin).upper()]))
+           if LdapDriver.isadmin_attribute in user.keys():
+               attr.append((self.ldap.MOD_REPLACE,
+                            LdapDriver.isadmin_attribute,
+                            [str(is_admin).upper()]))
            else:
-               attr.append((self.ldap.MOD_ADD, 'isAdmin', \
-                           [str(is_admin).upper()]))
+               attr.append((self.ldap.MOD_ADD,
+                            LdapDriver.isadmin_attribute,
+                            [str(is_admin).upper()]))
            self.conn.modify_s(self.__uid_to_dn(name), attr)
            return self.get_user(name)
        else:
@@ -168,12 +185,12 @@ class LdapDriver(object):
                'inetOrgPerson',
                'novaUser']),
            ('ou', [FLAGS.ldap_user_unit]),
-           ('uid', [name]),
+           (FLAGS.ldap_user_id_attribute, [name]),
            ('sn', [name]),
-           ('cn', [name]),
+           (FLAGS.ldap_user_name_attribute, [name]),
            ('secretKey', [secret_key]),
            ('accessKey', [access_key]),
-           ('isAdmin', [str(is_admin).upper()]),
+           (LdapDriver.isadmin_attribute, [str(is_admin).upper()]),
        ]
        self.conn.add_s(self.__uid_to_dn(name), attr)
        return self.__to_user(dict(attr))
@@ -204,10 +221,10 @@ class LdapDriver(object):
        if not manager_dn in members:
            members.append(manager_dn)
        attr = [
-           ('objectclass', ['novaProject']),
+           ('objectclass', [LdapDriver.project_objectclass]),
            ('cn', [name]),
            ('description', [description]),
-           ('projectManager', [manager_dn]),
+           (LdapDriver.project_attribute, [manager_dn]),
            ('member', members)]
        self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
        return self.__to_project(dict(attr))
@@ -223,7 +240,8 @@ class LdapDriver(object):
                                         "manager %s doesn't exist")
                                         % manager_uid)
            manager_dn = self.__uid_to_dn(manager_uid)
-           attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn))
+           attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute,
+                        manager_dn))
        if description:
            attr.append((self.ldap.MOD_REPLACE, 'description', description))
        self.conn.modify_s('cn=%s,%s' % (project_id,
@@ -283,10 +301,9 @@ class LdapDriver(object):
            return roles
        else:
            project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
-           roles = self.__find_objects(project_dn,
-                                       '(&(&(objectclass=groupOfNames)'
-                                       '(!(objectclass=novaProject)))'
-                                       '(member=%s))' % self.__uid_to_dn(uid))
+           query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
+                    (LdapDriver.project_pattern, self.__uid_to_dn(uid)))
+           roles = self.__find_objects(project_dn, query)
            return [role['cn'][0] for role in roles]

    def delete_user(self, uid):
@@ -300,14 +317,15 @@ class LdapDriver(object):
            # Retrieve user by name
            user = self.__get_ldap_user(uid)
            if 'secretKey' in user.keys():
-               attr.append((self.ldap.MOD_DELETE, 'secretKey', \
+               attr.append((self.ldap.MOD_DELETE, 'secretKey',
                            user['secretKey']))
            if 'accessKey' in user.keys():
-               attr.append((self.ldap.MOD_DELETE, 'accessKey', \
+               attr.append((self.ldap.MOD_DELETE, 'accessKey',
                            user['accessKey']))
-           if 'isAdmin' in user.keys():
-               attr.append((self.ldap.MOD_DELETE, 'isAdmin', \
-                           user['isAdmin']))
+           if LdapDriver.isadmin_attribute in user.keys():
+               attr.append((self.ldap.MOD_DELETE,
+                            LdapDriver.isadmin_attribute,
+                            user[LdapDriver.isadmin_attribute]))
            self.conn.modify_s(self.__uid_to_dn(uid), attr)
        else:
            # Delete entry
@@ -329,7 +347,8 @@ class LdapDriver(object):
        if secret_key:
            attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key))
        if admin is not None:
-           attr.append((self.ldap.MOD_REPLACE, 'isAdmin', str(admin).upper()))
+           attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute,
+                        str(admin).upper()))
        self.conn.modify_s(self.__uid_to_dn(uid), attr)

    def __user_exists(self, uid):
@@ -347,7 +366,7 @@ class LdapDriver(object):
    def __get_ldap_user(self, uid):
        """Retrieve LDAP user entry by id"""
        attr = self.__find_object(self.__uid_to_dn(uid),
                                  '(objectclass=novaUser)')
        return attr

    def __find_object(self, dn, query=None, scope=None):
@@ -383,19 +402,21 @@ class LdapDriver(object):
    def __find_role_dns(self, tree):
        """Find dns of role objects in given tree"""
-       return self.__find_dns(tree,
-               '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
+       query = ('(&(objectclass=groupOfNames)(!%s))' %
+                LdapDriver.project_pattern)
+       return self.__find_dns(tree, query)

    def __find_group_dns_with_member(self, tree, uid):
        """Find dns of group objects in a given tree that contain member"""
-       dns = self.__find_dns(tree,
-                             '(&(objectclass=groupOfNames)(member=%s))' %
-                             self.__uid_to_dn(uid))
+       query = ('(&(objectclass=groupOfNames)(member=%s))' %
+                self.__uid_to_dn(uid))
+       dns = self.__find_dns(tree, query)
        return dns

    def __group_exists(self, dn):
        """Check if group exists"""
-       return self.__find_object(dn, '(objectclass=groupOfNames)') is not None
+       query = '(objectclass=groupOfNames)'
+       return self.__find_object(dn, query) is not None

    @staticmethod
    def __role_to_dn(role, project_id=None):
@@ -417,9 +438,9 @@ class LdapDriver(object):
        if member_uids is not None:
            for member_uid in member_uids:
                if not self.__user_exists(member_uid):
-                   raise exception.NotFound(_("Group can't be created "
-                                              "because user %s doesn't exist")
-                                            % member_uid)
+                   raise exception.NotFound("Group can't be created "
+                                            "because user %s doesn't exist" %
+                                            member_uid)
                members.append(self.__uid_to_dn(member_uid))
        dn = self.__uid_to_dn(uid)
        if not dn in members:
@@ -434,9 +455,8 @@ class LdapDriver(object):
    def __is_in_group(self, uid, group_dn):
        """Check if user is in group"""
        if not self.__user_exists(uid):
-           raise exception.NotFound(_("User %s can't be searched in group "
-                                      "because the user doesn't exist")
-                                    % uid)
+           raise exception.NotFound("User %s can't be searched in group "
+                                    "because the user doesn't exist" % uid)
        if not self.__group_exists(group_dn):
            return False
        res = self.__find_object(group_dn,
@@ -447,12 +467,11 @@ class LdapDriver(object):
    def __add_to_group(self, uid, group_dn):
        """Add user to group"""
        if not self.__user_exists(uid):
-           raise exception.NotFound(_("User %s can't be added to the group "
-                                      "because the user doesn't exist")
-                                    % uid)
+           raise exception.NotFound("User %s can't be added to the group "
+                                    "because the user doesn't exist" % uid)
        if not self.__group_exists(group_dn):
-           raise exception.NotFound(_("The group at dn %s doesn't exist")
-                                    % group_dn)
+           raise exception.NotFound("The group at dn %s doesn't exist" %
+                                    group_dn)
        if self.__is_in_group(uid, group_dn):
            raise exception.Duplicate(_("User %s is already a member of "
                                        "the group %s") % (uid, group_dn))
@@ -462,18 +481,17 @@ class LdapDriver(object):
    def __remove_from_group(self, uid, group_dn):
        """Remove user from group"""
        if not self.__group_exists(group_dn):
-           raise exception.NotFound(_("The group at dn %s doesn't exist")
-                                    % group_dn)
+           raise exception.NotFound("The group at dn %s doesn't exist" %
+                                    group_dn)
        if not self.__user_exists(uid):
-           raise exception.NotFound(_("User %s can't be removed from the "
-                                      "group because the user doesn't exist")
-                                    % uid)
+           raise exception.NotFound("User %s can't be removed from the "
+                                    "group because the user doesn't exist" %
+                                    uid)
        if not self.__is_in_group(uid, group_dn):
-           raise exception.NotFound(_("User %s is not a member of the group")
-                                    % uid)
+           raise exception.NotFound("User %s is not a member of the group" %
+                                    uid)
        # NOTE(vish): remove user from group and any sub_groups
-       sub_dns = self.__find_group_dns_with_member(
-               group_dn, uid)
+       sub_dns = self.__find_group_dns_with_member(group_dn, uid)
        for sub_dn in sub_dns:
            self.__safe_remove_from_group(uid, sub_dn)
@@ -491,9 +509,8 @@ class LdapDriver(object):
    def __remove_from_all(self, uid):
        """Remove user from all roles and projects"""
        if not self.__user_exists(uid):
-           raise exception.NotFound(_("User %s can't be removed from all "
-                                      "because the user doesn't exist")
-                                    % uid)
+           raise exception.NotFound("User %s can't be removed from all "
+                                    "because the user doesn't exist" % uid)
        role_dns = self.__find_group_dns_with_member(
                FLAGS.role_project_subtree, uid)
        for role_dn in role_dns:
@@ -521,13 +538,13 @@ class LdapDriver(object):
        if attr is None:
            return None
        if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \
-           and 'isAdmin' in attr.keys()):
+           and LdapDriver.isadmin_attribute in attr.keys()):
            return {
-               'id': attr['uid'][0],
-               'name': attr['cn'][0],
+               'id': attr[FLAGS.ldap_user_id_attribute][0],
+               'name': attr[FLAGS.ldap_user_name_attribute][0],
                'access': attr['accessKey'][0],
                'secret': attr['secretKey'][0],
-               'admin': (attr['isAdmin'][0] == 'TRUE')}
+               'admin': (attr[LdapDriver.isadmin_attribute][0] == 'TRUE')}
        else:
            return None
@@ -539,7 +556,8 @@ class LdapDriver(object):
        return {
            'id': attr['cn'][0],
            'name': attr['cn'][0],
-           'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]),
+           'project_manager_id':
+               self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
            'description': attr.get('description', [None])[0],
            'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
@@ -549,9 +567,10 @@ class LdapDriver(object):
        return dn.split(',')[0].split('=')[1]

    @staticmethod
-   def __uid_to_dn(dn):
+   def __uid_to_dn(uid):
        """Convert uid to dn"""
-       return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree)
+       return (FLAGS.ldap_user_id_attribute + '=%s,%s'
+               % (uid, FLAGS.ldap_user_subtree))


class FakeLdapDriver(LdapDriver):
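
The practical effect of the new class-level attributes is easier to see in isolation; a simplified, standalone sketch of the v1-versus-v2 behaviour (not the real driver)::

    def uid_to_dn(uid, id_attribute='uid',
                  user_subtree='ou=Users,dc=example,dc=com'):
        # Schema v2 keeps 'uid' by default but lets deployments override the
        # naming attribute via --ldap_user_id_attribute.
        return '%s=%s,%s' % (id_attribute, uid, user_subtree)

    def project_filter(schema_version=2):
        # v1 projects were a dedicated novaProject objectclass; v2 marks a
        # plain groupOfNames as a project by the presence of an owner attribute.
        if schema_version == 1:
            return '(objectclass=novaProject)'
        return '(owner=*)'

    print uid_to_dn('fake')             # uid=fake,ou=Users,dc=example,dc=com
    print project_filter(1), project_filter(2)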

View File

@@ -1,7 +1,9 @@
#
# Person object for Nova
# inetorgperson with extra attributes
-# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
+# Schema version: 2
+# Authors: Vishvananda Ishaya <vishvananda@gmail.com>
+#          Ryan Lane <rlane@wikimedia.org>
#
#
@@ -30,55 +32,19 @@
    SINGLE-VALUE
    )

-attributetype (
-    novaAttrs:3
-    NAME 'keyFingerprint'
-    DESC 'Fingerprint of private key'
-    EQUALITY caseIgnoreMatch
-    SUBSTR caseIgnoreSubstringsMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
-    SINGLE-VALUE
-    )
-
attributetype (
    novaAttrs:4
-   NAME 'isAdmin'
-   DESC 'Is user an administrator?'
+   NAME 'isNovaAdmin'
+   DESC 'Is user an nova administrator?'
    EQUALITY booleanMatch
    SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
    SINGLE-VALUE
    )

-attributetype (
-    novaAttrs:5
-    NAME 'projectManager'
-    DESC 'Project Managers of a project'
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
-    )
-
objectClass (
    novaOCs:1
    NAME 'novaUser'
    DESC 'access and secret keys'
    AUXILIARY
-   MUST ( uid )
-   MAY ( accessKey $ secretKey $ isAdmin )
+   MAY ( accessKey $ secretKey $ isNovaAdmin )
-   )
-
-objectClass (
-    novaOCs:2
-    NAME 'novaKeyPair'
-    DESC 'Key pair for User'
-    SUP top
-    STRUCTURAL
-    MUST ( cn $ sshPublicKey $ keyFingerprint )
-    )
-
-objectClass (
-    novaOCs:3
-    NAME 'novaProject'
-    DESC 'Container for project'
-    SUP groupOfNames
-    STRUCTURAL
-    MUST ( cn $ projectManager )
    )

View File

@@ -1,16 +1,13 @@
#
# Person object for Nova
# inetorgperson with extra attributes
-# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
-# Modified for strict RFC 4512 compatibility by: Ryan Lane <ryan@ryandlane.com>
+# Schema version: 2
+# Authors: Vishvananda Ishaya <vishvananda@gmail.com>
+#          Ryan Lane <rlane@wikimedia.org>
#
# using internet experimental oid arc as per BP64 3.1

dn: cn=schema
attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
-attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE)
-attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
-attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )
-objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) )
-objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) )
-objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) )

View File

@@ -32,7 +32,6 @@ abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
schemapath='/var/opendj/instance/config/schema'
cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif
cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif
-chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif
chown opendj:opendj $schemapath/98-nova_sun.ldif

cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF

View File

@@ -22,7 +22,7 @@ apt-get install -y slapd ldap-utils python-ldap
abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema
-cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema
+cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema
mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
@@ -33,7 +33,6 @@ cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
include /etc/ldap/schema/core.schema
include /etc/ldap/schema/cosine.schema
include /etc/ldap/schema/inetorgperson.schema
-include /etc/ldap/schema/openssh-lpk_openldap.schema
include /etc/ldap/schema/nova.schema

pidfile /var/run/slapd/slapd.pid
argsfile /var/run/slapd/slapd.args

View File

@@ -212,6 +212,8 @@ DEFINE_list('region_list',
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
+DEFINE_integer('glance_port', 9292, 'glance port')
+DEFINE_string('glance_host', utils.get_my_ip(), 'glance host')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
@@ -239,6 +241,7 @@ DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')
+DEFINE_string('default_project', 'openstack', 'default project for openstack')
DEFINE_string('default_image', 'ami-11111',
              'default image to use, testing only')
DEFINE_string('default_instance_type', 'm1.small',
@@ -260,6 +263,11 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
DEFINE_string('sql_connection',
              'sqlite:///$state_path/nova.sqlite',
              'connection string for sql database')
+DEFINE_string('sql_idle_timeout',
+              '3600',
+              'timeout for idle sql database connections')
+DEFINE_integer('sql_max_retries', 12, 'sql connection attempts')
+DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')
DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
              'Manager for compute')
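
Presumably the two new glance flags get combined into the endpoint the image service client talks to; a purely illustrative two-liner (the URL format is an assumption, not taken from this commit)::

    glance_host = '192.168.0.1'  # stand-in for FLAGS.glance_host (utils.get_my_ip())
    glance_port = 9292           # default from the flag definition above
    print 'http://%s:%d' % (glance_host, glance_port)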

View File

@@ -37,6 +37,11 @@ class NoValidHost(exception.Error):
    pass


+class WillNotSchedule(exception.Error):
+    """The specified host is not up or doesn't exist."""
+    pass


class Scheduler(object):
    """The base class that all Scheduler clases should inherit from."""

View File

@@ -43,6 +43,19 @@ class SimpleScheduler(chance.ChanceScheduler):
    def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        instance_ref = db.instance_get(context, instance_id)
+       if instance_ref['availability_zone'] and context.is_admin:
+           zone, _x, host = instance_ref['availability_zone'].partition(':')
+           service = db.service_get_by_args(context.elevated(), host,
+                                            'nova-compute')
+           if not self.service_is_up(service):
+               raise driver.WillNotSchedule("Host %s is not alive" % host)
+
+           # TODO(vish): this probably belongs in the manager, if we
+           #             can generalize this somehow
+           now = datetime.datetime.utcnow()
+           db.instance_update(context, instance_id, {'host': host,
+                                                     'scheduled_at': now})
+           return host
        results = db.service_get_all_compute_sorted(context)
        for result in results:
            (service, instance_cores) = result
@@ -62,6 +75,19 @@ class SimpleScheduler(chance.ChanceScheduler):
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        volume_ref = db.volume_get(context, volume_id)
+       if (':' in volume_ref['availability_zone']) and context.is_admin:
+           zone, _x, host = volume_ref['availability_zone'].partition(':')
+           service = db.service_get_by_args(context.elevated(), host,
+                                            'nova-volume')
+           if not self.service_is_up(service):
+               raise driver.WillNotSchedule("Host %s not available" % host)
+
+           # TODO(vish): this probably belongs in the manager, if we
+           #             can generalize this somehow
+           now = datetime.datetime.utcnow()
+           db.volume_update(context, volume_id, {'host': host,
+                                                 'scheduled_at': now})
+           return host
        results = db.service_get_all_volume_sorted(context)
        for result in results:
            (service, volume_gigabytes) = result
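
The forced-host syntax handled above is just "zone:host" packed into the availability_zone field; a tiny standalone illustration of the parsing step::

    def parse_availability_zone(az):
        """Split 'nova:compute-03' into (zone, forced_host); host may be ''."""
        zone, _sep, host = az.partition(':')
        return zone, host

    print parse_availability_zone('nova:compute-03')  # ('nova', 'compute-03')
    print parse_availability_zone('nova')             # ('nova', '')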

View File

@@ -38,9 +38,12 @@ from nova import fakerabbit
from nova import flags
from nova import rpc
from nova.network import manager as network_manager
+from nova.tests import fake_flags

FLAGS = flags.FLAGS
+flags.DEFINE_bool('flush_db', True,
+                  'Flush the database before running fake tests')
flags.DEFINE_bool('fake_tests', True,
                  'should we use everything for testing')

View File

@@ -1,54 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import boto
from boto.ec2.regioninfo import RegionInfo
import unittest
ACCESS_KEY = 'fake'
SECRET_KEY = 'fake'
CLC_IP = '127.0.0.1'
CLC_PORT = 8773
REGION = 'test'
def get_connection():
return boto.connect_ec2(
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
is_secure=False,
region=RegionInfo(None, REGION, CLC_IP),
port=CLC_PORT,
path='/services/Cloud',
debug=99)
class APIIntegrationTests(unittest.TestCase):
def test_001_get_all_images(self):
conn = get_connection()
res = conn.get_all_images()
if __name__ == '__main__':
unittest.main()
#print conn.get_all_key_pairs()
#print conn.create_key_pair
#print conn.create_security_group('name', 'description')

View File

@@ -1,153 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import quota
from nova import test
from nova import utils
from nova.auth import manager
from nova.api.ec2 import cloud
FLAGS = flags.FLAGS
class QuotaTestCase(test.TestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(QuotaTestCase, self).setUp()
self.flags(connection_type='fake',
quota_instances=2,
quota_cores=4,
quota_volumes=2,
quota_gigabytes=20,
quota_floating_ips=1)
self.cloud = cloud.CloudController()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('admin', 'admin', 'admin')
self.network = utils.import_object(FLAGS.network_manager)
self.context = context.RequestContext(project=self.project,
user=self.user)
def tearDown(self):
manager.AuthManager().delete_project(self.project)
manager.AuthManager().delete_user(self.user)
super(QuotaTestCase, self).tearDown()
def _create_instance(self, cores=2):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.large'
inst['vcpus'] = cores
inst['mac_address'] = utils.generate_mac()
return db.instance_create(self.context, inst)['id']
def _create_volume(self, size=10):
"""Create a test volume"""
vol = {}
vol['user_id'] = self.user.id
vol['project_id'] = self.project.id
vol['size'] = size
return db.volume_create(self.context, vol)['id']
def test_quota_overrides(self):
"""Make sure overriding a projects quotas works"""
num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
self.assertEqual(num_instances, 2)
db.quota_create(self.context, {'project_id': self.project.id,
'instances': 10})
num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
self.assertEqual(num_instances, 4)
db.quota_update(self.context, self.project.id, {'cores': 100})
num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
self.assertEqual(num_instances, 10)
db.quota_destroy(self.context, self.project.id)
def test_too_many_instances(self):
instance_ids = []
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
image_id='fake')
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
def test_too_many_cores(self):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
image_id='fake')
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
def test_too_many_volumes(self):
volume_ids = []
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
self.assertRaises(quota.QuotaError, self.cloud.create_volume,
self.context,
size=10)
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_gigabytes(self):
volume_ids = []
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
self.assertRaises(quota.QuotaError,
self.cloud.create_volume,
self.context,
size=10)
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_addresses(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
{'address': address, 'host': FLAGS.host})
float_addr = self.network.allocate_floating_ip(self.context,
self.project.id)
# NOTE(vish): This assert never fails. When cloud attempts to
# make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks
# that is breaking.
self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
self.context)
db.floating_ip_destroy(context.get_admin_context(), address)
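The removed quota tests above boil down to simple headroom arithmetic: a request is capped by the project's quota minus what is already consumed. Below is a minimal sketch of that calculation, using illustrative names rather than Nova's real quota API.

def allowed_instances(requested, quota_instances, used_instances=0):
    """Return how many of the requested instances fit under the quota."""
    headroom = max(quota_instances - used_instances, 0)
    return min(requested, headroom)


assert allowed_instances(100, 2) == 2    # matches the default quota of 2 in the tests
assert allowed_instances(100, 10) == 10  # after the per-project override to 10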


@@ -1,227 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using queue
"""
import mox
from nova import exception
from nova import flags
from nova import rpc
from nova import test
from nova import service
from nova import manager
FLAGS = flags.FLAGS
flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager",
"Manager for testing")
class FakeManager(manager.Manager):
"""Fake manager for tests"""
def test_method(self):
return 'manager'
class ExtendedService(service.Service):
def test_method(self):
return 'service'
class ServiceManagerTestCase(test.TestCase):
"""Test cases for Services"""
def test_attribute_error_for_no_manager(self):
serv = service.Service('test',
'test',
'test',
'nova.tests.service_unittest.FakeManager')
self.assertRaises(AttributeError, getattr, serv, 'test_method')
def test_message_gets_to_manager(self):
serv = service.Service('test',
'test',
'test',
'nova.tests.service_unittest.FakeManager')
serv.start()
self.assertEqual(serv.test_method(), 'manager')
def test_override_manager_method(self):
serv = ExtendedService('test',
'test',
'test',
'nova.tests.service_unittest.FakeManager')
serv.start()
self.assertEqual(serv.test_method(), 'service')
class ServiceTestCase(test.TestCase):
"""Test cases for Services"""
def setUp(self):
super(ServiceTestCase, self).setUp()
self.mox.StubOutWithMock(service, 'db')
def test_create(self):
host = 'foo'
binary = 'nova-fake'
topic = 'fake'
# NOTE(vish): Create was moved out of mox replay to make sure that
# the looping calls are created in StartService.
app = service.Service.create(host=host, binary=binary)
self.mox.StubOutWithMock(rpc,
'AdapterConsumer',
use_mock_anything=True)
rpc.AdapterConsumer(connection=mox.IgnoreArg(),
topic=topic,
proxy=mox.IsA(service.Service)).AndReturn(
rpc.AdapterConsumer)
rpc.AdapterConsumer(connection=mox.IgnoreArg(),
topic='%s.%s' % (topic, host),
proxy=mox.IsA(service.Service)).AndReturn(
rpc.AdapterConsumer)
rpc.AdapterConsumer.attach_to_eventlet()
rpc.AdapterConsumer.attach_to_eventlet()
service_create = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0}
service_ref = {'host': host,
'binary': binary,
'report_count': 0,
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
host,
binary).AndRaise(exception.NotFound())
service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
self.mox.ReplayAll()
app.start()
app.stop()
self.assert_(app)
# We're testing sort of weird behavior in how report_state decides
# whether it is disconnected, it looks for a variable on itself called
# 'model_disconnected' and report_state doesn't really do much so this
# these are mostly just for coverage
def test_report_state_no_service(self):
host = 'foo'
binary = 'bar'
topic = 'test'
service_create = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0}
service_ref = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
host,
binary).AndRaise(exception.NotFound())
service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
service.db.service_get(mox.IgnoreArg(),
service_ref['id']).AndReturn(service_ref)
service.db.service_update(mox.IgnoreArg(), service_ref['id'],
mox.ContainsKeyValue('report_count', 1))
self.mox.ReplayAll()
serv = service.Service(host,
binary,
topic,
'nova.tests.service_unittest.FakeManager')
serv.start()
serv.report_state()
def test_report_state_newly_disconnected(self):
host = 'foo'
binary = 'bar'
topic = 'test'
service_create = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0}
service_ref = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
host,
binary).AndRaise(exception.NotFound())
service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
service.db.service_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(Exception())
self.mox.ReplayAll()
serv = service.Service(host,
binary,
topic,
'nova.tests.service_unittest.FakeManager')
serv.start()
serv.report_state()
self.assert_(serv.model_disconnected)
def test_report_state_newly_connected(self):
host = 'foo'
binary = 'bar'
topic = 'test'
service_create = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0}
service_ref = {'host': host,
'binary': binary,
'topic': topic,
'report_count': 0,
'id': 1}
service.db.service_get_by_args(mox.IgnoreArg(),
host,
binary).AndRaise(exception.NotFound())
service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
service.db.service_get(mox.IgnoreArg(),
service_ref['id']).AndReturn(service_ref)
service.db.service_update(mox.IgnoreArg(), service_ref['id'],
mox.ContainsKeyValue('report_count', 1))
self.mox.ReplayAll()
serv = service.Service(host,
binary,
topic,
'nova.tests.service_unittest.FakeManager')
serv.start()
serv.model_disconnected = True
serv.report_state()
self.assert_(not serv.model_disconnected)
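The report_state tests above exercise a small reconnect pattern: bump a report counter in the database, flag the service as disconnected when the database call fails, and clear the flag once a call succeeds again. Here is a rough, self-contained sketch of that pattern (illustrative only, not the Service implementation itself; the injected db_api object is assumed).

class Heartbeat(object):
    """Toy version of the report_state bookkeeping tested above."""

    def __init__(self, db_api, service_id):
        self.db_api = db_api
        self.service_id = service_id
        self.model_disconnected = False

    def report_state(self):
        try:
            record = self.db_api.service_get(self.service_id)
            self.db_api.service_update(
                self.service_id,
                {'report_count': record['report_count'] + 1})
            if self.model_disconnected:
                self.model_disconnected = False
        except Exception:
            if not self.model_disconnected:
                self.model_disconnected = True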


@@ -79,7 +79,7 @@ class FakeHttplibConnection(object):
pass
class XmlConversionTestCase(test.TrialTestCase):
class XmlConversionTestCase(test.TestCase):
"""Unit test api xml conversion"""
def test_number_conversion(self):
conv = apirequest._try_convert
@@ -96,7 +96,7 @@ class XmlConversionTestCase(test.TrialTestCase):
self.assertEqual(conv('-0'), 0)
class ApiEc2TestCase(test.TrialTestCase):
class ApiEc2TestCase(test.TestCase):
"""Unit test for the cloud controller on an EC2 API"""
def setUp(self):
super(ApiEc2TestCase, self).setUp()


@@ -104,13 +104,13 @@ class ComputeTestCase(test.TestCase):
self.compute.run_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
logging.info("Running instances: %s", instances)
logging.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
logging.info("After terminating instances: %s", instances)
logging.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_run_terminate_timestamps(self):
@@ -139,6 +139,14 @@ class ComputeTestCase(test.TestCase):
self.compute.unpause_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_suspend(self):
"""ensure instance can be suspended"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.suspend_instance(self.context, instance_id)
self.compute.resume_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_reboot(self):
"""Ensure instance can be rebooted"""
instance_id = self._create_instance()
@@ -146,6 +154,14 @@ class ComputeTestCase(test.TestCase):
self.compute.reboot_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
instance_id = self._create_instance()
name = "myfakesnapshot"
self.compute.run_instance(self.context, instance_id)
self.compute.snapshot_instance(self.context, instance_id, name)
self.compute.terminate_instance(self.context, instance_id)
def test_console_output(self):
"""Make sure we can get console output from instance"""
instance_id = self._create_instance()


@@ -38,7 +38,7 @@ def conditional_forbid(req):
return 'OK'
class LockoutTestCase(test.TrialTestCase):
class LockoutTestCase(test.TestCase):
"""Test case for the Lockout middleware."""
def setUp(self): # pylint: disable-msg=C0103
super(LockoutTestCase, self).setUp()


@@ -22,13 +22,13 @@ from nova.utils import parse_mailmap, str_dict_replace
class ProjectTestCase(test.TestCase):
def test_authors_up_to_date(self):
if os.path.exists('../.bzr'):
if os.path.exists('.bzr'):
contributors = set()
mailmap = parse_mailmap('../.mailmap')
mailmap = parse_mailmap('.mailmap')
import bzrlib.workingtree
tree = bzrlib.workingtree.WorkingTree.open('..')
tree = bzrlib.workingtree.WorkingTree.open('.')
tree.lock_read()
try:
parents = tree.get_parent_ids()
@@ -42,7 +42,7 @@ class ProjectTestCase(test.TestCase):
email = author.split(' ')[-1]
contributors.add(str_dict_replace(email, mailmap))
authors_file = open('../Authors', 'r').read()
authors_file = open('Authors', 'r').read()
missing = set()
for contributor in contributors:


@@ -19,6 +19,8 @@
Tests For Scheduler
"""
import datetime
from nova import context
from nova import db
from nova import flags
@@ -33,6 +35,7 @@ from nova.scheduler import driver
FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')
flags.DECLARE('stub_network', 'nova.compute.manager')
class TestDriver(driver.Scheduler):
@@ -48,7 +51,7 @@ class SchedulerTestCase(test.TestCase):
"""Test case for scheduler""" """Test case for scheduler"""
def setUp(self): def setUp(self):
super(SchedulerTestCase, self).setUp() super(SchedulerTestCase, self).setUp()
self.flags(scheduler_driver='nova.tests.scheduler_unittest.TestDriver') self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')
def test_fallback(self): def test_fallback(self):
scheduler = manager.SchedulerManager() scheduler = manager.SchedulerManager()
@@ -94,7 +97,7 @@ class SimpleDriverTestCase(test.TestCase):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
def _create_instance(self):
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
@@ -105,6 +108,7 @@ class SimpleDriverTestCase(test.TestCase):
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
inst['vcpus'] = 1
inst['availability_zone'] = kwargs.get('availability_zone', None)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
@@ -113,9 +117,33 @@ class SimpleDriverTestCase(test.TestCase):
vol['image_id'] = 'ami-test'
vol['reservation_id'] = 'r-fakeres'
vol['size'] = 1
vol['availability_zone'] = 'test'
return db.volume_create(self.context, vol)['id']
def test_hosts_are_up(self):
def test_doesnt_report_disabled_hosts_as_up(self):
"""Ensures driver doesn't find hosts before they are enabled"""
# NOTE(vish): constructing service without create method
# because we are going to use it without queue
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
db.service_update(self.context, s2['id'], {'disabled': True})
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(0, len(hosts))
compute1.kill()
compute2.kill()
def test_reports_enabled_hosts_as_up(self):
"""Ensures driver can find the hosts that are up""" """Ensures driver can find the hosts that are up"""
# NOTE(vish): constructing service without create method # NOTE(vish): constructing service without create method
# because we are going to use it without queue # because we are going to use it without queue
@@ -130,7 +158,7 @@ class SimpleDriverTestCase(test.TestCase):
FLAGS.compute_manager)
compute2.start()
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(len(hosts), 2)
self.assertEqual(2, len(hosts))
compute1.kill()
compute2.kill()
@@ -157,6 +185,63 @@ class SimpleDriverTestCase(test.TestCase):
compute1.kill()
compute2.kill()
def test_specific_host_gets_instance(self):
"""Ensures if you set availability_zone it launches on that zone"""
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.start()
instance_id1 = self._create_instance()
compute1.run_instance(self.context, instance_id1)
instance_id2 = self._create_instance(availability_zone='nova:host1')
host = self.scheduler.driver.schedule_run_instance(self.context,
instance_id2)
self.assertEqual('host1', host)
compute1.terminate_instance(self.context, instance_id1)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
compute2.kill()
def test_wont_sechedule_if_specified_host_is_down(self):
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
now = datetime.datetime.utcnow()
delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
past = now - delta
db.service_update(self.context, s1['id'], {'updated_at': past})
instance_id2 = self._create_instance(availability_zone='nova:host1')
self.assertRaises(driver.WillNotSchedule,
self.scheduler.driver.schedule_run_instance,
self.context,
instance_id2)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
def test_will_schedule_on_disabled_host_if_specified(self):
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
instance_id2 = self._create_instance(availability_zone='nova:host1')
host = self.scheduler.driver.schedule_run_instance(self.context,
instance_id2)
self.assertEqual('host1', host)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
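Several of the new scheduler tests above rely on the 'nova:host1' convention: an availability zone of the form '<zone>:<host>' pins the instance to that host, even when the host is disabled, while a host that has stopped reporting raises WillNotSchedule. A small sketch of just the parsing step (the helper name is illustrative, not the scheduler's API):

def parse_forced_host(availability_zone):
    """Split 'zone:host' into its parts; host is None when not forced."""
    zone, sep, host = (availability_zone or '').partition(':')
    return (zone, host if sep else None)


assert parse_forced_host('nova:host1') == ('nova', 'host1')
assert parse_forced_host('nova') == ('nova', None)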
def test_too_many_cores(self):
"""Ensures we don't go over max cores"""
compute1 = service.Service('host1',


@@ -33,6 +33,7 @@ flags.DECLARE('instances_path', 'nova.compute.manager')
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
libvirt_conn._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
@@ -53,39 +54,37 @@ class LibvirtConnTestCase(test.TestCase):
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self.do_test_xml_and_uri(instance_data,
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self.do_test_xml_and_uri(instance_data,
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self.do_test_xml_and_uri(instance_data,
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self.do_test_xml_and_uri(instance_data,
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self.do_test_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True,
rescue=True)
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=True)
def do_test_xml_and_uri(self, instance,
expect_ramdisk, expect_kernel,
rescue=False):
def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
rescue=False):
user_context = context.RequestContext(project=self.project,
user=self.user)
instance_ref = db.instance_create(user_context, instance)
@@ -159,7 +158,6 @@ class LibvirtConnTestCase(test.TestCase):
(lambda t: t.find('./devices/serial/source').get(
'path').split('/')[1], 'console.log'),
(lambda t: t.find('./memory').text, '2097152')]
if rescue:
common_checks += [
(lambda t: t.findall('./devices/disk/source')[0].get(

nova/tests/test_xenapi.py Normal file

@@ -0,0 +1,265 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for XenAPI
"""
import stubout
from nova import db
from nova import context
from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
FLAGS = flags.FLAGS
class XenAPIVolumeTestCase(test.TestCase):
"""
Unit tests for Volume operations
"""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
FLAGS.target_host = '127.0.0.1'
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
self.values = {'name': 1, 'id': 1,
'project_id': 'fake',
'user_id': 'fake',
'image_id': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
}
def _create_volume(self, size='0'):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['host'] = 'localhost'
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(context.get_admin_context(), vol)
def test_create_iscsi_storage(self):
""" This shows how to test helper classes' methods """
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc')
label = 'SR-%s' % vol['ec2_id']
description = 'Test-SR'
sr_ref = helper.create_iscsi_storage(session, info, label, description)
srs = xenapi_fake.get_all('SR')
self.assertEqual(sr_ref, srs[0])
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_parse_volume_info_raise_exception(self):
""" This shows how to test helper classes' methods """
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
# oops, wrong mount point!
self.assertRaises(volume_utils.StorageError,
helper.parse_volume_info,
vol['ec2_id'],
'/dev/sd')
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_attach_volume(self):
""" This shows how to test Ops classes' methods """
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.values)
xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(instance.name, volume['ec2_id'],
'/dev/sdc')
def check():
# check that the VM has a VBD attached to it
# Get XenAPI reference for the VM
vms = xenapi_fake.get_all('VM')
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vms[0])
check()
def test_attach_volume_raise_exception(self):
""" This shows how to test when exceptions are raised """
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.values)
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(Exception,
conn.attach_volume,
instance.name,
volume['ec2_id'],
'/dev/sdc')
def tearDown(self):
super(XenAPIVolumeTestCase, self).tearDown()
self.stubs.UnsetAll()
class XenAPIVMTestCase(test.TestCase):
"""
Unit tests for VM operations
"""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
self.stubs = stubout.StubOutForTesting()
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
xenapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.get_connection(False)
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
def test_get_diagnostics(self):
instance = self._create_instance()
self.conn.get_diagnostics(instance)
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
name = "MySnapshot"
template_vm_ref = self.conn.snapshot(instance, name)
def ensure_vm_was_torn_down():
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEquals(vm_labels, [1])
def ensure_vbd_was_torn_down():
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEquals(vbd_labels, [1])
def ensure_vdi_was_torn_down():
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assert_(not name_label.endswith('snapshot'))
def check():
ensure_vm_was_torn_down()
ensure_vbd_was_torn_down()
ensure_vdi_was_torn_down()
check()
def test_spawn(self):
instance = self._create_instance()
def check():
instances = self.conn.list_instances()
self.assertEquals(instances, [1])
# Get Nova record for VM
vm_info = self.conn.get_info(1)
# Get XenAPI record for VM
vms = xenapi_fake.get_all('VM')
vm = xenapi_fake.get_record('VM', vms[0])
# Check that m1.large above turned into the right thing.
instance_type = instance_types.INSTANCE_TYPES['m1.large']
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
self.assertEquals(vm_info['max_mem'], mem_kib)
self.assertEquals(vm_info['mem'], mem_kib)
self.assertEquals(vm['memory_static_max'], mem_bytes)
self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
self.assertEquals(vm['VCPUs_max'], str(vcpus))
self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
# Check that the VM is running according to Nova
self.assertEquals(vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEquals(vm['power_state'], 'Running')
check()
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
self.stubs.UnsetAll()
def _create_instance(self):
"""Creates and spawns a test instance"""
values = {
'name': 1,
'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff'}
instance = db.instance_create(values)
self.conn.spawn(instance)
return instance
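test_xenapi.py leans heavily on nova.virt.xenapi.fake, an in-memory stand-in for a XenAPI server. The toy sketch below shows the general idea (records kept in dicts keyed by opaque refs so tests can inspect them); it is illustrative only and far smaller than the real fake module.

import uuid

_records = {'VM': {}, 'VBD': {}, 'VDI': {}}


def create_vm(name_label, power_state):
    ref = 'OpaqueRef:%s' % uuid.uuid4()
    _records['VM'][ref] = {'name_label': name_label,
                           'power_state': power_state}
    return ref


def get_all(table):
    return list(_records[table].keys())


def get_record(table, ref):
    return _records[table][ref]


vm_ref = create_vm(1, 'Running')
assert get_record('VM', vm_ref)['power_state'] == 'Running'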


@@ -0,0 +1,20 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`xenapi` -- Stubs for XenAPI
=================================
"""

nova/tests/xenapi/stubs.py Normal file

@@ -0,0 +1,169 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stubouts, mocks and fixtures for the test suite"""
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vm_utils
def stubout_instance_snapshot(stubs):
@classmethod
def fake_fetch_image(cls, session, instance_id, image, user, project,
type):
# Stubout wait_for_task
def fake_wait_for_task(self, id, task):
class FakeEvent:
def send(self, value):
self.rv = value
def wait(self):
return self.rv
done = FakeEvent()
self._poll_task(id, task, done)
rv = done.wait()
return rv
stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task',
fake_wait_for_task)
from nova.virt.xenapi.fake import create_vdi
name_label = "instance-%s" % instance_id
#TODO: create fake SR record
sr_ref = "fakesr"
vdi_ref = create_vdi(name_label=name_label, read_only=False,
sr_ref=sr_ref, sharable=False)
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
vdi_uuid = vdi_rec['uuid']
return vdi_uuid
stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
def fake_parse_xmlrpc_value(val):
return val
stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value)
def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
#TODO(sirp): Should we actually fake out the data here
return "fakeparent"
stubs.Set(vm_utils, 'wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
def stubout_session(stubs, cls):
"""Stubs out two methods from XenAPISession"""
def fake_import(self):
"""Stubs out get_imported_xenapi of XenAPISession"""
fake_module = 'nova.virt.xenapi.fake'
from_list = ['fake']
return __import__(fake_module, globals(), locals(), from_list, -1)
stubs.Set(xenapi_conn.XenAPISession, '_create_session',
lambda s, url: cls(url))
stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi',
fake_import)
def stub_out_get_target(stubs):
"""Stubs out _get_target in volume_utils"""
def fake_get_target(volume_id):
return (None, None)
stubs.Set(volume_utils, '_get_target', fake_get_target)
class FakeSessionForVMTests(fake.SessionBase):
""" Stubs out a XenAPISession for VM tests """
def __init__(self, uri):
super(FakeSessionForVMTests, self).__init__(uri)
def network_get_all_records_where(self, _1, _2):
return self.xenapi.network.get_all_records()
def host_call_plugin(self, _1, _2, _3, _4, _5):
return ''
def VM_start(self, _1, ref, _2, _3):
vm = fake.get_record('VM', ref)
if vm['power_state'] != 'Halted':
raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
vm['power_state']])
vm['power_state'] = 'Running'
vm['is_a_template'] = False
vm['is_control_domain'] = False
def VM_snapshot(self, session_ref, vm_ref, label):
status = "Running"
template_vm_ref = fake.create_vm(label, status, is_a_template=True,
is_control_domain=False)
sr_ref = "fakesr"
template_vdi_ref = fake.create_vdi(label, read_only=True,
sr_ref=sr_ref, sharable=False)
template_vbd_ref = fake.create_vbd(template_vm_ref, template_vdi_ref)
return template_vm_ref
def VDI_destroy(self, session_ref, vdi_ref):
fake.destroy_vdi(vdi_ref)
def VM_destroy(self, session_ref, vm_ref):
fake.destroy_vm(vm_ref)
class FakeSessionForVolumeTests(fake.SessionBase):
""" Stubs out a XenAPISession for Volume tests """
def __init__(self, uri):
super(FakeSessionForVolumeTests, self).__init__(uri)
def VBD_plug(self, _1, ref):
rec = fake.get_record('VBD', ref)
rec['currently-attached'] = True
def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
_6, _7, _8, _9, _10, _11):
valid_vdi = False
refs = fake.get_all('VDI')
for ref in refs:
rec = fake.get_record('VDI', ref)
if rec['uuid'] == uuid:
valid_vdi = True
if not valid_vdi:
raise fake.Failure([['INVALID_VDI', 'session', self._session]])
class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
""" Stubs out a XenAPISession for Volume tests: it injects failures """
def __init__(self, uri):
super(FakeSessionForVolumeFailedTests, self).__init__(uri)
def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
_6, _7, _8, _9, _10, _11):
# This is for testing failure
raise fake.Failure([['INVALID_VDI', 'session', self._session]])
def PBD_unplug(self, _1, ref):
rec = fake.get_record('PBD', ref)
rec['currently-attached'] = False
def SR_forget(self, _1, ref):
pass
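The stubs module above uses stubout's Set/UnsetAll pair throughout. Here is a toy reimplementation of that pattern, just to make the save-replace-restore flow explicit; this is an illustration, not the stubout library itself.

class StubOutForTesting(object):
    """Minimal monkey-patch helper: record originals, restore on demand."""

    def __init__(self):
        self._saved = []

    def Set(self, obj, attr_name, new_value):
        self._saved.append((obj, attr_name, getattr(obj, attr_name)))
        setattr(obj, attr_name, new_value)

    def UnsetAll(self):
        for obj, attr_name, old_value in reversed(self._saved):
            setattr(obj, attr_name, old_value)
        self._saved = []


class Greeter(object):
    def hello(self):
        return 'hello'


stubs = StubOutForTesting()
stubs.Set(Greeter, 'hello', lambda self: 'stubbed')
assert Greeter().hello() == 'stubbed'
stubs.UnsetAll()
assert Greeter().hello() == 'hello'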


@@ -49,7 +49,8 @@ def import_class(import_str):
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError):
except (ImportError, ValueError, AttributeError), exc:
logging.debug(_('Inner Exception: %s'), exc)
raise exception.NotFound(_('Class %s cannot be found') % class_str)
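For context on the hunk above, utils.import_class resolves a dotted 'module.ClassName' string at runtime; the change only logs the inner exception before converting it to NotFound. Below is a hedged, standalone sketch of the same dynamic-import idea, with simplified error handling and a standard-library class as the example target.

import sys


def import_class(import_str):
    """Return a class given a dotted 'module.ClassName' string."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ImportError, ValueError, AttributeError) as exc:
        raise ImportError('Class %s cannot be found: %s' % (class_str, exc))


OrderedDict = import_class('collections.OrderedDict')
assert OrderedDict([('a', 1)])['a'] == 1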


@@ -288,7 +288,7 @@ class Serializer(object):
needed to serialize a dictionary to that type.
"""
self.metadata = metadata or {}
req = webob.Request(environ)
req = webob.Request.blank('', environ)
suffix = req.path_info.split('.')[-1].lower()
if suffix == 'json':
self.handler = self._to_json
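The Serializer change above only alters how the WSGI request object is built (webob.Request.blank instead of constructing Request directly); the format detection itself keys off the path suffix. A tiny illustration of that suffix rule, with made-up names and an assumed default format:

def pick_format(path_info, default='xml'):
    """Choose a serialization format from the request path suffix."""
    suffix = path_info.split('.')[-1].lower()
    if suffix == 'json':
        return 'json'
    return default


assert pick_format('/servers/detail.json') == 'json'
assert pick_format('/servers/detail') == 'xml'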


@@ -1,127 +1,68 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is our basic test running framework based on Twisted's Trial.
Usage Examples:
# to run all the tests
python run_tests.py
# to run a specific test suite imported here
python run_tests.py NodeConnectionTestCase
# to run a specific test imported here
python run_tests.py NodeConnectionTestCase.test_reboot
# to run some test suites elsewhere
python run_tests.py nova.tests.node_unittest
python run_tests.py nova.tests.node_unittest.NodeConnectionTestCase
Due to our use of multiprocessing it we frequently get some ignorable
'Interrupted system call' exceptions after test completion.
"""
import eventlet
eventlet.monkey_patch()
import __main__
import gettext
import os
import unittest
import sys
gettext.install('nova', unicode=1)
from twisted.scripts import trial as trial_script
from nova import flags
from nova import twistd
from nova.tests.access_unittest import *
from nova.tests.api_unittest import *
from nova.tests.auth_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.compute_unittest import *
from nova.tests.easy_unittest import *
from nova.tests.flags_unittest import *
from nova.tests.middleware_unittest import *
from nova.tests.misc_unittest import *
from nova.tests.network_unittest import *
#from nova.tests.objectstore_unittest import *
from nova.tests.quota_unittest import *
from nova.tests.rpc_unittest import *
from nova.tests.scheduler_unittest import *
from nova.tests.service_unittest import *
from nova.tests.twistd_unittest import *
from nova.tests.virt_unittest import *
from nova.tests.volume_unittest import *
FLAGS = flags.FLAGS
flags.DEFINE_bool('flush_db', True,
'Flush the database before running fake tests')
flags.DEFINE_string('tests_stderr', 'run_tests.err.log',
'Path to where to pipe STDERR during test runs.'
' Default = "run_tests.err.log"')
from nose import config
from nose import result
from nose import core
class NovaTestResult(result.TextTestResult):
def __init__(self, *args, **kw):
result.TextTestResult.__init__(self, *args, **kw)
self._last_case = None
def getDescription(self, test):
return str(test)
def startTest(self, test):
unittest.TestResult.startTest(self, test)
current_case = test.test.__class__.__name__
if self.showAll:
if current_case != self._last_case:
self.stream.writeln(current_case)
self._last_case = current_case
self.stream.write(
' %s' % str(test.test._testMethodName).ljust(60))
self.stream.flush()
class NovaTestRunner(core.TextTestRunner):
def _makeResult(self):
return NovaTestResult(self.stream,
self.descriptions,
self.verbosity,
self.config)
if __name__ == '__main__':
c = config.Config(stream=sys.stdout,
env=os.environ,
verbosity=3)
runner = NovaTestRunner(stream=c.stream,
verbosity=c.verbosity,
config=c)
sys.exit(not core.run(config=c, testRunner=runner))
OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
config = OptionsClass()
argv = config.parseOptions()
FLAGS.verbose = True
# TODO(termie): these should make a call instead of doing work on import
if FLAGS.fake_tests:
from nova.tests.fake_flags import *
else:
from nova.tests.real_flags import *
# Establish redirect for STDERR
sys.stderr.flush()
err = open(FLAGS.tests_stderr, 'w+', 0)
os.dup2(err.fileno(), sys.stderr.fileno())
if len(argv) == 1 and len(config['tests']) == 0:
# If no tests were specified run the ones imported in this file
# NOTE(termie): "tests" is not a flag, just some Trial related stuff
config['tests'].update(['__main__'])
elif len(config['tests']):
# If we specified tests check first whether they are in __main__
for arg in config['tests']:
key = arg.split('.')[0]
if hasattr(__main__, key):
config['tests'].remove(arg)
config['tests'].add('__main__.%s' % arg)
trial_script._initialDebugSetup(config)
trialRunner = trial_script._makeRunner(config)
suite = trial_script._getSuite(config)
if config['until-failure']:
test_result = trialRunner.runUntilFailure(suite)
else:
test_result = trialRunner.run(suite)
if config.tracer:
sys.settrace(None)
results = config.tracer.results()
results.write_results(show_missing=1, summary=False,
coverdir=config.coverdir)
sys.exit(not test_result.wasSuccessful())


@@ -21,6 +21,7 @@ function process_option {
-V|--virtual-env) let always_venv=1; let never_venv=0;;
-N|--no-virtual-env) let always_venv=0; let never_venv=1;;
-f|--force) let force=1;;
*) noseargs="$noseargs $1"
esac
}
@@ -29,14 +30,19 @@ with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
noseargs=
for arg in "$@"; do
process_option $arg
done
NOSETESTS="python run_tests.py $noseargs"
if [ $never_venv -eq 1 ]; then
# Just run the test suites in current environment
python run_tests.py
rm -f nova.sqlite
$NOSETESTS 2> run_tests.err.log
exit
fi
@@ -47,7 +53,8 @@ if [ $force -eq 1 ]; then
fi
if [ -e ${venv} ]; then
${with_venv} python run_tests.py $@
${with_venv} rm -f nova.sqlite
${with_venv} $NOSETESTS 2> run_tests.err.log
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
@@ -59,9 +66,11 @@ else
# Install the virtualenv and run the test suite in it
python tools/install_venv.py
else
python run_tests.py
rm -f nova.sqlite
$NOSETESTS 2> run_tests.err.log
exit
fi
fi
${with_venv} python run_tests.py $@
${with_venv} rm -f nova.sqlite
${with_venv} $NOSETESTS 2> run_tests.err.log
fi


@@ -58,6 +58,7 @@ setup(name='nova',
'build_sphinx' : local_BuildDoc },
packages=find_packages(exclude=['bin', 'smoketests']),
include_package_data=True,
test_suite='nose.collector',
scripts=['bin/nova-api',
'bin/nova-compute',
'bin/nova-dhcpbridge',