Merge trunk.
.mailmap (7 changes)
@@ -24,7 +24,6 @@
 <todd@ansolabs.com> <todd@rubidine.com>
 <vishvananda@gmail.com> <vishvananda@yahoo.com>
 <vishvananda@gmail.com> <root@mirror.nasanebula.net>
-# These are from people who failed to set a proper committer
-. <root@tonbuntu>
-. <laner@controller>
-. <root@ubuntu>
+<vishvananda@gmail.com> <root@ubuntu>
+<sleepsonthefloor@gmail.com> <root@tonbuntu>
+<rlane@wikimedia.org> <laner@controller>
Authors (5 changes)
@@ -4,6 +4,7 @@ Anthony Young <sleepsonthefloor@gmail.com>
 Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
 Chris Behrens <cbehrens@codestud.com>
 Chmouel Boudjnah <chmouel@chmouel.com>
+David Pravec <David.Pravec@danix.org>
 Dean Troyer <dtroyer@gmail.com>
 Devin Carlen <devin.carlen@gmail.com>
 Ed Leafe <ed@leafe.com>
@@ -24,11 +25,15 @@ Michael Gundlach <michael.gundlach@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
 Paul Voccio <paul@openstack.org>
 Rick Clark <rick@openstack.org>
+Ryan Lane <rlane@wikimedia.org>
 Ryan Lucio <rlucio@internap.com>
+Salvatore Orlando <salvatore.orlando@eu.citrix.com>
 Sandy Walsh <sandy.walsh@rackspace.com>
 Soren Hansen <soren.hansen@rackspace.com>
+Thierry Carrez <thierry@openstack.org>
 Todd Willey <todd@ansolabs.com>
 Trey Morris <trey.morris@rackspace.com>
 Vishvananda Ishaya <vishvananda@gmail.com>
 Youcef Laribi <Youcef.Laribi@eu.citrix.com>
 Zhixue Wu <Zhixue.Wu@citrix.com>
@@ -16,16 +16,24 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-# ARG is the id of the user
-export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-intCA-$1"
-mkdir INTER/$1
-cd INTER/$1
+# $1 is the id of the project and $2 is the subject of the cert
+NAME=$1
+SUBJ=$2
+mkdir -p projects/$NAME
+cd projects/$NAME
 cp ../../openssl.cnf.tmpl openssl.cnf
-sed -i -e s/%USERNAME%/$1/g openssl.cnf
+sed -i -e s/%USERNAME%/$NAME/g openssl.cnf
 mkdir certs crl newcerts private
+openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes
 echo "10" > serial
 touch index.txt
-openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes
-openssl req -new -sha2 -key private/cakey.pem -out ../../reqs/inter$1.csr -batch -subj "$SUBJ"
-cd ../../
-openssl ca -extensions v3_ca -days 365 -out INTER/$1/cacert.pem -in reqs/inter$1.csr -config openssl.cnf -batch
+# NOTE(vish): Disabling intermediate ca's because we don't actually need them.
+# It makes more sense to have each project have its own root ca.
+# openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes
+# openssl req -new -sha256 -key private/cakey.pem -out ../../reqs/inter$NAME.csr -batch -subj "$SUBJ"
+openssl ca -gencrl -config ./openssl.cnf -out crl.pem
+if [ "`id -u`" != "`grep nova /etc/passwd | cut -d':' -f3`" ]; then
+  sudo chown -R nova:nogroup .
+fi
+# cd ../../
+# openssl ca -extensions v3_ca -days 365 -out INTER/$NAME/cacert.pem -in reqs/inter$NAME.csr -config openssl.cnf -batch
@@ -25,4 +25,5 @@ else
   openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes
   touch index.txt
   echo "10" > serial
+  openssl ca -gencrl -config ./openssl.cnf -out crl.pem
 fi
nova/tests/api_integration.py → CA/genvpn.sh (46 changes, Normal file → Executable file)
@@ -1,3 +1,4 @@
+#!/bin/bash
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright 2010 United States Government as represented by the
@@ -16,39 +17,20 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-
-import boto
-from boto.ec2.regioninfo import RegionInfo
-import unittest
-
-
-ACCESS_KEY = 'fake'
-SECRET_KEY = 'fake'
-CLC_IP = '127.0.0.1'
-CLC_PORT = 8773
-REGION = 'test'
-
-
-def get_connection():
-    return boto.connect_ec2(
-        aws_access_key_id=ACCESS_KEY,
-        aws_secret_access_key=SECRET_KEY,
-        is_secure=False,
-        region=RegionInfo(None, REGION, CLC_IP),
-        port=CLC_PORT,
-        path='/services/Cloud',
-        debug=99)
-
-
-class APIIntegrationTests(unittest.TestCase):
-    def test_001_get_all_images(self):
-        conn = get_connection()
-        res = conn.get_all_images()
-
-
-if __name__ == '__main__':
-    unittest.main()
-
-#print conn.get_all_key_pairs()
-#print conn.create_key_pair
-#print conn.create_security_group('name', 'description')
+# This gets zipped and run on the cloudpipe-managed OpenVPN server
+NAME=$1
+SUBJ=$2
+
+mkdir -p projects/$NAME
+cd projects/$NAME
+
+# generate a server priv key
+openssl genrsa -out server.key 2048
+
+# generate a server CSR
+openssl req -new -key server.key -out server.csr -batch -subj "$SUBJ"
+
+novauid=`getent passwd nova | awk -F: '{print $3}'`
+if [ ! -z "${novauid}" ] && [ "`id -u`" != "${novauid}" ]; then
+  sudo chown -R nova:nogroup .
+fi
@@ -24,7 +24,6 @@ dir = .
 
 [ ca ]
 default_ca = CA_default
-unique_subject = no
 
 [ CA_default ]
 serial = $dir/serial
@@ -32,6 +31,8 @@ database = $dir/index.txt
 new_certs_dir = $dir/newcerts
 certificate = $dir/cacert.pem
 private_key = $dir/private/cakey.pem
+unique_subject = no
+default_crl_days = 365
 default_days = 365
 default_md = md5
 preserve = no
@@ -22,6 +22,7 @@
 import eventlet
 eventlet.monkey_patch()
 
+import gettext
 import os
 import sys
 
@@ -33,6 +34,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+gettext.install('nova', unicode=1)
+
 from nova import api
 from nova import flags
 from nova import service
@@ -110,7 +110,6 @@ def main():
     FLAGS.num_networks = 5
     path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         '..',
-                                        '_trial_temp',
                                         'nova.sqlite'))
     FLAGS.sql_connection = 'sqlite:///%s' % path
     action = argv[1]
@@ -73,6 +73,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 gettext.install('nova', unicode=1)
 
 from nova import context
+from nova import crypto
 from nova import db
 from nova import exception
 from nova import flags
@@ -97,47 +98,43 @@ class VpnCommands(object):
         self.manager = manager.AuthManager()
         self.pipe = pipelib.CloudPipe()
 
-    def list(self):
-        """Print a listing of the VPNs for all projects."""
+    def list(self, project=None):
+        """Print a listing of the VPN data for one or all projects.
+
+        args: [project=all]"""
         print "%-12s\t" % 'project',
         print "%-20s\t" % 'ip:port',
+        print "%-20s\t" % 'private_ip',
         print "%s" % 'state'
-        for project in self.manager.get_projects():
+        if project:
+            projects = [self.manager.get_project(project)]
+        else:
+            projects = self.manager.get_projects()
+        # NOTE(vish): This hits the database a lot. We could optimize
+        #             by getting all networks in one query and all vpns
+        #             in aother query, then doing lookups by project
+        for project in projects:
             print "%-12s\t" % project.name,
-            try:
-                s = "%s:%s" % (project.vpn_ip, project.vpn_port)
-            except exception.NotFound:
-                s = "None"
-            print "%-20s\t" % s,
-
-            vpn = self._vpn_for(project.id)
+            ipport = "%s:%s" % (project.vpn_ip, project.vpn_port)
+            print "%-20s\t" % ipport,
+            ctxt = context.get_admin_context()
+            vpn = db.instance_get_project_vpn(ctxt, project.id)
             if vpn:
-                command = "ping -c1 -w1 %s > /dev/null; echo $?"
-                out, _err = utils.execute(command % vpn['private_dns_name'],
-                                          check_exit_code=False)
-                if out.strip() == '0':
-                    net = 'up'
-                else:
-                    net = 'down'
-                print vpn['private_dns_name'],
-                print vpn['node_name'],
-                print vpn['instance_id'],
+                address = None
+                state = 'down'
+                if vpn.get('fixed_ip', None):
+                    address = vpn['fixed_ip']['address']
+                if project.vpn_ip and utils.vpn_ping(project.vpn_ip,
+                                                     project.vpn_port):
+                    state = 'up'
+                print address,
+                print vpn['host'],
+                print vpn['ec2_id'],
                 print vpn['state_description'],
-                print net
+                print state
 
             else:
                 print None
 
-    def _vpn_for(self, project_id):
-        """Get the VPN instance for a project ID."""
-        for instance in db.instance_get_all(context.get_admin_context()):
-            if (instance['image_id'] == FLAGS.vpn_image_id
-                and not instance['state_description'] in
-                    ['shutting_down', 'shutdown']
-                and instance['project_id'] == project_id):
-                return instance
-
     def spawn(self):
         """Run all VPNs."""
         for p in reversed(self.manager.get_projects()):
@@ -150,6 +147,21 @@ class VpnCommands(object):
         """Start the VPN for a given project."""
         self.pipe.launch_vpn_instance(project_id)
 
+    def change(self, project_id, ip, port):
+        """Change the ip and port for a vpn.
+
+        args: project, ip, port"""
+        project = self.manager.get_project(project_id)
+        if not project:
+            print 'No project %s' % (project_id)
+            return
+        admin = context.get_admin_context()
+        network_ref = db.project_get_network(admin, project_id)
+        db.network_update(admin,
+                          network_ref['id'],
+                          {'vpn_public_address': ip,
+                           'vpn_public_port': int(port)})
+
 
 class ShellCommands(object):
     def bpython(self):
@@ -296,6 +308,14 @@ class UserCommands(object):
             is_admin = False
         self.manager.modify_user(name, access_key, secret_key, is_admin)
 
+    def revoke(self, user_id, project_id=None):
+        """revoke certs for a user
+        arguments: user_id [project_id]"""
+        if project_id:
+            crypto.revoke_certs_by_user_and_project(user_id, project_id)
+        else:
+            crypto.revoke_certs_by_user(user_id)
+
 
 class ProjectCommands(object):
     """Class for managing projects."""
@@ -37,7 +37,6 @@ class DbDriver(object):
     def __init__(self):
         """Imports the LDAP module"""
         pass
-        db
 
     def __enter__(self):
         return self
@@ -83,7 +82,7 @@ class DbDriver(object):
             user_ref = db.user_create(context.get_admin_context(), values)
             return self._db_user_to_auth_user(user_ref)
         except exception.Duplicate, e:
-            raise exception.Duplicate('User %s already exists' % name)
+            raise exception.Duplicate(_('User %s already exists') % name)
 
     def _db_user_to_auth_user(self, user_ref):
         return {'id': user_ref['id'],
@@ -105,8 +104,9 @@ class DbDriver(object):
         """Create a project"""
         manager = db.user_get(context.get_admin_context(), manager_uid)
         if not manager:
-            raise exception.NotFound("Project can't be created because "
-                                     "manager %s doesn't exist" % manager_uid)
+            raise exception.NotFound(_("Project can't be created because "
+                                       "manager %s doesn't exist")
+                                     % manager_uid)
 
         # description is a required attribute
         if description is None:
@@ -133,8 +133,8 @@ class DbDriver(object):
         try:
             project = db.project_create(context.get_admin_context(), values)
         except exception.Duplicate:
-            raise exception.Duplicate("Project can't be created because "
-                                      "project %s already exists" % name)
+            raise exception.Duplicate(_("Project can't be created because "
+                                        "project %s already exists") % name)
 
         for member in members:
             db.project_add_member(context.get_admin_context(),
@@ -155,8 +155,8 @@ class DbDriver(object):
         if manager_uid:
             manager = db.user_get(context.get_admin_context(), manager_uid)
             if not manager:
-                raise exception.NotFound("Project can't be modified because "
-                                         "manager %s doesn't exist" %
+                raise exception.NotFound(_("Project can't be modified because "
+                                           "manager %s doesn't exist") %
                                          manager_uid)
             values['project_manager'] = manager['id']
         if description:
@@ -243,8 +243,8 @@ class DbDriver(object):
     def _validate_user_and_project(self, user_id, project_id):
         user = db.user_get(context.get_admin_context(), user_id)
         if not user:
-            raise exception.NotFound('User "%s" not found' % user_id)
+            raise exception.NotFound(_('User "%s" not found') % user_id)
         project = db.project_get(context.get_admin_context(), project_id)
         if not project:
-            raise exception.NotFound('Project "%s" not found' % project_id)
+            raise exception.NotFound(_('Project "%s" not found') % project_id)
         return user, project
@@ -30,7 +30,7 @@ import json
 class Store(object):
     def __init__(self):
         if hasattr(self.__class__, '_instance'):
-            raise Exception('Attempted to instantiate singleton')
+            raise Exception(_('Attempted to instantiate singleton'))
 
     @classmethod
     def instance(cls):
@@ -150,6 +150,9 @@ def _match(key, value, attrs):
     """Match a given key and value against an attribute list."""
     if key not in attrs:
         return False
+    # This is a wild card search. Implemented as all or nothing for now.
+    if value == "*":
+        return True
     if key != "objectclass":
         return value in attrs[key]
     # it is an objectclass check, so check subclasses
@@ -32,11 +32,16 @@ from nova import flags
 
 
 FLAGS = flags.FLAGS
+flags.DEFINE_integer('ldap_schema_version', 2,
+                     'Current version of the LDAP schema')
 flags.DEFINE_string('ldap_url', 'ldap://localhost',
                     'Point this at your ldap server')
 flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
 flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
                     'DN of admin user')
+flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id')
+flags.DEFINE_string('ldap_user_name_attribute', 'cn',
+                    'Attribute to use as name')
 flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
 flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
                     'OU for Users')
@@ -73,10 +78,20 @@ class LdapDriver(object):
     Defines enter and exit and therefore supports the with/as syntax.
     """
 
+    project_pattern = '(owner=*)'
+    isadmin_attribute = 'isNovaAdmin'
+    project_attribute = 'owner'
+    project_objectclass = 'groupOfNames'
+
     def __init__(self):
         """Imports the LDAP module"""
         self.ldap = __import__('ldap')
         self.conn = None
+        if FLAGS.ldap_schema_version == 1:
+            LdapDriver.project_pattern = '(objectclass=novaProject)'
+            LdapDriver.isadmin_attribute = 'isAdmin'
+            LdapDriver.project_attribute = 'projectManager'
+            LdapDriver.project_objectclass = 'novaProject'
 
     def __enter__(self):
         """Creates the connection to LDAP"""
@@ -104,13 +119,13 @@ class LdapDriver(object):
         """Retrieve project by id"""
         dn = 'cn=%s,%s' % (pid,
                            FLAGS.ldap_project_subtree)
-        attr = self.__find_object(dn, '(objectclass=novaProject)')
+        attr = self.__find_object(dn, LdapDriver.project_pattern)
         return self.__to_project(attr)
 
     def get_users(self):
         """Retrieve list of users"""
         attrs = self.__find_objects(FLAGS.ldap_user_subtree,
                                     '(objectclass=novaUser)')
         users = []
         for attr in attrs:
             user = self.__to_user(attr)
@@ -120,7 +135,7 @@ class LdapDriver(object):
 
     def get_projects(self, uid=None):
         """Retrieve list of projects"""
-        pattern = '(objectclass=novaProject)'
+        pattern = LdapDriver.project_pattern
         if uid:
             pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid))
         attrs = self.__find_objects(FLAGS.ldap_project_subtree,
@@ -139,27 +154,29 @@ class LdapDriver(object):
                 # Malformed entries are useless, replace attributes found.
                 attr = []
                 if 'secretKey' in user.keys():
-                    attr.append((self.ldap.MOD_REPLACE, 'secretKey', \
+                    attr.append((self.ldap.MOD_REPLACE, 'secretKey',
                                  [secret_key]))
                 else:
-                    attr.append((self.ldap.MOD_ADD, 'secretKey', \
+                    attr.append((self.ldap.MOD_ADD, 'secretKey',
                                  [secret_key]))
                 if 'accessKey' in user.keys():
-                    attr.append((self.ldap.MOD_REPLACE, 'accessKey', \
+                    attr.append((self.ldap.MOD_REPLACE, 'accessKey',
                                  [access_key]))
                 else:
-                    attr.append((self.ldap.MOD_ADD, 'accessKey', \
+                    attr.append((self.ldap.MOD_ADD, 'accessKey',
                                  [access_key]))
-                if 'isAdmin' in user.keys():
-                    attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \
-                                 [str(is_admin).upper()]))
+                if LdapDriver.isadmin_attribute in user.keys():
+                    attr.append((self.ldap.MOD_REPLACE,
+                                 LdapDriver.isadmin_attribute,
+                                 [str(is_admin).upper()]))
                 else:
-                    attr.append((self.ldap.MOD_ADD, 'isAdmin', \
-                                 [str(is_admin).upper()]))
+                    attr.append((self.ldap.MOD_ADD,
+                                 LdapDriver.isadmin_attribute,
+                                 [str(is_admin).upper()]))
                 self.conn.modify_s(self.__uid_to_dn(name), attr)
                 return self.get_user(name)
             else:
-                raise exception.NotFound("LDAP object for %s doesn't exist"
+                raise exception.NotFound(_("LDAP object for %s doesn't exist")
                                          % name)
         else:
             attr = [
@@ -168,12 +185,12 @@ class LdapDriver(object):
                                  'inetOrgPerson',
                                  'novaUser']),
                 ('ou', [FLAGS.ldap_user_unit]),
-                ('uid', [name]),
+                (FLAGS.ldap_user_id_attribute, [name]),
                 ('sn', [name]),
-                ('cn', [name]),
+                (FLAGS.ldap_user_name_attribute, [name]),
                 ('secretKey', [secret_key]),
                 ('accessKey', [access_key]),
-                ('isAdmin', [str(is_admin).upper()]),
+                (LdapDriver.isadmin_attribute, [str(is_admin).upper()]),
             ]
             self.conn.add_s(self.__uid_to_dn(name), attr)
             return self.__to_user(dict(attr))
@@ -182,11 +199,12 @@ class LdapDriver(object):
                        description=None, member_uids=None):
         """Create a project"""
         if self.__project_exists(name):
-            raise exception.Duplicate("Project can't be created because "
-                                      "project %s already exists" % name)
+            raise exception.Duplicate(_("Project can't be created because "
+                                        "project %s already exists") % name)
         if not self.__user_exists(manager_uid):
-            raise exception.NotFound("Project can't be created because "
-                                     "manager %s doesn't exist" % manager_uid)
+            raise exception.NotFound(_("Project can't be created because "
+                                       "manager %s doesn't exist")
+                                     % manager_uid)
         manager_dn = self.__uid_to_dn(manager_uid)
         # description is a required attribute
         if description is None:
@@ -195,18 +213,18 @@ class LdapDriver(object):
         if member_uids is not None:
             for member_uid in member_uids:
                 if not self.__user_exists(member_uid):
-                    raise exception.NotFound("Project can't be created "
-                                             "because user %s doesn't exist"
+                    raise exception.NotFound(_("Project can't be created "
+                                               "because user %s doesn't exist")
                                              % member_uid)
                 members.append(self.__uid_to_dn(member_uid))
         # always add the manager as a member because members is required
         if not manager_dn in members:
             members.append(manager_dn)
         attr = [
-            ('objectclass', ['novaProject']),
+            ('objectclass', [LdapDriver.project_objectclass]),
             ('cn', [name]),
             ('description', [description]),
-            ('projectManager', [manager_dn]),
+            (LdapDriver.project_attribute, [manager_dn]),
             ('member', members)]
         self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
         return self.__to_project(dict(attr))
@@ -218,11 +236,12 @@ class LdapDriver(object):
         attr = []
         if manager_uid:
             if not self.__user_exists(manager_uid):
-                raise exception.NotFound("Project can't be modified because "
-                                         "manager %s doesn't exist" %
-                                         manager_uid)
+                raise exception.NotFound(_("Project can't be modified because "
+                                           "manager %s doesn't exist")
+                                         % manager_uid)
             manager_dn = self.__uid_to_dn(manager_uid)
-            attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn))
+            attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute,
+                         manager_dn))
         if description:
             attr.append((self.ldap.MOD_REPLACE, 'description', description))
         self.conn.modify_s('cn=%s,%s' % (project_id,
@@ -282,10 +301,9 @@ class LdapDriver(object):
             return roles
         else:
             project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
-            roles = self.__find_objects(project_dn,
-                                        '(&(&(objectclass=groupOfNames)'
-                                        '(!(objectclass=novaProject)))'
-                                        '(member=%s))' % self.__uid_to_dn(uid))
+            query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
+                     (LdapDriver.project_pattern, self.__uid_to_dn(uid)))
+            roles = self.__find_objects(project_dn, query)
             return [role['cn'][0] for role in roles]
 
     def delete_user(self, uid):
@@ -299,14 +317,15 @@ class LdapDriver(object):
             # Retrieve user by name
             user = self.__get_ldap_user(uid)
             if 'secretKey' in user.keys():
-                attr.append((self.ldap.MOD_DELETE, 'secretKey', \
+                attr.append((self.ldap.MOD_DELETE, 'secretKey',
                              user['secretKey']))
             if 'accessKey' in user.keys():
-                attr.append((self.ldap.MOD_DELETE, 'accessKey', \
+                attr.append((self.ldap.MOD_DELETE, 'accessKey',
                              user['accessKey']))
-            if 'isAdmin' in user.keys():
-                attr.append((self.ldap.MOD_DELETE, 'isAdmin', \
-                             user['isAdmin']))
+            if LdapDriver.isadmin_attribute in user.keys():
+                attr.append((self.ldap.MOD_DELETE,
+                             LdapDriver.isadmin_attribute,
+                             user[LdapDriver.isadmin_attribute]))
             self.conn.modify_s(self.__uid_to_dn(uid), attr)
         else:
             # Delete entry
@@ -328,7 +347,8 @@ class LdapDriver(object):
         if secret_key:
             attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key))
         if admin is not None:
-            attr.append((self.ldap.MOD_REPLACE, 'isAdmin', str(admin).upper()))
+            attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute,
+                         str(admin).upper()))
         self.conn.modify_s(self.__uid_to_dn(uid), attr)
 
     def __user_exists(self, uid):
@@ -346,7 +366,7 @@ class LdapDriver(object):
     def __get_ldap_user(self, uid):
         """Retrieve LDAP user entry by id"""
         attr = self.__find_object(self.__uid_to_dn(uid),
                                   '(objectclass=novaUser)')
         return attr
 
     def __find_object(self, dn, query=None, scope=None):
@@ -382,19 +402,21 @@ class LdapDriver(object):
 
     def __find_role_dns(self, tree):
         """Find dns of role objects in given tree"""
-        return self.__find_dns(tree,
-            '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
+        query = ('(&(objectclass=groupOfNames)(!%s))' %
+                 LdapDriver.project_pattern)
+        return self.__find_dns(tree, query)
 
     def __find_group_dns_with_member(self, tree, uid):
         """Find dns of group objects in a given tree that contain member"""
-        dns = self.__find_dns(tree,
-                              '(&(objectclass=groupOfNames)(member=%s))' %
-                              self.__uid_to_dn(uid))
+        query = ('(&(objectclass=groupOfNames)(member=%s))' %
+                 self.__uid_to_dn(uid))
+        dns = self.__find_dns(tree, query)
         return dns
 
     def __group_exists(self, dn):
         """Check if group exists"""
-        return self.__find_object(dn, '(objectclass=groupOfNames)') is not None
+        query = '(objectclass=groupOfNames)'
+        return self.__find_object(dn, query) is not None
 
     @staticmethod
     def __role_to_dn(role, project_id=None):
@@ -417,7 +439,8 @@ class LdapDriver(object):
             for member_uid in member_uids:
                 if not self.__user_exists(member_uid):
                     raise exception.NotFound("Group can't be created "
-                                             "because user %s doesn't exist" % member_uid)
+                                             "because user %s doesn't exist" %
+                                             member_uid)
                 members.append(self.__uid_to_dn(member_uid))
         dn = self.__uid_to_dn(uid)
         if not dn in members:
@@ -433,7 +456,7 @@ class LdapDriver(object):
         """Check if user is in group"""
         if not self.__user_exists(uid):
             raise exception.NotFound("User %s can't be searched in group "
-                                     "becuase the user doesn't exist" % (uid,))
+                                     "because the user doesn't exist" % uid)
         if not self.__group_exists(group_dn):
             return False
         res = self.__find_object(group_dn,
@@ -445,13 +468,13 @@ class LdapDriver(object):
         """Add user to group"""
         if not self.__user_exists(uid):
             raise exception.NotFound("User %s can't be added to the group "
-                                     "becuase the user doesn't exist" % (uid,))
+                                     "because the user doesn't exist" % uid)
         if not self.__group_exists(group_dn):
             raise exception.NotFound("The group at dn %s doesn't exist" %
-                                     (group_dn,))
+                                     group_dn)
         if self.__is_in_group(uid, group_dn):
-            raise exception.Duplicate("User %s is already a member of "
-                                      "the group %s" % (uid, group_dn))
+            raise exception.Duplicate(_("User %s is already a member of "
+                                        "the group %s") % (uid, group_dn))
         attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
         self.conn.modify_s(group_dn, attr)
 
@@ -459,16 +482,16 @@ class LdapDriver(object):
         """Remove user from group"""
         if not self.__group_exists(group_dn):
             raise exception.NotFound("The group at dn %s doesn't exist" %
-                                     (group_dn,))
+                                     group_dn)
         if not self.__user_exists(uid):
             raise exception.NotFound("User %s can't be removed from the "
-                                     "group because the user doesn't exist" % (uid,))
+                                     "group because the user doesn't exist" %
+                                     uid)
         if not self.__is_in_group(uid, group_dn):
             raise exception.NotFound("User %s is not a member of the group" %
-                                     (uid,))
+                                     uid)
         # NOTE(vish): remove user from group and any sub_groups
-        sub_dns = self.__find_group_dns_with_member(
-            group_dn, uid)
+        sub_dns = self.__find_group_dns_with_member(group_dn, uid)
         for sub_dn in sub_dns:
             self.__safe_remove_from_group(uid, sub_dn)
 
@@ -479,15 +502,15 @@ class LdapDriver(object):
         try:
             self.conn.modify_s(group_dn, attr)
         except self.ldap.OBJECT_CLASS_VIOLATION:
-            logging.debug("Attempted to remove the last member of a group. "
-                          "Deleting the group at %s instead.", group_dn)
+            logging.debug(_("Attempted to remove the last member of a group. "
+                            "Deleting the group at %s instead."), group_dn)
             self.__delete_group(group_dn)
 
     def __remove_from_all(self, uid):
         """Remove user from all roles and projects"""
         if not self.__user_exists(uid):
             raise exception.NotFound("User %s can't be removed from all "
-                                     "because the user doesn't exist" % (uid,))
+                                     "because the user doesn't exist" % uid)
         role_dns = self.__find_group_dns_with_member(
             FLAGS.role_project_subtree, uid)
         for role_dn in role_dns:
@@ -500,7 +523,8 @@ class LdapDriver(object):
     def __delete_group(self, group_dn):
         """Delete Group"""
         if not self.__group_exists(group_dn):
-            raise exception.NotFound("Group at dn %s doesn't exist" % group_dn)
+            raise exception.NotFound(_("Group at dn %s doesn't exist")
+                                     % group_dn)
         self.conn.delete_s(group_dn)
 
     def __delete_roles(self, project_dn):
@@ -514,13 +538,13 @@ class LdapDriver(object):
         if attr is None:
             return None
         if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \
-            and 'isAdmin' in attr.keys()):
+            and LdapDriver.isadmin_attribute in attr.keys()):
             return {
-                'id': attr['uid'][0],
-                'name': attr['cn'][0],
+                'id': attr[FLAGS.ldap_user_id_attribute][0],
+                'name': attr[FLAGS.ldap_user_name_attribute][0],
                 'access': attr['accessKey'][0],
                 'secret': attr['secretKey'][0],
-                'admin': (attr['isAdmin'][0] == 'TRUE')}
+                'admin': (attr[LdapDriver.isadmin_attribute][0] == 'TRUE')}
         else:
             return None
 
@@ -532,7 +556,8 @@ class LdapDriver(object):
         return {
             'id': attr['cn'][0],
             'name': attr['cn'][0],
-            'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]),
+            'project_manager_id':
+                self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
             'description': attr.get('description', [None])[0],
             'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
 
@@ -542,9 +567,10 @@ class LdapDriver(object):
         return dn.split(',')[0].split('=')[1]
 
     @staticmethod
-    def __uid_to_dn(dn):
+    def __uid_to_dn(uid):
         """Convert uid to dn"""
-        return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree)
+        return (FLAGS.ldap_user_id_attribute + '=%s,%s'
+                % (uid, FLAGS.ldap_user_subtree))
 
 
 class FakeLdapDriver(LdapDriver):
@@ -64,12 +64,9 @@ flags.DEFINE_string('credential_key_file', 'pk.pem',
                     'Filename of private key in credentials zip')
 flags.DEFINE_string('credential_cert_file', 'cert.pem',
                     'Filename of certificate in credentials zip')
-flags.DEFINE_string('credential_rc_file', 'novarc',
-                    'Filename of rc in credentials zip')
-flags.DEFINE_string('credential_cert_subject',
-                    '/C=US/ST=California/L=MountainView/O=AnsoLabs/'
-                    'OU=NovaDev/CN=%s-%s',
-                    'Subject for certificate for users')
+flags.DEFINE_string('credential_rc_file', '%src',
+                    'Filename of rc in credentials zip, %s will be '
+                    'replaced by name of the region (nova by default)')
 flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
                     'Driver that auth manager uses')
 
@@ -257,12 +254,12 @@ class AuthManager(object):
         # TODO(vish): check for valid timestamp
         (access_key, _sep, project_id) = access.partition(':')
 
-        logging.info('Looking up user: %r', access_key)
+        logging.info(_('Looking up user: %r'), access_key)
         user = self.get_user_from_access_key(access_key)
         logging.info('user: %r', user)
         if user == None:
-            raise exception.NotFound('No user found for access key %s' %
-                                     access_key)
+            raise exception.NotFound(_('No user found for access key %s')
+                                     % access_key)
 
         # NOTE(vish): if we stop using project name as id we need better
         #             logic to find a default project for user
@@ -271,12 +268,12 @@ class AuthManager(object):
 
         project = self.get_project(project_id)
         if project == None:
-            raise exception.NotFound('No project called %s could be found' %
-                                     project_id)
+            raise exception.NotFound(_('No project called %s could be found')
+                                     % project_id)
         if not self.is_admin(user) and not self.is_project_member(user,
                                                                    project):
-            raise exception.NotFound('User %s is not a member of project %s' %
-                                     (user.id, project.id))
+            raise exception.NotFound(_('User %s is not a member of project %s')
+                                     % (user.id, project.id))
         if check_type == 's3':
             sign = signer.Signer(user.secret.encode())
             expected_signature = sign.s3_authorization(headers, verb, path)
@@ -284,7 +281,7 @@ class AuthManager(object):
             logging.debug('expected_signature: %s', expected_signature)
             logging.debug('signature: %s', signature)
             if signature != expected_signature:
-                raise exception.NotAuthorized('Signature does not match')
+                raise exception.NotAuthorized(_('Signature does not match'))
         elif check_type == 'ec2':
             # NOTE(vish): hmac can't handle unicode, so encode ensures that
             #             secret isn't unicode
@@ -294,7 +291,7 @@ class AuthManager(object):
             logging.debug('expected_signature: %s', expected_signature)
             logging.debug('signature: %s', signature)
             if signature != expected_signature:
-                raise exception.NotAuthorized('Signature does not match')
+                raise exception.NotAuthorized(_('Signature does not match'))
         return (user, project)
 
     def get_access_key(self, user, project):
@@ -364,7 +361,7 @@ class AuthManager(object):
         with self.driver() as drv:
             if role == 'projectmanager':
                 if not project:
-                    raise exception.Error("Must specify project")
+                    raise exception.Error(_("Must specify project"))
                 return self.is_project_manager(user, project)
 
             global_role = drv.has_role(User.safe_id(user),
@@ -398,9 +395,9 @@ class AuthManager(object):
         @param project: Project in which to add local role.
         """
         if role not in FLAGS.allowed_roles:
-            raise exception.NotFound("The %s role can not be found" % role)
+            raise exception.NotFound(_("The %s role can not be found") % role)
         if project is not None and role in FLAGS.global_roles:
-            raise exception.NotFound("The %s role is global only" % role)
+            raise exception.NotFound(_("The %s role is global only") % role)
         with self.driver() as drv:
             drv.add_role(User.safe_id(user), role, Project.safe_id(project))
 
@@ -543,10 +540,10 @@ class AuthManager(object):
         """
 
         network_ref = db.project_get_network(context.get_admin_context(),
-                                             Project.safe_id(project))
+                                             Project.safe_id(project), False)
 
-        if not network_ref['vpn_public_port']:
-            raise exception.NotFound('project network data has not been set')
+        if not network_ref:
+            return (None, None)
         return (network_ref['vpn_public_address'],
                 network_ref['vpn_public_port'])
 
@@ -628,27 +625,37 @@ class AuthManager(object):
     def get_key_pairs(context):
         return db.key_pair_get_all_by_user(context.elevated(), context.user_id)
 
-    def get_credentials(self, user, project=None):
+    def get_credentials(self, user, project=None, use_dmz=True):
        """Get credential zip for user in project"""
        if not isinstance(user, User):
            user = self.get_user(user)
        if project is None:
            project = user.id
        pid = Project.safe_id(project)
-        rc = self.__generate_rc(user.access, user.secret, pid)
-        private_key, signed_cert = self._generate_x509_cert(user.id, pid)
+        private_key, signed_cert = crypto.generate_x509_cert(user.id, pid)
 
        tmpdir = tempfile.mkdtemp()
        zf = os.path.join(tmpdir, "temp.zip")
        zippy = zipfile.ZipFile(zf, 'w')
-        zippy.writestr(FLAGS.credential_rc_file, rc)
+        if use_dmz and FLAGS.region_list:
+            regions = {}
+            for item in FLAGS.region_list:
+                region, _sep, region_host = item.partition("=")
+                regions[region] = region_host
+        else:
+            regions = {'nova': FLAGS.cc_host}
+        for region, host in regions.iteritems():
+            rc = self.__generate_rc(user.access,
+                                    user.secret,
+                                    pid,
+                                    use_dmz,
+                                    host)
+            zippy.writestr(FLAGS.credential_rc_file % region, rc)
+
        zippy.writestr(FLAGS.credential_key_file, private_key)
        zippy.writestr(FLAGS.credential_cert_file, signed_cert)
 
-        try:
-            (vpn_ip, vpn_port) = self.get_project_vpn_data(project)
-        except exception.NotFound:
-            vpn_ip = None
+        (vpn_ip, vpn_port) = self.get_project_vpn_data(project)
        if vpn_ip:
            configfile = open(FLAGS.vpn_client_template, "r")
            s = string.Template(configfile.read())
@@ -659,10 +666,9 @@ class AuthManager(object):
                                   port=vpn_port)
             zippy.writestr(FLAGS.credential_vpn_file, config)
         else:
-            logging.warn("No vpn data for project %s" %
-                         pid)
+            logging.warn(_("No vpn data for project %s"), pid)
 
-        zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
+        zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
         zippy.close()
         with open(zf, 'rb') as f:
             read_buffer = f.read()
@@ -670,38 +676,38 @@ class AuthManager(object):
         shutil.rmtree(tmpdir)
         return read_buffer
 
-    def get_environment_rc(self, user, project=None):
+    def get_environment_rc(self, user, project=None, use_dmz=True):
         """Get credential zip for user in project"""
         if not isinstance(user, User):
             user = self.get_user(user)
         if project is None:
             project = user.id
         pid = Project.safe_id(project)
-        return self.__generate_rc(user.access, user.secret, pid)
+        return self.__generate_rc(user.access, user.secret, pid, use_dmz)
 
     @staticmethod
-    def __generate_rc(access, secret, pid):
+    def __generate_rc(access, secret, pid, use_dmz=True, host=None):
         """Generate rc file for user"""
+        if use_dmz:
+            cc_host = FLAGS.cc_dmz
+        else:
+            cc_host = FLAGS.cc_host
+        # NOTE(vish): Always use the dmz since it is used from inside the
+        #             instance
+        s3_host = FLAGS.s3_dmz
+        if host:
+            s3_host = host
+            cc_host = host
         rc = open(FLAGS.credentials_template).read()
         rc = rc % {'access': access,
                    'project': pid,
                    'secret': secret,
-                   'ec2': FLAGS.ec2_url,
-                   's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
+                   'ec2': '%s://%s:%s%s' % (FLAGS.ec2_prefix,
                                            cc_host,
                                            FLAGS.cc_port,
                                            FLAGS.ec2_suffix),
+                   's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port),
                    'nova': FLAGS.ca_file,
                    'cert': FLAGS.credential_cert_file,
                    'key': FLAGS.credential_key_file}
         return rc
 
-    def _generate_x509_cert(self, uid, pid):
-        """Generate x509 cert for user"""
-        (private_key, csr) = crypto.generate_x509_cert(
-            self.__cert_subject(uid))
-        # TODO(joshua): This should be async call back to the cloud controller
-        signed_cert = crypto.sign_csr(csr, pid)
-        return (private_key, signed_cert)
-
-    @staticmethod
-    def __cert_subject(uid):
-        """Helper to generate cert subject"""
-        return FLAGS.credential_cert_subject % (uid, utils.isotime())
@@ -1,7 +1,9 @@
 #
 # Person object for Nova
 # inetorgperson with extra attributes
-# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
+# Schema version: 2
+# Authors: Vishvananda Ishaya <vishvananda@gmail.com>
+#          Ryan Lane <rlane@wikimedia.org>
 #
 #
 
@@ -30,55 +32,19 @@ attributetype (
     SINGLE-VALUE
     )
 
-attributetype (
-    novaAttrs:3
-    NAME 'keyFingerprint'
-    DESC 'Fingerprint of private key'
-    EQUALITY caseIgnoreMatch
-    SUBSTR caseIgnoreSubstringsMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
-    SINGLE-VALUE
-    )
-
 attributetype (
     novaAttrs:4
-    NAME 'isAdmin'
-    DESC 'Is user an administrator?'
+    NAME 'isNovaAdmin'
+    DESC 'Is user an nova administrator?'
     EQUALITY booleanMatch
     SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
     SINGLE-VALUE
     )
 
-attributetype (
-    novaAttrs:5
-    NAME 'projectManager'
-    DESC 'Project Managers of a project'
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
-    )
-
 objectClass (
     novaOCs:1
     NAME 'novaUser'
     DESC 'access and secret keys'
     AUXILIARY
-    MUST ( uid )
-    MAY ( accessKey $ secretKey $ isAdmin )
-    )
-
-objectClass (
-    novaOCs:2
-    NAME 'novaKeyPair'
-    DESC 'Key pair for User'
-    SUP top
-    STRUCTURAL
-    MUST ( cn $ sshPublicKey $ keyFingerprint )
-    )
-
-objectClass (
-    novaOCs:3
-    NAME 'novaProject'
-    DESC 'Container for project'
-    SUP groupOfNames
-    STRUCTURAL
-    MUST ( cn $ projectManager )
+    MAY ( accessKey $ secretKey $ isNovaAdmin )
     )
@@ -1,16 +1,13 @@
 #
 # Person object for Nova
 # inetorgperson with extra attributes
-# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
-# Modified for strict RFC 4512 compatibility by: Ryan Lane <ryan@ryandlane.com>
+# Schema version: 2
+# Authors: Vishvananda Ishaya <vishvananda@gmail.com>
+#          Ryan Lane <rlane@wikimedia.org>
 #
 # using internet experimental oid arc as per BP64 3.1
 dn: cn=schema
 attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
 attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
-attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE)
-attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
-attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )
-objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) )
-objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) )
-objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) )
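Both schema variants now define a single auxiliary novaUser class that hangs accessKey, secretKey, and isNovaAdmin off an existing person entry. As a rough illustration of how an entry could use it, here is a hedged python-ldap sketch; the DIT layout, bind credentials, and attribute values are assumptions made up for the example, not Nova defaults.

import ldap
import ldap.modlist

# Assumed server location and admin DN, for illustration only.
conn = ldap.initialize('ldap://localhost')
conn.simple_bind_s('cn=admin,dc=example,dc=com', 'changeme')

attrs = {
    'objectClass': ['inetOrgPerson', 'novaUser'],   # novaUser is AUXILIARY
    'uid': ['alice'],
    'cn': ['alice'],
    'sn': ['alice'],
    'accessKey': ['deadbeef'],
    'secretKey': ['cafef00d'],
    'isNovaAdmin': ['TRUE'],                        # boolean syntax expects TRUE/FALSE
}
conn.add_s('uid=alice,ou=Users,dc=example,dc=com', ldap.modlist.addModlist(attrs))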
@@ -32,7 +32,6 @@ abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
 schemapath='/var/opendj/instance/config/schema'
 cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif
 cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif
-chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif
 chown opendj:opendj $schemapath/98-nova_sun.ldif

 cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
@@ -22,7 +22,7 @@ apt-get install -y slapd ldap-utils python-ldap

 abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
 cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema
-cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema
+cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema

 mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
 cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
@@ -33,7 +33,6 @@ cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
 include /etc/ldap/schema/core.schema
 include /etc/ldap/schema/cosine.schema
 include /etc/ldap/schema/inetorgperson.schema
-include /etc/ldap/schema/openssh-lpk_openldap.schema
 include /etc/ldap/schema/nova.schema
 pidfile /var/run/slapd/slapd.pid
 argsfile /var/run/slapd/slapd.args
59 nova/fakememcache.py Normal file
@@ -0,0 +1,59 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Super simple fake memcache client."""
+
+import utils
+
+
+class Client(object):
+    """Replicates a tiny subset of memcached client interface."""
+
+    def __init__(self, *args, **kwargs):
+        """Ignores the passed in args"""
+        self.cache = {}
+
+    def get(self, key):
+        """Retrieves the value for a key or None."""
+        (timeout, value) = self.cache.get(key, (0, None))
+        if timeout == 0 or utils.utcnow_ts() < timeout:
+            return value
+        return None
+
+    def set(self, key, value, time=0, min_compress_len=0):
+        """Sets the value for a key."""
+        timeout = 0
+        if time != 0:
+            timeout = utils.utcnow_ts() + time
+        self.cache[key] = (timeout, value)
+        return True
+
+    def add(self, key, value, time=0, min_compress_len=0):
+        """Sets the value for a key if it doesn't exist."""
+        if not self.get(key) is None:
+            return False
+        return self.set(key, value, time, min_compress_len)
+
+    def incr(self, key, delta=1):
+        """Increments the value for a key."""
+        value = self.get(key)
+        if value is None:
+            return None
+        new_value = int(value) + delta
+        self.cache[key] = (self.cache[key][0], str(new_value))
+        return new_value
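A short usage sketch of the fake client added above. It assumes the module is importable as nova.fakememcache and that nova.utils provides the utcnow_ts() helper the module imports; keys and TTLs are arbitrary.

from nova import fakememcache

cache = fakememcache.Client()
cache.set('token', '42', time=30)           # expires 30 seconds from now
assert cache.get('token') == '42'
assert cache.add('token', '99') is False    # add() refuses to overwrite a live key
assert cache.incr('token', delta=1) == 43   # stored back as a string, like memcached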
@@ -25,6 +25,10 @@ from carrot.backends import base
 from eventlet import greenthread


+EXCHANGES = {}
+QUEUES = {}


 class Message(base.BaseMessage):
     pass

@@ -37,12 +41,12 @@ class Exchange(object):
         self._routes = {}

     def publish(self, message, routing_key=None):
-        logging.debug('(%s) publish (key: %s) %s',
+        logging.debug(_('(%s) publish (key: %s) %s'),
                       self.name, routing_key, message)
         routing_key = routing_key.split('.')[0]
         if routing_key in self._routes:
             for f in self._routes[routing_key]:
-                logging.debug('Publishing to route %s', f)
+                logging.debug(_('Publishing to route %s'), f)
                 f(message, routing_key=routing_key)

     def bind(self, callback, routing_key):
@@ -68,81 +72,63 @@ class Queue(object):
         return self._queue.get()


-class Backend(object):
-    """ Singleton backend for testing """
-    class __impl(base.BaseBackend):
-        def __init__(self, *args, **kwargs):
-            #super(__impl, self).__init__(*args, **kwargs)
-            self._exchanges = {}
-            self._queues = {}
-
-        def _reset_all(self):
-            self._exchanges = {}
-            self._queues = {}
-
-        def queue_declare(self, queue, **kwargs):
-            if queue not in self._queues:
-                logging.debug('Declaring queue %s', queue)
-                self._queues[queue] = Queue(queue)
-
-        def exchange_declare(self, exchange, type, *args, **kwargs):
-            if exchange not in self._exchanges:
-                logging.debug('Declaring exchange %s', exchange)
-                self._exchanges[exchange] = Exchange(exchange, type)
-
-        def queue_bind(self, queue, exchange, routing_key, **kwargs):
-            logging.debug('Binding %s to %s with key %s',
-                          queue, exchange, routing_key)
-            self._exchanges[exchange].bind(self._queues[queue].push,
-                                           routing_key)
-
-        def declare_consumer(self, queue, callback, *args, **kwargs):
-            self.current_queue = queue
-            self.current_callback = callback
-
-        def consume(self, *args, **kwargs):
-            while True:
-                item = self.get(self.current_queue)
-                if item:
-                    self.current_callback(item)
-                    raise StopIteration()
-                greenthread.sleep(0)
-
-        def get(self, queue, no_ack=False):
-            if not queue in self._queues or not self._queues[queue].size():
-                return None
-            (message_data, content_type, content_encoding) = \
-                    self._queues[queue].pop()
-            message = Message(backend=self, body=message_data,
-                              content_type=content_type,
-                              content_encoding=content_encoding)
-            message.result = True
-            logging.debug('Getting from %s: %s', queue, message)
-            return message
-
-        def prepare_message(self, message_data, delivery_mode,
-                            content_type, content_encoding, **kwargs):
-            """Prepare message for sending."""
-            return (message_data, content_type, content_encoding)
-
-        def publish(self, message, exchange, routing_key, **kwargs):
-            if exchange in self._exchanges:
-                self._exchanges[exchange].publish(
-                        message, routing_key=routing_key)
-
-    __instance = None
-
-    def __init__(self, *args, **kwargs):
-        if Backend.__instance is None:
-            Backend.__instance = Backend.__impl(*args, **kwargs)
-        self.__dict__['_Backend__instance'] = Backend.__instance
-
-    def __getattr__(self, attr):
-        return getattr(self.__instance, attr)
-
-    def __setattr__(self, attr, value):
-        return setattr(self.__instance, attr, value)
+class Backend(base.BaseBackend):
+    def queue_declare(self, queue, **kwargs):
+        global QUEUES
+        if queue not in QUEUES:
+            logging.debug(_('Declaring queue %s'), queue)
+            QUEUES[queue] = Queue(queue)
+
+    def exchange_declare(self, exchange, type, *args, **kwargs):
+        global EXCHANGES
+        if exchange not in EXCHANGES:
+            logging.debug(_('Declaring exchange %s'), exchange)
+            EXCHANGES[exchange] = Exchange(exchange, type)
+
+    def queue_bind(self, queue, exchange, routing_key, **kwargs):
+        global EXCHANGES
+        global QUEUES
+        logging.debug(_('Binding %s to %s with key %s'),
+                      queue, exchange, routing_key)
+        EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)
+
+    def declare_consumer(self, queue, callback, *args, **kwargs):
+        self.current_queue = queue
+        self.current_callback = callback
+
+    def consume(self, limit=None):
+        while True:
+            item = self.get(self.current_queue)
+            if item:
+                self.current_callback(item)
+                raise StopIteration()
+            greenthread.sleep(0)
+
+    def get(self, queue, no_ack=False):
+        global QUEUES
+        if not queue in QUEUES or not QUEUES[queue].size():
+            return None
+        (message_data, content_type, content_encoding) = QUEUES[queue].pop()
+        message = Message(backend=self, body=message_data,
+                          content_type=content_type,
+                          content_encoding=content_encoding)
+        message.result = True
+        logging.debug(_('Getting from %s: %s'), queue, message)
+        return message
+
+    def prepare_message(self, message_data, delivery_mode,
+                        content_type, content_encoding, **kwargs):
+        """Prepare message for sending."""
+        return (message_data, content_type, content_encoding)
+
+    def publish(self, message, exchange, routing_key, **kwargs):
+        global EXCHANGES
+        if exchange in EXCHANGES:
+            EXCHANGES[exchange].publish(message, routing_key=routing_key)


 def reset_all():
-    Backend()._reset_all()
+    global EXCHANGES
+    global QUEUES
+    EXCHANGES = {}
+    QUEUES = {}
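The fake AMQP backend above drops its singleton wrapper in favor of module-level EXCHANGES and QUEUES dicts, so every Backend instance observes the same state and reset_all() only has to rebind the globals. A minimal standalone sketch of that pattern (illustration only, not Nova code):

REGISTRY = {}


class FakeBackend(object):
    def declare(self, name):
        # Every instance writes into the one module-level dict.
        REGISTRY.setdefault(name, [])

    def push(self, name, item):
        REGISTRY[name].append(item)


def reset_all():
    # Rebinding the global is enough; instances hold no state of their own.
    global REGISTRY
    REGISTRY = {}


a, b = FakeBackend(), FakeBackend()
a.declare('compute')
a.push('compute', 'msg')
assert REGISTRY['compute'] == ['msg']   # visible through any instance, including b
reset_all()
assert REGISTRY == {}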
@@ -29,6 +29,8 @@ import sys

 import gflags

+from nova import utils
+

 class FlagValues(gflags.FlagValues):
     """Extension of gflags.FlagValues that allows undefined and runtime flags.
@@ -211,7 +213,8 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
 DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
 DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
 DEFINE_integer('s3_port', 3333, 's3 port')
-DEFINE_string('s3_host', '127.0.0.1', 's3 host')
+DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
+DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
 DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
 DEFINE_string('scheduler_topic', 'scheduler',
               'the topic scheduler nodes listen on')
@@ -230,8 +233,11 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
 DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
 DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
 DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
-DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
-              'Url to ec2 api server')
+DEFINE_string('ec2_prefix', 'http', 'prefix for ec2')
+DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server')
+DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
+DEFINE_integer('cc_port', 8773, 'cloud controller port')
+DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')

 DEFINE_string('default_image', 'ami-11111',
               'default image to use, testing only')
@@ -241,10 +247,10 @@ DEFINE_string('null_kernel', 'nokernel',
               'kernel image that indicates not to use a kernel,'
               ' but to use a raw disk image instead')

-DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
+DEFINE_string('vpn_image_id', 'ami-cloudpipe', 'AMI for cloudpipe vpn server')
 DEFINE_string('vpn_key_suffix',
-              '-key',
-              'Suffix to add to project name for vpn key')
+              '-vpn',
+              'Suffix to add to project name for vpn key and secgroups')

 DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
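The single ec2_url flag is replaced by four pieces (ec2_prefix, cc_host or cc_dmz, cc_port, ec2_suffix) that the credentials code shown earlier recombines. A quick sketch of that composition, with made-up values:

ec2_prefix = 'http'
cc_host = '192.168.0.10'      # or the cc_dmz address when generating in-instance credentials
cc_port = 8773
ec2_suffix = '/services/Cloud'

ec2_url = '%s://%s:%s%s' % (ec2_prefix, cc_host, cc_port, ec2_suffix)
assert ec2_url == 'http://192.168.0.10:8773/services/Cloud'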
36 nova/rpc.py
@@ -91,15 +91,15 @@ class Consumer(messaging.Consumer):
                 self.failed_connection = False
                 break
             except:  # Catching all because carrot sucks
-                logging.exception("AMQP server on %s:%d is unreachable." \
-                    " Trying again in %d seconds." % (
+                logging.exception(_("AMQP server on %s:%d is unreachable."
+                    " Trying again in %d seconds.") % (
                         FLAGS.rabbit_host,
                         FLAGS.rabbit_port,
                         FLAGS.rabbit_retry_interval))
                 self.failed_connection = True
         if self.failed_connection:
-            logging.exception("Unable to connect to AMQP server" \
-                " after %d tries. Shutting down." % FLAGS.rabbit_max_retries)
+            logging.exception(_("Unable to connect to AMQP server"
+                " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries)
             sys.exit(1)

     def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@@ -116,14 +116,14 @@ class Consumer(messaging.Consumer):
                 self.declare()
             super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
             if self.failed_connection:
-                logging.error("Reconnected to queue")
+                logging.error(_("Reconnected to queue"))
                 self.failed_connection = False
         # NOTE(vish): This is catching all errors because we really don't
         #             exceptions to be logged 10 times a second if some
         #             persistent failure occurs.
         except Exception:  # pylint: disable-msg=W0703
             if not self.failed_connection:
-                logging.exception("Failed to fetch message from queue")
+                logging.exception(_("Failed to fetch message from queue"))
                 self.failed_connection = True

     def attach_to_eventlet(self):
@@ -153,7 +153,7 @@ class TopicConsumer(Consumer):
 class AdapterConsumer(TopicConsumer):
     """Calls methods on a proxy object based on method and args"""
     def __init__(self, connection=None, topic="broadcast", proxy=None):
-        LOG.debug('Initing the Adapter Consumer for %s' % (topic))
+        LOG.debug(_('Initing the Adapter Consumer for %s') % (topic))
         self.proxy = proxy
         super(AdapterConsumer, self).__init__(connection=connection,
                                               topic=topic)
@@ -168,7 +168,7 @@ class AdapterConsumer(TopicConsumer):

         Example: {'method': 'echo', 'args': {'value': 42}}
         """
-        LOG.debug('received %s' % (message_data))
+        LOG.debug(_('received %s') % (message_data))
         msg_id = message_data.pop('_msg_id', None)

         ctxt = _unpack_context(message_data)
@@ -181,8 +181,8 @@ class AdapterConsumer(TopicConsumer):
             # messages stay in the queue indefinitely, so for now
             # we just log the message and send an error string
             # back to the caller
-            LOG.warn('no method for message: %s' % (message_data))
-            msg_reply(msg_id, 'No method for message: %s' % message_data)
+            LOG.warn(_('no method for message: %s') % (message_data))
+            msg_reply(msg_id, _('No method for message: %s') % message_data)
             return

         node_func = getattr(self.proxy, str(method))
@@ -242,10 +242,10 @@ def msg_reply(msg_id, reply=None, failure=None):
     if failure:
         message = str(failure[1])
         tb = traceback.format_exception(*failure)
-        logging.error("Returning exception %s to caller", message)
+        logging.error(_("Returning exception %s to caller"), message)
         logging.error(tb)
         failure = (failure[0].__name__, str(failure[1]), tb)
-    conn = Connection.instance()
+    conn = Connection.instance(True)
     publisher = DirectPublisher(connection=conn, msg_id=msg_id)
     try:
         publisher.send({'result': reply, 'failure': failure})
@@ -283,7 +283,7 @@ def _unpack_context(msg):
         if key.startswith('_context_'):
             value = msg.pop(key)
             context_dict[key[9:]] = value
-    LOG.debug('unpacked context: %s', context_dict)
+    LOG.debug(_('unpacked context: %s'), context_dict)
     return context.RequestContext.from_dict(context_dict)

@@ -302,10 +302,10 @@ def _pack_context(msg, context):

 def call(context, topic, msg):
     """Sends a message on a topic and wait for a response"""
-    LOG.debug("Making asynchronous call...")
+    LOG.debug(_("Making asynchronous call..."))
     msg_id = uuid.uuid4().hex
     msg.update({'_msg_id': msg_id})
-    LOG.debug("MSG_ID is %s" % (msg_id))
+    LOG.debug(_("MSG_ID is %s") % (msg_id))
     _pack_context(msg, context)

     class WaitMessage(object):
@@ -353,7 +353,7 @@ def cast(context, topic, msg):

 def generic_response(message_data, message):
     """Logs a result and exits"""
-    LOG.debug('response %s', message_data)
+    LOG.debug(_('response %s'), message_data)
     message.ack()
     sys.exit(0)

@@ -362,8 +362,8 @@ def send_message(topic, message, wait=True):
     """Sends a message for testing"""
     msg_id = uuid.uuid4().hex
     message.update({'_msg_id': msg_id})
-    LOG.debug('topic is %s', topic)
-    LOG.debug('message %s', message)
+    LOG.debug(_('topic is %s'), topic)
+    LOG.debug(_('message %s'), message)

     if wait:
         consumer = messaging.Consumer(connection=Connection.instance(),
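Most of the rpc.py churn is wrapping user-visible log and reply strings in _() so they can be translated. A minimal hedged sketch of that pattern follows; how Nova actually binds _() is not shown in this diff, so the gettext.install call here is an assumption about one common way to make the bare _() calls resolve.

import gettext
import logging

# Assumption for the sketch: installing a translation domain puts _() into builtins.
gettext.install('nova')

logging.basicConfig(level=logging.DEBUG)
# Translate the format string, keep the arguments lazy so logging formats them.
logging.debug(_('topic is %s'), 'compute')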
@@ -60,7 +60,7 @@ class SimpleScheduler(chance.ChanceScheduler):
         for result in results:
             (service, instance_cores) = result
             if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
-                raise driver.NoValidHost("All hosts have too many cores")
+                raise driver.NoValidHost(_("All hosts have too many cores"))
             if self.service_is_up(service):
                 # NOTE(vish): this probably belongs in the manager, if we
                 #             can generalize this somehow
@@ -70,7 +70,7 @@ class SimpleScheduler(chance.ChanceScheduler):
                               {'host': service['host'],
                                'scheduled_at': now})
                 return service['host']
-        raise driver.NoValidHost("No hosts found")
+        raise driver.NoValidHost(_("No hosts found"))

     def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
         """Picks a host that is up and has the fewest volumes."""
@@ -92,7 +92,8 @@ class SimpleScheduler(chance.ChanceScheduler):
         for result in results:
             (service, volume_gigabytes) = result
             if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
-                raise driver.NoValidHost("All hosts have too many gigabytes")
+                raise driver.NoValidHost(_("All hosts have too many "
+                                           "gigabytes"))
             if self.service_is_up(service):
                 # NOTE(vish): this probably belongs in the manager, if we
                 #             can generalize this somehow
@@ -102,7 +103,7 @@ class SimpleScheduler(chance.ChanceScheduler):
                               {'host': service['host'],
                                'scheduled_at': now})
                 return service['host']
-        raise driver.NoValidHost("No hosts found")
+        raise driver.NoValidHost(_("No hosts found"))

     def schedule_set_network_host(self, context, *_args, **_kwargs):
         """Picks a host that is up and has the fewest networks."""
@@ -111,7 +112,7 @@ class SimpleScheduler(chance.ChanceScheduler):
         for result in results:
             (service, instance_count) = result
             if instance_count >= FLAGS.max_networks:
-                raise driver.NoValidHost("All hosts have too many networks")
+                raise driver.NoValidHost(_("All hosts have too many networks"))
             if self.service_is_up(service):
                 return service['host']
-        raise driver.NoValidHost("No hosts found")
+        raise driver.NoValidHost(_("No hosts found"))
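For context, the scheduler hunks above all follow the same shape: walk hosts ordered by current load, reject the request when even the least-loaded host would exceed a flag-defined ceiling, and otherwise return the first host whose service is still up. A standalone sketch of that pattern (illustration only, not Nova code):

MAX_CORES = 16


def pick_host(results, requested_cores, service_is_up):
    for service, used_cores in results:           # results come least-loaded first
        if used_cores + requested_cores > MAX_CORES:
            raise RuntimeError('All hosts have too many cores')
        if service_is_up(service):
            return service['host']
    raise RuntimeError('No hosts found')


# Example: the first host is down, the second has room and is up.
hosts = [({'host': 'node1', 'up': False}, 4), ({'host': 'node2', 'up': True}, 6)]
print(pick_host(hosts, 2, lambda s: s['up']))     # -> 'node2'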
@@ -1,153 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-from nova import context
-from nova import db
-from nova import exception
-from nova import flags
-from nova import quota
-from nova import test
-from nova import utils
-from nova.auth import manager
-from nova.api.ec2 import cloud
-
-
-FLAGS = flags.FLAGS
-
-
-class QuotaTestCase(test.TestCase):
-    def setUp(self):
-        logging.getLogger().setLevel(logging.DEBUG)
-        super(QuotaTestCase, self).setUp()
-        self.flags(connection_type='fake',
-                   quota_instances=2,
-                   quota_cores=4,
-                   quota_volumes=2,
-                   quota_gigabytes=20,
-                   quota_floating_ips=1)
-
-        self.cloud = cloud.CloudController()
-        self.manager = manager.AuthManager()
-        self.user = self.manager.create_user('admin', 'admin', 'admin', True)
-        self.project = self.manager.create_project('admin', 'admin', 'admin')
-        self.network = utils.import_object(FLAGS.network_manager)
-        self.context = context.RequestContext(project=self.project,
-                                              user=self.user)
-
-    def tearDown(self):
-        manager.AuthManager().delete_project(self.project)
-        manager.AuthManager().delete_user(self.user)
-        super(QuotaTestCase, self).tearDown()
-
-    def _create_instance(self, cores=2):
-        """Create a test instance"""
-        inst = {}
-        inst['image_id'] = 'ami-test'
-        inst['reservation_id'] = 'r-fakeres'
-        inst['user_id'] = self.user.id
-        inst['project_id'] = self.project.id
-        inst['instance_type'] = 'm1.large'
-        inst['vcpus'] = cores
-        inst['mac_address'] = utils.generate_mac()
-        return db.instance_create(self.context, inst)['id']
-
-    def _create_volume(self, size=10):
-        """Create a test volume"""
-        vol = {}
-        vol['user_id'] = self.user.id
-        vol['project_id'] = self.project.id
-        vol['size'] = size
-        return db.volume_create(self.context, vol)['id']
-
-    def test_quota_overrides(self):
-        """Make sure overriding a projects quotas works"""
-        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
-        self.assertEqual(num_instances, 2)
-        db.quota_create(self.context, {'project_id': self.project.id,
-                                       'instances': 10})
-        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
-        self.assertEqual(num_instances, 4)
-        db.quota_update(self.context, self.project.id, {'cores': 100})
-        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
-        self.assertEqual(num_instances, 10)
-        db.quota_destroy(self.context, self.project.id)
-
-    def test_too_many_instances(self):
-        instance_ids = []
-        for i in range(FLAGS.quota_instances):
-            instance_id = self._create_instance()
-            instance_ids.append(instance_id)
-        self.assertRaises(quota.QuotaError, self.cloud.run_instances,
-                          self.context,
-                          min_count=1,
-                          max_count=1,
-                          instance_type='m1.small',
-                          image_id='fake')
-        for instance_id in instance_ids:
-            db.instance_destroy(self.context, instance_id)
-
-    def test_too_many_cores(self):
-        instance_ids = []
-        instance_id = self._create_instance(cores=4)
-        instance_ids.append(instance_id)
-        self.assertRaises(quota.QuotaError, self.cloud.run_instances,
-                          self.context,
-                          min_count=1,
-                          max_count=1,
-                          instance_type='m1.small',
-                          image_id='fake')
-        for instance_id in instance_ids:
-            db.instance_destroy(self.context, instance_id)
-
-    def test_too_many_volumes(self):
-        volume_ids = []
-        for i in range(FLAGS.quota_volumes):
-            volume_id = self._create_volume()
-            volume_ids.append(volume_id)
-        self.assertRaises(quota.QuotaError, self.cloud.create_volume,
-                          self.context,
-                          size=10)
-        for volume_id in volume_ids:
-            db.volume_destroy(self.context, volume_id)
-
-    def test_too_many_gigabytes(self):
-        volume_ids = []
-        volume_id = self._create_volume(size=20)
-        volume_ids.append(volume_id)
-        self.assertRaises(quota.QuotaError,
-                          self.cloud.create_volume,
-                          self.context,
-                          size=10)
-        for volume_id in volume_ids:
-            db.volume_destroy(self.context, volume_id)
-
-    def test_too_many_addresses(self):
-        address = '192.168.0.100'
-        db.floating_ip_create(context.get_admin_context(),
-                              {'address': address, 'host': FLAGS.host})
-        float_addr = self.network.allocate_floating_ip(self.context,
-                                                       self.project.id)
-        # NOTE(vish): This assert never fails. When cloud attempts to
-        #             make an rpc.call, the test just finishes with OK. It
-        #             appears to be something in the magic inline callbacks
-        #             that is breaking.
-        self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
-                          self.context)
-        db.floating_ip_destroy(context.get_admin_context(), address)
@@ -1,227 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Unit Tests for remote procedure calls using queue
-"""
-
-import mox
-
-from nova import exception
-from nova import flags
-from nova import rpc
-from nova import test
-from nova import service
-from nova import manager
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager",
-                    "Manager for testing")
-
-
-class FakeManager(manager.Manager):
-    """Fake manager for tests"""
-    def test_method(self):
-        return 'manager'
-
-
-class ExtendedService(service.Service):
-    def test_method(self):
-        return 'service'
-
-
-class ServiceManagerTestCase(test.TestCase):
-    """Test cases for Services"""
-
-    def test_attribute_error_for_no_manager(self):
-        serv = service.Service('test',
-                               'test',
-                               'test',
-                               'nova.tests.service_unittest.FakeManager')
-        self.assertRaises(AttributeError, getattr, serv, 'test_method')
-
-    def test_message_gets_to_manager(self):
-        serv = service.Service('test',
-                               'test',
-                               'test',
-                               'nova.tests.service_unittest.FakeManager')
-        serv.start()
-        self.assertEqual(serv.test_method(), 'manager')
-
-    def test_override_manager_method(self):
-        serv = ExtendedService('test',
-                               'test',
-                               'test',
-                               'nova.tests.service_unittest.FakeManager')
-        serv.start()
-        self.assertEqual(serv.test_method(), 'service')
-
-
-class ServiceTestCase(test.TestCase):
-    """Test cases for Services"""
-
-    def setUp(self):
-        super(ServiceTestCase, self).setUp()
-        self.mox.StubOutWithMock(service, 'db')
-
-    def test_create(self):
-        host = 'foo'
-        binary = 'nova-fake'
-        topic = 'fake'
-
-        # NOTE(vish): Create was moved out of mox replay to make sure that
-        #             the looping calls are created in StartService.
-        app = service.Service.create(host=host, binary=binary)
-
-        self.mox.StubOutWithMock(rpc,
-                                 'AdapterConsumer',
-                                 use_mock_anything=True)
-        rpc.AdapterConsumer(connection=mox.IgnoreArg(),
-                            topic=topic,
-                            proxy=mox.IsA(service.Service)).AndReturn(
-                                    rpc.AdapterConsumer)
-
-        rpc.AdapterConsumer(connection=mox.IgnoreArg(),
-                            topic='%s.%s' % (topic, host),
-                            proxy=mox.IsA(service.Service)).AndReturn(
-                                    rpc.AdapterConsumer)
-
-        rpc.AdapterConsumer.attach_to_eventlet()
-        rpc.AdapterConsumer.attach_to_eventlet()
-
-        service_create = {'host': host,
-                          'binary': binary,
-                          'topic': topic,
-                          'report_count': 0}
-        service_ref = {'host': host,
-                       'binary': binary,
-                       'report_count': 0,
-                       'id': 1}
-
-        service.db.service_get_by_args(mox.IgnoreArg(),
-                                       host,
-                                       binary).AndRaise(exception.NotFound())
-        service.db.service_create(mox.IgnoreArg(),
-                                  service_create).AndReturn(service_ref)
-        self.mox.ReplayAll()
-
-        app.start()
-        app.stop()
-        self.assert_(app)
-
-    # We're testing sort of weird behavior in how report_state decides
-    # whether it is disconnected, it looks for a variable on itself called
-    # 'model_disconnected' and report_state doesn't really do much so this
-    # these are mostly just for coverage
-    def test_report_state_no_service(self):
-        host = 'foo'
-        binary = 'bar'
-        topic = 'test'
-        service_create = {'host': host,
-                          'binary': binary,
-                          'topic': topic,
-                          'report_count': 0}
-        service_ref = {'host': host,
-                       'binary': binary,
-                       'topic': topic,
-                       'report_count': 0,
-                       'id': 1}
-
-        service.db.service_get_by_args(mox.IgnoreArg(),
-                                       host,
-                                       binary).AndRaise(exception.NotFound())
-        service.db.service_create(mox.IgnoreArg(),
-                                  service_create).AndReturn(service_ref)
-        service.db.service_get(mox.IgnoreArg(),
-                               service_ref['id']).AndReturn(service_ref)
-        service.db.service_update(mox.IgnoreArg(), service_ref['id'],
-                                  mox.ContainsKeyValue('report_count', 1))
-
-        self.mox.ReplayAll()
-        serv = service.Service(host,
-                               binary,
-                               topic,
-                               'nova.tests.service_unittest.FakeManager')
-        serv.start()
-        serv.report_state()
-
-    def test_report_state_newly_disconnected(self):
-        host = 'foo'
-        binary = 'bar'
-        topic = 'test'
-        service_create = {'host': host,
-                          'binary': binary,
-                          'topic': topic,
-                          'report_count': 0}
-        service_ref = {'host': host,
-                       'binary': binary,
-                       'topic': topic,
-                       'report_count': 0,
-                       'id': 1}
-
-        service.db.service_get_by_args(mox.IgnoreArg(),
-                                       host,
-                                       binary).AndRaise(exception.NotFound())
-        service.db.service_create(mox.IgnoreArg(),
-                                  service_create).AndReturn(service_ref)
-        service.db.service_get(mox.IgnoreArg(),
-                               mox.IgnoreArg()).AndRaise(Exception())
-
-        self.mox.ReplayAll()
-        serv = service.Service(host,
-                               binary,
-                               topic,
-                               'nova.tests.service_unittest.FakeManager')
-        serv.start()
-        serv.report_state()
-        self.assert_(serv.model_disconnected)
-
-    def test_report_state_newly_connected(self):
-        host = 'foo'
-        binary = 'bar'
-        topic = 'test'
-        service_create = {'host': host,
-                          'binary': binary,
-                          'topic': topic,
-                          'report_count': 0}
-        service_ref = {'host': host,
-                       'binary': binary,
-                       'topic': topic,
-                       'report_count': 0,
-                       'id': 1}
-
-        service.db.service_get_by_args(mox.IgnoreArg(),
-                                       host,
-                                       binary).AndRaise(exception.NotFound())
-        service.db.service_create(mox.IgnoreArg(),
-                                  service_create).AndReturn(service_ref)
-        service.db.service_get(mox.IgnoreArg(),
-                               service_ref['id']).AndReturn(service_ref)
-        service.db.service_update(mox.IgnoreArg(), service_ref['id'],
-                                  mox.ContainsKeyValue('report_count', 1))
-
-        self.mox.ReplayAll()
-        serv = service.Service(host,
-                               binary,
-                               topic,
-                               'nova.tests.service_unittest.FakeManager')
-        serv.start()
-        serv.model_disconnected = True
-        serv.report_state()
-
-        self.assert_(not serv.model_disconnected)
@@ -208,17 +208,13 @@ class AuthManagerTestCase(object):
         # so it probably belongs in crypto_unittest
         # but I'm leaving it where I found it.
         with user_and_project_generator(self.manager) as (user, project):
-            # NOTE(todd): Should mention why we must setup controller first
-            #             (somebody please clue me in)
-            cloud_controller = cloud.CloudController()
-            cloud_controller.setup()
-            _key, cert_str = self.manager._generate_x509_cert('test1',
-                                                              'testproj')
+            # NOTE(vish): Setup runs genroot.sh if it hasn't been run
+            cloud.CloudController().setup()
+            _key, cert_str = crypto.generate_x509_cert(user.id, project.id)
             logging.debug(cert_str)

-            # Need to verify that it's signed by the right intermediate CA
-            full_chain = crypto.fetch_ca(project_id='testproj', chain=True)
-            int_cert = crypto.fetch_ca(project_id='testproj', chain=False)
+            full_chain = crypto.fetch_ca(project_id=project.id, chain=True)
+            int_cert = crypto.fetch_ca(project_id=project.id, chain=False)
             cloud_cert = crypto.fetch_ca()
             logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain)
             signed_cert = X509.load_cert_string(cert_str)
@@ -227,7 +223,8 @@ class AuthManagerTestCase(object):
             cloud_cert = X509.load_cert_string(cloud_cert)
             self.assertTrue(signed_cert.verify(chain_cert.get_pubkey()))
             self.assertTrue(signed_cert.verify(int_cert.get_pubkey()))
-            if not FLAGS.use_intermediate_ca:
+
+            if not FLAGS.use_project_ca:
                 self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey()))
             else:
                 self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey()))
@@ -22,20 +22,18 @@ import logging
 from M2Crypto import BIO
 from M2Crypto import RSA
 import os
-import StringIO
 import tempfile
 import time

 from eventlet import greenthread
-from xml.etree import ElementTree

 from nova import context
 from nova import crypto
 from nova import db
 from nova import flags
 from nova import rpc
+from nova import service
 from nova import test
-from nova import utils
 from nova.auth import manager
 from nova.compute import power_state
 from nova.api.ec2 import cloud
@@ -54,7 +52,8 @@ os.makedirs(IMAGES_PATH)
 class CloudTestCase(test.TestCase):
     def setUp(self):
         super(CloudTestCase, self).setUp()
-        self.flags(connection_type='fake', images_path=IMAGES_PATH)
+        self.flags(connection_type='fake',
+                   images_path=IMAGES_PATH)

         self.conn = rpc.Connection.instance()
         logging.getLogger().setLevel(logging.DEBUG)
@@ -62,27 +61,23 @@ class CloudTestCase(test.TestCase):
         # set up our cloud
         self.cloud = cloud.CloudController()

-        # set up a service
-        self.compute = utils.import_object(FLAGS.compute_manager)
-        self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
-                                                    topic=FLAGS.compute_topic,
-                                                    proxy=self.compute)
-        self.compute_consumer.attach_to_eventlet()
-        self.network = utils.import_object(FLAGS.network_manager)
-        self.network_consumer = rpc.AdapterConsumer(connection=self.conn,
-                                                    topic=FLAGS.network_topic,
-                                                    proxy=self.network)
-        self.network_consumer.attach_to_eventlet()
+        # set up services
+        self.compute = service.Service.create(binary='nova-compute')
+        self.compute.start()
+        self.network = service.Service.create(binary='nova-network')
+        self.network.start()

         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('admin', 'admin', 'admin', True)
         self.project = self.manager.create_project('proj', 'admin', 'proj')
         self.context = context.RequestContext(user=self.user,
                                               project=self.project)

     def tearDown(self):
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
+        self.compute.kill()
+        self.network.kill()
         super(CloudTestCase, self).tearDown()

     def _create_key(self, name):
@@ -109,12 +104,13 @@ class CloudTestCase(test.TestCase):
                                {'address': address,
                                 'host': FLAGS.host})
         self.cloud.allocate_address(self.context)
-        inst = db.instance_create(self.context, {})
+        inst = db.instance_create(self.context, {'host': FLAGS.host})
         fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
         ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
         self.cloud.associate_address(self.context,
                                      instance_id=ec2_id,
                                      public_ip=address)
+        greenthread.sleep(0.3)
         self.cloud.disassociate_address(self.context,
                                         public_ip=address)
         self.cloud.release_address(self.context,
@@ -41,6 +41,7 @@ class ComputeTestCase(test.TestCase):
         logging.getLogger().setLevel(logging.DEBUG)
         super(ComputeTestCase, self).setUp()
         self.flags(connection_type='fake',
+                   stub_network=True,
                    network_manager='nova.network.manager.FlatManager')
         self.compute = utils.import_object(FLAGS.compute_manager)
         self.compute_api = compute_api.ComputeAPI()
@@ -100,13 +101,13 @@ class ComputeTestCase(test.TestCase):
         self.compute.run_instance(self.context, instance_id)

         instances = db.instance_get_all(context.get_admin_context())
-        logging.info("Running instances: %s", instances)
+        logging.info(_("Running instances: %s"), instances)
         self.assertEqual(len(instances), 1)

         self.compute.terminate_instance(self.context, instance_id)

         instances = db.instance_get_all(context.get_admin_context())
-        logging.info("After terminating instances: %s", instances)
+        logging.info(_("After terminating instances: %s"), instances)
         self.assertEqual(len(instances), 0)

     def test_run_terminate_timestamps(self):
@@ -135,6 +136,14 @@ class ComputeTestCase(test.TestCase):
         self.compute.unpause_instance(self.context, instance_id)
         self.compute.terminate_instance(self.context, instance_id)

+    def test_suspend(self):
+        """ensure instance can be suspended"""
+        instance_id = self._create_instance()
+        self.compute.run_instance(self.context, instance_id)
+        self.compute.suspend_instance(self.context, instance_id)
+        self.compute.resume_instance(self.context, instance_id)
+        self.compute.terminate_instance(self.context, instance_id)
+
     def test_reboot(self):
         """Ensure instance can be rebooted"""
         instance_id = self._create_instance()
86 nova/tests/test_middleware.py Normal file
@@ -0,0 +1,86 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import webob
+import webob.dec
+import webob.exc
+
+from nova.api import ec2
+from nova import flags
+from nova import test
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+
+
+@webob.dec.wsgify
+def conditional_forbid(req):
+    """Helper wsgi app returns 403 if param 'die' is 1."""
+    if 'die' in req.params and req.params['die'] == '1':
+        raise webob.exc.HTTPForbidden()
+    return 'OK'
+
+
+class LockoutTestCase(test.TrialTestCase):
+    """Test case for the Lockout middleware."""
+    def setUp(self):  # pylint: disable-msg=C0103
+        super(LockoutTestCase, self).setUp()
+        utils.set_time_override()
+        self.lockout = ec2.Lockout(conditional_forbid)
+
+    def tearDown(self):  # pylint: disable-msg=C0103
+        utils.clear_time_override()
+        super(LockoutTestCase, self).tearDown()
+
+    def _send_bad_attempts(self, access_key, num_attempts=1):
+        """Fail x."""
+        for i in xrange(num_attempts):
+            req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key)
+            self.assertEqual(req.get_response(self.lockout).status_int, 403)
+
+    def _is_locked_out(self, access_key):
+        """Sends a test request to see if key is locked out."""
+        req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key)
+        return (req.get_response(self.lockout).status_int == 403)
+
+    def test_lockout(self):
+        self._send_bad_attempts('test', FLAGS.lockout_attempts)
+        self.assertTrue(self._is_locked_out('test'))
+
+    def test_timeout(self):
+        self._send_bad_attempts('test', FLAGS.lockout_attempts)
+        self.assertTrue(self._is_locked_out('test'))
+        utils.advance_time_seconds(FLAGS.lockout_minutes * 60)
+        self.assertFalse(self._is_locked_out('test'))
+
+    def test_multiple_keys(self):
+        self._send_bad_attempts('test1', FLAGS.lockout_attempts)
+        self.assertTrue(self._is_locked_out('test1'))
+        self.assertFalse(self._is_locked_out('test2'))
+        utils.advance_time_seconds(FLAGS.lockout_minutes * 60)
+        self.assertFalse(self._is_locked_out('test1'))
+        self.assertFalse(self._is_locked_out('test2'))
+
+    def test_window_timeout(self):
+        self._send_bad_attempts('test', FLAGS.lockout_attempts - 1)
+        self.assertFalse(self._is_locked_out('test'))
+        utils.advance_time_seconds(FLAGS.lockout_window * 60)
+        self._send_bad_attempts('test', FLAGS.lockout_attempts - 1)
+        self.assertFalse(self._is_locked_out('test'))
|
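Aside: the LockoutTestCase above drives the middleware entirely in memory with webob; no HTTP server is involved. A minimal, nova-free sketch of that pattern (the helper app and query parameters mirror the test above, everything else is plain webob):

    # Sketch: exercising a @wsgify-wrapped app with webob.Request.blank,
    # the same technique LockoutTestCase uses against ec2.Lockout.
    import webob
    import webob.dec
    import webob.exc


    @webob.dec.wsgify
    def conditional_forbid(req):
        """Returns 403 when the request carries die=1, otherwise 'OK'."""
        if req.params.get('die') == '1':
            raise webob.exc.HTTPForbidden()
        return 'OK'

    req = webob.Request.blank('/?AWSAccessKeyId=test&die=1')
    assert req.get_response(conditional_forbid).status_int == 403
    req = webob.Request.blank('/?AWSAccessKeyId=test')
    assert req.get_response(conditional_forbid).status_int == 200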
@@ -22,13 +22,13 @@ from nova.utils import parse_mailmap, str_dict_replace

 class ProjectTestCase(test.TestCase):
     def test_authors_up_to_date(self):
-        if os.path.exists('../.bzr'):
+        if os.path.exists('.bzr'):
             contributors = set()

-            mailmap = parse_mailmap('../.mailmap')
+            mailmap = parse_mailmap('.mailmap')

             import bzrlib.workingtree
-            tree = bzrlib.workingtree.WorkingTree.open('..')
+            tree = bzrlib.workingtree.WorkingTree.open('.')
             tree.lock_read()
             try:
                 parents = tree.get_parent_ids()
@@ -42,7 +42,7 @@ class ProjectTestCase(test.TestCase):
                     email = author.split(' ')[-1]
                     contributors.add(str_dict_replace(email, mailmap))

-            authors_file = open('../Authors', 'r').read()
+            authors_file = open('Authors', 'r').read()

             missing = set()
             for contributor in contributors:
@@ -26,6 +26,7 @@ from nova import context
 from nova import db
 from nova import exception
 from nova import flags
+from nova import service
 from nova import test
 from nova import utils
 from nova.auth import manager
@@ -40,6 +41,7 @@ class NetworkTestCase(test.TestCase):
         # NOTE(vish): if you change these flags, make sure to change the
         # flags in the corresponding section in nova-dhcpbridge
         self.flags(connection_type='fake',
+                   fake_call=True,
                    fake_network=True,
                    network_size=16,
                    num_networks=5)
@@ -56,16 +58,13 @@ class NetworkTestCase(test.TestCase):
             # create the necessary network data for the project
             user_context = context.RequestContext(project=self.projects[i],
                                                   user=self.user)
-            network_ref = self.network.get_network(user_context)
-            self.network.set_network_host(context.get_admin_context(),
-                                          network_ref['id'])
+            host = self.network.get_network_host(user_context.elevated())
         instance_ref = self._create_instance(0)
         self.instance_id = instance_ref['id']
         instance_ref = self._create_instance(1)
         self.instance2_id = instance_ref['id']

     def tearDown(self):
-        super(NetworkTestCase, self).tearDown()
         # TODO(termie): this should really be instantiating clean datastores
         # in between runs, one failure kills all the tests
         db.instance_destroy(context.get_admin_context(), self.instance_id)
@@ -73,6 +72,7 @@ class NetworkTestCase(test.TestCase):
         for project in self.projects:
             self.manager.delete_project(project)
         self.manager.delete_user(self.user)
+        super(NetworkTestCase, self).tearDown()

     def _create_instance(self, project_num, mac=None):
         if not mac:
@@ -33,7 +33,7 @@ class RpcTestCase(test.TestCase):
     """Test cases for rpc"""
     def setUp(self):
         super(RpcTestCase, self).setUp()
-        self.conn = rpc.Connection.instance()
+        self.conn = rpc.Connection.instance(True)
         self.receiver = TestReceiver()
         self.consumer = rpc.AdapterConsumer(connection=self.conn,
                                             topic='test',
@@ -79,6 +79,33 @@ class RpcTestCase(test.TestCase):
         except rpc.RemoteError as exc:
             self.assertEqual(int(exc.value), value)

+    def test_nested_calls(self):
+        """Test that we can do an rpc.call inside another call"""
+        class Nested(object):
+            @staticmethod
+            def echo(context, queue, value):
+                """Calls echo in the passed queue"""
+                logging.debug("Nested received %s, %s", queue, value)
+                ret = rpc.call(context,
+                               queue,
+                               {"method": "echo",
+                                "args": {"value": value}})
+                logging.debug("Nested return %s", ret)
+                return value
+
+        nested = Nested()
+        conn = rpc.Connection.instance(True)
+        consumer = rpc.AdapterConsumer(connection=conn,
+                                       topic='nested',
+                                       proxy=nested)
+        consumer.attach_to_eventlet()
+        value = 42
+        result = rpc.call(self.context,
+                          'nested', {"method": "echo",
+                                     "args": {"queue": "test",
+                                              "value": value}})
+        self.assertEqual(value, result)
+
+
 class TestReceiver(object):
     """Simple Proxy class so the consumer has methods to call
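Aside: test_nested_calls registers a second consumer ('nested') whose proxy issues its own rpc.call back to the 'test' topic, with both consumers attached via attach_to_eventlet(); that is presumably why a fresh connection is requested with Connection.instance(True). A rough, nova-free sketch of the dispatch shape such a consumer relies on (the class and method names below are illustrative, not nova APIs):

    # Sketch: a message like {"method": "echo", "args": {...}} is dispatched to
    # a proxy object's method, mirroring how a consumer hands work to its proxy.
    class FakeConsumer(object):
        def __init__(self, proxy):
            self.proxy = proxy

        def dispatch(self, context, message):
            method = message['method']
            args = message.get('args', {})
            return getattr(self.proxy, method)(context, **args)


    class Receiver(object):
        @staticmethod
        def echo(context, value):
            return value

    consumer = FakeConsumer(Receiver())
    assert consumer.dispatch(None, {"method": "echo", "args": {"value": 42}}) == 42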
@@ -50,7 +50,7 @@ class SchedulerTestCase(test.TestCase):
     """Test case for scheduler"""
     def setUp(self):
         super(SchedulerTestCase, self).setUp()
-        self.flags(scheduler_driver='nova.tests.scheduler_unittest.TestDriver')
+        self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')

     def test_fallback(self):
         scheduler = manager.SchedulerManager()
@@ -80,6 +80,7 @@ class SimpleDriverTestCase(test.TestCase):
     def setUp(self):
         super(SimpleDriverTestCase, self).setUp()
         self.flags(connection_type='fake',
+                   stub_network=True,
                    max_cores=4,
                    max_gigabytes=4,
                    network_manager='nova.network.manager.FlatManager',
@@ -33,6 +33,7 @@ flags.DECLARE('instances_path', 'nova.compute.manager')
 class LibvirtConnTestCase(test.TestCase):
     def setUp(self):
         super(LibvirtConnTestCase, self).setUp()
+        self.flags(fake_call=True)
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('fake', 'fake', 'fake',
                                              admin=True)
@@ -52,45 +53,43 @@ class LibvirtConnTestCase(test.TestCase):

     def test_xml_and_uri_no_ramdisk_no_kernel(self):
         instance_data = dict(self.test_instance)
-        self.do_test_xml_and_uri(instance_data,
+        self._check_xml_and_uri(instance_data,
                                 expect_kernel=False, expect_ramdisk=False)

     def test_xml_and_uri_no_ramdisk(self):
         instance_data = dict(self.test_instance)
         instance_data['kernel_id'] = 'aki-deadbeef'
-        self.do_test_xml_and_uri(instance_data,
+        self._check_xml_and_uri(instance_data,
                                 expect_kernel=True, expect_ramdisk=False)

     def test_xml_and_uri_no_kernel(self):
         instance_data = dict(self.test_instance)
         instance_data['ramdisk_id'] = 'ari-deadbeef'
-        self.do_test_xml_and_uri(instance_data,
+        self._check_xml_and_uri(instance_data,
                                 expect_kernel=False, expect_ramdisk=False)

     def test_xml_and_uri(self):
         instance_data = dict(self.test_instance)
         instance_data['ramdisk_id'] = 'ari-deadbeef'
         instance_data['kernel_id'] = 'aki-deadbeef'
-        self.do_test_xml_and_uri(instance_data,
+        self._check_xml_and_uri(instance_data,
                                 expect_kernel=True, expect_ramdisk=True)

     def test_xml_and_uri_rescue(self):
         instance_data = dict(self.test_instance)
         instance_data['ramdisk_id'] = 'ari-deadbeef'
         instance_data['kernel_id'] = 'aki-deadbeef'
-        self.do_test_xml_and_uri(instance_data,
-                                 expect_kernel=True, expect_ramdisk=True,
-                                 rescue=True)
+        self._check_xml_and_uri(instance_data, expect_kernel=True,
+                                expect_ramdisk=True, rescue=True)

-    def do_test_xml_and_uri(self, instance,
-                            expect_ramdisk, expect_kernel,
-                            rescue=False):
+    def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
+                           rescue=False):
         user_context = context.RequestContext(project=self.project,
                                               user=self.user)
         instance_ref = db.instance_create(user_context, instance)
-        network_ref = self.network.get_network(user_context)
-        self.network.set_network_host(context.get_admin_context(),
-                                      network_ref['id'])
+        host = self.network.get_network_host(user_context.elevated())
+        network_ref = db.project_get_network(context.get_admin_context(),
+                                             self.project.id)

         fixed_ip = {'address': self.test_ip,
                     'network_id': network_ref['id']}
@@ -129,43 +128,44 @@ class LibvirtConnTestCase(test.TestCase):
             check_list.append(check)
         else:
             if expect_kernel:
-                check = (lambda t: t.find('./os/kernel').text.split('/'
-                                                                    )[1], 'kernel')
+                check = (lambda t: t.find('./os/kernel').text.split(
+                    '/')[1], 'kernel')
             else:
                 check = (lambda t: t.find('./os/kernel'), None)
             check_list.append(check)

             if expect_ramdisk:
-                check = (lambda t: t.find('./os/initrd').text.split('/'
-                                                                    )[1], 'ramdisk')
+                check = (lambda t: t.find('./os/initrd').text.split(
+                    '/')[1], 'ramdisk')
             else:
                 check = (lambda t: t.find('./os/initrd'), None)
             check_list.append(check)

         common_checks = [
             (lambda t: t.find('.').tag, 'domain'),
-            (lambda t: t.find('./devices/interface/filterref/parameter'
-                              ).get('name'), 'IP'),
-            (lambda t: t.find('./devices/interface/filterref/parameter'
-                              ).get('value'), '10.11.12.13'),
-            (lambda t: t.findall('./devices/interface/filterref/parameter'
-                                 )[1].get('name'), 'DHCPSERVER'),
-            (lambda t: t.findall('./devices/interface/filterref/parameter'
-                                 )[1].get('value'), '10.0.0.1'),
-            (lambda t: t.find('./devices/serial/source').get('path'
-                              ).split('/')[1], 'console.log'),
+            (lambda t: t.find(
+                './devices/interface/filterref/parameter').get('name'), 'IP'),
+            (lambda t: t.find(
+                './devices/interface/filterref/parameter').get(
+                    'value'), '10.11.12.13'),
+            (lambda t: t.findall(
+                './devices/interface/filterref/parameter')[1].get(
+                    'name'), 'DHCPSERVER'),
+            (lambda t: t.findall(
+                './devices/interface/filterref/parameter')[1].get(
+                    'value'), '10.0.0.1'),
+            (lambda t: t.find('./devices/serial/source').get(
+                'path').split('/')[1], 'console.log'),
             (lambda t: t.find('./memory').text, '2097152')]

         if rescue:
-            common_checks += [(lambda t: t.findall('./devices/disk/source'
-                                                   )[0].get('file').split('/')[1],
-                               'rescue-disk'),
-                              (lambda t: t.findall('./devices/disk/source'
-                                                   )[1].get('file').split('/')[1],
-                               'disk')]
+            common_checks += [
+                (lambda t: t.findall('./devices/disk/source')[0].get(
+                    'file').split('/')[1], 'rescue-disk'),
+                (lambda t: t.findall('./devices/disk/source')[1].get(
+                    'file').split('/')[1], 'disk')]
         else:
-            common_checks += [(lambda t: t.findall('./devices/disk/source'
-                                                   )[0].get('file').split('/')[1],
-                               'disk')]
+            common_checks += [(lambda t: t.findall(
+                './devices/disk/source')[0].get('file').split('/')[1],
+                'disk')]

         for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
@@ -336,7 +336,7 @@ class NWFilterTestCase(test.TestCase):
                                          self.security_group.id)
         instance = db.instance_get(self.context, inst_id)

-        d = self.fw.setup_nwfilters_for_instance(instance)
+        self.fw.setup_base_nwfilters()
+        self.fw.setup_nwfilters_for_instance(instance)
         _ensure_all_called()
         self.teardown_security_group()
-        return d
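Aside: _check_xml_and_uri builds lists of (extractor-lambda, expected-value) pairs and runs each one against the generated domain XML. A self-contained sketch of that check-tuple idiom using only the standard library (the XML fragment below is made up for illustration):

    # Sketch: (extractor, expected) tuples evaluated against a parsed XML tree,
    # the same pattern as check_list/common_checks above.
    from xml.etree import ElementTree

    xml = ("<domain>"
           "<os><kernel>instances/kernel</kernel></os>"
           "<memory>2097152</memory>"
           "</domain>")
    tree = ElementTree.fromstring(xml)

    checks = [
        (lambda t: t.find('.').tag, 'domain'),
        (lambda t: t.find('./os/kernel').text.split('/')[1], 'kernel'),
        (lambda t: t.find('./memory').text, '2097152')]

    for extractor, expected in checks:
        assert extractor(tree) == expected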
220   nova/tests/test_xenapi.py   Normal file
@@ -0,0 +1,220 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Test suite for XenAPI
"""

import stubout

from nova import db
from nova import context
from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
from nova.tests.db import fakes
from nova.tests.xenapi import stubs

FLAGS = flags.FLAGS


class XenAPIVolumeTestCase(test.TestCase):
    """
    Unit tests for Volume operations
    """
    def setUp(self):
        super(XenAPIVolumeTestCase, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        FLAGS.target_host = '127.0.0.1'
        FLAGS.xenapi_connection_url = 'test_url'
        FLAGS.xenapi_connection_password = 'test_pass'
        fakes.stub_out_db_instance_api(self.stubs)
        stubs.stub_out_get_target(self.stubs)
        fake.reset()
        self.values = {'name': 1, 'id': 1,
                       'project_id': 'fake',
                       'user_id': 'fake',
                       'image_id': 1,
                       'kernel_id': 2,
                       'ramdisk_id': 3,
                       'instance_type': 'm1.large',
                       'mac_address': 'aa:bb:cc:dd:ee:ff',
                       }

    def _create_volume(self, size='0'):
        """Create a volume object."""
        vol = {}
        vol['size'] = size
        vol['user_id'] = 'fake'
        vol['project_id'] = 'fake'
        vol['host'] = 'localhost'
        vol['availability_zone'] = FLAGS.storage_availability_zone
        vol['status'] = "creating"
        vol['attach_status'] = "detached"
        return db.volume_create(context.get_admin_context(), vol)

    def test_create_iscsi_storage(self):
        """ This shows how to test helper classes' methods """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
        helper = volume_utils.VolumeHelper
        helper.XenAPI = session.get_imported_xenapi()
        vol = self._create_volume()
        info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc')
        label = 'SR-%s' % vol['ec2_id']
        description = 'Test-SR'
        sr_ref = helper.create_iscsi_storage(session, info, label, description)
        srs = fake.get_all('SR')
        self.assertEqual(sr_ref, srs[0])
        db.volume_destroy(context.get_admin_context(), vol['id'])

    def test_parse_volume_info_raise_exception(self):
        """ This shows how to test helper classes' methods """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
        helper = volume_utils.VolumeHelper
        helper.XenAPI = session.get_imported_xenapi()
        vol = self._create_volume()
        # oops, wrong mount point!
        self.assertRaises(volume_utils.StorageError,
                          helper.parse_volume_info,
                          vol['ec2_id'],
                          '/dev/sd')
        db.volume_destroy(context.get_admin_context(), vol['id'])

    def test_attach_volume(self):
        """ This shows how to test Ops classes' methods """
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
        conn = xenapi_conn.get_connection(False)
        volume = self._create_volume()
        instance = db.instance_create(self.values)
        fake.create_vm(instance.name, 'Running')
        result = conn.attach_volume(instance.name, volume['ec2_id'],
                                    '/dev/sdc')

        def check():
            # check that the VM has a VBD attached to it
            # Get XenAPI reference for the VM
            vms = fake.get_all('VM')
            # Get XenAPI record for VBD
            vbds = fake.get_all('VBD')
            vbd = fake.get_record('VBD', vbds[0])
            vm_ref = vbd['VM']
            self.assertEqual(vm_ref, vms[0])

        check()

    def test_attach_volume_raise_exception(self):
        """ This shows how to test when exceptions are raised """
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForVolumeFailedTests)
        conn = xenapi_conn.get_connection(False)
        volume = self._create_volume()
        instance = db.instance_create(self.values)
        fake.create_vm(instance.name, 'Running')
        self.assertRaises(Exception,
                          conn.attach_volume,
                          instance.name,
                          volume['ec2_id'],
                          '/dev/sdc')

    def tearDown(self):
        super(XenAPIVolumeTestCase, self).tearDown()
        self.stubs.UnsetAll()


class XenAPIVMTestCase(test.TestCase):
    """
    Unit tests for VM operations
    """
    def setUp(self):
        super(XenAPIVMTestCase, self).setUp()
        self.manager = manager.AuthManager()
        self.user = self.manager.create_user('fake', 'fake', 'fake',
                                             admin=True)
        self.project = self.manager.create_project('fake', 'fake', 'fake')
        self.network = utils.import_object(FLAGS.network_manager)
        self.stubs = stubout.StubOutForTesting()
        FLAGS.xenapi_connection_url = 'test_url'
        FLAGS.xenapi_connection_password = 'test_pass'
        fake.reset()
        fakes.stub_out_db_instance_api(self.stubs)
        fake.create_network('fake', FLAGS.flat_network_bridge)

    def test_list_instances_0(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        conn = xenapi_conn.get_connection(False)
        instances = conn.list_instances()
        self.assertEquals(instances, [])

    def test_spawn(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        values = {'name': 1, 'id': 1,
                  'project_id': self.project.id,
                  'user_id': self.user.id,
                  'image_id': 1,
                  'kernel_id': 2,
                  'ramdisk_id': 3,
                  'instance_type': 'm1.large',
                  'mac_address': 'aa:bb:cc:dd:ee:ff',
                  }
        conn = xenapi_conn.get_connection(False)
        instance = db.instance_create(values)
        conn.spawn(instance)

        def check():
            instances = conn.list_instances()
            self.assertEquals(instances, [1])

            # Get Nova record for VM
            vm_info = conn.get_info(1)

            # Get XenAPI record for VM
            vms = fake.get_all('VM')
            vm = fake.get_record('VM', vms[0])

            # Check that m1.large above turned into the right thing.
            instance_type = instance_types.INSTANCE_TYPES['m1.large']
            mem_kib = long(instance_type['memory_mb']) << 10
            mem_bytes = str(mem_kib << 10)
            vcpus = instance_type['vcpus']
            self.assertEquals(vm_info['max_mem'], mem_kib)
            self.assertEquals(vm_info['mem'], mem_kib)
            self.assertEquals(vm['memory_static_max'], mem_bytes)
            self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
            self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
            self.assertEquals(vm['VCPUs_max'], str(vcpus))
            self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))

            # Check that the VM is running according to Nova
            self.assertEquals(vm_info['state'], power_state.RUNNING)

            # Check that the VM is running according to XenAPI.
            self.assertEquals(vm['power_state'], 'Running')

        check()

    def tearDown(self):
        super(XenAPIVMTestCase, self).tearDown()
        self.manager.delete_project(self.project)
        self.manager.delete_user(self.user)
        self.stubs.UnsetAll()
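Aside: the memory assertions in test_spawn come down to two left shifts: megabytes to KiB for the values conn.get_info() returns, then KiB to bytes (as a string) for the XenAPI VM record. A quick worked example with an assumed 8192 MB / 4 vCPU flavor (the real numbers live in instance_types.INSTANCE_TYPES):

    # Worked example of the shift arithmetic used above; 8192 MB and 4 vCPUs
    # are assumed values, not necessarily what m1.large is defined as.
    memory_mb = 8192
    vcpus = 4

    mem_kib = memory_mb << 10        # 8388608, compared against vm_info['max_mem']
    mem_bytes = str(mem_kib << 10)   # '8589934592', compared against vm['memory_static_max']

    assert mem_kib == 8192 * 1024
    assert mem_bytes == str(8192 * 1024 * 1024)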
20   nova/tests/xenapi/__init__.py   Normal file
@@ -0,0 +1,20 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
:mod:`xenapi` -- Stubs for XenAPI
=================================
"""
103   nova/tests/xenapi/stubs.py   Normal file
@@ -0,0 +1,103 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Stubouts, mocks and fixtures for the test suite"""

from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils


def stubout_session(stubs, cls):
    """Stubs out two methods from XenAPISession"""
    def fake_import(self):
        """Stubs out get_imported_xenapi of XenAPISession"""
        fake_module = 'nova.virt.xenapi.fake'
        from_list = ['fake']
        return __import__(fake_module, globals(), locals(), from_list, -1)

    stubs.Set(xenapi_conn.XenAPISession, '_create_session',
              lambda s, url: cls(url))
    stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi',
              fake_import)


def stub_out_get_target(stubs):
    """Stubs out _get_target in volume_utils"""
    def fake_get_target(volume_id):
        return (None, None)

    stubs.Set(volume_utils, '_get_target', fake_get_target)


class FakeSessionForVMTests(fake.SessionBase):
    """ Stubs out a XenAPISession for VM tests """
    def __init__(self, uri):
        super(FakeSessionForVMTests, self).__init__(uri)

    def network_get_all_records_where(self, _1, _2):
        return self.xenapi.network.get_all_records()

    def host_call_plugin(self, _1, _2, _3, _4, _5):
        return ''

    def VM_start(self, _1, ref, _2, _3):
        vm = fake.get_record('VM', ref)
        if vm['power_state'] != 'Halted':
            raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
                                vm['power_state']])
        vm['power_state'] = 'Running'
        vm['is_a_template'] = False
        vm['is_control_domain'] = False


class FakeSessionForVolumeTests(fake.SessionBase):
    """ Stubs out a XenAPISession for Volume tests """
    def __init__(self, uri):
        super(FakeSessionForVolumeTests, self).__init__(uri)

    def VBD_plug(self, _1, ref):
        rec = fake.get_record('VBD', ref)
        rec['currently-attached'] = True

    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        valid_vdi = False
        refs = fake.get_all('VDI')
        for ref in refs:
            rec = fake.get_record('VDI', ref)
            if rec['uuid'] == uuid:
                valid_vdi = True
        if not valid_vdi:
            raise fake.Failure([['INVALID_VDI', 'session', self._session]])


class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
    """ Stubs out a XenAPISession for Volume tests: it injects failures """
    def __init__(self, uri):
        super(FakeSessionForVolumeFailedTests, self).__init__(uri)

    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        # This is for testing failure
        raise fake.Failure([['INVALID_VDI', 'session', self._session]])

    def PBD_unplug(self, _1, ref):
        rec = fake.get_record('PBD', ref)
        rec['currently-attached'] = False

    def SR_forget(self, _1, ref):
        pass
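Aside: everything in this module leans on stubout's Set/UnsetAll cycle: Set() swaps an attribute on a class or module for the duration of a test, and UnsetAll() (called in the tearDown methods above) restores the originals. A tiny self-contained sketch of that cycle (the Session class here is a stand-in, not a nova or XenAPI type):

    # Sketch of the stubout pattern used by stubout_session() and the test cases.
    import stubout


    class Session(object):
        def create(self):
            return 'real'

    stubs = stubout.StubOutForTesting()
    stubs.Set(Session, 'create', lambda self: 'fake')   # like stubs.Set(XenAPISession, ...)
    assert Session().create() == 'fake'

    stubs.UnsetAll()                                    # what tearDown() does
    assert Session().create() == 'real'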
@@ -208,7 +208,7 @@ def stop(pidfile):
         pid = None

     if not pid:
-        message = "pidfile %s does not exist. Daemon not running?\n"
+        message = _("pidfile %s does not exist. Daemon not running?\n")
         sys.stderr.write(message % pidfile)
         # Not an error in a restart
         return
@@ -229,7 +229,7 @@ def stop(pidfile):


 def serve(filename):
-    logging.debug("Serving %s" % filename)
+    logging.debug(_("Serving %s") % filename)
     name = os.path.basename(filename)
     OptionsClass = WrapTwistedOptions(TwistdServerOptions)
     options = OptionsClass()
@@ -281,7 +281,7 @@ def serve(filename):
     else:
         logging.getLogger().setLevel(logging.WARNING)

-    logging.debug("Full set of FLAGS:")
+    logging.debug(_("Full set of FLAGS:"))
     for flag in FLAGS:
         logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
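Aside: the _() wrapper introduced in these hunks is Python's gettext translation function; it is available as a builtin because the tree calls gettext.install('nova', unicode=1) (visible in the run_tests.py listing below). With no message catalog installed it simply returns its argument, so the log output is unchanged. A minimal sketch:

    # Sketch: gettext.install() puts _() into builtins; without a catalog it is
    # a pass-through, so wrapped log messages behave exactly as before.
    import gettext

    gettext.install('nova')   # the tree also passes unicode=1 on Python 2

    print(_("Serving %s") % 'nova-api')   # -> Serving nova-api
    print(_("Full set of FLAGS:"))        # -> Full set of FLAGS: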
125   run_tests.py   (deleted)
@@ -1,125 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
This is our basic test running framework based on Twisted's Trial.

Usage Examples:

    # to run all the tests
    python run_tests.py

    # to run a specific test suite imported here
    python run_tests.py NodeConnectionTestCase

    # to run a specific test imported here
    python run_tests.py NodeConnectionTestCase.test_reboot

    # to run some test suites elsewhere
    python run_tests.py nova.tests.node_unittest
    python run_tests.py nova.tests.node_unittest.NodeConnectionTestCase

Due to our use of multiprocessing it we frequently get some ignorable
'Interrupted system call' exceptions after test completion.

"""

import eventlet
eventlet.monkey_patch()

import __main__
import gettext
import os
import sys

gettext.install('nova', unicode=1)

from twisted.scripts import trial as trial_script

from nova import flags
from nova import twistd

from nova.tests.access_unittest import *
from nova.tests.api_unittest import *
from nova.tests.auth_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.compute_unittest import *
from nova.tests.flags_unittest import *
from nova.tests.misc_unittest import *
from nova.tests.network_unittest import *
#from nova.tests.objectstore_unittest import *
from nova.tests.quota_unittest import *
from nova.tests.rpc_unittest import *
from nova.tests.scheduler_unittest import *
from nova.tests.service_unittest import *
from nova.tests.twistd_unittest import *
from nova.tests.virt_unittest import *
from nova.tests.volume_unittest import *


FLAGS = flags.FLAGS
flags.DEFINE_bool('flush_db', True,
                  'Flush the database before running fake tests')
flags.DEFINE_string('tests_stderr', 'run_tests.err.log',
                    'Path to where to pipe STDERR during test runs.'
                    ' Default = "run_tests.err.log"')


if __name__ == '__main__':
    OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
    config = OptionsClass()
    argv = config.parseOptions()

    FLAGS.verbose = True

    # TODO(termie): these should make a call instead of doing work on import
    if FLAGS.fake_tests:
        from nova.tests.fake_flags import *
    else:
        from nova.tests.real_flags import *

    # Establish redirect for STDERR
    sys.stderr.flush()
    err = open(FLAGS.tests_stderr, 'w+', 0)
    os.dup2(err.fileno(), sys.stderr.fileno())

    if len(argv) == 1 and len(config['tests']) == 0:
        # If no tests were specified run the ones imported in this file
        # NOTE(termie): "tests" is not a flag, just some Trial related stuff
        config['tests'].update(['__main__'])
    elif len(config['tests']):
        # If we specified tests check first whether they are in __main__
        for arg in config['tests']:
            key = arg.split('.')[0]
            if hasattr(__main__, key):
                config['tests'].remove(arg)
                config['tests'].add('__main__.%s' % arg)

    trial_script._initialDebugSetup(config)
    trialRunner = trial_script._makeRunner(config)
    suite = trial_script._getSuite(config)
    if config['until-failure']:
        test_result = trialRunner.runUntilFailure(suite)
    else:
        test_result = trialRunner.run(suite)
    if config.tracer:
        sys.settrace(None)
        results = config.tracer.results()
        results.write_results(show_missing=1, summary=False,
                              coverdir=config.coverdir)
    sys.exit(not test_result.wasSuccessful())