merged upstream changes
.bzrignore  (10 changed lines)
@@ -1,3 +1,13 @@
 run_tests.err.log
 .nova-venv
 ChangeLog
+_trial_temp
+keys
+networks
+nova.sqlite
+CA/cacert.pem
+CA/index.txt*
+CA/openssl.cnf
+CA/serial*
+CA/newcerts/*.pem
+CA/private/cakey.pem
.mailmap  (new file, 29 lines)
@@ -0,0 +1,29 @@
+# Format is:
+# <preferred e-mail> <other e-mail>
+<code@term.ie> <github@anarkystic.com>
+<code@term.ie> <termie@preciousroy.local>
+<Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
+<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
+<matt.dietz@rackspace.com> <mdietz@openstack>
+<cbehrens@codestud.com> <chris.behrens@rackspace.com>
+<devin.carlen@gmail.com> <devcamcar@illian.local>
+<ewan.mellor@citrix.com> <emellor@silver>
+<jaypipes@gmail.com> <jpipes@serialcoder>
+<anotherjesse@gmail.com> <jesse@dancelamb>
+<anotherjesse@gmail.com> <jesse@gigantor.local>
+<anotherjesse@gmail.com> <jesse@ubuntu>
+<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
+<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
+<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
+<justin@fathomdb.com> <justinsb@justinsb-desktop>
+<mordred@inaugust.com> <mordred@hudson>
+<paul@openstack.org> <pvoccio@castor.local>
+<paul@openstack.org> <paul.voccio@rackspace.com>
+<todd@ansolabs.com> <todd@lapex>
+<todd@ansolabs.com> <todd@rubidine.com>
+<vishvananda@gmail.com> <vishvananda@yahoo.com>
+<vishvananda@gmail.com> <root@mirror.nasanebula.net>
+# These are from people who failed to set a proper committer
+. <root@tonbuntu>
+. <laner@controller>
+. <root@ubuntu>
Authors  (9 changed lines)
@@ -1,6 +1,9 @@
 Andy Smith <code@term.ie>
 Anne Gentle <anne@openstack.org>
+Anthony Young <sleepsonthefloor@gmail.com>
+Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
 Chris Behrens <cbehrens@codestud.com>
+Dean Troyer <dtroyer@gmail.com>
 Devin Carlen <devin.carlen@gmail.com>
 Eric Day <eday@oddments.org>
 Ewan Mellor <ewan.mellor@citrix.com>
@@ -8,7 +11,8 @@ Hisaki Ohara <hisaki.ohara@intel.com>
 Jay Pipes <jaypipes@gmail.com>
 Jesse Andrews <anotherjesse@gmail.com>
 Joe Heck <heckj@mac.com>
-Joel Moore joelbm24@gmail.com
+Joel Moore <joelbm24@gmail.com>
+Josh Kearney <josh.kearney@rackspace.com>
 Joshua McKenty <jmckenty@gmail.com>
 Justin Santa Barbara <justin@fathomdb.com>
 Matt Dietz <matt.dietz@rackspace.com>
@@ -16,6 +20,9 @@ Michael Gundlach <michael.gundlach@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
 Paul Voccio <paul@openstack.org>
 Rick Clark <rick@openstack.org>
+Ryan Lucio <rlucio@internap.com>
 Soren Hansen <soren.hansen@rackspace.com>
 Todd Willey <todd@ansolabs.com>
 Vishvananda Ishaya <vishvananda@gmail.com>
+Youcef Laribi <Youcef.Laribi@eu.citrix.com>
+Zhixue Wu <Zhixue.Wu@citrix.com>
@@ -13,9 +13,7 @@ include nova/cloudpipe/client.ovpn.template
 include nova/compute/fakevirtinstance.xml
 include nova/compute/interfaces.template
 include nova/virt/interfaces.template
-include nova/virt/libvirt.qemu.xml.template
-include nova/virt/libvirt.uml.xml.template
-include nova/virt/libvirt.xen.xml.template
+include nova/virt/libvirt.*.xml.template
 include nova/tests/CA/
 include nova/tests/CA/cacert.pem
 include nova/tests/CA/private/
bin/nova-api  (11 changed lines)
@@ -37,13 +37,20 @@ from nova import utils
 from nova import server
 
 FLAGS = flags.FLAGS
-flags.DEFINE_integer('api_port', 8773, 'API port')
+flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
+flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host')
+flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port')
+flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host')
 
 
 def main(_args):
     from nova import api
     from nova import wsgi
-    wsgi.run_server(api.API(), FLAGS.api_port)
+    server = wsgi.Server()
+    server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host)
+    server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host)
+    server.wait()
 
 
 if __name__ == '__main__':
     utils.default_flagfile()
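
Illustration (not part of the commit): the new bin/nova-api main() starts two listeners and then blocks until they exit. A minimal sketch of the same start-several-servers-then-wait pattern using only the standard library; wsgiref and threading stand in here for nova.wsgi, which may work differently.

    import threading
    from wsgiref.simple_server import make_server

    def app(environ, start_response):
        # Trivial placeholder WSGI application.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'ok\n']

    def start(port, host='0.0.0.0'):
        # Serve one WSGI app on its own daemon thread and return the thread.
        httpd = make_server(host, port, app)
        t = threading.Thread(target=httpd.serve_forever, daemon=True)
        t.start()
        return t

    if __name__ == '__main__':
        threads = [start(8774), start(8773)]   # OpenStack API port, EC2 API port
        for t in threads:
            t.join()                           # rough equivalent of server.wait()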
@@ -34,6 +34,7 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+from nova import utils
 from nova import twistd
 from nova.compute import monitor
 
@@ -41,6 +42,7 @@ logging.getLogger('boto').setLevel(logging.WARN)
 
 
 if __name__ == '__main__':
+    utils.default_flagfile()
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
@@ -359,9 +359,14 @@ class ProjectCommands(object):
     def zipfile(self, project_id, user_id, filename='nova.zip'):
         """Exports credentials for project to a zip file
         arguments: project_id user_id [filename='nova.zip]"""
-        zip_file = self.manager.get_credentials(user_id, project_id)
-        with open(filename, 'w') as f:
-            f.write(zip_file)
+        try:
+            zip_file = self.manager.get_credentials(user_id, project_id)
+            with open(filename, 'w') as f:
+                f.write(zip_file)
+        except db.api.NoMoreNetworks:
+            print ('No more networks available. If this is a new '
+                   'installation, you need\nto call something like this:\n\n'
+                   '  nova-manage network create 10.0.0.0/8 10 64\n\n')
 
 
 class FloatingIpCommands(object):
@@ -467,7 +472,7 @@ def methods_of(obj):
 
 def main():
     """Parse options and call the appropriate class/method."""
-    utils.default_flagfile('/etc/nova/nova-manage.conf')
+    utils.default_flagfile()
     argv = FLAGS(sys.argv)
 
     if FLAGS.verbose:
@@ -42,8 +42,8 @@ FLAGS = flags.FLAGS
 
 
 if __name__ == '__main__':
+    utils.default_flagfile()
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
-    utils.default_flagfile()
     application = handler.get_application()  # pylint: disable-msg=C0103
@@ -34,9 +34,11 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 
 from nova import service
 from nova import twistd
+from nova import utils
 
 
 if __name__ == '__main__':
+    utils.default_flagfile()
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
@@ -34,9 +34,11 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 
 from nova import service
 from nova import twistd
+from nova import utils
 
 
 if __name__ == '__main__':
+    utils.default_flagfile()
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
@@ -22,25 +22,28 @@ Nova User API client library.
 import base64
 import boto
 import httplib
 
+from nova import flags
 from boto.ec2.regioninfo import RegionInfo
 
 
+FLAGS = flags.FLAGS
+
 DEFAULT_CLC_URL = 'http://127.0.0.1:8773'
 DEFAULT_REGION = 'nova'
-DEFAULT_ACCESS_KEY = 'admin'
-DEFAULT_SECRET_KEY = 'admin'
 
 
 class UserInfo(object):
     """
-    Information about a Nova user, as parsed through SAX
-    fields include:
-        username
-        accesskey
-        secretkey
-
-    and an optional field containing a zip with X509 cert & rc
-    file
+    Information about a Nova user, as parsed through SAX.
+
+    **Fields Include**
+
+    * username
+    * accesskey
+    * secretkey
+    * file (optional) containing zip of X509 cert & rc file
+
     """
 
     def __init__(self, connection=None, username=None, endpoint=None):
@@ -68,9 +71,13 @@ class UserInfo(object):
 class UserRole(object):
     """
     Information about a Nova user's role, as parsed through SAX.
-    Fields include:
-        role
+
+    **Fields include**
+
+    * role
+
     """
 
     def __init__(self, connection=None):
         self.connection = connection
         self.role = None
@@ -90,12 +97,15 @@ class UserRole(object):
 
 class ProjectInfo(object):
     """
-    Information about a Nova project, as parsed through SAX
-    Fields include:
-        projectname
-        description
-        projectManagerId
-        memberIds
+    Information about a Nova project, as parsed through SAX.
+
+    **Fields include**
+
+    * projectname
+    * description
+    * projectManagerId
+    * memberIds
+
     """
 
     def __init__(self, connection=None):
@@ -127,8 +137,11 @@ class ProjectInfo(object):
 class ProjectMember(object):
     """
     Information about a Nova project member, as parsed through SAX.
-    Fields include:
-        memberId
+
+    **Fields include**
+
+    * memberId
+
     """
 
     def __init__(self, connection=None):
@@ -150,14 +163,18 @@ class ProjectMember(object):
 
 class HostInfo(object):
     """
-    Information about a Nova Host, as parsed through SAX:
-        Disk stats
-        Running Instances
-        Memory stats
-        CPU stats
-        Network address info
-        Firewall info
-        Bridge and devices
+    Information about a Nova Host, as parsed through SAX.
+
+    **Fields Include**
+
+    * Disk stats
+    * Running Instances
+    * Memory stats
+    * CPU stats
+    * Network address info
+    * Firewall info
+    * Bridge and devices
+
     """
 
     def __init__(self, connection=None):
@@ -177,9 +194,13 @@ class HostInfo(object):
 
 
 class NovaAdminClient(object):
-    def __init__(self, clc_url=DEFAULT_CLC_URL, region=DEFAULT_REGION,
-                 access_key=DEFAULT_ACCESS_KEY, secret_key=DEFAULT_SECRET_KEY,
-                 **kwargs):
+    def __init__(
+            self,
+            clc_url=DEFAULT_CLC_URL,
+            region=DEFAULT_REGION,
+            access_key=FLAGS.aws_access_key_id,
+            secret_key=FLAGS.aws_secret_access_key,
+            **kwargs):
         parts = self.split_clc_url(clc_url)
 
         self.clc_url = clc_url
@@ -257,9 +278,12 @@ class NovaAdminClient(object):
                           [('item', UserRole)])
 
     def get_user_roles(self, user, project=None):
-        """Returns a list of roles for the given user. Omitting project will
-        return any global roles that the user has. Specifying project will
-        return only project specific roles."""
+        """Returns a list of roles for the given user.
+
+        Omitting project will return any global roles that the user has.
+        Specifying project will return only project specific roles.
+
+        """
         params = {'User': user}
         if project:
             params['Project'] = project
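
Illustration (not part of the commit): NovaAdminClient now uses FLAGS.aws_access_key_id and FLAGS.aws_secret_access_key as keyword-argument defaults. Python evaluates default values once, when the def statement runs, so the flag values in effect at import time become the baked-in defaults. A minimal sketch of that behaviour, with hypothetical names:

    # Defaults are captured at definition time, not at call time.
    SETTING = 'admin'

    def connect(key=SETTING):          # 'admin' is bound here, once
        return key

    SETTING = 'changed-later'
    print(connect())                   # still prints 'admin'
    print(connect(key=SETTING))        # pass explicitly to pick up the new value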
@@ -15,12 +15,12 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-"""
-Fake LDAP server for test harnesses.
+"""Fake LDAP server for test harness, backs to ReDIS.
+
 
 This class does very little error checking, and knows nothing about ldap
 class definitions. It implements the minimum emulation of the python ldap
 library to work with nova.
 
 """
 
 import json
@@ -77,9 +77,8 @@ def initialize(_uri):
 def _match_query(query, attrs):
     """Match an ldap query to an attribute dictionary.
 
-    &, |, and ! are supported in the query. No syntax checking is performed,
-    so malformed querys will not work correctly.
+    The characters &, |, and ! are supported in the query. No syntax checking
+    is performed, so malformed querys will not work correctly.
 
     """
     # cut off the parentheses
     inner = query[1:-1]
@@ -40,6 +40,8 @@ flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
 flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
 flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
                     'OU for Users')
+flags.DEFINE_boolean('ldap_user_modify_only', False,
+                     'Modify attributes for users instead of creating/deleting')
 flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com',
                     'OU for Projects')
 flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com',
@@ -89,8 +91,7 @@ class LdapDriver(object):
 
     def get_user(self, uid):
         """Retrieve user by id"""
-        attr = self.__find_object(self.__uid_to_dn(uid),
-                                  '(objectclass=novaUser)')
+        attr = self.__get_ldap_user(uid)
         return self.__to_user(attr)
 
     def get_user_from_access_key(self, access):
@@ -110,7 +111,12 @@ class LdapDriver(object):
         """Retrieve list of users"""
         attrs = self.__find_objects(FLAGS.ldap_user_subtree,
                                     '(objectclass=novaUser)')
-        return [self.__to_user(attr) for attr in attrs]
+        users = []
+        for attr in attrs:
+            user = self.__to_user(attr)
+            if user is not None:
+                users.append(user)
+        return users
 
     def get_projects(self, uid=None):
         """Retrieve list of projects"""
@@ -125,21 +131,52 @@ class LdapDriver(object):
         """Create a user"""
         if self.__user_exists(name):
             raise exception.Duplicate("LDAP user %s already exists" % name)
-        attr = [
-            ('objectclass', ['person',
-                             'organizationalPerson',
-                             'inetOrgPerson',
-                             'novaUser']),
-            ('ou', [FLAGS.ldap_user_unit]),
-            ('uid', [name]),
-            ('sn', [name]),
-            ('cn', [name]),
-            ('secretKey', [secret_key]),
-            ('accessKey', [access_key]),
-            ('isAdmin', [str(is_admin).upper()]),
-        ]
-        self.conn.add_s(self.__uid_to_dn(name), attr)
-        return self.__to_user(dict(attr))
+        if FLAGS.ldap_user_modify_only:
+            if self.__ldap_user_exists(name):
+                # Retrieve user by name
+                user = self.__get_ldap_user(name)
+                # Entry could be malformed, test for missing attrs.
+                # Malformed entries are useless, replace attributes found.
+                attr = []
+                if 'secretKey' in user.keys():
+                    attr.append((self.ldap.MOD_REPLACE, 'secretKey', \
+                        [secret_key]))
+                else:
+                    attr.append((self.ldap.MOD_ADD, 'secretKey', \
+                        [secret_key]))
+                if 'accessKey' in user.keys():
+                    attr.append((self.ldap.MOD_REPLACE, 'accessKey', \
+                        [access_key]))
+                else:
+                    attr.append((self.ldap.MOD_ADD, 'accessKey', \
+                        [access_key]))
+                if 'isAdmin' in user.keys():
+                    attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \
+                        [str(is_admin).upper()]))
+                else:
+                    attr.append((self.ldap.MOD_ADD, 'isAdmin', \
+                        [str(is_admin).upper()]))
+                self.conn.modify_s(self.__uid_to_dn(name), attr)
+                return self.get_user(name)
+            else:
+                raise exception.NotFound("LDAP object for %s doesn't exist"
+                                         % name)
+        else:
+            attr = [
+                ('objectclass', ['person',
+                                 'organizationalPerson',
+                                 'inetOrgPerson',
+                                 'novaUser']),
+                ('ou', [FLAGS.ldap_user_unit]),
+                ('uid', [name]),
+                ('sn', [name]),
+                ('cn', [name]),
+                ('secretKey', [secret_key]),
+                ('accessKey', [access_key]),
+                ('isAdmin', [str(is_admin).upper()]),
+            ]
+            self.conn.add_s(self.__uid_to_dn(name), attr)
+            return self.__to_user(dict(attr))
 
     def create_project(self, name, manager_uid,
                        description=None, member_uids=None):
@@ -155,7 +192,7 @@ class LdapDriver(object):
         if description is None:
             description = name
         members = []
-        if member_uids != None:
+        if member_uids is not None:
             for member_uid in member_uids:
                 if not self.__user_exists(member_uid):
                     raise exception.NotFound("Project can't be created "
@@ -256,7 +293,24 @@ class LdapDriver(object):
         if not self.__user_exists(uid):
             raise exception.NotFound("User %s doesn't exist" % uid)
         self.__remove_from_all(uid)
-        self.conn.delete_s(self.__uid_to_dn(uid))
+        if FLAGS.ldap_user_modify_only:
+            # Delete attributes
+            attr = []
+            # Retrieve user by name
+            user = self.__get_ldap_user(uid)
+            if 'secretKey' in user.keys():
+                attr.append((self.ldap.MOD_DELETE, 'secretKey', \
+                    user['secretKey']))
+            if 'accessKey' in user.keys():
+                attr.append((self.ldap.MOD_DELETE, 'accessKey', \
+                    user['accessKey']))
+            if 'isAdmin' in user.keys():
+                attr.append((self.ldap.MOD_DELETE, 'isAdmin', \
+                    user['isAdmin']))
+            self.conn.modify_s(self.__uid_to_dn(uid), attr)
+        else:
+            # Delete entry
+            self.conn.delete_s(self.__uid_to_dn(uid))
 
     def delete_project(self, project_id):
         """Delete a project"""
@@ -265,7 +319,7 @@ class LdapDriver(object):
         self.__delete_group(project_dn)
 
     def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
-        """Modify an existing project"""
+        """Modify an existing user"""
         if not access_key and not secret_key and admin is None:
             return
         attr = []
@@ -279,11 +333,21 @@ class LdapDriver(object):
 
     def __user_exists(self, uid):
         """Check if user exists"""
-        return self.get_user(uid) != None
+        return self.get_user(uid) is not None
+
+    def __ldap_user_exists(self, uid):
+        """Check if the user exists in ldap"""
+        return self.__get_ldap_user(uid) is not None
 
     def __project_exists(self, project_id):
         """Check if project exists"""
-        return self.get_project(project_id) != None
+        return self.get_project(project_id) is not None
+
+    def __get_ldap_user(self, uid):
+        """Retrieve LDAP user entry by id"""
+        attr = self.__find_object(self.__uid_to_dn(uid),
+                                  '(objectclass=novaUser)')
+        return attr
 
     def __find_object(self, dn, query=None, scope=None):
         """Find an object by dn and query"""
@@ -330,12 +394,12 @@ class LdapDriver(object):
 
     def __group_exists(self, dn):
         """Check if group exists"""
-        return self.__find_object(dn, '(objectclass=groupOfNames)') != None
+        return self.__find_object(dn, '(objectclass=groupOfNames)') is not None
 
     @staticmethod
     def __role_to_dn(role, project_id=None):
         """Convert role to corresponding dn"""
-        if project_id == None:
+        if project_id is None:
             return FLAGS.__getitem__("ldap_%s" % role).value
         else:
             return 'cn=%s,cn=%s,%s' % (role,
@@ -349,7 +413,7 @@ class LdapDriver(object):
             raise exception.Duplicate("Group can't be created because "
                                       "group %s already exists" % name)
         members = []
-        if member_uids != None:
+        if member_uids is not None:
             for member_uid in member_uids:
                 if not self.__user_exists(member_uid):
                     raise exception.NotFound("Group can't be created "
@@ -375,7 +439,7 @@ class LdapDriver(object):
         res = self.__find_object(group_dn,
                                  '(member=%s)' % self.__uid_to_dn(uid),
                                  self.ldap.SCOPE_BASE)
-        return res != None
+        return res is not None
 
     def __add_to_group(self, uid, group_dn):
         """Add user to group"""
@@ -447,18 +511,22 @@ class LdapDriver(object):
     @staticmethod
     def __to_user(attr):
         """Convert ldap attributes to User object"""
-        if attr == None:
+        if attr is None:
+            return None
+        if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \
+            and 'isAdmin' in attr.keys()):
+            return {
+                'id': attr['uid'][0],
+                'name': attr['cn'][0],
+                'access': attr['accessKey'][0],
+                'secret': attr['secretKey'][0],
+                'admin': (attr['isAdmin'][0] == 'TRUE')}
+        else:
             return None
-        return {
-            'id': attr['uid'][0],
-            'name': attr['cn'][0],
-            'access': attr['accessKey'][0],
-            'secret': attr['secretKey'][0],
-            'admin': (attr['isAdmin'][0] == 'TRUE')}
 
     def __to_project(self, attr):
         """Convert ldap attributes to Project object"""
-        if attr == None:
+        if attr is None:
             return None
         member_dns = attr.get('member', [])
         return {
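
Illustration (not part of the commit): the ldap_user_modify_only branches above build python-ldap modlists, picking MOD_REPLACE when an attribute is already present on the entry and MOD_ADD when it is not. A condensed sketch of that pattern; build_modlist is a hypothetical helper, not part of nova.

    import ldap  # python-ldap

    def build_modlist(existing_attrs, desired):
        """Build a modify_s() modlist: replace attrs that exist, add the rest.

        existing_attrs -- dict of attribute name -> list of current values
        desired        -- dict of attribute name -> new value
        """
        modlist = []
        for name, value in desired.items():
            op = ldap.MOD_REPLACE if name in existing_attrs else ldap.MOD_ADD
            modlist.append((op, name, [value]))
        return modlist

    # Usage against an open connection `conn` and an entry `dn` assumed to exist:
    # conn.modify_s(dn, build_modlist(current, {'accessKey': 'abc', 'isAdmin': 'TRUE'}))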
@@ -84,12 +84,11 @@ class AuthBase(object):
 
     @classmethod
     def safe_id(cls, obj):
-        """Safe get object id
+        """Safely get object id.
 
         This method will return the id of the object if the object
         is of this class, otherwise it will return the original object.
         This allows methods to accept objects or ids as paramaters.
-
         """
         if isinstance(obj, cls):
             return obj.id
@@ -625,6 +624,10 @@ class AuthManager(object):
         with self.driver() as drv:
             drv.modify_user(uid, access_key, secret_key, admin)
 
+    @staticmethod
+    def get_key_pairs(context):
+        return db.key_pair_get_all_by_user(context.elevated(), context.user_id)
+
     def get_credentials(self, user, project=None):
         """Get credential zip for user in project"""
         if not isinstance(user, User):
nova/auth/nova_openldap.schema  (new file, 84 lines)
@@ -0,0 +1,84 @@
+#
+# Person object for Nova
+# inetorgperson with extra attributes
+# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
+#
+#
+
+# using internet experimental oid arc as per BP64 3.1
+objectidentifier novaSchema 1.3.6.1.3.1.666.666
+objectidentifier novaAttrs novaSchema:3
+objectidentifier novaOCs novaSchema:4
+
+attributetype (
+    novaAttrs:1
+    NAME 'accessKey'
+    DESC 'Key for accessing data'
+    EQUALITY caseIgnoreMatch
+    SUBSTR caseIgnoreSubstringsMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+    SINGLE-VALUE
+    )
+
+attributetype (
+    novaAttrs:2
+    NAME 'secretKey'
+    DESC 'Secret key'
+    EQUALITY caseIgnoreMatch
+    SUBSTR caseIgnoreSubstringsMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+    SINGLE-VALUE
+    )
+
+attributetype (
+    novaAttrs:3
+    NAME 'keyFingerprint'
+    DESC 'Fingerprint of private key'
+    EQUALITY caseIgnoreMatch
+    SUBSTR caseIgnoreSubstringsMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+    SINGLE-VALUE
+    )
+
+attributetype (
+    novaAttrs:4
+    NAME 'isAdmin'
+    DESC 'Is user an administrator?'
+    EQUALITY booleanMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
+    SINGLE-VALUE
+    )
+
+attributetype (
+    novaAttrs:5
+    NAME 'projectManager'
+    DESC 'Project Managers of a project'
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
+    )
+
+objectClass (
+    novaOCs:1
+    NAME 'novaUser'
+    DESC 'access and secret keys'
+    AUXILIARY
+    MUST ( uid )
+    MAY ( accessKey $ secretKey $ isAdmin )
+    )
+
+objectClass (
+    novaOCs:2
+    NAME 'novaKeyPair'
+    DESC 'Key pair for User'
+    SUP top
+    STRUCTURAL
+    MUST ( cn $ sshPublicKey $ keyFingerprint )
+    )
+
+objectClass (
+    novaOCs:3
+    NAME 'novaProject'
+    DESC 'Container for project'
+    SUP groupOfNames
+    STRUCTURAL
+    MUST ( cn $ projectManager )
+    )
nova/auth/nova_sun.schema  (new file, 16 lines)
@@ -0,0 +1,16 @@
+#
+# Person object for Nova
+# inetorgperson with extra attributes
+# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
+# Modified for strict RFC 4512 compatibility by: Ryan Lane <ryan@ryandlane.com>
+#
+# using internet experimental oid arc as per BP64 3.1
+dn: cn=schema
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE)
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) )
nova/auth/opendj.sh  (new executable file, 119 lines)
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+# LDAP INSTALL SCRIPT - IS IDEMPOTENT, does not scrub users
+
+apt-get install -y ldap-utils python-ldap openjdk-6-jre
+
+if [ ! -d "/usr/opendj" ]
+then
+    # TODO(rlane): Wikimedia Foundation is the current package maintainer.
+    # After the package is included in Ubuntu's channel, change this.
+    wget http://apt.wikimedia.org/wikimedia/pool/main/o/opendj/opendj_2.4.0-7_amd64.deb
+    dpkg -i opendj_2.4.0-7_amd64.deb
+fi
+
+abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
+schemapath='/var/opendj/instance/config/schema'
+cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif
+cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif
+chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif
+chown opendj:opendj $schemapath/98-nova_sun.ldif
+
+cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
+# LDAP Client Settings
+URI ldap://localhost
+BASE dc=example,dc=com
+BINDDN cn=Directory Manager
+SIZELIMIT 0
+TIMELIMIT 0
+LDAP_CONF_EOF
+
+cat >/etc/ldap/base.ldif <<BASE_LDIF_EOF
+# This is the root of the directory tree
+dn: dc=example,dc=com
+description: Example.Com, your trusted non-existent corporation.
+dc: example
+o: Example.Com
+objectClass: top
+objectClass: dcObject
+objectClass: organization
+
+# Subtree for users
+dn: ou=Users,dc=example,dc=com
+ou: Users
+description: Users
+objectClass: organizationalUnit
+
+# Subtree for groups
+dn: ou=Groups,dc=example,dc=com
+ou: Groups
+description: Groups
+objectClass: organizationalUnit
+
+# Subtree for system accounts
+dn: ou=System,dc=example,dc=com
+ou: System
+description: Special accounts used by software applications.
+objectClass: organizationalUnit
+
+# Special Account for Authentication:
+dn: uid=authenticate,ou=System,dc=example,dc=com
+uid: authenticate
+ou: System
+description: Special account for authenticating users
+userPassword: {MD5}TLnIqASP0CKUR3/LGkEZGg==
+objectClass: account
+objectClass: simpleSecurityObject
+
+# create the sysadmin entry
+
+dn: cn=developers,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: developers
+description: IT admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=sysadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: sysadmins
+description: IT admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=netadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: netadmins
+description: Network admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=cloudadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: cloudadmins
+description: Cloud admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=itsec,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: itsec
+description: IT security users group
+member: uid=admin,ou=Users,dc=example,dc=com
+BASE_LDIF_EOF
+
+/etc/init.d/opendj stop
+su - opendj -c '/usr/opendj/setup -i -b "dc=example,dc=com" -l /etc/ldap/base.ldif -S -w changeme -O -n --noPropertiesFile'
+/etc/init.d/opendj start
nova/auth/openssh-lpk_openldap.schema  (new file, 19 lines)
@@ -0,0 +1,19 @@
+#
+# LDAP Public Key Patch schema for use with openssh-ldappubkey
+# Author: Eric AUGE <eau@phear.org>
+#
+# Based on the proposal of : Mark Ruijter
+#
+
+
+# octetString SYNTAX
+attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
+    DESC 'MANDATORY: OpenSSH Public key'
+    EQUALITY octetStringMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
+
+# printableString SYNTAX yes|no
+objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
+    DESC 'MANDATORY: OpenSSH LPK objectclass'
+    MAY ( sshPublicKey $ uid )
+    )
nova/auth/openssh-lpk_sun.schema  (new file, 10 lines)
@@ -0,0 +1,10 @@
+#
+# LDAP Public Key Patch schema for use with openssh-ldappubkey
+# Author: Eric AUGE <eau@phear.org>
+#
+# Schema for Sun Directory Server.
+# Based on the original schema, modified by Stefan Fischer.
+#
+dn: cn=schema
+attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
+objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) )
@@ -20,115 +20,9 @@
 
 apt-get install -y slapd ldap-utils python-ldap
 
-cat >/etc/ldap/schema/openssh-lpk_openldap.schema <<LPK_SCHEMA_EOF
-#
-# LDAP Public Key Patch schema for use with openssh-ldappubkey
-# Author: Eric AUGE <eau@phear.org>
-#
-# Based on the proposal of : Mark Ruijter
-#
-
-
-# octetString SYNTAX
-attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
-    DESC 'MANDATORY: OpenSSH Public key'
-    EQUALITY octetStringMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
-
-# printableString SYNTAX yes|no
-objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
-    DESC 'MANDATORY: OpenSSH LPK objectclass'
-    MAY ( sshPublicKey $ uid )
-    )
-LPK_SCHEMA_EOF
-
-cat >/etc/ldap/schema/nova.schema <<NOVA_SCHEMA_EOF
-#
-# Person object for Nova
-# inetorgperson with extra attributes
-# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
-#
-#
-
-# using internet experimental oid arc as per BP64 3.1
-objectidentifier novaSchema 1.3.6.1.3.1.666.666
-objectidentifier novaAttrs novaSchema:3
-objectidentifier novaOCs novaSchema:4
-
-attributetype (
-    novaAttrs:1
-    NAME 'accessKey'
-    DESC 'Key for accessing data'
-    EQUALITY caseIgnoreMatch
-    SUBSTR caseIgnoreSubstringsMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
-    SINGLE-VALUE
-    )
-
-attributetype (
-    novaAttrs:2
-    NAME 'secretKey'
-    DESC 'Secret key'
-    EQUALITY caseIgnoreMatch
-    SUBSTR caseIgnoreSubstringsMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
-    SINGLE-VALUE
-    )
-
-attributetype (
-    novaAttrs:3
-    NAME 'keyFingerprint'
-    DESC 'Fingerprint of private key'
-    EQUALITY caseIgnoreMatch
-    SUBSTR caseIgnoreSubstringsMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
-    SINGLE-VALUE
-    )
-
-attributetype (
-    novaAttrs:4
-    NAME 'isAdmin'
-    DESC 'Is user an administrator?'
-    EQUALITY booleanMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
-    SINGLE-VALUE
-    )
-
-attributetype (
-    novaAttrs:5
-    NAME 'projectManager'
-    DESC 'Project Managers of a project'
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
-    )
-
-objectClass (
-    novaOCs:1
-    NAME 'novaUser'
-    DESC 'access and secret keys'
-    AUXILIARY
-    MUST ( uid )
-    MAY ( accessKey $ secretKey $ isAdmin )
-    )
-
-objectClass (
-    novaOCs:2
-    NAME 'novaKeyPair'
-    DESC 'Key pair for User'
-    SUP top
-    STRUCTURAL
-    MUST ( cn $ sshPublicKey $ keyFingerprint )
-    )
-
-objectClass (
-    novaOCs:3
-    NAME 'novaProject'
-    DESC 'Container for project'
-    SUP groupOfNames
-    STRUCTURAL
-    MUST ( cn $ projectManager )
-    )
-
-NOVA_SCHEMA_EOF
+abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
+cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema
+cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema
 
 mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
 cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
|
|||||||
import getopt
|
import getopt
|
||||||
import os
|
import os
|
||||||
import socket
|
import socket
|
||||||
|
import string
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
import gflags
|
import gflags
|
||||||
@@ -38,11 +39,12 @@ class FlagValues(gflags.FlagValues):
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self, extra_context=None):
|
||||||
gflags.FlagValues.__init__(self)
|
gflags.FlagValues.__init__(self)
|
||||||
self.__dict__['__dirty'] = []
|
self.__dict__['__dirty'] = []
|
||||||
self.__dict__['__was_already_parsed'] = False
|
self.__dict__['__was_already_parsed'] = False
|
||||||
self.__dict__['__stored_argv'] = []
|
self.__dict__['__stored_argv'] = []
|
||||||
|
self.__dict__['__extra_context'] = extra_context
|
||||||
|
|
||||||
def __call__(self, argv):
|
def __call__(self, argv):
|
||||||
# We're doing some hacky stuff here so that we don't have to copy
|
# We're doing some hacky stuff here so that we don't have to copy
|
||||||
@@ -112,7 +114,7 @@ class FlagValues(gflags.FlagValues):
|
|||||||
def ParseNewFlags(self):
|
def ParseNewFlags(self):
|
||||||
if '__stored_argv' not in self.__dict__:
|
if '__stored_argv' not in self.__dict__:
|
||||||
return
|
return
|
||||||
new_flags = FlagValues()
|
new_flags = FlagValues(self)
|
||||||
for k in self.__dict__['__dirty']:
|
for k in self.__dict__['__dirty']:
|
||||||
new_flags[k] = gflags.FlagValues.__getitem__(self, k)
|
new_flags[k] = gflags.FlagValues.__getitem__(self, k)
|
||||||
|
|
||||||
@@ -134,10 +136,32 @@ class FlagValues(gflags.FlagValues):
|
|||||||
def __getattr__(self, name):
|
def __getattr__(self, name):
|
||||||
if self.IsDirty(name):
|
if self.IsDirty(name):
|
||||||
self.ParseNewFlags()
|
self.ParseNewFlags()
|
||||||
return gflags.FlagValues.__getattr__(self, name)
|
val = gflags.FlagValues.__getattr__(self, name)
|
||||||
|
if type(val) is str:
|
||||||
|
tmpl = string.Template(val)
|
||||||
|
context = [self, self.__dict__['__extra_context']]
|
||||||
|
return tmpl.substitute(StrWrapper(context))
|
||||||
|
return val
|
||||||
|
|
||||||
|
|
||||||
|
class StrWrapper(object):
|
||||||
|
"""Wrapper around FlagValues objects
|
||||||
|
|
||||||
|
Wraps FlagValues objects for string.Template so that we're
|
||||||
|
sure to return strings."""
|
||||||
|
def __init__(self, context_objs):
|
||||||
|
self.context_objs = context_objs
|
||||||
|
|
||||||
|
def __getitem__(self, name):
|
||||||
|
for context in self.context_objs:
|
||||||
|
val = getattr(context, name, False)
|
||||||
|
if val:
|
||||||
|
return str(val)
|
||||||
|
raise KeyError(name)
|
||||||
|
|
||||||
FLAGS = FlagValues()
|
FLAGS = FlagValues()
|
||||||
|
gflags.FLAGS = FLAGS
|
||||||
|
gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS)
|
||||||
|
|
||||||
gflags.FLAGS = FLAGS
|
gflags.FLAGS = FLAGS
|
||||||
|
|
||||||
@@ -184,6 +208,8 @@ DEFINE_list('region_list',
|
|||||||
[],
|
[],
|
||||||
'list of region=url pairs separated by commas')
|
'list of region=url pairs separated by commas')
|
||||||
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
|
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
|
||||||
|
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
|
||||||
|
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
|
||||||
DEFINE_integer('s3_port', 3333, 's3 port')
|
DEFINE_integer('s3_port', 3333, 's3 port')
|
||||||
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
|
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
|
||||||
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
|
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
|
||||||
@@ -201,9 +227,9 @@ DEFINE_integer('rabbit_port', 5672, 'rabbit port')
|
|||||||
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
|
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
|
||||||
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
|
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
|
||||||
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
|
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
|
||||||
|
DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
|
||||||
|
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
|
||||||
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
|
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
|
||||||
DEFINE_string('cc_host', '127.0.0.1', 'ip of api server')
|
|
||||||
DEFINE_integer('cc_port', 8773, 'cloud controller port')
|
|
||||||
DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
|
DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
|
||||||
'Url to ec2 api server')
|
'Url to ec2 api server')
|
||||||
|
|
||||||
@@ -223,21 +249,24 @@ DEFINE_string('vpn_key_suffix',
|
|||||||
|
|
||||||
DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
|
DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
|
||||||
|
|
||||||
|
DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
|
||||||
|
"Top-level directory for maintaining nova's state")
|
||||||
|
|
||||||
DEFINE_string('sql_connection',
|
DEFINE_string('sql_connection',
|
||||||
'sqlite:///%s/nova.sqlite' % os.path.abspath("./"),
|
'sqlite:///$state_path/nova.sqlite',
|
||||||
'connection string for sql database')
|
'connection string for sql database')
|
||||||
|
|
||||||
DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
|
DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
|
||||||
'Manager for compute')
|
'Manager for compute')
|
||||||
DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
|
DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
|
||||||
'Manager for network')
|
'Manager for network')
|
||||||
DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
|
DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
|
||||||
'Manager for volume')
|
'Manager for volume')
|
||||||
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
|
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
|
||||||
'Manager for scheduler')
|
'Manager for scheduler')
|
||||||
|
|
||||||
# The service to use for image search and retrieval
|
# The service to use for image search and retrieval
|
||||||
DEFINE_string('image_service', 'nova.image.service.LocalImageService',
|
DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
|
||||||
'The service to use for retrieving and searching for images.')
|
'The service to use for retrieving and searching for images.')
|
||||||
|
|
||||||
DEFINE_string('host', socket.gethostname(),
|
DEFINE_string('host', socket.gethostname(),
|
||||||
|
|||||||
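
Illustration (not part of the commit): the flags change above lets one flag value reference another, e.g. 'sqlite:///$state_path/nova.sqlite', which is resolved lazily through string.Template whenever the flag is read, consulting both the flag set and an extra context. A minimal standalone sketch of that substitution idea using plain dicts instead of FlagValues; FirstMatch is a hypothetical stand-in for StrWrapper.

    import string

    class FirstMatch(object):
        """Look a key up in several context mappings, returning it as a string."""
        def __init__(self, *contexts):
            self.contexts = contexts

        def __getitem__(self, name):
            for ctx in self.contexts:
                if name in ctx:
                    return str(ctx[name])
            raise KeyError(name)

    defaults = {'state_path': '/var/lib/nova'}
    overrides = {}  # e.g. values parsed from the command line

    raw = 'sqlite:///$state_path/nova.sqlite'
    resolved = string.Template(raw).substitute(FirstMatch(overrides, defaults))
    print(resolved)  # sqlite:////var/lib/nova/nova.sqlite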
nova/rpc.py  (34 changed lines)
@@ -24,6 +24,7 @@ No fan-out support yet.
 import json
 import logging
 import sys
+import time
 import uuid
 
 from carrot import connection as carrot_connection
@@ -37,8 +38,8 @@ from nova import fakerabbit
 from nova import flags
 from nova import context
 
-FLAGS = flags.FLAGS
 
 
+FLAGS = flags.FLAGS
+
 LOG = logging.getLogger('amqplib')
 LOG.setLevel(logging.DEBUG)
@@ -82,8 +83,24 @@ class Consumer(messaging.Consumer):
     Contains methods for connecting the fetch method to async loops
     """
     def __init__(self, *args, **kwargs):
-        self.failed_connection = False
-        super(Consumer, self).__init__(*args, **kwargs)
+        for i in xrange(FLAGS.rabbit_max_retries):
+            if i > 0:
+                time.sleep(FLAGS.rabbit_retry_interval)
+            try:
+                super(Consumer, self).__init__(*args, **kwargs)
+                self.failed_connection = False
+                break
+            except:  # Catching all because carrot sucks
+                logging.exception("AMQP server on %s:%d is unreachable." \
+                    " Trying again in %d seconds." % (
+                    FLAGS.rabbit_host,
+                    FLAGS.rabbit_port,
+                    FLAGS.rabbit_retry_interval))
+                self.failed_connection = True
+        if self.failed_connection:
+            logging.exception("Unable to connect to AMQP server" \
+                " after %d tries. Shutting down." % FLAGS.rabbit_max_retries)
+            sys.exit(1)
 
     def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
         """Wraps the parent fetch with some logic for failed connections"""
@@ -91,11 +108,12 @@ class Consumer(messaging.Consumer):
         # refactored into some sort of connection manager object
         try:
             if self.failed_connection:
-                # NOTE(vish): conn is defined in the parent class, we can
+                # NOTE(vish): connection is defined in the parent class, we can
                 #             recreate it as long as we create the backend too
                 # pylint: disable-msg=W0201
-                self.conn = Connection.recreate()
-                self.backend = self.conn.create_backend()
+                self.connection = Connection.recreate()
+                self.backend = self.connection.create_backend()
+                self.declare()
             super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
             if self.failed_connection:
                 logging.error("Reconnected to queue")
@@ -206,6 +224,7 @@ class DirectConsumer(Consumer):
         self.routing_key = msg_id
         self.exchange = msg_id
         self.auto_delete = True
+        self.exclusive = True
         super(DirectConsumer, self).__init__(connection=connection)
 
 
@@ -262,6 +281,9 @@ def _unpack_context(msg):
     """Unpack context from msg."""
     context_dict = {}
     for key in list(msg.keys()):
+        # NOTE(vish): Some versions of python don't like unicode keys
+        #             in kwargs.
+        key = str(key)
         if key.startswith('_context_'):
             value = msg.pop(key)
             context_dict[key[9:]] = value
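
Illustration (not part of the commit): the Consumer change above is a bounded retry loop — attempt the connection, sleep a fixed interval between attempts, and give up after a maximum number of tries. A generic sketch of the same pattern, independent of carrot/AMQP; the function and parameter names are illustrative only.

    import time

    def connect_with_retries(connect, max_retries=12, retry_interval=10):
        """Call connect() until it succeeds or max_retries attempts are used."""
        last_error = None
        for attempt in range(max_retries):
            if attempt > 0:
                time.sleep(retry_interval)      # back off between attempts
            try:
                return connect()                # success: hand back the connection
            except Exception as exc:            # broad catch, mirroring the code above
                last_error = exc
        raise RuntimeError("gave up after %d tries: %s" % (max_retries, last_error))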
@@ -42,6 +42,8 @@ flags.DEFINE_bool('daemonize', False, 'daemonize this process')
 # clutter.
 flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing')
 flags.DEFINE_string('logfile', None, 'log file to output to')
+flags.DEFINE_string('logdir', None, 'directory to keep log files in '
+                    '(will be prepended to $logfile)')
 flags.DEFINE_string('pidfile', None, 'pid file to output to')
 flags.DEFINE_string('working_directory', './', 'working directory...')
 flags.DEFINE_integer('uid', os.getuid(), 'uid under which to run')
@@ -119,6 +121,8 @@ def daemonize(args, name, main):
     else:
         if not FLAGS.logfile:
             FLAGS.logfile = '%s.log' % name
+        if FLAGS.logdir:
+            FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
         logfile = logging.FileHandler(FLAGS.logfile)
         logfile.setFormatter(formatter)
         logger.addHandler(logfile)
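Both logdir hunks (here and in the twistd changes further down) use the same rule: when logdir is set, it is simply joined onto the front of whatever logfile name is already in effect. A one-line sketch with example values:

    import os.path

    logdir, logfile = '/var/log/nova', 'nova-compute.log'   # example values only
    if logdir:
        logfile = os.path.join(logdir, logfile)  # -> '/var/log/nova/nova-compute.log'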
@@ -34,10 +34,6 @@ from nova.api.ec2 import apirequest
 from nova.auth import manager
 
 
-FLAGS = flags.FLAGS
-FLAGS.FAKE_subdomain = 'ec2'
-
-
 class FakeHttplibSocket(object):
     """a fake socket implementation for httplib.HTTPResponse, trivial"""
     def __init__(self, response_string):
@@ -83,7 +79,7 @@ class FakeHttplibConnection(object):
         pass
 
 
-class XmlConversionTestCase(test.BaseTestCase):
+class XmlConversionTestCase(test.TrialTestCase):
     """Unit test api xml conversion"""
     def test_number_conversion(self):
         conv = apirequest._try_convert
@@ -100,7 +96,7 @@ class XmlConversionTestCase(test.BaseTestCase):
         self.assertEqual(conv('-0'), 0)
 
 
-class ApiEc2TestCase(test.BaseTestCase):
+class ApiEc2TestCase(test.TrialTestCase):
     """Unit test for the cloud controller on an EC2 API"""
     def setUp(self):
         super(ApiEc2TestCase, self).setUp()
@@ -109,7 +105,7 @@ class ApiEc2TestCase(test.BaseTestCase):
 
         self.host = '127.0.0.1'
 
-        self.app = api.API()
+        self.app = api.API('ec2')
 
     def expect_http(self, host=None, is_secure=False):
         """Returns a new EC2 connection"""
@@ -242,7 +238,7 @@ class ApiEc2TestCase(test.BaseTestCase):
         self.assertEquals(int(group.rules[0].from_port), 80)
         self.assertEquals(int(group.rules[0].to_port), 81)
         self.assertEquals(len(group.rules[0].grants), 1)
         self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
 
         self.expect_http()
         self.mox.ReplayAll()
@@ -91,6 +91,41 @@ class CloudTestCase(test.TrialTestCase):
         # NOTE(vish): create depends on pool, so just call helper directly
         return cloud._gen_key(self.context, self.context.user.id, name)
 
+    def test_describe_addresses(self):
+        """Makes sure describe addresses runs without raising an exception"""
+        address = "10.10.10.10"
+        db.floating_ip_create(self.context,
+                              {'address': address,
+                               'host': FLAGS.host})
+        self.cloud.allocate_address(self.context)
+        self.cloud.describe_addresses(self.context)
+        self.cloud.release_address(self.context,
+                                   public_ip=address)
+        greenthread.sleep(0.3)
+        db.floating_ip_destroy(self.context, address)
+
+    def test_associate_disassociate_address(self):
+        """Verifies associate runs cleanly without raising an exception"""
+        address = "10.10.10.10"
+        db.floating_ip_create(self.context,
+                              {'address': address,
+                               'host': FLAGS.host})
+        self.cloud.allocate_address(self.context)
+        inst = db.instance_create(self.context, {})
+        fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
+        ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
+        self.cloud.associate_address(self.context,
+                                     instance_id=ec2_id,
+                                     public_ip=address)
+        self.cloud.disassociate_address(self.context,
+                                        public_ip=address)
+        self.cloud.release_address(self.context,
+                                   public_ip=address)
+        greenthread.sleep(0.3)
+        self.network.deallocate_fixed_ip(self.context, fixed)
+        db.instance_destroy(self.context, inst['id'])
+        db.floating_ip_destroy(self.context, address)
+
     def test_console_output(self):
         image_id = FLAGS.default_image
         instance_type = FLAGS.default_instance_type
@@ -31,6 +31,7 @@ from nova import flags
 from nova import test
 from nova import utils
 from nova.auth import manager
+from nova.compute import api as compute_api
 
 FLAGS = flags.FLAGS
 
@@ -43,6 +44,7 @@ class ComputeTestCase(test.TrialTestCase):
         self.flags(connection_type='fake',
                    network_manager='nova.network.manager.FlatManager')
         self.compute = utils.import_object(FLAGS.compute_manager)
+        self.compute_api = compute_api.ComputeAPI()
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('fake', 'fake', 'fake')
         self.project = self.manager.create_project('fake', 'fake', 'fake')
@@ -66,6 +68,32 @@ class ComputeTestCase(test.TrialTestCase):
         inst['ami_launch_index'] = 0
         return db.instance_create(self.context, inst)['id']
 
+    def test_create_instance_defaults_display_name(self):
+        """Verify that an instance cannot be created without a display_name."""
+        cases = [dict(), dict(display_name=None)]
+        for instance in cases:
+            ref = self.compute_api.create_instances(self.context,
+                FLAGS.default_instance_type, None, **instance)
+            try:
+                self.assertNotEqual(ref[0].display_name, None)
+            finally:
+                db.instance_destroy(self.context, ref[0]['id'])
+
+    def test_create_instance_associates_security_groups(self):
+        """Make sure create_instances associates security groups"""
+        values = {'name': 'default',
+                  'description': 'default',
+                  'user_id': self.user.id,
+                  'project_id': self.project.id}
+        group = db.security_group_create(self.context, values)
+        ref = self.compute_api.create_instances(self.context,
+            FLAGS.default_instance_type, None, security_group=['default'])
+        try:
+            self.assertEqual(len(ref[0]['security_groups']), 1)
+        finally:
+            db.security_group_destroy(self.context, group['id'])
+            db.instance_destroy(self.context, ref[0]['id'])
+
     @defer.inlineCallbacks
     def test_run_terminate(self):
         """Make sure it is possible to run and terminate instance"""
@@ -21,9 +21,10 @@ from nova import flags
 FLAGS = flags.FLAGS
 
 flags.DECLARE('volume_driver', 'nova.volume.manager')
-FLAGS.volume_driver = 'nova.volume.driver.FakeAOEDriver'
+FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver'
 FLAGS.connection_type = 'fake'
 FLAGS.fake_rabbit = True
+flags.DECLARE('auth_driver', 'nova.auth.manager')
 FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
 flags.DECLARE('network_size', 'nova.network.manager')
 flags.DECLARE('num_networks', 'nova.network.manager')
@@ -31,9 +32,11 @@ flags.DECLARE('fake_network', 'nova.network.manager')
 FLAGS.network_size = 16
 FLAGS.num_networks = 5
 FLAGS.fake_network = True
-flags.DECLARE('num_shelves', 'nova.volume.manager')
-flags.DECLARE('blades_per_shelf', 'nova.volume.manager')
+flags.DECLARE('num_shelves', 'nova.volume.driver')
+flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
+flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
 FLAGS.num_shelves = 2
 FLAGS.blades_per_shelf = 4
+FLAGS.iscsi_num_targets = 8
 FLAGS.verbose = True
 FLAGS.sql_connection = 'sqlite:///nova.sqlite'
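The fake_flags changes follow the pattern already used in that file: flags.DECLARE pulls in the module that defines a flag so the test settings can then override its value. The new iSCSI lines, shown in isolation (same calls as above):

    from nova import flags

    FLAGS = flags.FLAGS
    flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')  # import the flag's definition
    FLAGS.iscsi_num_targets = 8                               # override it for the unit tests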
nova/tests/misc_unittest.py (new file, 52 lines)
@@ -0,0 +1,52 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova import test
+from nova.utils import parse_mailmap, str_dict_replace
+
+
+class ProjectTestCase(test.TrialTestCase):
+    def test_authors_up_to_date(self):
+        if os.path.exists('../.bzr'):
+            contributors = set()
+
+            mailmap = parse_mailmap('../.mailmap')
+
+            import bzrlib.workingtree
+            tree = bzrlib.workingtree.WorkingTree.open('..')
+            tree.lock_read()
+            parents = tree.get_parent_ids()
+            g = tree.branch.repository.get_graph()
+            for p in parents[1:]:
+                rev_ids = [r for r, _ in g.iter_ancestry(parents)
+                           if r != "null:"]
+                revs = tree.branch.repository.get_revisions(rev_ids)
+                for r in revs:
+                    for author in r.get_apparent_authors():
+                        email = author.split(' ')[-1]
+                        contributors.add(str_dict_replace(email, mailmap))
+
+            authors_file = open('../Authors', 'r').read()
+
+            missing = set()
+            for contributor in contributors:
+                if not contributor in authors_file:
+                    missing.add(contributor)
+
+            self.assertTrue(len(missing) == 0,
+                            '%r not listed in Authors' % missing)
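The new test leans on parse_mailmap and str_dict_replace from nova.utils, which are not part of this diff. A rough sketch of what helpers along those lines could look like, based only on the .mailmap format added in this commit ("<preferred e-mail> <other e-mail>" per line); this is an assumption, not the actual nova.utils code:

    import os


    def parse_mailmap(mailmap='.mailmap'):
        """Build {alias_email: canonical_email} from a .mailmap file (sketch)."""
        mapping = {}
        if os.path.exists(mailmap):
            for line in open(mailmap, 'r'):
                line = line.strip()
                if line and not line.startswith('#'):
                    canonical, alias = line.split(' ', 1)
                    mapping[alias.strip()] = canonical.strip()
        return mapping


    def str_dict_replace(s, mapping):
        """Replace each key of mapping found in s with its value (sketch)."""
        for alias, canonical in mapping.iteritems():
            s = s.replace(alias, canonical)
        return s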
@@ -41,7 +41,6 @@ class NetworkTestCase(test.TrialTestCase):
         # flags in the corresponding section in nova-dhcpbridge
         self.flags(connection_type='fake',
                    fake_network=True,
-                   auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
                    network_size=16,
                    num_networks=5)
         logging.getLogger().setLevel(logging.DEBUG)
@@ -127,6 +126,7 @@ class NetworkTestCase(test.TrialTestCase):
         self.network.deallocate_floating_ip(self.context, float_addr)
         self.network.deallocate_fixed_ip(self.context, fix_addr)
         release_ip(fix_addr)
+        db.floating_ip_destroy(context.get_admin_context(), float_addr)
 
     def test_allocate_deallocate_fixed_ip(self):
         """Makes sure that we can allocate and deallocate a fixed ip"""
@@ -94,11 +94,12 @@ class QuotaTestCase(test.TrialTestCase):
         for i in range(FLAGS.quota_instances):
             instance_id = self._create_instance()
             instance_ids.append(instance_id)
-        self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
+        self.assertRaises(quota.QuotaError, self.cloud.run_instances,
                           self.context,
                           min_count=1,
                           max_count=1,
-                          instance_type='m1.small')
+                          instance_type='m1.small',
+                          image_id='fake')
         for instance_id in instance_ids:
             db.instance_destroy(self.context, instance_id)
 
@@ -106,11 +107,12 @@ class QuotaTestCase(test.TrialTestCase):
         instance_ids = []
         instance_id = self._create_instance(cores=4)
         instance_ids.append(instance_id)
-        self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
+        self.assertRaises(quota.QuotaError, self.cloud.run_instances,
                           self.context,
                           min_count=1,
                           max_count=1,
-                          instance_type='m1.small')
+                          instance_type='m1.small',
+                          image_id='fake')
         for instance_id in instance_ids:
             db.instance_destroy(self.context, instance_id)
 
@@ -119,7 +121,7 @@ class QuotaTestCase(test.TrialTestCase):
         for i in range(FLAGS.quota_volumes):
             volume_id = self._create_volume()
             volume_ids.append(volume_id)
-        self.assertRaises(cloud.QuotaError, self.cloud.create_volume,
+        self.assertRaises(quota.QuotaError, self.cloud.create_volume,
                           self.context,
                           size=10)
         for volume_id in volume_ids:
@@ -129,7 +131,7 @@ class QuotaTestCase(test.TrialTestCase):
         volume_ids = []
         volume_id = self._create_volume(size=20)
         volume_ids.append(volume_id)
-        self.assertRaises(cloud.QuotaError,
+        self.assertRaises(quota.QuotaError,
                           self.cloud.create_volume,
                           self.context,
                           size=10)
@@ -138,16 +140,14 @@ class QuotaTestCase(test.TrialTestCase):
 
     def test_too_many_addresses(self):
         address = '192.168.0.100'
-        try:
-            db.floating_ip_get_by_address(context.get_admin_context(), address)
-        except exception.NotFound:
-            db.floating_ip_create(context.get_admin_context(),
-                                  {'address': address, 'host': FLAGS.host})
+        db.floating_ip_create(context.get_admin_context(),
+                              {'address': address, 'host': FLAGS.host})
         float_addr = self.network.allocate_floating_ip(self.context,
                                                        self.project.id)
         # NOTE(vish): This assert never fails. When cloud attempts to
         # make an rpc.call, the test just finishes with OK. It
         # appears to be something in the magic inline callbacks
         # that is breaking.
-        self.assertRaises(cloud.QuotaError, self.cloud.allocate_address,
+        self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
                           self.context)
+        db.floating_ip_destroy(context.get_admin_context(), address)
@@ -81,7 +81,7 @@ class SimpleDriverTestCase(test.TrialTestCase):
                    max_cores=4,
                    max_gigabytes=4,
                    network_manager='nova.network.manager.FlatManager',
-                   volume_driver='nova.volume.driver.FakeAOEDriver',
+                   volume_driver='nova.volume.driver.FakeISCSIDriver',
                    scheduler_driver='nova.scheduler.simple.SimpleScheduler')
         self.scheduler = manager.SchedulerManager()
         self.manager = auth_manager.AuthManager()
@@ -23,8 +23,8 @@ Unit Tests for remote procedure calls using queue
 import mox
 
 from twisted.application.app import startApplication
+from twisted.internet import defer
 
-from nova import context
 from nova import exception
 from nova import flags
 from nova import rpc
@@ -48,7 +48,7 @@ class ExtendedService(service.Service):
         return 'service'
 
 
-class ServiceManagerTestCase(test.BaseTestCase):
+class ServiceManagerTestCase(test.TrialTestCase):
     """Test cases for Services"""
 
     def test_attribute_error_for_no_manager(self):
@@ -75,13 +75,12 @@ class ServiceManagerTestCase(test.BaseTestCase):
         self.assertEqual(serv.test_method(), 'service')
 
 
-class ServiceTestCase(test.BaseTestCase):
+class ServiceTestCase(test.TrialTestCase):
     """Test cases for Services"""
 
     def setUp(self):
         super(ServiceTestCase, self).setUp()
         self.mox.StubOutWithMock(service, 'db')
-        self.context = context.get_admin_context()
 
     def test_create(self):
         host = 'foo'
@@ -144,87 +143,103 @@ class ServiceTestCase(test.BaseTestCase):
     # whether it is disconnected, it looks for a variable on itself called
     # 'model_disconnected' and report_state doesn't really do much so this
     # these are mostly just for coverage
-    def test_report_state(self):
-        host = 'foo'
-        binary = 'bar'
-        service_ref = {'host': host,
-                       'binary': binary,
-                       'report_count': 0,
-                       'id': 1}
-        service.db.__getattr__('report_state')
-        service.db.service_get_by_args(self.context,
-                                       host,
-                                       binary).AndReturn(service_ref)
-        service.db.service_update(self.context, service_ref['id'],
-                                  mox.ContainsKeyValue('report_count', 1))
-
-        self.mox.ReplayAll()
-        s = service.Service()
-        rv = yield s.report_state(host, binary)
 
+    @defer.inlineCallbacks
     def test_report_state_no_service(self):
         host = 'foo'
         binary = 'bar'
+        topic = 'test'
         service_create = {'host': host,
                           'binary': binary,
+                          'topic': topic,
                           'report_count': 0}
         service_ref = {'host': host,
                        'binary': binary,
-                       'report_count': 0,
-                       'id': 1}
+                       'topic': topic,
+                       'report_count': 0,
+                       'id': 1}
 
-        service.db.__getattr__('report_state')
-        service.db.service_get_by_args(self.context,
+        service.db.service_get_by_args(mox.IgnoreArg(),
                                        host,
                                        binary).AndRaise(exception.NotFound())
-        service.db.service_create(self.context,
+        service.db.service_create(mox.IgnoreArg(),
                                   service_create).AndReturn(service_ref)
-        service.db.service_get(self.context,
+        service.db.service_get(mox.IgnoreArg(),
                                service_ref['id']).AndReturn(service_ref)
-        service.db.service_update(self.context, service_ref['id'],
+        service.db.service_update(mox.IgnoreArg(), service_ref['id'],
                                   mox.ContainsKeyValue('report_count', 1))
 
         self.mox.ReplayAll()
-        s = service.Service()
-        rv = yield s.report_state(host, binary)
+        serv = service.Service(host,
+                               binary,
+                               topic,
+                               'nova.tests.service_unittest.FakeManager')
+        serv.startService()
+        yield serv.report_state()
 
+    @defer.inlineCallbacks
     def test_report_state_newly_disconnected(self):
         host = 'foo'
         binary = 'bar'
+        topic = 'test'
+        service_create = {'host': host,
+                          'binary': binary,
+                          'topic': topic,
+                          'report_count': 0}
         service_ref = {'host': host,
                        'binary': binary,
-                       'report_count': 0,
-                       'id': 1}
+                       'topic': topic,
+                       'report_count': 0,
+                       'id': 1}
 
-        service.db.__getattr__('report_state')
-        service.db.service_get_by_args(self.context,
-                                       host,
-                                       binary).AndRaise(Exception())
+        service.db.service_get_by_args(mox.IgnoreArg(),
+                                       host,
+                                       binary).AndRaise(exception.NotFound())
+        service.db.service_create(mox.IgnoreArg(),
+                                  service_create).AndReturn(service_ref)
+        service.db.service_get(mox.IgnoreArg(),
+                               mox.IgnoreArg()).AndRaise(Exception())
 
         self.mox.ReplayAll()
-        s = service.Service()
-        rv = yield s.report_state(host, binary)
-        self.assert_(s.model_disconnected)
+        serv = service.Service(host,
+                               binary,
+                               topic,
+                               'nova.tests.service_unittest.FakeManager')
+        serv.startService()
+        yield serv.report_state()
+        self.assert_(serv.model_disconnected)
 
+    @defer.inlineCallbacks
     def test_report_state_newly_connected(self):
         host = 'foo'
         binary = 'bar'
+        topic = 'test'
+        service_create = {'host': host,
+                          'binary': binary,
+                          'topic': topic,
+                          'report_count': 0}
         service_ref = {'host': host,
                        'binary': binary,
-                       'report_count': 0,
-                       'id': 1}
+                       'topic': topic,
+                       'report_count': 0,
+                       'id': 1}
 
-        service.db.__getattr__('report_state')
-        service.db.service_get_by_args(self.context,
-                                       host,
-                                       binary).AndReturn(service_ref)
-        service.db.service_update(self.context, service_ref['id'],
+        service.db.service_get_by_args(mox.IgnoreArg(),
+                                       host,
+                                       binary).AndRaise(exception.NotFound())
+        service.db.service_create(mox.IgnoreArg(),
+                                  service_create).AndReturn(service_ref)
+        service.db.service_get(mox.IgnoreArg(),
+                               service_ref['id']).AndReturn(service_ref)
+        service.db.service_update(mox.IgnoreArg(), service_ref['id'],
                                   mox.ContainsKeyValue('report_count', 1))
 
         self.mox.ReplayAll()
-        s = service.Service()
-        s.model_disconnected = True
-        rv = yield s.report_state(host, binary)
+        serv = service.Service(host,
+                               binary,
+                               topic,
+                               'nova.tests.service_unittest.FakeManager')
+        serv.startService()
+        serv.model_disconnected = True
+        yield serv.report_state()
 
-        self.assert_(not s.model_disconnected)
+        self.assert_(not serv.model_disconnected)
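The rewritten service tests above all follow mox's record/replay flow: service.db is stubbed out in setUp, the expected calls are recorded (mox.IgnoreArg() relaxes argument matching), and ReplayAll() switches the mocks to replay mode before the code under test runs. A minimal self-contained illustration of that flow using an invented collaborator rather than Nova code:

    import mox


    class FakeDb(object):
        def service_get(self, context, service_id):
            raise NotImplementedError()   # never runs; mox replaces this method


    def demo_record_replay_verify():
        m = mox.Mox()
        db = FakeDb()
        m.StubOutWithMock(db, 'service_get')
        db.service_get(mox.IgnoreArg(), 1).AndReturn({'id': 1, 'report_count': 0})
        m.ReplayAll()                     # stop recording, start replaying

        assert db.service_get(None, 1)['report_count'] == 0

        m.VerifyAll()                     # every recorded expectation was used
        m.UnsetStubs()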
@@ -91,7 +91,7 @@ class LibvirtConnTestCase(test.TrialTestCase):
             FLAGS.libvirt_type = libvirt_type
             conn = libvirt_conn.LibvirtConnection(True)
 
-            uri, template = conn.get_uri_and_template()
+            uri, _template, _rescue = conn.get_uri_and_templates()
             self.assertEquals(uri, expected_uri)
 
             xml = conn.to_xml(instance_ref)
@@ -114,7 +114,7 @@ class LibvirtConnTestCase(test.TrialTestCase):
         for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
             FLAGS.libvirt_type = libvirt_type
             conn = libvirt_conn.LibvirtConnection(True)
-            uri, template = conn.get_uri_and_template()
+            uri, _template, _rescue = conn.get_uri_and_templates()
             self.assertEquals(uri, testuri)
 
     def tearDown(self):
@@ -16,7 +16,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 """
-Tests for Volume Code
+Tests for Volume Code.
+
 """
 import logging
 
@@ -33,7 +34,8 @@ FLAGS = flags.FLAGS
 
 
 class VolumeTestCase(test.TrialTestCase):
-    """Test Case for volumes"""
+    """Test Case for volumes."""
+
     def setUp(self):
         logging.getLogger().setLevel(logging.DEBUG)
         super(VolumeTestCase, self).setUp()
@@ -44,7 +46,7 @@ class VolumeTestCase(test.TrialTestCase):
 
     @staticmethod
     def _create_volume(size='0'):
-        """Create a volume object"""
+        """Create a volume object."""
         vol = {}
         vol['size'] = size
         vol['user_id'] = 'fake'
@@ -56,7 +58,7 @@ class VolumeTestCase(test.TrialTestCase):
 
     @defer.inlineCallbacks
     def test_create_delete_volume(self):
-        """Test volume can be created and deleted"""
+        """Test volume can be created and deleted."""
         volume_id = self._create_volume()
         yield self.volume.create_volume(self.context, volume_id)
         self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
@@ -70,7 +72,7 @@ class VolumeTestCase(test.TrialTestCase):
 
     @defer.inlineCallbacks
     def test_too_big_volume(self):
-        """Ensure failure if a too large of a volume is requested"""
+        """Ensure failure if a too large of a volume is requested."""
         # FIXME(vish): validation needs to move into the data layer in
         # volume_create
         defer.returnValue(True)
@@ -83,9 +85,9 @@ class VolumeTestCase(test.TrialTestCase):
 
     @defer.inlineCallbacks
     def test_too_many_volumes(self):
-        """Ensure that NoMoreBlades is raised when we run out of volumes"""
+        """Ensure that NoMoreTargets is raised when we run out of volumes."""
         vols = []
-        total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+        total_slots = FLAGS.iscsi_num_targets
         for _index in xrange(total_slots):
             volume_id = self._create_volume()
             yield self.volume.create_volume(self.context, volume_id)
@@ -93,14 +95,14 @@ class VolumeTestCase(test.TrialTestCase):
         volume_id = self._create_volume()
         self.assertFailure(self.volume.create_volume(self.context,
                                                      volume_id),
-                           db.NoMoreBlades)
+                           db.NoMoreTargets)
         db.volume_destroy(context.get_admin_context(), volume_id)
         for volume_id in vols:
             yield self.volume.delete_volume(self.context, volume_id)
 
     @defer.inlineCallbacks
     def test_run_attach_detach_volume(self):
-        """Make sure volume can be attached and detached from instance"""
+        """Make sure volume can be attached and detached from instance."""
         inst = {}
         inst['image_id'] = 'ami-test'
         inst['reservation_id'] = 'r-fakeres'
@@ -148,23 +150,22 @@ class VolumeTestCase(test.TrialTestCase):
         db.instance_destroy(self.context, instance_id)
 
     @defer.inlineCallbacks
-    def test_concurrent_volumes_get_different_blades(self):
-        """Ensure multiple concurrent volumes get different blades"""
+    def test_concurrent_volumes_get_different_targets(self):
+        """Ensure multiple concurrent volumes get different targets."""
         volume_ids = []
-        shelf_blades = []
+        targets = []
 
         def _check(volume_id):
-            """Make sure blades aren't duplicated"""
+            """Make sure targets aren't duplicated."""
            volume_ids.append(volume_id)
            admin_context = context.get_admin_context()
-            (shelf_id, blade_id) = db.volume_get_shelf_and_blade(admin_context,
-                                                                 volume_id)
-            shelf_blade = '%s.%s' % (shelf_id, blade_id)
-            self.assert_(shelf_blade not in shelf_blades)
-            shelf_blades.append(shelf_blade)
-            logging.debug("Blade %s allocated", shelf_blade)
+            iscsi_target = db.volume_get_iscsi_target_num(admin_context,
+                                                          volume_id)
+            self.assert_(iscsi_target not in targets)
+            targets.append(iscsi_target)
+            logging.debug("Target %s allocated", iscsi_target)
         deferreds = []
-        total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+        total_slots = FLAGS.iscsi_num_targets
         for _index in xrange(total_slots):
             volume_id = self._create_volume()
             d = self.volume.create_volume(self.context, volume_id)
@@ -43,6 +43,8 @@ else:
 
 
 FLAGS = flags.FLAGS
+flags.DEFINE_string('logdir', None, 'directory to keep log files in '
+                    '(will be prepended to $logfile)')
 
 
 class TwistdServerOptions(ServerOptions):
@@ -246,6 +248,8 @@ def serve(filename):
         FLAGS.logfile = '%s.log' % name
     elif FLAGS.logfile.endswith('twistd.log'):
         FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name)
+    if FLAGS.logdir:
+        FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
     if not FLAGS.prefix:
         FLAGS.prefix = name
     elif FLAGS.prefix.endswith('twisted'):
@@ -49,11 +49,12 @@ from nova import flags
 from nova import twistd
 
 from nova.tests.access_unittest import *
-from nova.tests.auth_unittest import *
 from nova.tests.api_unittest import *
+from nova.tests.auth_unittest import *
 from nova.tests.cloud_unittest import *
 from nova.tests.compute_unittest import *
 from nova.tests.flags_unittest import *
+from nova.tests.misc_unittest import *
 from nova.tests.network_unittest import *
 from nova.tests.objectstore_unittest import *
 from nova.tests.process_unittest import *
@@ -64,8 +65,8 @@ from nova.tests.service_unittest import *
 from nova.tests.twistd_unittest import *
 from nova.tests.validator_unittest import *
 from nova.tests.virt_unittest import *
-from nova.tests.volume_unittest import *
 from nova.tests.virt_unittest import *
+from nova.tests.volume_unittest import *
 
 
 FLAGS = flags.FLAGS