merged trunk, fixed conflicts and tests
.bzrignore | 10
@@ -1,3 +1,13 @@
 run_tests.err.log
 .nova-venv
 ChangeLog
+_trial_temp
+keys
+networks
+nova.sqlite
+CA/cacert.pem
+CA/index.txt*
+CA/openssl.cnf
+CA/serial*
+CA/newcerts/*.pem
+CA/private/cakey.pem

Authors | 5
@@ -3,8 +3,10 @@ Anne Gentle <anne@openstack.org>
 Anthony Young <sleepsonthefloor@gmail.com>
 Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
 Chris Behrens <cbehrens@codestud.com>
+Chmouel Boudjnah <chmouel@chmouel.com>
 Dean Troyer <dtroyer@gmail.com>
 Devin Carlen <devin.carlen@gmail.com>
+Eldar Nugaev <enugaev@griddynamics.com>
 Eric Day <eday@oddments.org>
 Ewan Mellor <ewan.mellor@citrix.com>
 Hisaki Ohara <hisaki.ohara@intel.com>
@@ -20,8 +22,11 @@ Michael Gundlach <michael.gundlach@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
 Paul Voccio <paul@openstack.org>
 Rick Clark <rick@openstack.org>
+Ryan Lucio <rlucio@internap.com>
+Sandy Walsh <sandy.walsh@rackspace.com>
 Soren Hansen <soren.hansen@rackspace.com>
 Todd Willey <todd@ansolabs.com>
+Trey Morris <trey.morris@rackspace.com>
 Vishvananda Ishaya <vishvananda@gmail.com>
 Youcef Laribi <Youcef.Laribi@eu.citrix.com>
 Zhixue Wu <Zhixue.Wu@citrix.com>

bin/nova-api | 23
@@ -17,10 +17,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""
-Nova API daemon.
-"""
+
+"""Starter script for Nova API."""
 
+import gettext
 import os
 import sys
 
@@ -32,9 +32,13 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+gettext.install('nova', unicode=1)
+
+from nova import api
 from nova import flags
 from nova import utils
-from nova import server
+from nova import wsgi
 
 
 FLAGS = flags.FLAGS
 flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
@@ -43,15 +47,10 @@ flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port')
 flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host')
 
 
-def main(_args):
-    from nova import api
-    from nova import wsgi
+if __name__ == '__main__':
+    utils.default_flagfile()
+    FLAGS(sys.argv)
     server = wsgi.Server()
     server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host)
     server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host)
     server.wait()
-
-
-if __name__ == '__main__':
-    utils.default_flagfile()
-    server.serve('nova-api', main)

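Note: the new starter scripts all follow the same pattern -- parse flags first, then hand one or more WSGI apps to a single server and wait. A minimal, self-contained sketch of that idea using plain eventlet (not Nova's own wsgi.Server, whose internals are not part of this diff; the app bodies and ports below are illustrative) looks like:

    # Hypothetical illustration only: serve two trivial WSGI apps from one
    # process with eventlet green threads, mirroring what the new nova-api
    # entry point does with api.API('os') and api.API('ec2').
    import eventlet
    import eventlet.wsgi


    def make_app(name):
        def app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return ['%s\n' % name]
        return app

    if __name__ == '__main__':
        pool = eventlet.GreenPool()
        for name, port in [('os', 8774), ('ec2', 8773)]:
            sock = eventlet.listen(('0.0.0.0', port))
            pool.spawn_n(eventlet.wsgi.server, sock, make_app(name))
        pool.waitall()
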
bin/nova-combined | 65 (new executable file)
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Combined starter script for Nova services."""
+
+import eventlet
+eventlet.monkey_patch()
+
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+from nova import api
+from nova import flags
+from nova import service
+from nova import utils
+from nova import wsgi
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
+flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host')
+flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port')
+flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host')
+
+
+if __name__ == '__main__':
+    utils.default_flagfile()
+    FLAGS(sys.argv)
+
+    compute = service.Service.create(binary='nova-compute')
+    network = service.Service.create(binary='nova-network')
+    volume = service.Service.create(binary='nova-volume')
+    scheduler = service.Service.create(binary='nova-scheduler')
+    #objectstore = service.Service.create(binary='nova-objectstore')
+
+    service.serve(compute, network, volume, scheduler)
+
+    server = wsgi.Server()
+    server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host)
+    server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host)
+    server.wait()

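Note: nova-combined calls eventlet.monkey_patch() before any nova import so that the sockets, threads and sleeps used by later imports are replaced with cooperative green versions. A small stand-alone sketch of why the ordering matters (nothing here is Nova code):

    # Sketch: monkey patching must run before modules that grab the original
    # blocking primitives are imported, which is why nova-combined does it at
    # the very top of the script.
    import eventlet
    eventlet.monkey_patch()

    import time  # time.sleep now yields to other greenthreads


    def worker(n):
        time.sleep(0.1)  # cooperative, does not block the whole process
        return n

    pool = eventlet.GreenPool()
    print sum(pool.imap(worker, range(10)))
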
bin/nova-compute
@@ -17,10 +17,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""
-Twistd daemon for the nova compute nodes.
-"""
+"""Starter script for Nova Compute."""
 
+import eventlet
+eventlet.monkey_patch()
+
+import gettext
 import os
 import sys
 
@@ -32,14 +34,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+gettext.install('nova', unicode=1)
+
 from nova import service
-from nova import twistd
 from nova import utils
 
 
 if __name__ == '__main__':
-    twistd.serve(__file__)
-
-if __name__ == '__builtin__':
     utils.default_flagfile()
-    application = service.Service.create()  # pylint: disable=C0103
+    service.serve()
+    service.wait()

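Note: the twistd-era scripts relied on the module being re-imported under __builtin__ and on a module-level application object; the new scripts simply call service.serve() and service.wait() under __main__. The helpers below are made-up stand-ins that only illustrate the shape of that entry point; Nova's real nova.service module is not shown in this diff:

    # Hypothetical stand-ins for service.serve()/service.wait().
    import eventlet


    class FakeService(object):
        def start(self):
            self._thread = eventlet.spawn(self._run)

        def _run(self):
            while True:
                eventlet.sleep(1)  # a real service would do periodic work here


    def serve(*services):
        for svc in services:
            svc.start()


    def wait():
        while True:
            eventlet.sleep(5)

    if __name__ == '__main__':
        serve(FakeService())
        wait()
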
bin/nova-dhcpbridge
@@ -21,6 +21,7 @@
 Handle lease database updates from DHCP servers.
 """
 
+import gettext
 import logging
 import os
 import sys
@@ -33,6 +34,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+gettext.install('nova', unicode=1)
+
 from nova import context
 from nova import db
 from nova import flags

bin/nova-import-canonical-imagestore
@@ -21,6 +21,7 @@
 Download images from Canonical Image Store
 """
 
+import gettext
 import json
 import os
 import tempfile
@@ -37,6 +38,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+gettext.install('nova', unicode=1)
+
 from nova import flags
 from nova import utils
 from nova.objectstore import image

bin/nova-instancemonitor
@@ -21,6 +21,7 @@
 Daemon for Nova RRD based instance resource monitoring.
 """
 
+import gettext
 import os
 import logging
 import sys
@@ -34,6 +35,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+gettext.install('nova', unicode=1)
+
 from nova import utils
 from nova import twistd
 from nova.compute import monitor
@@ -42,10 +45,10 @@ logging.getLogger('boto').setLevel(logging.WARN)
 
 
 if __name__ == '__main__':
+    utils.default_flagfile()
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
-    utils.default_flagfile()
     logging.warn('Starting instance monitor')
     # pylint: disable-msg=C0103
     monitor = monitor.InstanceMonitor()

bin/nova-manage
@@ -53,6 +53,7 @@
 CLI interface for nova management.
 """
 
+import gettext
 import logging
 import os
 import sys
@@ -68,6 +69,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+gettext.install('nova', unicode=1)
+
 from nova import context
 from nova import crypto
 from nova import db
@@ -379,9 +382,14 @@ class ProjectCommands(object):
     def zipfile(self, project_id, user_id, filename='nova.zip'):
         """Exports credentials for project to a zip file
         arguments: project_id user_id [filename='nova.zip]"""
-        zip_file = self.manager.get_credentials(user_id, project_id)
-        with open(filename, 'w') as f:
-            f.write(zip_file)
+        try:
+            zip_file = self.manager.get_credentials(user_id, project_id)
+            with open(filename, 'w') as f:
+                f.write(zip_file)
+        except db.api.NoMoreNetworks:
+            print ('No more networks available. If this is a new '
+                   'installation, you need\nto call something like this:\n\n'
+                   '  nova-manage network create 10.0.0.0/8 10 64\n\n')
 
 
 class FloatingIpCommands(object):

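Note: the zipfile change above is an instance of translating a low-level exception into actionable CLI output at the command layer instead of letting a traceback reach the operator. A generic sketch of the pattern, with made-up names:

    # Illustrative only: the helper raises a specific exception, the command
    # layer turns it into a hint the operator can act on.
    class NoMoreNetworks(Exception):
        pass


    def export_credentials(filename):
        raise NoMoreNetworks()


    def zipfile_command(filename='nova.zip'):
        try:
            export_credentials(filename)
        except NoMoreNetworks:
            print ('No more networks available. If this is a new installation, '
                   'you need to create a network first, for example:\n\n'
                   '  nova-manage network create 10.0.0.0/8 10 64\n')

    zipfile_command()
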
bin/nova-network
@@ -17,10 +17,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""
-Twistd daemon for the nova network nodes.
-"""
+"""Starter script for Nova Network."""
 
+import eventlet
+eventlet.monkey_patch()
+
+import gettext
 import os
 import sys
 
@@ -32,14 +34,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+gettext.install('nova', unicode=1)
+
 from nova import service
-from nova import twistd
 from nova import utils
 
 
 if __name__ == '__main__':
-    twistd.serve(__file__)
-
-if __name__ == '__builtin__':
     utils.default_flagfile()
-    application = service.Service.create()  # pylint: disable-msg=C0103
+    service.serve()
+    service.wait()

bin/nova-objectstore
@@ -21,6 +21,7 @@
 Twisted daemon for nova objectstore. Supports S3 API.
 """
 
+import gettext
 import os
 import sys
 
@@ -32,6 +33,8 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+gettext.install('nova', unicode=1)
+
 from nova import flags
 from nova import utils
 from nova import twistd
@@ -42,8 +45,8 @@ FLAGS = flags.FLAGS
 
 
 if __name__ == '__main__':
+    utils.default_flagfile()
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
-    utils.default_flagfile()
     application = handler.get_application()  # pylint: disable-msg=C0103

bin/nova-scheduler
@@ -17,10 +17,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""
-Twistd daemon for the nova scheduler nodes.
-"""
+"""Starter script for Nova Scheduler."""
 
+import eventlet
+eventlet.monkey_patch()
+
+import gettext
 import os
 import sys
 
@@ -32,14 +34,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+gettext.install('nova', unicode=1)
+
 from nova import service
-from nova import twistd
 from nova import utils
 
 
 if __name__ == '__main__':
-    twistd.serve(__file__)
-
-if __name__ == '__builtin__':
     utils.default_flagfile()
-    application = service.Service.create()
+    service.serve()
+    service.wait()

bin/nova-volume
@@ -17,10 +17,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""
-Twistd daemon for the nova volume nodes.
-"""
+"""Starter script for Nova Volume."""
 
+import eventlet
+eventlet.monkey_patch()
+
+import gettext
 import os
 import sys
 
@@ -32,14 +34,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
 if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
     sys.path.insert(0, possible_topdir)
 
+gettext.install('nova', unicode=1)
+
 from nova import service
-from nova import twistd
 from nova import utils
 
 
 if __name__ == '__main__':
-    twistd.serve(__file__)
-
-if __name__ == '__builtin__':
     utils.default_flagfile()
-    application = service.Service.create()  # pylint: disable-msg=C0103
+    service.serve()
+    service.wait()

nova/auth/ldapdriver.py
@@ -40,6 +40,8 @@ flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
 flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
 flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
                     'OU for Users')
+flags.DEFINE_boolean('ldap_user_modify_only', False,
+                     'Modify attributes for users instead of creating/deleting')
 flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com',
                     'OU for Projects')
 flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com',
@@ -89,8 +91,7 @@ class LdapDriver(object):
 
     def get_user(self, uid):
         """Retrieve user by id"""
-        attr = self.__find_object(self.__uid_to_dn(uid),
-                                  '(objectclass=novaUser)')
+        attr = self.__get_ldap_user(uid)
         return self.__to_user(attr)
 
     def get_user_from_access_key(self, access):
@@ -110,7 +111,12 @@ class LdapDriver(object):
         """Retrieve list of users"""
         attrs = self.__find_objects(FLAGS.ldap_user_subtree,
                                     '(objectclass=novaUser)')
-        return [self.__to_user(attr) for attr in attrs]
+        users = []
+        for attr in attrs:
+            user = self.__to_user(attr)
+            if user is not None:
+                users.append(user)
+        return users
 
     def get_projects(self, uid=None):
         """Retrieve list of projects"""
@@ -125,21 +131,52 @@ class LdapDriver(object):
         """Create a user"""
         if self.__user_exists(name):
             raise exception.Duplicate("LDAP user %s already exists" % name)
-        attr = [
-            ('objectclass', ['person',
-                             'organizationalPerson',
-                             'inetOrgPerson',
-                             'novaUser']),
-            ('ou', [FLAGS.ldap_user_unit]),
-            ('uid', [name]),
-            ('sn', [name]),
-            ('cn', [name]),
-            ('secretKey', [secret_key]),
-            ('accessKey', [access_key]),
-            ('isAdmin', [str(is_admin).upper()]),
-        ]
-        self.conn.add_s(self.__uid_to_dn(name), attr)
-        return self.__to_user(dict(attr))
+        if FLAGS.ldap_user_modify_only:
+            if self.__ldap_user_exists(name):
+                # Retrieve user by name
+                user = self.__get_ldap_user(name)
+                # Entry could be malformed, test for missing attrs.
+                # Malformed entries are useless, replace attributes found.
+                attr = []
+                if 'secretKey' in user.keys():
+                    attr.append((self.ldap.MOD_REPLACE, 'secretKey', \
+                        [secret_key]))
+                else:
+                    attr.append((self.ldap.MOD_ADD, 'secretKey', \
+                        [secret_key]))
+                if 'accessKey' in user.keys():
+                    attr.append((self.ldap.MOD_REPLACE, 'accessKey', \
+                        [access_key]))
+                else:
+                    attr.append((self.ldap.MOD_ADD, 'accessKey', \
+                        [access_key]))
+                if 'isAdmin' in user.keys():
+                    attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \
+                        [str(is_admin).upper()]))
+                else:
+                    attr.append((self.ldap.MOD_ADD, 'isAdmin', \
+                        [str(is_admin).upper()]))
+                self.conn.modify_s(self.__uid_to_dn(name), attr)
+                return self.get_user(name)
+            else:
+                raise exception.NotFound("LDAP object for %s doesn't exist"
+                                         % name)
+        else:
+            attr = [
+                ('objectclass', ['person',
+                                 'organizationalPerson',
+                                 'inetOrgPerson',
+                                 'novaUser']),
+                ('ou', [FLAGS.ldap_user_unit]),
+                ('uid', [name]),
+                ('sn', [name]),
+                ('cn', [name]),
+                ('secretKey', [secret_key]),
+                ('accessKey', [access_key]),
+                ('isAdmin', [str(is_admin).upper()]),
+            ]
+            self.conn.add_s(self.__uid_to_dn(name), attr)
+            return self.__to_user(dict(attr))
 
     def create_project(self, name, manager_uid,
                        description=None, member_uids=None):
@@ -155,7 +192,7 @@ class LdapDriver(object):
         if description is None:
             description = name
         members = []
-        if member_uids != None:
+        if member_uids is not None:
             for member_uid in member_uids:
                 if not self.__user_exists(member_uid):
                     raise exception.NotFound("Project can't be created "
@@ -256,7 +293,24 @@ class LdapDriver(object):
         if not self.__user_exists(uid):
             raise exception.NotFound("User %s doesn't exist" % uid)
         self.__remove_from_all(uid)
-        self.conn.delete_s(self.__uid_to_dn(uid))
+        if FLAGS.ldap_user_modify_only:
+            # Delete attributes
+            attr = []
+            # Retrieve user by name
+            user = self.__get_ldap_user(uid)
+            if 'secretKey' in user.keys():
+                attr.append((self.ldap.MOD_DELETE, 'secretKey', \
+                    user['secretKey']))
+            if 'accessKey' in user.keys():
+                attr.append((self.ldap.MOD_DELETE, 'accessKey', \
+                    user['accessKey']))
+            if 'isAdmin' in user.keys():
+                attr.append((self.ldap.MOD_DELETE, 'isAdmin', \
+                    user['isAdmin']))
+            self.conn.modify_s(self.__uid_to_dn(uid), attr)
+        else:
+            # Delete entry
+            self.conn.delete_s(self.__uid_to_dn(uid))
 
     def delete_project(self, project_id):
         """Delete a project"""
@@ -265,7 +319,7 @@ class LdapDriver(object):
         self.__delete_group(project_dn)
 
     def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
-        """Modify an existing project"""
+        """Modify an existing user"""
         if not access_key and not secret_key and admin is None:
             return
         attr = []
@@ -279,11 +333,21 @@ class LdapDriver(object):
 
     def __user_exists(self, uid):
         """Check if user exists"""
-        return self.get_user(uid) != None
+        return self.get_user(uid) is not None
+
+    def __ldap_user_exists(self, uid):
+        """Check if the user exists in ldap"""
+        return self.__get_ldap_user(uid) is not None
 
     def __project_exists(self, project_id):
         """Check if project exists"""
-        return self.get_project(project_id) != None
+        return self.get_project(project_id) is not None
+
+    def __get_ldap_user(self, uid):
+        """Retrieve LDAP user entry by id"""
+        attr = self.__find_object(self.__uid_to_dn(uid),
+                                  '(objectclass=novaUser)')
+        return attr
 
     def __find_object(self, dn, query=None, scope=None):
         """Find an object by dn and query"""
@@ -330,12 +394,12 @@ class LdapDriver(object):
 
     def __group_exists(self, dn):
         """Check if group exists"""
-        return self.__find_object(dn, '(objectclass=groupOfNames)') != None
+        return self.__find_object(dn, '(objectclass=groupOfNames)') is not None
 
     @staticmethod
     def __role_to_dn(role, project_id=None):
         """Convert role to corresponding dn"""
-        if project_id == None:
+        if project_id is None:
             return FLAGS.__getitem__("ldap_%s" % role).value
         else:
             return 'cn=%s,cn=%s,%s' % (role,
@@ -349,7 +413,7 @@ class LdapDriver(object):
             raise exception.Duplicate("Group can't be created because "
                                       "group %s already exists" % name)
         members = []
-        if member_uids != None:
+        if member_uids is not None:
             for member_uid in member_uids:
                 if not self.__user_exists(member_uid):
                     raise exception.NotFound("Group can't be created "
@@ -375,7 +439,7 @@ class LdapDriver(object):
         res = self.__find_object(group_dn,
                                  '(member=%s)' % self.__uid_to_dn(uid),
                                  self.ldap.SCOPE_BASE)
-        return res != None
+        return res is not None
 
     def __add_to_group(self, uid, group_dn):
         """Add user to group"""
@@ -447,18 +511,22 @@ class LdapDriver(object):
     @staticmethod
     def __to_user(attr):
         """Convert ldap attributes to User object"""
-        if attr == None:
+        if attr is None:
+            return None
+        if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \
+            and 'isAdmin' in attr.keys()):
+            return {
+                'id': attr['uid'][0],
+                'name': attr['cn'][0],
+                'access': attr['accessKey'][0],
+                'secret': attr['secretKey'][0],
+                'admin': (attr['isAdmin'][0] == 'TRUE')}
+        else:
             return None
-        return {
-            'id': attr['uid'][0],
-            'name': attr['cn'][0],
-            'access': attr['accessKey'][0],
-            'secret': attr['secretKey'][0],
-            'admin': (attr['isAdmin'][0] == 'TRUE')}
 
     def __to_project(self, attr):
         """Convert ldap attributes to Project object"""
-        if attr == None:
+        if attr is None:
             return None
         member_dns = attr.get('member', [])
         return {

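Note: with ldap_user_modify_only set, the driver never adds or deletes whole entries; it builds a python-ldap modification list and applies it with modify_s, replacing attributes that already exist and adding the ones that are missing. A stand-alone sketch of that flow (server address, bind credentials and attribute values here are illustrative, and the entry is assumed to exist):

    # python-ldap sketch of the modify-only flow used by the driver above.
    import ldap

    conn = ldap.initialize('ldap://localhost')
    conn.simple_bind_s('cn=Manager,dc=example,dc=com', 'changeme')

    dn = 'uid=demo,ou=Users,dc=example,dc=com'
    # Fetch the existing entry so we know which attributes it already carries.
    existing = conn.search_s(dn, ldap.SCOPE_BASE, '(objectclass=novaUser)')[0][1]

    modlist = []
    for attr, value in [('accessKey', 'demo-access'), ('secretKey', 'demo-secret')]:
        op = ldap.MOD_REPLACE if attr in existing else ldap.MOD_ADD
        modlist.append((op, attr, [value]))
    conn.modify_s(dn, modlist)
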
nova/auth/manager.py
@@ -621,6 +621,10 @@ class AuthManager(object):
         with self.driver() as drv:
             drv.modify_user(uid, access_key, secret_key, admin)
 
+    @staticmethod
+    def get_key_pairs(context):
+        return db.key_pair_get_all_by_user(context.elevated(), context.user_id)
+
     def get_credentials(self, user, project=None, use_dmz=True):
         """Get credential zip for user in project"""
         if not isinstance(user, User):

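Note: the surrounding AuthManager methods obtain their backend through a context manager ("with self.driver() as drv:"), which keeps connect/teardown in one place. A minimal sketch of that pattern with a made-up driver (none of these names are Nova's):

    # Illustrative context-manager driver pattern.
    from contextlib import contextmanager


    class FakeDriver(object):
        def modify_user(self, uid, access, secret, admin):
            print 'modify', uid


    @contextmanager
    def driver():
        drv = FakeDriver()   # connect/bind would happen here
        try:
            yield drv
        finally:
            pass             # teardown (unbind) would happen here

    with driver() as drv:
        drv.modify_user('demo', 'access', 'secret', False)
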
nova/auth/nova_openldap.schema | 84 (new file)
@@ -0,0 +1,84 @@
+#
+# Person object for Nova
+# inetorgperson with extra attributes
+# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
+#
+#
+
+# using internet experimental oid arc as per BP64 3.1
+objectidentifier novaSchema 1.3.6.1.3.1.666.666
+objectidentifier novaAttrs novaSchema:3
+objectidentifier novaOCs novaSchema:4
+
+attributetype (
+    novaAttrs:1
+    NAME 'accessKey'
+    DESC 'Key for accessing data'
+    EQUALITY caseIgnoreMatch
+    SUBSTR caseIgnoreSubstringsMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+    SINGLE-VALUE
+    )
+
+attributetype (
+    novaAttrs:2
+    NAME 'secretKey'
+    DESC 'Secret key'
+    EQUALITY caseIgnoreMatch
+    SUBSTR caseIgnoreSubstringsMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+    SINGLE-VALUE
+    )
+
+attributetype (
+    novaAttrs:3
+    NAME 'keyFingerprint'
+    DESC 'Fingerprint of private key'
+    EQUALITY caseIgnoreMatch
+    SUBSTR caseIgnoreSubstringsMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+    SINGLE-VALUE
+    )
+
+attributetype (
+    novaAttrs:4
+    NAME 'isAdmin'
+    DESC 'Is user an administrator?'
+    EQUALITY booleanMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
+    SINGLE-VALUE
+    )
+
+attributetype (
+    novaAttrs:5
+    NAME 'projectManager'
+    DESC 'Project Managers of a project'
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
+    )
+
+objectClass (
+    novaOCs:1
+    NAME 'novaUser'
+    DESC 'access and secret keys'
+    AUXILIARY
+    MUST ( uid )
+    MAY ( accessKey $ secretKey $ isAdmin )
+    )
+
+objectClass (
+    novaOCs:2
+    NAME 'novaKeyPair'
+    DESC 'Key pair for User'
+    SUP top
+    STRUCTURAL
+    MUST ( cn $ sshPublicKey $ keyFingerprint )
+    )
+
+objectClass (
+    novaOCs:3
+    NAME 'novaProject'
+    DESC 'Container for project'
+    SUP groupOfNames
+    STRUCTURAL
+    MUST ( cn $ projectManager )
+    )

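Note: with the novaUser auxiliary class loaded, a user entry carries accessKey, secretKey and isAdmin alongside the usual inetOrgPerson attributes. The python-ldap call below mirrors the attribute list built by LdapDriver.create_user above; the server, bind credentials and values are illustrative:

    # Adding a novaUser-augmented person entry through python-ldap.
    import ldap

    conn = ldap.initialize('ldap://localhost')
    conn.simple_bind_s('cn=Manager,dc=example,dc=com', 'changeme')
    conn.add_s('uid=demo,ou=Users,dc=example,dc=com', [
        ('objectclass', ['person', 'organizationalPerson',
                         'inetOrgPerson', 'novaUser']),
        ('ou', ['Users']),
        ('uid', ['demo']),
        ('sn', ['demo']),
        ('cn', ['demo']),
        ('accessKey', ['demo-access']),
        ('secretKey', ['demo-secret']),
        ('isAdmin', ['FALSE']),
    ])
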
nova/auth/nova_sun.schema | 16 (new file)
@@ -0,0 +1,16 @@
+#
+# Person object for Nova
+# inetorgperson with extra attributes
+# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
+# Modified for strict RFC 4512 compatibility by: Ryan Lane <ryan@ryandlane.com>
+#
+# using internet experimental oid arc as per BP64 3.1
+dn: cn=schema
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE)
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) )

nova/auth/opendj.sh | 119 (new executable file)
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# LDAP INSTALL SCRIPT - IS IDEMPOTENT, does not scrub users
+
+apt-get install -y ldap-utils python-ldap openjdk-6-jre
+
+if [ ! -d "/usr/opendj" ]
+then
+    # TODO(rlane): Wikimedia Foundation is the current package maintainer.
+    # After the package is included in Ubuntu's channel, change this.
+    wget http://apt.wikimedia.org/wikimedia/pool/main/o/opendj/opendj_2.4.0-7_amd64.deb
+    dpkg -i opendj_2.4.0-7_amd64.deb
+fi
+
+abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
+schemapath='/var/opendj/instance/config/schema'
+cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif
+cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif
+chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif
+chown opendj:opendj $schemapath/98-nova_sun.ldif
+
+cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
+# LDAP Client Settings
+URI ldap://localhost
+BASE dc=example,dc=com
+BINDDN cn=Directory Manager
+SIZELIMIT 0
+TIMELIMIT 0
+LDAP_CONF_EOF
+
+cat >/etc/ldap/base.ldif <<BASE_LDIF_EOF
+# This is the root of the directory tree
+dn: dc=example,dc=com
+description: Example.Com, your trusted non-existent corporation.
+dc: example
+o: Example.Com
+objectClass: top
+objectClass: dcObject
+objectClass: organization
+
+# Subtree for users
+dn: ou=Users,dc=example,dc=com
+ou: Users
+description: Users
+objectClass: organizationalUnit
+
+# Subtree for groups
+dn: ou=Groups,dc=example,dc=com
+ou: Groups
+description: Groups
+objectClass: organizationalUnit
+
+# Subtree for system accounts
+dn: ou=System,dc=example,dc=com
+ou: System
+description: Special accounts used by software applications.
+objectClass: organizationalUnit
+
+# Special Account for Authentication:
+dn: uid=authenticate,ou=System,dc=example,dc=com
+uid: authenticate
+ou: System
+description: Special account for authenticating users
+userPassword: {MD5}TLnIqASP0CKUR3/LGkEZGg==
+objectClass: account
+objectClass: simpleSecurityObject
+
+# create the sysadmin entry
+
+dn: cn=developers,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: developers
+description: IT admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=sysadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: sysadmins
+description: IT admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=netadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: netadmins
+description: Network admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=cloudadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: cloudadmins
+description: Cloud admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=itsec,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: itsec
+description: IT security users group
+member: uid=admin,ou=Users,dc=example,dc=com
+BASE_LDIF_EOF
+
+/etc/init.d/opendj stop
+su - opendj -c '/usr/opendj/setup -i -b "dc=example,dc=com" -l /etc/ldap/base.ldif -S -w changeme -O -n --noPropertiesFile'
+/etc/init.d/opendj start

nova/auth/openssh-lpk_openldap.schema | 19 (new file)
@@ -0,0 +1,19 @@
+#
+# LDAP Public Key Patch schema for use with openssh-ldappubkey
+# Author: Eric AUGE <eau@phear.org>
+#
+# Based on the proposal of : Mark Ruijter
+#
+
+
+# octetString SYNTAX
+attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
+    DESC 'MANDATORY: OpenSSH Public key'
+    EQUALITY octetStringMatch
+    SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
+
+# printableString SYNTAX yes|no
+objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
+    DESC 'MANDATORY: OpenSSH LPK objectclass'
+    MAY ( sshPublicKey $ uid )
+    )

nova/auth/openssh-lpk_sun.schema | 10 (new file)
@@ -0,0 +1,10 @@
+#
+# LDAP Public Key Patch schema for use with openssh-ldappubkey
+# Author: Eric AUGE <eau@phear.org>
+#
+# Schema for Sun Directory Server.
+# Based on the original schema, modified by Stefan Fischer.
+#
+dn: cn=schema
+attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
+objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) )

nova/auth/slap.sh
@@ -20,115 +20,9 @@
 
 apt-get install -y slapd ldap-utils python-ldap
 
-cat >/etc/ldap/schema/openssh-lpk_openldap.schema <<LPK_SCHEMA_EOF
-#
-# LDAP Public Key Patch schema for use with openssh-ldappubkey
-# Author: Eric AUGE <eau@phear.org>
-#
-# Based on the proposal of : Mark Ruijter
-#
-
-
-# octetString SYNTAX
-attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
-    DESC 'MANDATORY: OpenSSH Public key'
-    EQUALITY octetStringMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
-
-# printableString SYNTAX yes|no
-objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
-    DESC 'MANDATORY: OpenSSH LPK objectclass'
-    MAY ( sshPublicKey $ uid )
-    )
-LPK_SCHEMA_EOF
-
-cat >/etc/ldap/schema/nova.schema <<NOVA_SCHEMA_EOF
-#
-# Person object for Nova
-# inetorgperson with extra attributes
-# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
-#
-#
-
-# using internet experimental oid arc as per BP64 3.1
-objectidentifier novaSchema 1.3.6.1.3.1.666.666
-objectidentifier novaAttrs novaSchema:3
-objectidentifier novaOCs novaSchema:4
-
-attributetype (
-    novaAttrs:1
-    NAME 'accessKey'
-    DESC 'Key for accessing data'
-    EQUALITY caseIgnoreMatch
-    SUBSTR caseIgnoreSubstringsMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
-    SINGLE-VALUE
-    )
-
-attributetype (
-    novaAttrs:2
-    NAME 'secretKey'
-    DESC 'Secret key'
-    EQUALITY caseIgnoreMatch
-    SUBSTR caseIgnoreSubstringsMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
-    SINGLE-VALUE
-    )
-
-attributetype (
-    novaAttrs:3
-    NAME 'keyFingerprint'
-    DESC 'Fingerprint of private key'
-    EQUALITY caseIgnoreMatch
-    SUBSTR caseIgnoreSubstringsMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
-    SINGLE-VALUE
-    )
-
-attributetype (
-    novaAttrs:4
-    NAME 'isAdmin'
-    DESC 'Is user an administrator?'
-    EQUALITY booleanMatch
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
-    SINGLE-VALUE
-    )
-
-attributetype (
-    novaAttrs:5
-    NAME 'projectManager'
-    DESC 'Project Managers of a project'
-    SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
-    )
-
-objectClass (
-    novaOCs:1
-    NAME 'novaUser'
-    DESC 'access and secret keys'
-    AUXILIARY
-    MUST ( uid )
-    MAY ( accessKey $ secretKey $ isAdmin )
-    )
-
-objectClass (
-    novaOCs:2
-    NAME 'novaKeyPair'
-    DESC 'Key pair for User'
-    SUP top
-    STRUCTURAL
-    MUST ( cn $ sshPublicKey $ keyFingerprint )
-    )
-
-objectClass (
-    novaOCs:3
-    NAME 'novaProject'
-    DESC 'Container for project'
-    SUP groupOfNames
-    STRUCTURAL
-    MUST ( cn $ projectManager )
-    )
-
-NOVA_SCHEMA_EOF
-
+abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
+cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema
+cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema
+
 mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
 cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF

nova/flags.py
@@ -159,6 +159,7 @@ class StrWrapper(object):
             return str(val)
         raise KeyError(name)
 
+
 FLAGS = FlagValues()
 gflags.FLAGS = FLAGS
 gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS)
@@ -183,6 +184,12 @@ DEFINE_list = _wrapper(gflags.DEFINE_list)
 DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist)
 DEFINE_multistring = _wrapper(gflags.DEFINE_multistring)
 DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int)
+DEFINE_flag = _wrapper(gflags.DEFINE_flag)
+
+
+HelpFlag = gflags.HelpFlag
+HelpshortFlag = gflags.HelpshortFlag
+HelpXMLFlag = gflags.HelpXMLFlag
 
 
 def DECLARE(name, module_string, flag_values=FLAGS):
@@ -224,7 +231,6 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
 DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
 DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
 DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
-DEFINE_string('cc_host', '127.0.0.1', 'ip of api server (for infrastructure')
 DEFINE_string('cc_dmz', '127.0.0.1', 'ip of api server (for instances)')
 DEFINE_integer('cc_port', 8773, 'cloud controller port')
 DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
@@ -263,7 +269,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
               'Manager for scheduler')
 
 # The service to use for image search and retrieval
-DEFINE_string('image_service', 'nova.image.local.LocalImageService',
+DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
               'The service to use for retrieving and searching for images.')
 
 DEFINE_string('host', socket.gethostname(),

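Note: nova.flags is a thin wrapper around python-gflags, and the new DEFINE_flag and HelpFlag exports expose a little more of that library to the starter scripts. A stand-alone sketch of the underlying gflags calls (the flag shown is the image_service default changed above; everything else is illustrative):

    # Direct python-gflags usage that the nova.flags wrappers delegate to.
    import sys

    import gflags

    FLAGS = gflags.FLAGS
    gflags.DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
                         'The service to use for retrieving and searching '
                         'for images.')
    gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS)

    if __name__ == '__main__':
        remaining = FLAGS(sys.argv)  # parses --image_service=..., strips known flags
        print FLAGS.image_service
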
209
nova/process.py
209
nova/process.py
@@ -1,209 +0,0 @@
|
|||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2010 United States Government as represented by the
|
|
||||||
# Administrator of the National Aeronautics and Space Administration.
|
|
||||||
# Copyright 2010 FathomDB Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Process pool using twisted threading
|
|
||||||
"""
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import StringIO
|
|
||||||
|
|
||||||
from twisted.internet import defer
|
|
||||||
from twisted.internet import error
|
|
||||||
from twisted.internet import protocol
|
|
||||||
from twisted.internet import reactor
|
|
||||||
|
|
||||||
from nova import flags
|
|
||||||
from nova.exception import ProcessExecutionError
|
|
||||||
|
|
||||||
FLAGS = flags.FLAGS
|
|
||||||
flags.DEFINE_integer('process_pool_size', 4,
|
|
||||||
'Number of processes to use in the process pool')
|
|
||||||
|
|
||||||
|
|
||||||
# This is based on _BackRelay from twister.internal.utils, but modified to
|
|
||||||
# capture both stdout and stderr, without odd stderr handling, and also to
|
|
||||||
# handle stdin
|
|
||||||
class BackRelayWithInput(protocol.ProcessProtocol):
|
|
||||||
"""
|
|
||||||
Trivial protocol for communicating with a process and turning its output
|
|
||||||
into the result of a L{Deferred}.
|
|
||||||
|
|
||||||
@ivar deferred: A L{Deferred} which will be called back with all of stdout
|
|
||||||
and all of stderr as well (as a tuple). C{terminate_on_stderr} is true
|
|
||||||
and any bytes are received over stderr, this will fire with an
|
|
||||||
L{_ProcessExecutionError} instance and the attribute will be set to
|
|
||||||
C{None}.
|
|
||||||
|
|
||||||
@ivar onProcessEnded: If C{terminate_on_stderr} is false and bytes are
|
|
||||||
received over stderr, this attribute will refer to a L{Deferred} which
|
|
||||||
will be called back when the process ends. This C{Deferred} is also
|
|
||||||
associated with the L{_ProcessExecutionError} which C{deferred} fires
|
|
||||||
with earlier in this case so that users can determine when the process
|
|
||||||
has actually ended, in addition to knowing when bytes have been
|
|
||||||
received via stderr.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, deferred, cmd, started_deferred=None,
|
|
||||||
terminate_on_stderr=False, check_exit_code=True,
|
|
||||||
process_input=None):
|
|
||||||
self.deferred = deferred
|
|
||||||
self.cmd = cmd
|
|
||||||
self.stdout = StringIO.StringIO()
|
|
||||||
self.stderr = StringIO.StringIO()
|
|
||||||
self.started_deferred = started_deferred
|
|
||||||
self.terminate_on_stderr = terminate_on_stderr
|
|
||||||
self.check_exit_code = check_exit_code
|
|
||||||
self.process_input = process_input
|
|
||||||
self.on_process_ended = None
|
|
||||||
|
|
||||||
def _build_execution_error(self, exit_code=None):
|
|
||||||
return ProcessExecutionError(cmd=self.cmd,
|
|
||||||
exit_code=exit_code,
|
|
||||||
stdout=self.stdout.getvalue(),
|
|
||||||
stderr=self.stderr.getvalue())
|
|
||||||
|
|
||||||
def errReceived(self, text):
|
|
||||||
self.stderr.write(text)
|
|
||||||
if self.terminate_on_stderr and (self.deferred is not None):
|
|
||||||
self.on_process_ended = defer.Deferred()
|
|
||||||
self.deferred.errback(self._build_execution_error())
|
|
||||||
self.deferred = None
|
|
||||||
self.transport.loseConnection()
|
|
||||||
|
|
||||||
def outReceived(self, text):
|
|
||||||
self.stdout.write(text)
|
|
||||||
|
|
||||||
def processEnded(self, reason):
|
|
||||||
if self.deferred is not None:
|
|
||||||
stdout, stderr = self.stdout.getvalue(), self.stderr.getvalue()
|
|
||||||
exit_code = reason.value.exitCode
|
|
||||||
if self.check_exit_code and exit_code != 0:
|
|
||||||
self.deferred.errback(self._build_execution_error(exit_code))
|
|
||||||
else:
|
|
||||||
try:
|
|
||||||
if self.check_exit_code:
|
|
||||||
reason.trap(error.ProcessDone)
|
|
||||||
self.deferred.callback((stdout, stderr))
|
|
||||||
except:
|
|
||||||
# NOTE(justinsb): This logic is a little suspicious to me.
|
|
||||||
# If the callback throws an exception, then errback will
|
|
||||||
# be called also. However, this is what the unit tests
|
|
||||||
# test for.
|
|
||||||
exec_error = self._build_execution_error(exit_code)
|
|
||||||
self.deferred.errback(exec_error)
|
|
||||||
elif self.on_process_ended is not None:
|
|
||||||
self.on_process_ended.errback(reason)
|
|
||||||
|
|
||||||
def connectionMade(self):
|
|
||||||
if self.started_deferred:
|
|
||||||
self.started_deferred.callback(self)
|
|
||||||
if self.process_input:
|
|
||||||
self.transport.write(str(self.process_input))
|
|
||||||
self.transport.closeStdin()
|
|
||||||
|
|
||||||
|
|
||||||
def get_process_output(executable, args=None, env=None, path=None,
                       process_reactor=None, check_exit_code=True,
                       process_input=None, started_deferred=None,
                       terminate_on_stderr=False):
    if process_reactor is None:
        process_reactor = reactor
    args = args and args or ()
    env = env and env or {}
    deferred = defer.Deferred()
    cmd = executable
    if args:
        cmd = " ".join([cmd] + args)
    logging.debug("Running cmd: %s", cmd)
    process_handler = BackRelayWithInput(
        deferred,
        cmd,
        started_deferred=started_deferred,
        check_exit_code=check_exit_code,
        process_input=process_input,
        terminate_on_stderr=terminate_on_stderr)
    # NOTE(vish): commands come in as unicode, but self.executes needs
    #             strings or process.spawn raises a deprecation warning
    executable = str(executable)
    if not args is None:
        args = [str(x) for x in args]
    process_reactor.spawnProcess(process_handler, executable,
                                 (executable,) + tuple(args), env, path)
    return deferred


class ProcessPool(object):
    """A simple process pool implementation using Twisted's Process bits.

    This is pretty basic right now, but hopefully the API will be the correct
    one so that it can be optimized later.
    """
    def __init__(self, size=None):
        self.size = size and size or FLAGS.process_pool_size
        self._pool = defer.DeferredSemaphore(self.size)

    def simple_execute(self, cmd, **kw):
        """Weak emulation of the old utils.execute() function.

        This only exists as a way to quickly move old execute methods to
        this new style of code.

        NOTE(termie): This will break on args with spaces in them.
        """
        parsed = cmd.split(' ')
        executable, args = parsed[0], parsed[1:]
        return self.execute(executable, args, **kw)

    def execute(self, *args, **kw):
        deferred = self._pool.acquire()

        def _associate_process(proto):
            deferred.process = proto.transport
            return proto.transport

        started = defer.Deferred()
        started.addCallback(_associate_process)
        kw.setdefault('started_deferred', started)

        deferred.process = None
        deferred.started = started

        deferred.addCallback(lambda _: get_process_output(*args, **kw))
        deferred.addBoth(self._release)
        return deferred

    def _release(self, retval=None):
        self._pool.release()
        return retval


class SharedPool(object):
    _instance = None

    def __init__(self):
        if SharedPool._instance is None:
            self.__class__._instance = ProcessPool()

    def __getattr__(self, key):
        return getattr(self._instance, key)


def simple_execute(cmd, **kwargs):
    return SharedPool().simple_execute(cmd, **kwargs)
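SharedPool above is a small class-attribute singleton: the first construction stores one ProcessPool on the class, and __getattr__ forwards every other attribute lookup to that shared instance. A minimal standalone sketch of the same pattern, using only the standard library and hypothetical names (not Nova code):

class _Worker(object):
    """Stand-in for an expensive shared object such as a process pool."""

    def ping(self):
        return 'pong'


class Shared(object):
    """Every Shared() wrapper proxies to one lazily created _Worker."""

    _instance = None

    def __init__(self):
        if Shared._instance is None:
            Shared._instance = _Worker()

    def __getattr__(self, key):
        # Only called for attributes not found on Shared itself, so the
        # class attribute _instance resolves normally while everything
        # else falls through to the shared worker.
        return getattr(self._instance, key)


if __name__ == '__main__':
    a, b = Shared(), Shared()
    assert a._instance is b._instance   # both wrappers share one worker
    print(a.ping())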
83
nova/rpc.py

@@ -25,18 +25,18 @@ import json
import logging
import sys
import time
+import traceback
import uuid

from carrot import connection as carrot_connection
from carrot import messaging
from eventlet import greenthread
-from twisted.internet import defer
-from twisted.internet import task

+from nova import context
from nova import exception
from nova import fakerabbit
from nova import flags
-from nova import context
+from nova import utils


FLAGS = flags.FLAGS

@@ -128,17 +128,9 @@ class Consumer(messaging.Consumer):

    def attach_to_eventlet(self):
        """Only needed for unit tests!"""
-        def fetch_repeatedly():
-            while True:
-                self.fetch(enable_callbacks=True)
-                greenthread.sleep(0.1)
-        greenthread.spawn(fetch_repeatedly)
-
-    def attach_to_twisted(self):
-        """Attach a callback to twisted that fires 10 times a second"""
-        loop = task.LoopingCall(self.fetch, enable_callbacks=True)
-        loop.start(interval=0.1)
-        return loop
+        timer = utils.LoopingCall(self.fetch, enable_callbacks=True)
+        timer.start(0.1)
+        return timer


class Publisher(messaging.Publisher):
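The new attach_to_eventlet delegates the polling loop to utils.LoopingCall, while the removed body shows the underlying idea: spawn a green thread that calls fetch and sleeps between iterations. A hedged sketch of that pattern, assuming only that eventlet's greenthread.spawn and greenthread.sleep behave as they do in the removed code (loop_call is a hypothetical name, not the utils.LoopingCall implementation):

from eventlet import greenthread


def loop_call(func, interval=0.1):
    """Call func repeatedly, yielding to other green threads in between."""

    def _runner():
        while True:
            func()
            greenthread.sleep(interval)

    # Keep the returned green thread if the caller needs to stop it later.
    return greenthread.spawn(_runner)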
@@ -196,11 +188,13 @@ class AdapterConsumer(TopicConsumer):
        node_func = getattr(self.proxy, str(method))
        node_args = dict((str(k), v) for k, v in args.iteritems())
        # NOTE(vish): magic is fun!
-        # pylint: disable-msg=W0142
-        d = defer.maybeDeferred(node_func, context=ctxt, **node_args)
-        if msg_id:
-            d.addCallback(lambda rval: msg_reply(msg_id, rval, None))
-            d.addErrback(lambda e: msg_reply(msg_id, None, e))
+        try:
+            rval = node_func(context=ctxt, **node_args)
+            if msg_id:
+                msg_reply(msg_id, rval, None)
+        except Exception as e:
+            if msg_id:
+                msg_reply(msg_id, None, sys.exc_info())
        return


@@ -242,13 +236,15 @@ class DirectPublisher(Publisher):
def msg_reply(msg_id, reply=None, failure=None):
    """Sends a reply or an error on the channel signified by msg_id

-    failure should be a twisted failure object"""
+    failure should be a sys.exc_info() tuple.
+
+    """
    if failure:
-        message = failure.getErrorMessage()
-        traceback = failure.getTraceback()
+        message = str(failure[1])
+        tb = traceback.format_exception(*failure)
        logging.error("Returning exception %s to caller", message)
-        logging.error(traceback)
-        failure = (failure.type.__name__, str(failure.value), traceback)
+        logging.error(tb)
+        failure = (failure[0].__name__, str(failure[1]), tb)
    conn = Connection.instance()
    publisher = DirectPublisher(connection=conn, msg_id=msg_id)
    try:

@@ -313,7 +309,6 @@ def call(context, topic, msg):
    _pack_context(msg, context)

    class WaitMessage(object):
-
        def __call__(self, data, message):
            """Acks message and sets result."""
            message.ack()

@@ -337,41 +332,15 @@ def call(context, topic, msg):
    except StopIteration:
        pass
    consumer.close()
+    # NOTE(termie): this is a little bit of a change from the original
+    #               non-eventlet code where returning a Failure
+    #               instance from a deferred call is very similar to
+    #               raising an exception
+    if isinstance(wait_msg.result, Exception):
+        raise wait_msg.result
    return wait_msg.result


-def call_twisted(context, topic, msg):
-    """Sends a message on a topic and wait for a response"""
-    LOG.debug("Making asynchronous call...")
-    msg_id = uuid.uuid4().hex
-    msg.update({'_msg_id': msg_id})
-    LOG.debug("MSG_ID is %s" % (msg_id))
-    _pack_context(msg, context)
-
-    conn = Connection.instance()
-    d = defer.Deferred()
-    consumer = DirectConsumer(connection=conn, msg_id=msg_id)
-
-    def deferred_receive(data, message):
-        """Acks message and callbacks or errbacks"""
-        message.ack()
-        if data['failure']:
-            return d.errback(RemoteError(*data['failure']))
-        else:
-            return d.callback(data['result'])
-
-    consumer.register_callback(deferred_receive)
-    injected = consumer.attach_to_twisted()
-
-    # clean up after the injected listened and return x
-    d.addCallback(lambda x: injected.stop() and x or x)
-
-    publisher = TopicPublisher(connection=conn, topic=topic)
-    publisher.send(msg)
-    publisher.close()
-    return d
-
-
def cast(context, topic, msg):
    """Sends a message on a topic without waiting for a response"""
    LOG.debug("Making asynchronous cast...")
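The reworked msg_reply expects failure to be the (type, value, traceback) tuple returned by sys.exc_info() rather than a Twisted Failure. A small standalone sketch of that convention, using only the standard library (capture_failure is a hypothetical helper, not part of nova.rpc):

import sys
import traceback


def capture_failure():
    """Build an (exception name, message, formatted traceback) triple from
    the exception currently being handled, roughly the way the new
    msg_reply builds the tuple it publishes back to the caller."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    tb_text = ''.join(traceback.format_exception(exc_type, exc_value, exc_tb))
    return (exc_type.__name__, str(exc_value), tb_text)


try:
    {}['missing']
except KeyError:
    name, message, tb_text = capture_failure()
    print(name)  # KeyError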
151
nova/server.py

@@ -1,151 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Base functionality for nova daemons - gradually being replaced with twistd.py.
-"""
-
-import daemon
-from daemon import pidlockfile
-import logging
-import logging.handlers
-import os
-import signal
-import sys
-import time
-
-from nova import flags
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_bool('daemonize', False, 'daemonize this process')
-# NOTE(termie): right now I am defaulting to using syslog when we daemonize
-#               it may be better to do something else -shrug-
-# NOTE(Devin): I think we should let each process have its own log file
-#              and put it in /var/logs/nova/(appname).log
-#              This makes debugging much easier and cuts down on sys log
-#              clutter.
-flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing')
-flags.DEFINE_string('logfile', None, 'log file to output to')
-flags.DEFINE_string('logdir', None, 'directory to keep log files in '
-                    '(will be prepended to $logfile)')
-flags.DEFINE_string('pidfile', None, 'pid file to output to')
-flags.DEFINE_string('working_directory', './', 'working directory...')
-flags.DEFINE_integer('uid', os.getuid(), 'uid under which to run')
-flags.DEFINE_integer('gid', os.getgid(), 'gid under which to run')
-
-
-def stop(pidfile):
-    """
-    Stop the daemon
-    """
-    # Get the pid from the pidfile
-    try:
-        pid = int(open(pidfile, 'r').read().strip())
-    except IOError:
-        message = "pidfile %s does not exist. Daemon not running?\n"
-        sys.stderr.write(message % pidfile)
-        return
-
-    # Try killing the daemon process
-    try:
-        while 1:
-            os.kill(pid, signal.SIGTERM)
-            time.sleep(0.1)
-    except OSError, err:
-        err = str(err)
-        if err.find("No such process") > 0:
-            if os.path.exists(pidfile):
-                os.remove(pidfile)
-        else:
-            print str(err)
-            sys.exit(1)
-
-
-def serve(name, main):
-    """Controller for server"""
-    argv = FLAGS(sys.argv)
-
-    if not FLAGS.pidfile:
-        FLAGS.pidfile = '%s.pid' % name
-
-    logging.debug("Full set of FLAGS: \n\n\n")
-    for flag in FLAGS:
-        logging.debug("%s : %s", flag, FLAGS.get(flag, None))
-
-    action = 'start'
-    if len(argv) > 1:
-        action = argv.pop()
-
-    if action == 'stop':
-        stop(FLAGS.pidfile)
-        sys.exit()
-    elif action == 'restart':
-        stop(FLAGS.pidfile)
-    elif action == 'start':
-        pass
-    else:
-        print 'usage: %s [options] [start|stop|restart]' % argv[0]
-        sys.exit(1)
-    daemonize(argv, name, main)
-
-
-def daemonize(args, name, main):
-    """Does the work of daemonizing the process"""
-    logging.getLogger('amqplib').setLevel(logging.WARN)
-    files_to_keep = []
-    if FLAGS.daemonize:
-        logger = logging.getLogger()
-        formatter = logging.Formatter(
-            name + '(%(name)s): %(levelname)s %(message)s')
-        if FLAGS.use_syslog and not FLAGS.logfile:
-            syslog = logging.handlers.SysLogHandler(address='/dev/log')
-            syslog.setFormatter(formatter)
-            logger.addHandler(syslog)
-            files_to_keep.append(syslog.socket)
-        else:
-            if not FLAGS.logfile:
-                FLAGS.logfile = '%s.log' % name
-            if FLAGS.logdir:
-                FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
-            logfile = logging.FileHandler(FLAGS.logfile)
-            logfile.setFormatter(formatter)
-            logger.addHandler(logfile)
-            files_to_keep.append(logfile.stream)
-        stdin, stdout, stderr = None, None, None
-    else:
-        stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
-
-    if FLAGS.verbose:
-        logging.getLogger().setLevel(logging.DEBUG)
-    else:
-        logging.getLogger().setLevel(logging.WARNING)
-
-    with daemon.DaemonContext(
-            detach_process=FLAGS.daemonize,
-            working_directory=FLAGS.working_directory,
-            pidfile=pidlockfile.TimeoutPIDLockFile(FLAGS.pidfile,
-                                                   acquire_timeout=1,
-                                                   threaded=False),
-            stdin=stdin,
-            stdout=stdout,
-            stderr=stderr,
-            uid=FLAGS.uid,
-            gid=FLAGS.gid,
-            files_preserve=files_to_keep):
-        main(args)
@@ -35,7 +35,7 @@ class Context(object):
    pass


-class AccessTestCase(test.TrialTestCase):
+class AccessTestCase(test.TestCase):
    def setUp(self):
        super(AccessTestCase, self).setUp()
        um = manager.AuthManager()

@@ -323,12 +323,12 @@ class AuthManagerTestCase(object):
        self.assertTrue(user.is_admin())


-class AuthManagerLdapTestCase(AuthManagerTestCase, test.TrialTestCase):
+class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase):
    auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'

    def __init__(self, *args, **kwargs):
        AuthManagerTestCase.__init__(self)
-        test.TrialTestCase.__init__(self, *args, **kwargs)
+        test.TestCase.__init__(self, *args, **kwargs)
        import nova.auth.fakeldap as fakeldap
        FLAGS.redis_db = 8
        if FLAGS.flush_db:

@@ -340,7 +340,7 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TrialTestCase):
        self.skip = True


-class AuthManagerDbTestCase(AuthManagerTestCase, test.TrialTestCase):
+class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase):
    auth_driver = 'nova.auth.dbdriver.DbDriver'

@@ -27,8 +27,6 @@ import tempfile
import time

from eventlet import greenthread
-from twisted.internet import defer
-import unittest
from xml.etree import ElementTree

from nova import context

@@ -53,7 +51,7 @@ IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images')
os.makedirs(IMAGES_PATH)


-class CloudTestCase(test.TrialTestCase):
+class CloudTestCase(test.TestCase):
    def setUp(self):
        super(CloudTestCase, self).setUp()
        self.flags(connection_type='fake', images_path=IMAGES_PATH)

@@ -126,6 +124,19 @@ class CloudTestCase(test.TrialTestCase):
        db.instance_destroy(self.context, inst['id'])
        db.floating_ip_destroy(self.context, address)

+    def test_describe_volumes(self):
+        """Makes sure describe_volumes works and filters results."""
+        vol1 = db.volume_create(self.context, {})
+        vol2 = db.volume_create(self.context, {})
+        result = self.cloud.describe_volumes(self.context)
+        self.assertEqual(len(result['volumeSet']), 2)
+        result = self.cloud.describe_volumes(self.context,
+                                             volume_id=[vol2['ec2_id']])
+        self.assertEqual(len(result['volumeSet']), 1)
+        self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id'])
+        db.volume_destroy(self.context, vol1['id'])
+        db.volume_destroy(self.context, vol2['id'])
+
    def test_console_output(self):
        image_id = FLAGS.default_image
        instance_type = FLAGS.default_instance_type

@@ -186,7 +197,7 @@ class CloudTestCase(test.TrialTestCase):
        logging.debug("Need to watch instance %s until it's running..." %
                      instance['instance_id'])
        while True:
-            rv = yield defer.succeed(time.sleep(1))
+            greenthread.sleep(1)
            info = self.cloud._get_instance(instance['instance_id'])
            logging.debug(info['state'])
            if info['state'] == power_state.RUNNING:
@@ -22,8 +22,6 @@ Tests For Compute
import datetime
import logging

-from twisted.internet import defer
-
from nova import context
from nova import db
from nova import exception

@@ -31,11 +29,13 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
+from nova.compute import api as compute_api


FLAGS = flags.FLAGS


-class ComputeTestCase(test.TrialTestCase):
+class ComputeTestCase(test.TestCase):
    """Test case for compute"""
    def setUp(self):
        logging.getLogger().setLevel(logging.DEBUG)

@@ -43,6 +43,7 @@ class ComputeTestCase(test.TrialTestCase):
        self.flags(connection_type='fake',
                   network_manager='nova.network.manager.FlatManager')
        self.compute = utils.import_object(FLAGS.compute_manager)
+        self.compute_api = compute_api.ComputeAPI()
        self.manager = manager.AuthManager()
        self.user = self.manager.create_user('fake', 'fake', 'fake')
        self.project = self.manager.create_project('fake', 'fake', 'fake')

@@ -66,45 +67,48 @@ class ComputeTestCase(test.TrialTestCase):
        inst['ami_launch_index'] = 0
        return db.instance_create(self.context, inst)['id']

+    def test_create_instance_defaults_display_name(self):
+        """Verify that an instance cannot be created without a display_name."""
+        cases = [dict(), dict(display_name=None)]
+        for instance in cases:
+            ref = self.compute_api.create_instances(self.context,
+                FLAGS.default_instance_type, None, **instance)
+            try:
+                self.assertNotEqual(ref[0].display_name, None)
+            finally:
+                db.instance_destroy(self.context, ref[0]['id'])
+
    def test_create_instance_associates_security_groups(self):
-        """Make sure create_instance associates security groups"""
-        inst = {}
-        inst['user_id'] = self.user.id
-        inst['project_id'] = self.project.id
+        """Make sure create_instances associates security groups"""
        values = {'name': 'default',
                  'description': 'default',
                  'user_id': self.user.id,
                  'project_id': self.project.id}
        group = db.security_group_create(self.context, values)
-        ref = self.compute.create_instance(self.context,
-                                           security_groups=[group['id']],
-                                           **inst)
-        # reload to get groups
-        instance_ref = db.instance_get(self.context, ref['id'])
+        ref = self.compute_api.create_instances(self.context,
+            FLAGS.default_instance_type, None, security_group=['default'])
        try:
-            self.assertEqual(len(instance_ref['security_groups']), 1)
+            self.assertEqual(len(ref[0]['security_groups']), 1)
        finally:
            db.security_group_destroy(self.context, group['id'])
-            db.instance_destroy(self.context, instance_ref['id'])
+            db.instance_destroy(self.context, ref[0]['id'])

-    @defer.inlineCallbacks
    def test_run_terminate(self):
        """Make sure it is possible to run and terminate instance"""
        instance_id = self._create_instance()

-        yield self.compute.run_instance(self.context, instance_id)
+        self.compute.run_instance(self.context, instance_id)

        instances = db.instance_get_all(context.get_admin_context())
        logging.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

-        yield self.compute.terminate_instance(self.context, instance_id)
+        self.compute.terminate_instance(self.context, instance_id)

        instances = db.instance_get_all(context.get_admin_context())
        logging.info("After terminating instances: %s", instances)
        self.assertEqual(len(instances), 0)

-    @defer.inlineCallbacks
    def test_run_terminate_timestamps(self):
        """Make sure timestamps are set for launched and destroyed"""
        instance_id = self._create_instance()

@@ -112,42 +116,40 @@ class ComputeTestCase(test.TrialTestCase):
        self.assertEqual(instance_ref['launched_at'], None)
        self.assertEqual(instance_ref['deleted_at'], None)
        launch = datetime.datetime.utcnow()
-        yield self.compute.run_instance(self.context, instance_id)
+        self.compute.run_instance(self.context, instance_id)
        instance_ref = db.instance_get(self.context, instance_id)
        self.assert_(instance_ref['launched_at'] > launch)
        self.assertEqual(instance_ref['deleted_at'], None)
        terminate = datetime.datetime.utcnow()
-        yield self.compute.terminate_instance(self.context, instance_id)
+        self.compute.terminate_instance(self.context, instance_id)
        self.context = self.context.elevated(True)
        instance_ref = db.instance_get(self.context, instance_id)
        self.assert_(instance_ref['launched_at'] < terminate)
        self.assert_(instance_ref['deleted_at'] > terminate)

-    @defer.inlineCallbacks
    def test_reboot(self):
        """Ensure instance can be rebooted"""
        instance_id = self._create_instance()
-        yield self.compute.run_instance(self.context, instance_id)
-        yield self.compute.reboot_instance(self.context, instance_id)
-        yield self.compute.terminate_instance(self.context, instance_id)
+        self.compute.run_instance(self.context, instance_id)
+        self.compute.reboot_instance(self.context, instance_id)
+        self.compute.terminate_instance(self.context, instance_id)

-    @defer.inlineCallbacks
    def test_console_output(self):
        """Make sure we can get console output from instance"""
        instance_id = self._create_instance()
-        yield self.compute.run_instance(self.context, instance_id)
+        self.compute.run_instance(self.context, instance_id)

-        console = yield self.compute.get_console_output(self.context,
-                                                        instance_id)
+        console = self.compute.get_console_output(self.context,
+                                                  instance_id)
        self.assert_(console)
-        yield self.compute.terminate_instance(self.context, instance_id)
+        self.compute.terminate_instance(self.context, instance_id)

-    @defer.inlineCallbacks
    def test_run_instance_existing(self):
        """Ensure failure when running an instance that already exists"""
        instance_id = self._create_instance()
-        yield self.compute.run_instance(self.context, instance_id)
-        self.assertFailure(self.compute.run_instance(self.context,
-                                                     instance_id),
-                           exception.Error)
-        yield self.compute.terminate_instance(self.context, instance_id)
+        self.compute.run_instance(self.context, instance_id)
+        self.assertRaises(exception.Error,
+                          self.compute.run_instance,
+                          self.context,
+                          instance_id)
+        self.compute.terminate_instance(self.context, instance_id)
@@ -24,7 +24,7 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('flags_unittest', 'foo', 'for testing purposes only')


-class FlagsTestCase(test.TrialTestCase):
+class FlagsTestCase(test.TestCase):

    def setUp(self):
        super(FlagsTestCase, self).setUp()
@@ -15,34 +15,41 @@
# under the License.

import os
-import subprocess

from nova import test
from nova.utils import parse_mailmap, str_dict_replace


-class ProjectTestCase(test.TrialTestCase):
+class ProjectTestCase(test.TestCase):
    def test_authors_up_to_date(self):
        if os.path.exists('../.bzr'):
-            log_cmd = subprocess.Popen(["bzr", "log", "-n0"],
-                                       stdout=subprocess.PIPE)
-            changelog = log_cmd.communicate()[0]
+            contributors = set()
+
            mailmap = parse_mailmap('../.mailmap')

-            contributors = set()
-            for l in changelog.split('\n'):
-                l = l.strip()
-                if (l.startswith('author:') or l.startswith('committer:')
-                    and not l == 'committer: Tarmac'):
-                    email = l.split(' ')[-1]
-                    contributors.add(str_dict_replace(email, mailmap))
+            import bzrlib.workingtree
+            tree = bzrlib.workingtree.WorkingTree.open('..')
+            tree.lock_read()
+            try:
+                parents = tree.get_parent_ids()
+                g = tree.branch.repository.get_graph()
+                for p in parents[1:]:
+                    rev_ids = [r for r, _ in g.iter_ancestry(parents)
+                               if r != "null:"]
+                revs = tree.branch.repository.get_revisions(rev_ids)
+                for r in revs:
+                    for author in r.get_apparent_authors():
+                        email = author.split(' ')[-1]
+                        contributors.add(str_dict_replace(email, mailmap))

-            authors_file = open('../Authors', 'r').read()
+                authors_file = open('../Authors', 'r').read()

-            missing = set()
-            for contributor in contributors:
-                if not contributor in authors_file:
-                    missing.add(contributor)
+                missing = set()
+                for contributor in contributors:
+                    if not contributor in authors_file:
+                        missing.add(contributor)

-            self.assertTrue(len(missing) == 0,
-                            '%r not listed in Authors' % missing)
+                self.assertTrue(len(missing) == 0,
+                                '%r not listed in Authors' % missing)
+            finally:
+                tree.unlock()
@@ -33,7 +33,7 @@ from nova.auth import manager
FLAGS = flags.FLAGS


-class NetworkTestCase(test.TrialTestCase):
+class NetworkTestCase(test.TestCase):
    """Test cases for network code"""
    def setUp(self):
        super(NetworkTestCase, self).setUp()
@@ -54,7 +54,7 @@ os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))


-class ObjectStoreTestCase(test.TrialTestCase):
+class ObjectStoreTestCase(test.TestCase):
    """Test objectstore API directly."""

    def setUp(self):

@@ -191,7 +191,7 @@ class TestSite(server.Site):
    protocol = TestHTTPChannel


-class S3APITestCase(test.TrialTestCase):
+class S3APITestCase(test.TestCase):
    """Test objectstore through S3 API."""

    def setUp(self):
@@ -1,132 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-from twisted.internet import defer
-from twisted.internet import reactor
-from xml.etree import ElementTree
-
-from nova import exception
-from nova import flags
-from nova import process
-from nova import test
-from nova import utils
-
-FLAGS = flags.FLAGS
-
-
-class ProcessTestCase(test.TrialTestCase):
-    def setUp(self):
-        logging.getLogger().setLevel(logging.DEBUG)
-        super(ProcessTestCase, self).setUp()
-
-    def test_execute_stdout(self):
-        pool = process.ProcessPool(2)
-        d = pool.simple_execute('echo test')
-
-        def _check(rv):
-            self.assertEqual(rv[0], 'test\n')
-            self.assertEqual(rv[1], '')
-
-        d.addCallback(_check)
-        d.addErrback(self.fail)
-        return d
-
-    def test_execute_stderr(self):
-        pool = process.ProcessPool(2)
-        d = pool.simple_execute('cat BAD_FILE', check_exit_code=False)
-
-        def _check(rv):
-            self.assertEqual(rv[0], '')
-            self.assert_('No such file' in rv[1])
-
-        d.addCallback(_check)
-        d.addErrback(self.fail)
-        return d
-
-    def test_execute_unexpected_stderr(self):
-        pool = process.ProcessPool(2)
-        d = pool.simple_execute('cat BAD_FILE')
-        d.addCallback(lambda x: self.fail('should have raised an error'))
-        d.addErrback(lambda failure: failure.trap(IOError))
-        return d
-
-    def test_max_processes(self):
-        pool = process.ProcessPool(2)
-        d1 = pool.simple_execute('sleep 0.01')
-        d2 = pool.simple_execute('sleep 0.01')
-        d3 = pool.simple_execute('sleep 0.005')
-        d4 = pool.simple_execute('sleep 0.005')
-
-        called = []
-
-        def _called(rv, name):
-            called.append(name)
-
-        d1.addCallback(_called, 'd1')
-        d2.addCallback(_called, 'd2')
-        d3.addCallback(_called, 'd3')
-        d4.addCallback(_called, 'd4')
-
-        # Make sure that d3 and d4 had to wait on the other two and were called
-        # in order
-        # NOTE(termie): there may be a race condition in this test if for some
-        #               reason one of the sleeps takes longer to complete
-        #               than it should
-        d4.addCallback(lambda x: self.assertEqual(called[2], 'd3'))
-        d4.addCallback(lambda x: self.assertEqual(called[3], 'd4'))
-        d4.addErrback(self.fail)
-        return d4
-
-    def test_kill_long_process(self):
-        pool = process.ProcessPool(2)
-
-        d1 = pool.simple_execute('sleep 1')
-        d2 = pool.simple_execute('sleep 0.005')
-
-        timeout = reactor.callLater(0.1, self.fail, 'should have been killed')
-
-        # kill d1 and wait on it to end then cancel the timeout
-        d2.addCallback(lambda _: d1.process.signalProcess('KILL'))
-        d2.addCallback(lambda _: d1)
-        d2.addBoth(lambda _: timeout.active() and timeout.cancel())
-        d2.addErrback(self.fail)
-        return d2
-
-    def test_process_exit_is_contained(self):
-        pool = process.ProcessPool(2)
-
-        d1 = pool.simple_execute('sleep 1')
-        d1.addCallback(lambda x: self.fail('should have errbacked'))
-        d1.addErrback(lambda fail: fail.trap(IOError))
-        reactor.callLater(0.05, d1.process.signalProcess, 'KILL')
-
-        return d1
-
-    def test_shared_pool_is_singleton(self):
-        pool1 = process.SharedPool()
-        pool2 = process.SharedPool()
-        self.assertEqual(id(pool1._instance), id(pool2._instance))
-
-    def test_shared_pool_works_as_singleton(self):
-        d1 = process.simple_execute('sleep 1')
-        d2 = process.simple_execute('sleep 0.005')
-        # lp609749: would have failed with
-        # exceptions.AssertionError: Someone released me too many times:
-        # too many tokens!
-        return d1
@@ -32,7 +32,7 @@ from nova.api.ec2 import cloud
FLAGS = flags.FLAGS


-class QuotaTestCase(test.TrialTestCase):
+class QuotaTestCase(test.TestCase):
    def setUp(self):
        logging.getLogger().setLevel(logging.DEBUG)
        super(QuotaTestCase, self).setUp()

@@ -94,11 +94,12 @@ class QuotaTestCase(test.TrialTestCase):
        for i in range(FLAGS.quota_instances):
            instance_id = self._create_instance()
            instance_ids.append(instance_id)
-        self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
+        self.assertRaises(quota.QuotaError, self.cloud.run_instances,
                          self.context,
                          min_count=1,
                          max_count=1,
-                          instance_type='m1.small')
+                          instance_type='m1.small',
+                          image_id='fake')
        for instance_id in instance_ids:
            db.instance_destroy(self.context, instance_id)

@@ -106,11 +107,12 @@ class QuotaTestCase(test.TrialTestCase):
        instance_ids = []
        instance_id = self._create_instance(cores=4)
        instance_ids.append(instance_id)
-        self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
+        self.assertRaises(quota.QuotaError, self.cloud.run_instances,
                          self.context,
                          min_count=1,
                          max_count=1,
-                          instance_type='m1.small')
+                          instance_type='m1.small',
+                          image_id='fake')
        for instance_id in instance_ids:
            db.instance_destroy(self.context, instance_id)

@@ -119,7 +121,7 @@ class QuotaTestCase(test.TrialTestCase):
        for i in range(FLAGS.quota_volumes):
            volume_id = self._create_volume()
            volume_ids.append(volume_id)
-        self.assertRaises(cloud.QuotaError, self.cloud.create_volume,
+        self.assertRaises(quota.QuotaError, self.cloud.create_volume,
                          self.context,
                          size=10)
        for volume_id in volume_ids:

@@ -129,7 +131,7 @@ class QuotaTestCase(test.TrialTestCase):
        volume_ids = []
        volume_id = self._create_volume(size=20)
        volume_ids.append(volume_id)
-        self.assertRaises(cloud.QuotaError,
+        self.assertRaises(quota.QuotaError,
                          self.cloud.create_volume,
                          self.context,
                          size=10)

@@ -146,6 +148,6 @@ class QuotaTestCase(test.TrialTestCase):
        # make an rpc.call, the test just finishes with OK. It
        # appears to be something in the magic inline callbacks
        # that is breaking.
-        self.assertRaises(cloud.QuotaError, self.cloud.allocate_address,
+        self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
                          self.context)
        db.floating_ip_destroy(context.get_admin_context(), address)
@@ -20,8 +20,6 @@ Unit Tests for remote procedure calls using queue
"""
import logging

-from twisted.internet import defer
-
from nova import context
from nova import flags
from nova import rpc

@@ -31,7 +29,7 @@ from nova import test
FLAGS = flags.FLAGS


-class RpcTestCase(test.TrialTestCase):
+class RpcTestCase(test.TestCase):
    """Test cases for rpc"""
    def setUp(self):
        super(RpcTestCase, self).setUp()

@@ -40,23 +38,22 @@ class RpcTestCase(test.TrialTestCase):
        self.consumer = rpc.AdapterConsumer(connection=self.conn,
                                            topic='test',
                                            proxy=self.receiver)
-        self.consumer.attach_to_twisted()
+        self.consumer.attach_to_eventlet()
        self.context = context.get_admin_context()

    def test_call_succeed(self):
        """Get a value through rpc call"""
        value = 42
-        result = yield rpc.call_twisted(self.context,
-                                        'test', {"method": "echo",
-                                                 "args": {"value": value}})
+        result = rpc.call(self.context, 'test', {"method": "echo",
+                                                 "args": {"value": value}})
        self.assertEqual(value, result)

    def test_context_passed(self):
        """Makes sure a context is passed through rpc call"""
        value = 42
-        result = yield rpc.call_twisted(self.context,
-                                        'test', {"method": "context",
-                                                 "args": {"value": value}})
+        result = rpc.call(self.context,
+                          'test', {"method": "context",
+                                   "args": {"value": value}})
        self.assertEqual(self.context.to_dict(), result)

    def test_call_exception(self):

@@ -67,14 +64,17 @@ class RpcTestCase(test.TrialTestCase):
        to an int in the test.
        """
        value = 42
-        self.assertFailure(rpc.call_twisted(self.context, 'test',
-                                            {"method": "fail",
-                                             "args": {"value": value}}),
-                           rpc.RemoteError)
+        self.assertRaises(rpc.RemoteError,
+                          rpc.call,
+                          self.context,
+                          'test',
+                          {"method": "fail",
+                           "args": {"value": value}})
        try:
-            yield rpc.call_twisted(self.context,
-                                   'test', {"method": "fail",
-                                            "args": {"value": value}})
+            rpc.call(self.context,
+                     'test',
+                     {"method": "fail",
+                      "args": {"value": value}})
            self.fail("should have thrown rpc.RemoteError")
        except rpc.RemoteError as exc:
            self.assertEqual(int(exc.value), value)

@@ -89,13 +89,13 @@ class TestReceiver(object):
    def echo(context, value):
        """Simply returns whatever value is sent in"""
        logging.debug("Received %s", value)
-        return defer.succeed(value)
+        return value

    @staticmethod
    def context(context, value):
        """Returns dictionary version of context"""
        logging.debug("Received %s", context)
-        return defer.succeed(context.to_dict())
+        return context.to_dict()

    @staticmethod
    def fail(context, value):
@@ -44,7 +44,7 @@ class TestDriver(driver.Scheduler):
        return 'named_host'


-class SchedulerTestCase(test.TrialTestCase):
+class SchedulerTestCase(test.TestCase):
    """Test case for scheduler"""
    def setUp(self):
        super(SchedulerTestCase, self).setUp()

@@ -73,7 +73,7 @@ class SchedulerTestCase(test.TrialTestCase):
        scheduler.named_method(ctxt, 'topic', num=7)


-class SimpleDriverTestCase(test.TrialTestCase):
+class SimpleDriverTestCase(test.TestCase):
    """Test case for simple driver"""
    def setUp(self):
        super(SimpleDriverTestCase, self).setUp()

@@ -122,12 +122,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
-        compute1.startService()
+        compute1.start()
        compute2 = service.Service('host2',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
-        compute2.startService()
+        compute2.start()
        hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
        self.assertEqual(len(hosts), 2)
        compute1.kill()

@@ -139,12 +139,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
-        compute1.startService()
+        compute1.start()
        compute2 = service.Service('host2',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
-        compute2.startService()
+        compute2.start()
        instance_id1 = self._create_instance()
        compute1.run_instance(self.context, instance_id1)
        instance_id2 = self._create_instance()

@@ -162,12 +162,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
-        compute1.startService()
+        compute1.start()
        compute2 = service.Service('host2',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
-        compute2.startService()
+        compute2.start()
        instance_ids1 = []
        instance_ids2 = []
        for index in xrange(FLAGS.max_cores):

@@ -195,12 +195,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
-        volume1.startService()
+        volume1.start()
        volume2 = service.Service('host2',
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
-        volume2.startService()
+        volume2.start()
        volume_id1 = self._create_volume()
        volume1.create_volume(self.context, volume_id1)
        volume_id2 = self._create_volume()

@@ -218,12 +218,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
-        volume1.startService()
+        volume1.start()
        volume2 = service.Service('host2',
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
-        volume2.startService()
+        volume2.start()
        volume_ids1 = []
        volume_ids2 = []
        for index in xrange(FLAGS.max_gigabytes):
@@ -22,9 +22,6 @@ Unit Tests for remote procedure calls using queue
|
|||||||
|
|
||||||
import mox
|
import mox
|
||||||
|
|
||||||
from twisted.application.app import startApplication
|
|
||||||
from twisted.internet import defer
|
|
||||||
|
|
||||||
from nova import exception
|
from nova import exception
|
||||||
from nova import flags
|
from nova import flags
|
||||||
from nova import rpc
|
from nova import rpc
|
||||||
@@ -48,7 +45,7 @@ class ExtendedService(service.Service):
|
|||||||
return 'service'
|
return 'service'
|
||||||
|
|
||||||
|
|
||||||
class ServiceManagerTestCase(test.TrialTestCase):
|
class ServiceManagerTestCase(test.TestCase):
|
||||||
"""Test cases for Services"""
|
"""Test cases for Services"""
|
||||||
|
|
||||||
def test_attribute_error_for_no_manager(self):
|
def test_attribute_error_for_no_manager(self):
|
||||||
@@ -63,7 +60,7 @@ class ServiceManagerTestCase(test.TrialTestCase):
|
|||||||
'test',
|
'test',
|
||||||
'test',
|
'test',
|
||||||
'nova.tests.service_unittest.FakeManager')
|
'nova.tests.service_unittest.FakeManager')
|
||||||
serv.startService()
|
serv.start()
|
||||||
self.assertEqual(serv.test_method(), 'manager')
|
self.assertEqual(serv.test_method(), 'manager')
|
||||||
|
|
||||||
def test_override_manager_method(self):
|
def test_override_manager_method(self):
|
||||||
@@ -71,11 +68,11 @@ class ServiceManagerTestCase(test.TrialTestCase):
|
|||||||
'test',
|
'test',
|
||||||
'test',
|
'test',
|
||||||
'nova.tests.service_unittest.FakeManager')
|
'nova.tests.service_unittest.FakeManager')
|
||||||
serv.startService()
|
serv.start()
|
||||||
self.assertEqual(serv.test_method(), 'service')
|
self.assertEqual(serv.test_method(), 'service')
|
||||||
|
|
||||||
|
|
||||||
class ServiceTestCase(test.TrialTestCase):
|
+class ServiceTestCase(test.TestCase):
     """Test cases for Services"""

     def setUp(self):
@@ -94,8 +91,6 @@ class ServiceTestCase(test.TrialTestCase):
         self.mox.StubOutWithMock(rpc,
                                  'AdapterConsumer',
                                  use_mock_anything=True)
-        self.mox.StubOutWithMock(
-                service.task, 'LoopingCall', use_mock_anything=True)
         rpc.AdapterConsumer(connection=mox.IgnoreArg(),
                             topic=topic,
                             proxy=mox.IsA(service.Service)).AndReturn(
@@ -106,19 +101,8 @@ class ServiceTestCase(test.TrialTestCase):
                             proxy=mox.IsA(service.Service)).AndReturn(
                         rpc.AdapterConsumer)

-        rpc.AdapterConsumer.attach_to_twisted()
-        rpc.AdapterConsumer.attach_to_twisted()
+        rpc.AdapterConsumer.attach_to_eventlet()
+        rpc.AdapterConsumer.attach_to_eventlet()

-        # Stub out looping call a bit needlessly since we don't have an easy
-        # way to cancel it (yet) when the tests finishes
-        service.task.LoopingCall(mox.IgnoreArg()).AndReturn(
-                service.task.LoopingCall)
-        service.task.LoopingCall.start(interval=mox.IgnoreArg(),
-                                       now=mox.IgnoreArg())
-        service.task.LoopingCall(mox.IgnoreArg()).AndReturn(
-                service.task.LoopingCall)
-        service.task.LoopingCall.start(interval=mox.IgnoreArg(),
-                                       now=mox.IgnoreArg())
-
         service_create = {'host': host,
                           'binary': binary,
@@ -136,14 +120,14 @@ class ServiceTestCase(test.TrialTestCase):
                                       service_create).AndReturn(service_ref)
         self.mox.ReplayAll()

-        startApplication(app, False)
+        app.start()
+        app.stop()
         self.assert_(app)

     # We're testing sort of weird behavior in how report_state decides
     # whether it is disconnected, it looks for a variable on itself called
     # 'model_disconnected' and report_state doesn't really do much so this
     # these are mostly just for coverage
-    @defer.inlineCallbacks
     def test_report_state_no_service(self):
         host = 'foo'
         binary = 'bar'
@@ -173,10 +157,9 @@ class ServiceTestCase(test.TrialTestCase):
                                binary,
                                topic,
                                'nova.tests.service_unittest.FakeManager')
-        serv.startService()
-        yield serv.report_state()
+        serv.start()
+        serv.report_state()

-    @defer.inlineCallbacks
     def test_report_state_newly_disconnected(self):
         host = 'foo'
         binary = 'bar'
@@ -204,11 +187,10 @@ class ServiceTestCase(test.TrialTestCase):
                                binary,
                                topic,
                                'nova.tests.service_unittest.FakeManager')
-        serv.startService()
-        yield serv.report_state()
+        serv.start()
+        serv.report_state()
         self.assert_(serv.model_disconnected)

-    @defer.inlineCallbacks
     def test_report_state_newly_connected(self):
         host = 'foo'
         binary = 'bar'
@@ -238,8 +220,8 @@ class ServiceTestCase(test.TrialTestCase):
                                binary,
                                topic,
                                'nova.tests.service_unittest.FakeManager')
-        serv.startService()
+        serv.start()
         serv.model_disconnected = True
-        yield serv.report_state()
+        serv.report_state()

         self.assert_(not serv.model_disconnected)
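Note: the ServiceTestCase hunks above show the recurring pattern of this merge: @defer.inlineCallbacks, yield and startService() give way to plain method calls on an eventlet-backed service. A minimal sketch of the resulting test shape, using a made-up FakeService rather than anything from this branch:

    import unittest


    class FakeService(object):
        """Stand-in for nova.service.Service: start() and report_state()
        are ordinary blocking calls under eventlet instead of returning
        Deferreds."""

        def __init__(self):
            self.model_disconnected = False

        def start(self):            # replaces twisted's startService()
            self.started = True

        def report_state(self):     # no longer yielded via inlineCallbacks
            self.model_disconnected = False


    class FakeServiceTestCase(unittest.TestCase):
        def test_report_state(self):
            serv = FakeService()
            serv.start()
            serv.report_state()
            self.assertFalse(serv.model_disconnected)


    if __name__ == '__main__':
        unittest.main()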
@@ -1,42 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-import unittest
-
-from nova import flags
-from nova import test
-from nova import validate
-
-
-class ValidationTestCase(test.TrialTestCase):
-    def setUp(self):
-        super(ValidationTestCase, self).setUp()
-
-    def tearDown(self):
-        super(ValidationTestCase, self).tearDown()
-
-    def test_type_validation(self):
-        self.assertTrue(type_case("foo", 5, 1))
-        self.assertRaises(TypeError, type_case, "bar", "5", 1)
-        self.assertRaises(TypeError, type_case, None, 5, 1)
-
-
-@validate.typetest(instanceid=str, size=int, number_of_instances=int)
-def type_case(instanceid, size, number_of_instances):
-    return True
@@ -30,7 +30,7 @@ FLAGS = flags.FLAGS
 flags.DECLARE('instances_path', 'nova.compute.manager')


-class LibvirtConnTestCase(test.TrialTestCase):
+class LibvirtConnTestCase(test.TestCase):
     def setUp(self):
         super(LibvirtConnTestCase, self).setUp()
         self.manager = manager.AuthManager()
@@ -123,7 +123,7 @@ class LibvirtConnTestCase(test.TrialTestCase):
         self.manager.delete_user(self.user)


-class NWFilterTestCase(test.TrialTestCase):
+class NWFilterTestCase(test.TestCase):

     def setUp(self):
         super(NWFilterTestCase, self).setUp()
@@ -235,7 +235,7 @@ class NWFilterTestCase(test.TrialTestCase):
                                            'project_id': 'fake'})
         inst_id = instance_ref['id']

-        def _ensure_all_called(_):
+        def _ensure_all_called():
             instance_filter = 'nova-instance-%s' % instance_ref['name']
             secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
             for required in [secgroup_filter, 'allow-dhcp-server',
@@ -253,7 +253,6 @@ class NWFilterTestCase(test.TrialTestCase):
         instance = db.instance_get(self.context, inst_id)

         d = self.fw.setup_nwfilters_for_instance(instance)
-        d.addCallback(_ensure_all_called)
-        d.addCallback(lambda _: self.teardown_security_group())
-
+        _ensure_all_called()
+        self.teardown_security_group()
         return d
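Note: the NWFilterTestCase hunk above also drops a Deferred callback chain; once setup_nwfilters_for_instance blocks until it finishes, the follow-up checks can run as ordinary statements. A rough before/after sketch with stand-in names (FakeFirewall and the checks list are illustrative, not code from this branch):

    class FakeFirewall(object):
        """Stand-in for the firewall driver; the real method used to hand
        back a twisted Deferred, under eventlet it simply returns when done."""

        def setup_nwfilters_for_instance(self, instance_name):
            return 'nova-instance-%s' % instance_name


    def exercise(fw, checks):
        # Old style:  d = fw.setup_nwfilters_for_instance(inst)
        #             d.addCallback(_ensure_all_called)
        #             d.addCallback(lambda _: teardown())
        # New style: call, then just run the checks in order.
        fw.setup_nwfilters_for_instance('i-00000001')
        return all(check() for check in checks)


    print(exercise(FakeFirewall(), [lambda: True]))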
@@ -21,8 +21,6 @@ Tests for Volume Code.
 """
 import logging

-from twisted.internet import defer
-
 from nova import context
 from nova import exception
 from nova import db
@@ -33,7 +31,7 @@ from nova import utils
 FLAGS = flags.FLAGS


-class VolumeTestCase(test.TrialTestCase):
+class VolumeTestCase(test.TestCase):
     """Test Case for volumes."""

     def setUp(self):
@@ -56,51 +54,48 @@ class VolumeTestCase(test.TrialTestCase):
         vol['attach_status'] = "detached"
         return db.volume_create(context.get_admin_context(), vol)['id']

-    @defer.inlineCallbacks
     def test_create_delete_volume(self):
         """Test volume can be created and deleted."""
         volume_id = self._create_volume()
-        yield self.volume.create_volume(self.context, volume_id)
+        self.volume.create_volume(self.context, volume_id)
         self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
                                                   volume_id).id)

-        yield self.volume.delete_volume(self.context, volume_id)
+        self.volume.delete_volume(self.context, volume_id)
         self.assertRaises(exception.NotFound,
                           db.volume_get,
                           self.context,
                           volume_id)

-    @defer.inlineCallbacks
     def test_too_big_volume(self):
         """Ensure failure if a too large of a volume is requested."""
         # FIXME(vish): validation needs to move into the data layer in
         # volume_create
-        defer.returnValue(True)
+        return True
         try:
             volume_id = self._create_volume('1001')
-            yield self.volume.create_volume(self.context, volume_id)
+            self.volume.create_volume(self.context, volume_id)
             self.fail("Should have thrown TypeError")
         except TypeError:
             pass

-    @defer.inlineCallbacks
     def test_too_many_volumes(self):
         """Ensure that NoMoreTargets is raised when we run out of volumes."""
         vols = []
         total_slots = FLAGS.iscsi_num_targets
         for _index in xrange(total_slots):
             volume_id = self._create_volume()
-            yield self.volume.create_volume(self.context, volume_id)
+            self.volume.create_volume(self.context, volume_id)
             vols.append(volume_id)
         volume_id = self._create_volume()
-        self.assertFailure(self.volume.create_volume(self.context,
-                                                     volume_id),
-                           db.NoMoreTargets)
+        self.assertRaises(db.NoMoreTargets,
+                          self.volume.create_volume,
+                          self.context,
+                          volume_id)
         db.volume_destroy(context.get_admin_context(), volume_id)
         for volume_id in vols:
-            yield self.volume.delete_volume(self.context, volume_id)
+            self.volume.delete_volume(self.context, volume_id)

-    @defer.inlineCallbacks
     def test_run_attach_detach_volume(self):
         """Make sure volume can be attached and detached from instance."""
         inst = {}
@@ -115,15 +110,15 @@ class VolumeTestCase(test.TrialTestCase):
         instance_id = db.instance_create(self.context, inst)['id']
         mountpoint = "/dev/sdf"
         volume_id = self._create_volume()
-        yield self.volume.create_volume(self.context, volume_id)
+        self.volume.create_volume(self.context, volume_id)
         if FLAGS.fake_tests:
             db.volume_attached(self.context, volume_id, instance_id,
                                mountpoint)
         else:
-            yield self.compute.attach_volume(self.context,
+            self.compute.attach_volume(self.context,
                                              instance_id,
                                              volume_id,
                                              mountpoint)
         vol = db.volume_get(context.get_admin_context(), volume_id)
         self.assertEqual(vol['status'], "in-use")
         self.assertEqual(vol['attach_status'], "attached")
@@ -131,25 +126,26 @@ class VolumeTestCase(test.TrialTestCase):
         instance_ref = db.volume_get_instance(self.context, volume_id)
         self.assertEqual(instance_ref['id'], instance_id)

-        self.assertFailure(self.volume.delete_volume(self.context, volume_id),
-                           exception.Error)
+        self.assertRaises(exception.Error,
+                          self.volume.delete_volume,
+                          self.context,
+                          volume_id)
         if FLAGS.fake_tests:
             db.volume_detached(self.context, volume_id)
         else:
-            yield self.compute.detach_volume(self.context,
+            self.compute.detach_volume(self.context,
                                              instance_id,
                                              volume_id)
         vol = db.volume_get(self.context, volume_id)
         self.assertEqual(vol['status'], "available")

-        yield self.volume.delete_volume(self.context, volume_id)
+        self.volume.delete_volume(self.context, volume_id)
         self.assertRaises(exception.Error,
                           db.volume_get,
                           self.context,
                           volume_id)
         db.instance_destroy(self.context, instance_id)

-    @defer.inlineCallbacks
     def test_concurrent_volumes_get_different_targets(self):
         """Ensure multiple concurrent volumes get different targets."""
         volume_ids = []
@@ -164,15 +160,11 @@ class VolumeTestCase(test.TrialTestCase):
             self.assert_(iscsi_target not in targets)
             targets.append(iscsi_target)
             logging.debug("Target %s allocated", iscsi_target)
-        deferreds = []
         total_slots = FLAGS.iscsi_num_targets
         for _index in xrange(total_slots):
             volume_id = self._create_volume()
             d = self.volume.create_volume(self.context, volume_id)
-            d.addCallback(_check)
-            d.addErrback(self.fail)
-            deferreds.append(d)
-        yield defer.DeferredList(deferreds)
+            _check(d)
         for volume_id in vols:
             self.volume.delete_volume(self.context, volume_id)

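Note: the volume hunks above repeatedly turn trial's assertFailure on a returned Deferred into unittest's assertRaises on a plain callable. A small self-contained sketch of that rewrite (create_volume here is a throwaway stub, not the Nova volume manager):

    import unittest


    def create_volume(context, volume_id):
        """Throwaway stub that always fails, standing in for a synchronous call."""
        raise ValueError('no more targets')


    class AssertRaisesExample(unittest.TestCase):
        def test_create_volume_fails(self):
            # trial style (old):
            #     self.assertFailure(create_volume(ctx, vol_id), ValueError)
            # unittest style (new): pass the callable and its arguments
            # separately so assertRaises can invoke it and catch the error.
            self.assertRaises(ValueError, create_volume, 'ctx', 42)


    if __name__ == '__main__':
        unittest.main()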
@@ -1,94 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Decorators for argument validation, courtesy of
-http://rmi.net/~lutz/rangetest.html"""
-
-
-def rangetest(**argchecks):
-    """Validate ranges for both + defaults"""
-
-    def onDecorator(func):
-        """onCall remembers func and argchecks"""
-        import sys
-        code = func.__code__ if sys.version_info[0] == 3 else func.func_code
-        allargs = code.co_varnames[:code.co_argcount]
-        funcname = func.__name__
-
-        def onCall(*pargs, **kargs):
-            # all pargs match first N args by position
-            # the rest must be in kargs or omitted defaults
-            positionals = list(allargs)
-            positionals = positionals[:len(pargs)]
-
-            for (argname, (low, high)) in argchecks.items():
-                # for all args to be checked
-                if argname in kargs:
-                    # was passed by name
-                    if float(kargs[argname]) < low or \
-                       float(kargs[argname]) > high:
-                        errmsg = '{0} argument "{1}" not in {2}..{3}'
-                        errmsg = errmsg.format(funcname, argname, low, high)
-                        raise TypeError(errmsg)
-
-                elif argname in positionals:
-                    # was passed by position
-                    position = positionals.index(argname)
-                    if float(pargs[position]) < low or \
-                       float(pargs[position]) > high:
-                        errmsg = '{0} argument "{1}" with value of {4} ' \
-                                 'not in {2}..{3}'
-                        errmsg = errmsg.format(funcname, argname, low, high,
-                                               pargs[position])
-                        raise TypeError(errmsg)
-                else:
-                    pass

-            return func(*pargs, **kargs)  # okay: run original call
-        return onCall
-    return onDecorator
-
-
-def typetest(**argchecks):
-    def onDecorator(func):
-        import sys
-        code = func.__code__ if sys.version_info[0] == 3 else func.func_code
-        allargs = code.co_varnames[:code.co_argcount]
-        funcname = func.__name__
-
-        def onCall(*pargs, **kargs):
-            positionals = list(allargs)[:len(pargs)]
-            for (argname, typeof) in argchecks.items():
-                if argname in kargs:
-                    if not isinstance(kargs[argname], typeof):
-                        errmsg = '{0} argument "{1}" not of type {2}'
-                        errmsg = errmsg.format(funcname, argname, typeof)
-                        raise TypeError(errmsg)
-                elif argname in positionals:
-                    position = positionals.index(argname)
-                    if not isinstance(pargs[position], typeof):
-                        errmsg = '{0} argument "{1}" with value of {2} ' \
-                                 'not of type {3}'
-                        errmsg = errmsg.format(funcname, argname,
-                                               pargs[position], typeof)
-                        raise TypeError(errmsg)
-                else:
-                    pass
-            return func(*pargs, **kargs)
-        return onCall
-    return onDecorator
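Note: the typetest decorator deleted above was exercised only by the removed validator unittest. A simplified stand-in plus the usage pattern that test covered, kept here purely to show what callers lose (the stand-in is not the deleted implementation):

    def typetest(**argchecks):
        """Simplified stand-in for the removed nova.validate.typetest."""
        def decorator(func):
            names = func.__code__.co_varnames[:func.__code__.co_argcount]

            def wrapper(*args, **kwargs):
                # Map positional arguments onto their names, then check types.
                bound = dict(zip(names, args), **kwargs)
                for name, expected in argchecks.items():
                    if name in bound and not isinstance(bound[name], expected):
                        raise TypeError('%s is not of type %s' % (name, expected))
                return func(*args, **kwargs)
            return wrapper
        return decorator


    @typetest(instanceid=str, size=int, number_of_instances=int)
    def type_case(instanceid, size, number_of_instances):
        return True


    assert type_case("foo", 5, 1)
    try:
        type_case("bar", "5", 1)        # size passed as a str
    except TypeError:
        print("rejected, as the deleted validator test expected")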
11
run_tests.py
@@ -39,10 +39,16 @@ Due to our use of multiprocessing it we frequently get some ignorable

 """

+import eventlet
+eventlet.monkey_patch()
+
 import __main__
+import gettext
 import os
 import sys

+gettext.install('nova', unicode=1)
+
 from twisted.scripts import trial as trial_script

 from nova import flags
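Note: run_tests.py now calls eventlet.monkey_patch() before the other imports; the usual reason is that patching has to happen before other modules bind the blocking stdlib primitives. A tiny standalone illustration, not part of the test runner:

    import eventlet
    eventlet.monkey_patch()        # patch socket, time, threading, ... first

    import time                    # time.sleep now cooperatively yields


    def sleeper(name):
        time.sleep(0.1)
        return name


    if __name__ == '__main__':
        pool = eventlet.GreenPool()
        # Both sleeps overlap in green threads instead of blocking serially.
        print(list(pool.imap(sleeper, ['a', 'b'])))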
@@ -56,15 +62,12 @@ from nova.tests.compute_unittest import *
 from nova.tests.flags_unittest import *
 from nova.tests.misc_unittest import *
 from nova.tests.network_unittest import *
-from nova.tests.objectstore_unittest import *
-from nova.tests.process_unittest import *
+#from nova.tests.objectstore_unittest import *
 from nova.tests.quota_unittest import *
 from nova.tests.rpc_unittest import *
 from nova.tests.scheduler_unittest import *
 from nova.tests.service_unittest import *
 from nova.tests.twistd_unittest import *
-from nova.tests.validator_unittest import *
-from nova.tests.virt_unittest import *
 from nova.tests.virt_unittest import *
 from nova.tests.volume_unittest import *
