Merged with trunk to resolve merge conflicts.

Ewan Mellor
2010-07-29 09:04:54 +01:00
97 changed files with 2401 additions and 2408 deletions

View File

@@ -1 +1,3 @@
run_tests.err.log
.nova-venv
ChangeLog

1
.gitignore vendored
View File

@@ -9,3 +9,4 @@ keys
build/*
build-stamp
nova.egg-info
.nova-venv

22
MANIFEST.in Normal file
View File

@@ -0,0 +1,22 @@
include HACKING LICENSE run_tests.py run_tests.sh
include README builddeb.sh exercise_rsapi.py
include ChangeLog
graft CA
graft doc
graft smoketests
graft tools
include nova/auth/novarc.template
include nova/auth/slap.sh
include nova/cloudpipe/bootscript.sh
include nova/cloudpipe/client.ovpn.template
include nova/compute/fakevirtinstance.xml
include nova/compute/interfaces.template
include nova/compute/libvirt.xml.template
include nova/tests/CA/
include nova/tests/CA/cacert.pem
include nova/tests/CA/private/
include nova/tests/CA/private/cakey.pem
include nova/tests/bundle/
include nova/tests/bundle/1mb.manifest.xml
include nova/tests/bundle/1mb.part.0
include nova/tests/bundle/1mb.part.1

View File

@@ -29,7 +29,7 @@ from nova import flags
from nova import rpc
from nova import server
from nova import utils
from nova.auth import users
from nova.auth import manager
from nova.compute import model
from nova.endpoint import admin
from nova.endpoint import api

View File

@@ -19,83 +19,14 @@
"""
Twistd daemon for the nova compute nodes.
Receives messages via AMQP, manages pool of worker threads
for async tasks.
"""
import logging
import os
import sys
# NOTE(termie): kludge so that we can run this from the bin directory in the
# checkout without having to screw with paths
NOVA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'nova')
if os.path.exists(NOVA_PATH):
sys.path.insert(0, os.path.dirname(NOVA_PATH))
from carrot import connection
from carrot import messaging
from twisted.internet import task
from twisted.application import service
from nova import flags
from nova import rpc
from nova import twistd
from nova.compute import node
from nova.compute import service
FLAGS = flags.FLAGS
# NOTE(termie): This file will necessarily be re-imported under different
# context when the twistd.serve() call is made below so any
# flags we define here will have to be conditionally defined,
# flags defined by imported modules are safe.
if 'node_report_state_interval' not in FLAGS:
flags.DEFINE_integer('node_report_state_interval', 10,
'seconds between nodes reporting state to cloud',
lower_bound=1)
logging.getLogger().setLevel(logging.DEBUG)
def main():
logging.warn('Starting compute node')
n = node.Node()
d = n.adopt_instances()
d.addCallback(lambda x: logging.info('Adopted %d instances', x))
conn = rpc.Connection.instance()
consumer_all = rpc.AdapterConsumer(
connection=conn,
topic='%s' % FLAGS.compute_topic,
proxy=n)
consumer_node = rpc.AdapterConsumer(
connection=conn,
topic='%s.%s' % (FLAGS.compute_topic, FLAGS.node_name),
proxy=n)
bin_name = os.path.basename(__file__)
pulse = task.LoopingCall(n.report_state, FLAGS.node_name, bin_name)
pulse.start(interval=FLAGS.node_report_state_interval, now=False)
injected = consumer_all.attach_to_twisted()
injected = consumer_node.attach_to_twisted()
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals below
application = service.Application(bin_name)
n.setServiceParent(application)
return application
# NOTE(termie): When this script is executed from the commandline what it will
# actually do is tell the twistd application runner that it
# should run this file as a twistd application (see below).
if __name__ == '__main__':
twistd.serve(__file__)
# NOTE(termie): When this script is loaded by the twistd application runner
# this code path will be executed and twistd will expect a
# variable named 'application' to be available, it will then
# handle starting it and stopping it.
if __name__ == '__builtin__':
application = main()
application = service.ComputeService.create()
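
The compute daemon above, and nova-network and nova-volume below, all converge on the same twistd-style entry point. Distilled as a sketch (the service class name varies per binary):

from nova import twistd
from nova.compute import service

# Invoked from the command line: hand this file to the twistd application runner.
if __name__ == '__main__':
    twistd.serve(__file__)

# Re-imported by twistd under the '__builtin__' name: expose the 'application'
# object that twistd looks for.
if __name__ == '__builtin__':
    application = service.ComputeService.create()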

View File

@@ -76,9 +76,9 @@ def main():
FLAGS.fake_rabbit = True
FLAGS.redis_db = 8
FLAGS.network_size = 32
FLAGS.fake_libvirt=True
FLAGS.connection_type = 'fake'
FLAGS.fake_network=True
FLAGS.fake_users = True
FLAGS.auth_driver='nova.auth.ldapdriver.FakeLdapDriver'
action = argv[1]
if action in ['add','del','old']:
mac = argv[2]

View File

@@ -27,7 +27,7 @@ import time
from nova import flags
from nova import utils
from nova.auth import users
from nova.auth import manager
from nova.compute import model
from nova.compute import network
from nova.cloudpipe import pipelib
@@ -42,7 +42,7 @@ class NetworkCommands(object):
class VpnCommands(object):
def __init__(self):
self.manager = users.UserManager.instance()
self.manager = manager.AuthManager()
self.instdir = model.InstanceDirectory()
self.pipe = pipelib.CloudPipe(cloud.CloudController())
@@ -90,7 +90,7 @@ class VpnCommands(object):
class RoleCommands(object):
def __init__(self):
self.manager = users.UserManager.instance()
self.manager = manager.AuthManager()
def add(self, user, role, project=None):
"""adds role to user
@@ -113,7 +113,7 @@ class RoleCommands(object):
class UserCommands(object):
def __init__(self):
self.manager = users.UserManager.instance()
self.manager = manager.AuthManager()
def __print_export(self, user):
print 'export EC2_ACCESS_KEY=%s' % user.access
@@ -153,7 +153,7 @@ class UserCommands(object):
class ProjectCommands(object):
def __init__(self):
self.manager = users.UserManager.instance()
self.manager = manager.AuthManager()
def add(self, project, user):
"""adds user to project

32
bin/nova-network Executable file
View File

@@ -0,0 +1,32 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Twistd daemon for the nova network nodes.
"""
from nova import twistd
from nova.network import service
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
application = service.NetworkService.create()

View File

@@ -4,20 +4,20 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WSGI daemon for the main API endpoint.
WSGI daemon for the main API endpoint.
"""
import logging
@@ -28,14 +28,14 @@ from nova import flags
from nova import rpc
from nova import server
from nova import utils
from nova.auth import users
from nova.auth import manager
from nova.endpoint import rackspace
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
def main(_argv):
user_manager = users.UserManager()
user_manager = manager.AuthManager()
api_instance = rackspace.Api(user_manager)
conn = rpc.Connection.instance()
rpc_consumer = rpc.AdapterConsumer(connection=conn,

View File

@@ -18,52 +18,15 @@
# under the License.
"""
Tornado Storage daemon manages AoE volumes via AMQP messaging.
Twistd daemon for the nova volume nodes.
"""
import logging
from tornado import ioloop
from nova import flags
from nova import rpc
from nova import server
from nova import utils
from nova.volume import storage
FLAGS = flags.FLAGS
flags.DEFINE_integer('storage_report_state_interval', 10,
'seconds between broadcasting state to cloud',
lower_bound=1)
def main(argv):
bs = storage.BlockStore()
conn = rpc.Connection.instance()
consumer_all = rpc.AdapterConsumer(
connection=conn,
topic='%s' % FLAGS.storage_topic,
proxy=bs)
consumer_node = rpc.AdapterConsumer(
connection=conn,
topic='%s.%s' % (FLAGS.storage_topic, FLAGS.node_name),
proxy=bs)
io_inst = ioloop.IOLoop.instance()
scheduler = ioloop.PeriodicCallback(
lambda: bs.report_state(),
FLAGS.storage_report_state_interval * 1000,
io_loop=io_inst)
injected = consumer_all.attachToTornado(io_inst)
injected = consumer_node.attachToTornado(io_inst)
scheduler.start()
io_inst.start()
from nova import twistd
from nova.volume import service
if __name__ == '__main__':
utils.default_flagfile()
server.serve('nova-volume', main)
twistd.serve(__file__)
if __name__ == '__builtin__':
application = service.VolumeService.create()

232
debian/changelog vendored
View File

@@ -1,232 +0,0 @@
nova (0.2.3-1) UNRELEASED; urgency=low
* Relax the Twisted dependency to python-twisted-core (rather than the
full stack).
* Move nova related configuration files into /etc/nova/.
* Add a dependency on nginx from nova-objectstore and install a
suitable configuration file.
* Ship the CA directory in nova-common.
* Add a default flag file for nova-manage to help it find the CA.
* If set, pass KernelId and RamdiskId from RunInstances call to the
target compute node.
* Added --network_path setting to nova-compute's flagfile.
* Move templates from python directories to /usr/share/nova.
* Add debian/nova-common.dirs to create
var/lib/nova/{buckets,CA,images,instances,keys,networks}
* Don't pass --daemonize=1 to nova-compute. It's already daemonising
by default.
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 14 Jul 2010 12:00:00 -0700
nova (0.2.2-10) UNRELEASED; urgency=low
* Fixed extra space in vblade-persist
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 13 Jul 2010 19:00:00 -0700
nova (0.2.2-9) UNRELEASED; urgency=low
* Fixed invalid dn bug in ldap for adding roles
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 12 Jul 2010 15:20:00 -0700
nova (0.2.2-8) UNRELEASED; urgency=low
* Added a missing comma
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 08 Jul 2010 10:05:00 -0700
nova (0.2.2-7) UNRELEASED; urgency=low
* Missing files from twisted patch
* License updates
* Reformatting/cleanup
* Users/ldap bugfixes
* Merge fixes
* Documentation updates
* Vpn key creation fix
* Multiple shelves for volumes
-- Vishvananda Ishaya <vishvananda@gmail.com> Wed, 07 Jul 2010 18:45:00 -0700
nova (0.2.2-6) UNRELEASED; urgency=low
* Fix to make Key Injection work again
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 14 Jun 2010 21:35:00 -0700
nova (0.2.2-5) UNRELEASED; urgency=low
* Lowered message callback frequency to stop compute and volume
from eating tons of cpu
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 14 Jun 2010 14:15:00 -0700
nova (0.2.2-4) UNRELEASED; urgency=low
* Documentation fixes
* Uncaught exceptions now log properly
* Nova Manage zip exporting works again
* Twisted threads no longer interrupt system calls
-- Vishvananda Ishaya <vishvananda@gmail.com> Sun, 13 Jun 2010 01:40:00 -0700
nova (0.2.2-3) UNRELEASED; urgency=low
* Fixes to api calls
* More accurate documentation
* Removal of buggy multiprocessing
* Asynchronous execution of shell commands
* Fix of messaging race condition
* Test redis database cleaned out on each run of tests
* Smoketest updates
-- Vishvananda Ishaya <vishvananda@gmail.com> Fri, 12 Jun 2010 20:10:00 -0700
nova (0.2.2-2) UNRELEASED; urgency=low
* Bugfixes to volume code
* Instances no longer use keeper
* Sectors off by one fix
* State reported properly by instances
-- Vishvananda Ishaya <vishvananda@gmail.com> Wed, 03 Jun 2010 15:21:00 -0700
nova (0.2.2-1) UNRELEASED; urgency=low
* First release based on nova/cc
* Major rewrites to volumes and instances
* Addition of cloudpipe and rbac
* Major bugfixes
-- Vishvananda Ishaya <vishvananda@gmail.com> Wed, 02 Jun 2010 17:42:00 -0700
nova (0.2.1-1) UNRELEASED; urgency=low
* Support ephemeral (local) space for instances
* instance related fixes
* fix network & cloudpipe bugs
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 25 May 2010 12:14:00 -0700
nova (0.2.0-20) UNRELEASED; urgency=low
* template files are in proper folder
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 25 May 2010 12:14:00 -0700
nova (0.2.0-19) UNRELEASED; urgency=low
* removed mox dependency and added templates to install
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 25 May 2010 11:53:00 -0700
nova (0.2.0-18) UNRELEASED; urgency=low
* api server properly sends instance status code
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 24 May 2010 17:18:00 -0700
nova (0.2.0-17) UNRELEASED; urgency=low
* redis-backed datastore
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 24 May 2010 16:28:00 -0700
nova (0.2.0-16) UNRELEASED; urgency=low
* make sure twistd.pid is really overridden
-- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 22:18:47 -0700
nova (0.2.0-15) UNRELEASED; urgency=low
* rpc shouldn't require tornado unless you are using attach_to_tornado
-- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 21:59:00 -0700
nova (0.2.0-14) UNRELEASED; urgency=low
* quicky init scripts for the other services, based on nova-objectstore
-- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 21:49:43 -0700
nova (0.2.0-13) UNRELEASED; urgency=low
* init script for nova-objectstore
-- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 21:33:25 -0700
nova (0.2.0-12) UNRELEASED; urgency=low
* kvm, kpartx required for nova-compute
-- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 21:32:00 -0700
nova (0.2.0-11) UNRELEASED; urgency=low
* Need to include the python modules in nova-common.install as well.
-- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 20:04:27 -0700
nova (0.2.0-10) UNRELEASED; urgency=low
* add more requirements to bin packages
-- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 19:54:00 -0700
nova (0.2.0-9) UNRELEASED; urgency=low
* nova bin packages should depend on the same version of nova-common they
were built from.
-- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:46:34 -0700
nova (0.2.0-8) UNRELEASED; urgency=low
* Require libvirt 0.8.1 or newer for nova-compute
-- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 18:33:00 -0700
nova (0.2.0-7) UNRELEASED; urgency=low
* Split bins into separate packages
-- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:46:34 -0700
nova (0.2.0-6) UNRELEASED; urgency=low
* Add python-m2crypto to deps
-- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 18:33:00 -0700
nova (0.2.0-5) UNRELEASED; urgency=low
* Add python-gflags to deps
-- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:28:50 -0700
nova (0.2.0-4) UNRELEASED; urgency=low
* install scripts
-- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:16:27 -0700
nova (0.2.0-3) UNRELEASED; urgency=low
* debian build goop
-- Manish Singh <yosh@gimp.org> Sun, 23 May 2010 18:06:37 -0700
nova (0.2.0-2) UNRELEASED; urgency=low
* improved requirements
-- Jesse Andrews <anotherjesse@gmail.com> Sun, 23 May 2010 17:42:00 -0700
nova (0.2.0-1) UNRELEASED; urgency=low
* initial release
-- Jesse Andrews <anotherjesse@gmail.com> Fri, 21 May 2010 12:28:00 -0700

1
debian/compat vendored
View File

@@ -1 +0,0 @@
7

136
debian/control vendored
View File

@@ -1,136 +0,0 @@
Source: nova
Section: net
Priority: extra
Maintainer: Jesse Andrews <jesse@ansolabs.com>
Build-Depends: debhelper (>= 7), redis-server (>=2:2.0.0~rc1), python-m2crypto
Build-Depends-Indep: python-support, python-setuptools
Standards-Version: 3.8.4
XS-Python-Version: 2.6
Package: nova-common
Architecture: all
Depends: ${python:Depends}, aoetools, vlan, python-ipy, python-boto, python-m2crypto, python-pycurl, python-twisted-core, python-daemon, python-redis, python-carrot, python-lockfile, python-gflags, python-tornado, ${misc:Depends}
Provides: ${python:Provides}
Description: Nova Cloud Computing - common files
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This package contains things that are needed by all parts of Nova.
Package: nova-compute
Architecture: all
Depends: nova-common (= ${binary:Version}), kpartx, kvm, python-libvirt, libvirt-bin (>= 0.7.5), curl, ${python:Depends}, ${misc:Depends}
Description: Nova Cloud Computing - compute node
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This is the package you will install on the nodes that will run your
virtual machines.
Package: nova-volume
Architecture: all
Depends: nova-common (= ${binary:Version}), vblade, vblade-persist, ${python:Depends}, ${misc:Depends}
Description: Nova Cloud Computing - storage
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This is the package you will install on your storage nodes.
Package: nova-api
Architecture: all
Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
Description: Nova Cloud Computing - API frontend
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This package provides the API frontend.
Package: nova-objectstore
Architecture: all
Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
Description: Nova Cloud Computing - object store
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This is the package you will install on the nodes that will contain your
object store.
Package: nova-instancemonitor
Architecture: all
Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
Description: Nova instance monitor
Package: nova-tools
Architecture: all
Depends: python-boto, ${python:Depends}, ${misc:Depends}
Description: Nova Cloud Computing - management tools
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This package contains admin tools for Nova.

View File

@@ -1,6 +0,0 @@
--daemonize=1
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--networks_path=/var/lib/nova/networks
--dhcpbridge_flagfile=/etc/nova/nova-dhcpbridge.conf
--fake_users=1

69
debian/nova-api.init vendored
View File

@@ -1,69 +0,0 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: nova-api
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: nova-api
# Description: nova-api
### END INIT INFO
set -e
DAEMON=/usr/bin/nova-api
DAEMON_ARGS="--flagfile=/etc/nova/nova-api.conf"
PIDFILE=/var/run/nova-api.pid
ENABLED=true
if test -f /etc/default/nova-api; then
. /etc/default/nova-api
fi
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
case "$1" in
start)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Starting nova api" "nova-api"
cd /var/run
if $DAEMON $DAEMON_ARGS start; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Stopping nova api" "nova-api"
cd /var/run
if $DAEMON $DAEMON_ARGS stop; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart|force-reload)
test "$ENABLED" = "true" || exit 1
cd /var/run
if $DAEMON $DAEMON_ARGS restart; then
log_end_msg 0
else
log_end_msg 1
fi
;;
status)
test "$ENABLED" = "true" || exit 0
status_of_proc -p $PIDFILE $DAEMON nova-api && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/nova-api {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0

View File

@@ -1,3 +0,0 @@
bin/nova-api usr/bin
debian/nova-api.conf etc/nova
debian/nova-dhcpbridge.conf etc/nova

View File

@@ -1,11 +0,0 @@
etc/nova
var/lib/nova/buckets
var/lib/nova/CA
var/lib/nova/CA/INTER
var/lib/nova/CA/newcerts
var/lib/nova/CA/private
var/lib/nova/CA/reqs
var/lib/nova/images
var/lib/nova/instances
var/lib/nova/keys
var/lib/nova/networks

View File

@@ -1,10 +0,0 @@
bin/nova-manage usr/bin
debian/nova-manage.conf etc/nova
nova/auth/novarc.template usr/share/nova
nova/cloudpipe/client.ovpn.template usr/share/nova
nova/compute/libvirt.xml.template usr/share/nova
nova/compute/interfaces.template usr/share/nova
usr/lib/python*/*-packages/nova/*
CA/openssl.cnf.tmpl var/lib/nova/CA
CA/geninter.sh var/lib/nova/CA
CA/genrootca.sh var/lib/nova/CA

View File

@@ -1,8 +0,0 @@
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--instances_path=/var/lib/nova/instances
--simple_network_template=/usr/share/nova/interfaces.template
--libvirt_xml_template=/usr/share/nova/libvirt.xml.template
--vpn_client_template=/usr/share/nova/client.ovpn.template
--credentials_template=/usr/share/nova/novarc.template
--fake_users=1

View File

@@ -1,69 +0,0 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: nova-compute
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: nova-compute
# Description: nova-compute
### END INIT INFO
set -e
DAEMON=/usr/bin/nova-compute
DAEMON_ARGS="--flagfile=/etc/nova/nova-compute.conf"
PIDFILE=/var/run/nova-compute.pid
ENABLED=true
if test -f /etc/default/nova-compute; then
. /etc/default/nova-compute
fi
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
case "$1" in
start)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Starting nova compute" "nova-compute"
cd /var/run
if $DAEMON $DAEMON_ARGS start; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Stopping nova compute" "nova-compute"
cd /var/run
if $DAEMON $DAEMON_ARGS stop; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart|force-reload)
test "$ENABLED" = "true" || exit 1
cd /var/run
if $DAEMON $DAEMON_ARGS restart; then
log_end_msg 0
else
log_end_msg 1
fi
;;
status)
test "$ENABLED" = "true" || exit 0
status_of_proc -p $PIDFILE $DAEMON nova-compute && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/nova-compute {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0

View File

@@ -1,2 +0,0 @@
bin/nova-compute usr/bin
debian/nova-compute.conf etc/nova

View File

@@ -1,2 +0,0 @@
--networks_path=/var/lib/nova/networks
--fake_users=1

View File

@@ -1,69 +0,0 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: nova-instancemonitor
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: nova-instancemonitor
# Description: nova-instancemonitor
### END INIT INFO
set -e
DAEMON=/usr/bin/nova-instancemonitor
DAEMON_ARGS="--flagfile=/etc/nova.conf"
PIDFILE=/var/run/nova-instancemonitor.pid
ENABLED=false
if test -f /etc/default/nova-instancemonitor; then
. /etc/default/nova-instancemonitor
fi
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
case "$1" in
start)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Starting nova compute" "nova-instancemonitor"
cd /var/run
if $DAEMON $DAEMON_ARGS start; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Stopping nova compute" "nova-instancemonitor"
cd /var/run
if $DAEMON $DAEMON_ARGS stop; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart|force-reload)
test "$ENABLED" = "true" || exit 1
cd /var/run
if $DAEMON $DAEMON_ARGS restart; then
log_end_msg 0
else
log_end_msg 1
fi
;;
status)
test "$ENABLED" = "true" || exit 0
status_of_proc -p $PIDFILE $DAEMON nova-instancemonitor && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/nova-instancemonitor {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0

View File

@@ -1 +0,0 @@
bin/nova-instancemonitor usr/bin

View File

@@ -1,4 +0,0 @@
--ca_path=/var/lib/nova/CA
--credentials_template=/usr/share/nova/novarc.template
--keys_path=/var/lib/nova/keys
--vpn_client_template=/usr/share/nova/client.ovpn.template

View File

@@ -1,6 +0,0 @@
--daemonize=1
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--fake_users=1
--images_path=/var/lib/nova/images
--buckets_path=/var/lib/nova/buckets

View File

@@ -1,69 +0,0 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: nova-objectstore
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: nova-objectstore
# Description: nova-objectstore
### END INIT INFO
set -e
DAEMON=/usr/bin/nova-objectstore
DAEMON_ARGS="--flagfile=/etc/nova/nova-objectstore.conf"
PIDFILE=/var/run/nova-objectstore.pid
ENABLED=true
if test -f /etc/default/nova-objectstore; then
. /etc/default/nova-objectstore
fi
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
case "$1" in
start)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Starting nova objectstore" "nova-objectstore"
cd /var/run
if $DAEMON $DAEMON_ARGS start; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Stopping nova objectstore" "nova-objectstore"
cd /var/run
if $DAEMON $DAEMON_ARGS stop; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart|force-reload)
test "$ENABLED" = "true" || exit 1
cd /var/run
if $DAEMON $DAEMON_ARGS restart; then
log_end_msg 0
else
log_end_msg 1
fi
;;
status)
test "$ENABLED" = "true" || exit 0
status_of_proc -p $PIDFILE $DAEMON nova-objectstore && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/nova-objectstore {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0

View File

@@ -1,2 +0,0 @@
bin/nova-objectstore usr/bin
debian/nova-objectstore.conf etc/nova

View File

@@ -1,6 +0,0 @@
--daemonize=1
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--fake_users=1
--images_path=/var/lib/nova/images
--buckets_path=/var/lib/nova/buckets

View File

@@ -1,69 +0,0 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: nova-volume
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: nova-volume
# Description: nova-volume
### END INIT INFO
set -e
DAEMON=/usr/bin/nova-volume
DAEMON_ARGS="--flagfile=/etc/nova/nova-volume.conf"
PIDFILE=/var/run/nova-volume.pid
ENABLED=true
if test -f /etc/default/nova-volume; then
. /etc/default/nova-volume
fi
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
case "$1" in
start)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Starting nova volume" "nova-volume"
cd /var/run
if $DAEMON $DAEMON_ARGS start; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Stopping nova volume" "nova-volume"
cd /var/run
if $DAEMON $DAEMON_ARGS stop; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart|force-reload)
test "$ENABLED" = "true" || exit 1
cd /var/run
if $DAEMON $DAEMON_ARGS restart; then
log_end_msg 0
else
log_end_msg 1
fi
;;
status)
test "$ENABLED" = "true" || exit 0
status_of_proc -p $PIDFILE $DAEMON nova-volume && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/nova-volume {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0

View File

@@ -1,2 +0,0 @@
bin/nova-volume usr/bin
debian/nova-volume.conf etc/nova

1
debian/pycompat vendored
View File

@@ -1 +0,0 @@
2

1
debian/pyversions vendored
View File

@@ -1 +0,0 @@
2.6-

4
debian/rules vendored
View File

@@ -1,4 +0,0 @@
#!/usr/bin/make -f
%:
dh $@

View File

0
doc/build/.placeholder vendored Normal file
View File

0
doc/source/_static/.gitignore vendored Normal file
View File

View File

0
doc/source/_templates/.gitignore vendored Normal file
View File

View File

View File

@@ -16,8 +16,7 @@ import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('/Users/jmckenty/Projects/cc'))
sys.path.append([os.path.abspath('../nova'),os.path.abspath('../'),os.path.abspath('../vendor')])
sys.path.append([os.path.abspath('../nova'), os.path.abspath('..'), os.path.abspath('../bin')])
# -- General configuration -----------------------------------------------------
@@ -25,7 +24,6 @@ sys.path.append([os.path.abspath('../nova'),os.path.abspath('../'),os.path.abspa
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig']
#sphinx_to_github = False
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
@@ -49,9 +47,9 @@ copyright = u'2010, United States Government as represented by the Administrator
# built documents.
#
# The short X.Y version.
version = '0.42'
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.42'
release = '0.9.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -68,7 +66,7 @@ release = '0.42'
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
@@ -176,7 +174,7 @@ htmlhelp_basename = 'novadoc'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'nova.tex', u'nova Documentation',
('index', 'Nova.tex', u'Nova Documentation',
u'Anso Labs, LLC', 'manual'),
]
@@ -199,4 +197,6 @@ latex_documents = [
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'swift': ('http://swift.openstack.org', None)}

View File

@@ -18,10 +18,10 @@
Nova Fakes
==========
The :mod:`fakevirt` Module
The :mod:`virt.fake` Module
--------------------------
.. automodule:: nova.fakevirt
.. automodule:: nova.virt.fake
:members:
:undoc-members:
:show-inheritance:

View File

@@ -43,7 +43,6 @@ Contents:
nova
fakes
binaries
todo
modules
packages

View File

@@ -23,6 +23,7 @@ import base64
import boto
from boto.ec2.regioninfo import RegionInfo
class UserInfo(object):
"""
Information about a Nova user, as parsed through SAX
@@ -56,6 +57,64 @@ class UserInfo(object):
elif name == 'secretkey':
self.secretkey = str(value)
class ProjectInfo(object):
"""
Information about a Nova project, as parsed through SAX
Fields include:
projectname
description
projectManagerId
memberIds
"""
def __init__(self, connection=None):
self.connection = connection
self.projectname = None
self.description = None
self.projectManagerId = None
self.memberIds = []
def __repr__(self):
return 'ProjectInfo:%s' % self.projectname
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'projectname':
self.projectname = value
elif name == 'description':
self.description = value
elif name == 'projectManagerId':
self.projectManagerId = value
elif name == 'memberId':
self.memberIds.append(value)
else:
setattr(self, name, str(value))
class ProjectMember(object):
"""
Information about a Nova project member, as parsed through SAX.
Fields include:
memberId
"""
def __init__(self, connection=None):
self.connection = connection
self.memberId = None
def __repr__(self):
return 'ProjectMember:%s' % self.memberId
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'member':
self.memberId = value
else:
setattr(self, name, str(value))
class HostInfo(object):
"""
Information about a Nova Host, as parsed through SAX:
@@ -99,20 +158,20 @@ class NovaAdminClient(object):
**kwargs)
self.apiconn.APIVersion = 'nova'
def connection_for(self, username, **kwargs):
def connection_for(self, username, project, **kwargs):
"""
Returns a boto ec2 connection for the given username.
"""
user = self.get_user(username)
access_key = '%s:%s' % (user.accesskey, project)
return boto.connect_ec2(
aws_access_key_id=user.accesskey,
aws_access_key_id=access_key,
aws_secret_access_key=user.secretkey,
is_secure=False,
region=RegionInfo(None, self.region, self.clc_ip),
port=8773,
path='/services/Cloud',
**kwargs
)
**kwargs)
def get_users(self):
""" grabs the list of all users """
@@ -137,6 +196,102 @@ class NovaAdminClient(object):
""" deletes a user """
return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo)
def add_user_role(self, user, role, project=None):
"""
Add a role to a user either globally or for a specific project.
"""
return self.modify_user_role(user, role, project=project,
operation='add')
def remove_user_role(self, user, role, project=None):
"""
Remove a role from a user either globally or for a specific project.
"""
return self.modify_user_role(user, role, project=project,
operation='remove')
def modify_user_role(self, user, role, project=None, operation='add',
**kwargs):
"""
Add or remove a role for a user and project.
"""
params = {'User': user,
'Role': role,
'Project': project,
'Operation': operation}
return self.apiconn.get_status('ModifyUserRole', params)
def get_projects(self, user=None):
"""
Returns a list of all projects.
"""
if user:
params = {'User': user}
else:
params = {}
return self.apiconn.get_list('DescribeProjects',
params,
[('item', ProjectInfo)])
def get_project(self, name):
"""
Returns a single project with the specified name.
"""
project = self.apiconn.get_object('DescribeProject',
{'Name': name},
ProjectInfo)
if project.projectname != None:
return project
def create_project(self, projectname, manager_user, description=None,
member_users=None):
"""
Creates a new project.
"""
params = {'Name': projectname,
'ManagerUser': manager_user,
'Description': description,
'MemberUsers': member_users}
return self.apiconn.get_object('RegisterProject', params, ProjectInfo)
def delete_project(self, projectname):
"""
Permanently deletes the specified project.
"""
return self.apiconn.get_object('DeregisterProject',
{'Name': projectname},
ProjectInfo)
def get_project_members(self, name):
"""
Returns a list of members of a project.
"""
return self.apiconn.get_list('DescribeProjectMembers',
{'Name': name},
[('item', ProjectMember)])
def add_project_member(self, user, project):
"""
Adds a user to a project.
"""
return self.modify_project_member(user, project, operation='add')
def remove_project_member(self, user, project):
"""
Removes a user from a project.
"""
return self.modify_project_member(user, project, operation='remove')
def modify_project_member(self, user, project, operation='add'):
"""
Adds or removes a user from a project.
"""
params = {'User': user,
'Project': project,
'Operation': operation}
return self.apiconn.get_status('ModifyProjectMember', params)
def get_zip(self, username):
""" returns the content of a zip file containing novarc and access credentials. """
return self.apiconn.get_object('GenerateX509ForUser', {'Name': username}, UserInfo).file
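
Putting the additions together, a hypothetical session with the extended admin client might look as follows; the module path, constructor arguments, and the user/project/role names are assumptions for illustration, while the method signatures mirror the code above:

from nova.adminclient import NovaAdminClient   # module path assumed

admin = NovaAdminClient()                       # connection settings omitted here
admin.create_project('myproject', 'someuser', description='demo project')
admin.add_project_member('otheruser', 'myproject')
admin.add_user_role('someuser', 'sysadmin', project='myproject')

# connection_for() now takes a project; the access key is sent as "access:project".
ec2 = admin.connection_for('someuser', 'myproject')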

484
nova/auth/ldapdriver.py Normal file
View File

@@ -0,0 +1,484 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Auth driver for ldap. Includes FakeLdapDriver.
It should be easy to create a replacement for this driver supporting
other backends by creating another class that exposes the same
public methods.
"""
import logging
import sys
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('ldap_url', 'ldap://localhost',
'Point this at your ldap server')
flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
'DN of admin user')
flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
'OU for Users')
flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com',
'OU for Projects')
flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com',
'OU for Roles')
# NOTE(vish): mapping with these flags is necessary because we're going
# to tie in to an existing ldap schema
flags.DEFINE_string('ldap_cloudadmin',
'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'cn for Cloud Admins')
flags.DEFINE_string('ldap_itsec',
'cn=itsec,ou=Groups,dc=example,dc=com', 'cn for ItSec')
flags.DEFINE_string('ldap_sysadmin',
'cn=sysadmins,ou=Groups,dc=example,dc=com', 'cn for Sysadmins')
flags.DEFINE_string('ldap_netadmin',
'cn=netadmins,ou=Groups,dc=example,dc=com', 'cn for NetAdmins')
flags.DEFINE_string('ldap_developer',
'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
# TODO(vish): make an abstract base class with the same public methods
# to define a set interface for AuthDrivers. I'm delaying
# creating this now because I'm expecting an auth refactor
# in which we may want to change the interface a bit more.
class LdapDriver(object):
"""Ldap Auth driver
Defines enter and exit and therefore supports the with/as syntax.
"""
def __init__(self):
"""Imports the LDAP module"""
self.ldap = __import__('ldap')
def __enter__(self):
"""Creates the connection to LDAP"""
self.conn = self.ldap.initialize(FLAGS.ldap_url)
self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password)
return self
def __exit__(self, type, value, traceback):
"""Destroys the connection to LDAP"""
self.conn.unbind_s()
return False
def get_user(self, uid):
"""Retrieve user by id"""
attr = self.__find_object(self.__uid_to_dn(uid),
'(objectclass=novaUser)')
return self.__to_user(attr)
def get_user_from_access_key(self, access):
"""Retrieve user by access key"""
query = '(accessKey=%s)' % access
dn = FLAGS.ldap_user_subtree
return self.__to_user(self.__find_object(dn, query))
def get_key_pair(self, uid, key_name):
"""Retrieve key pair by uid and key name"""
dn = 'cn=%s,%s' % (key_name,
self.__uid_to_dn(uid))
attr = self.__find_object(dn, '(objectclass=novaKeyPair)')
return self.__to_key_pair(uid, attr)
def get_project(self, pid):
"""Retrieve project by id"""
dn = 'cn=%s,%s' % (pid,
FLAGS.ldap_project_subtree)
attr = self.__find_object(dn, '(objectclass=novaProject)')
return self.__to_project(attr)
def get_users(self):
"""Retrieve list of users"""
attrs = self.__find_objects(FLAGS.ldap_user_subtree,
'(objectclass=novaUser)')
return [self.__to_user(attr) for attr in attrs]
def get_key_pairs(self, uid):
"""Retrieve list of key pairs"""
attrs = self.__find_objects(self.__uid_to_dn(uid),
'(objectclass=novaKeyPair)')
return [self.__to_key_pair(uid, attr) for attr in attrs]
def get_projects(self, uid=None):
"""Retrieve list of projects"""
filter = '(objectclass=novaProject)'
if uid:
filter = "(&%s(member=%s))" % (filter, self.__uid_to_dn(uid))
attrs = self.__find_objects(FLAGS.ldap_project_subtree,
filter)
return [self.__to_project(attr) for attr in attrs]
def create_user(self, name, access_key, secret_key, is_admin):
"""Create a user"""
if self.__user_exists(name):
raise exception.Duplicate("LDAP user %s already exists" % name)
attr = [
('objectclass', ['person',
'organizationalPerson',
'inetOrgPerson',
'novaUser']),
('ou', [FLAGS.ldap_user_unit]),
('uid', [name]),
('sn', [name]),
('cn', [name]),
('secretKey', [secret_key]),
('accessKey', [access_key]),
('isAdmin', [str(is_admin).upper()]),
]
self.conn.add_s(self.__uid_to_dn(name), attr)
return self.__to_user(dict(attr))
def create_key_pair(self, uid, key_name, public_key, fingerprint):
"""Create a key pair"""
# TODO(vish): possibly refactor this to store keys in their own ou
# and put dn reference in the user object
attr = [
('objectclass', ['novaKeyPair']),
('cn', [key_name]),
('sshPublicKey', [public_key]),
('keyFingerprint', [fingerprint]),
]
self.conn.add_s('cn=%s,%s' % (key_name,
self.__uid_to_dn(uid)),
attr)
return self.__to_key_pair(uid, dict(attr))
def create_project(self, name, manager_uid,
description=None, member_uids=None):
"""Create a project"""
if self.__project_exists(name):
raise exception.Duplicate("Project can't be created because "
"project %s already exists" % name)
if not self.__user_exists(manager_uid):
raise exception.NotFound("Project can't be created because "
"manager %s doesn't exist" % manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
# description is a required attribute
if description is None:
description = name
members = []
if member_uids != None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.NotFound("Project can't be created "
"because user %s doesn't exist" % member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
if not manager_dn in members:
members.append(manager_dn)
attr = [
('objectclass', ['novaProject']),
('cn', [name]),
('description', [description]),
('projectManager', [manager_dn]),
('member', members)
]
self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
return self.__to_project(dict(attr))
def add_to_project(self, uid, project_id):
"""Add user to project"""
dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
return self.__add_to_group(uid, dn)
def remove_from_project(self, uid, project_id):
"""Remove user from project"""
dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
return self.__remove_from_group(uid, dn)
def is_in_project(self, uid, project_id):
"""Check if user is in project"""
dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
return self.__is_in_group(uid, dn)
def has_role(self, uid, role, project_id=None):
"""Check if user has role
If project is specified, it checks for local role, otherwise it
checks for global role
"""
role_dn = self.__role_to_dn(role, project_id)
return self.__is_in_group(uid, role_dn)
def add_role(self, uid, role, project_id=None):
"""Add role for user (or user and project)"""
role_dn = self.__role_to_dn(role, project_id)
if not self.__group_exists(role_dn):
# create the role if it doesn't exist
description = '%s role for %s' % (role, project_id)
self.__create_group(role_dn, role, uid, description)
else:
return self.__add_to_group(uid, role_dn)
def remove_role(self, uid, role, project_id=None):
"""Remove role for user (or user and project)"""
role_dn = self.__role_to_dn(role, project_id)
return self.__remove_from_group(uid, role_dn)
def delete_user(self, uid):
"""Delete a user"""
if not self.__user_exists(uid):
raise exception.NotFound("User %s doesn't exist" % uid)
self.__delete_key_pairs(uid)
self.__remove_from_all(uid)
self.conn.delete_s('uid=%s,%s' % (uid,
FLAGS.ldap_user_subtree))
def delete_key_pair(self, uid, key_name):
"""Delete a key pair"""
if not self.__key_pair_exists(uid, key_name):
raise exception.NotFound("Key Pair %s doesn't exist for user %s" %
(key_name, uid))
self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
FLAGS.ldap_user_subtree))
def delete_project(self, name):
"""Delete a project"""
project_dn = 'cn=%s,%s' % (name, FLAGS.ldap_project_subtree)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
def __user_exists(self, name):
"""Check if user exists"""
return self.get_user(name) != None
def __key_pair_exists(self, uid, key_name):
"""Check if key pair exists"""
return self.get_key_pair(uid, key_name) != None
def __project_exists(self, name):
"""Check if project exists"""
return self.get_project(name) != None
def __find_object(self, dn, query=None):
"""Find an object by dn and query"""
objects = self.__find_objects(dn, query)
if len(objects) == 0:
return None
return objects[0]
def __find_dns(self, dn, query=None):
"""Find dns by query"""
try:
res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the DNs
return [dn for dn, attributes in res]
def __find_objects(self, dn, query=None):
"""Find objects by query"""
try:
res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the attributes
return [attributes for dn, attributes in res]
def __find_role_dns(self, tree):
"""Find dns of role objects in given tree"""
return self.__find_dns(tree,
'(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
def __find_group_dns_with_member(self, tree, uid):
"""Find dns of group objects in a given tree that contain member"""
dns = self.__find_dns(tree,
'(&(objectclass=groupOfNames)(member=%s))' %
self.__uid_to_dn(uid))
return dns
def __group_exists(self, dn):
"""Check if group exists"""
return self.__find_object(dn, '(objectclass=groupOfNames)') != None
def __delete_key_pairs(self, uid):
"""Delete all key pairs for user"""
keys = self.get_key_pairs(uid)
if keys != None:
for key in keys:
self.delete_key_pair(uid, key['name'])
def __role_to_dn(self, role, project_id=None):
"""Convert role to corresponding dn"""
if project_id == None:
return FLAGS.__getitem__("ldap_%s" % role).value
else:
return 'cn=%s,cn=%s,%s' % (role,
project_id,
FLAGS.ldap_project_subtree)
def __create_group(self, group_dn, name, uid,
description, member_uids=None):
"""Create a group"""
if self.__group_exists(group_dn):
raise exception.Duplicate("Group can't be created because "
"group %s already exists" % name)
members = []
if member_uids != None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.NotFound("Group can't be created "
"because user %s doesn't exist" % member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
members.append(dn)
attr = [
('objectclass', ['groupOfNames']),
('cn', [name]),
('description', [description]),
('member', members)
]
self.conn.add_s(group_dn, attr)
def __is_in_group(self, uid, group_dn):
"""Check if user is in group"""
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be searched in group "
"becuase the user doesn't exist" % (uid,))
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
'(member=%s)' % self.__uid_to_dn(uid))
return res != None
def __add_to_group(self, uid, group_dn):
"""Add user to group"""
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be added to the group "
"becuase the user doesn't exist" % (uid,))
if not self.__group_exists(group_dn):
raise exception.NotFound("The group at dn %s doesn't exist" %
(group_dn,))
if self.__is_in_group(uid, group_dn):
raise exception.Duplicate("User %s is already a member of "
"the group %s" % (uid, group_dn))
attr = [
(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))
]
self.conn.modify_s(group_dn, attr)
def __remove_from_group(self, uid, group_dn):
"""Remove user from group"""
if not self.__group_exists(group_dn):
raise exception.NotFound("The group at dn %s doesn't exist" %
(group_dn,))
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be removed from the "
"group because the user doesn't exist" % (uid,))
if not self.__is_in_group(uid, group_dn):
raise exception.NotFound("User %s is not a member of the group" %
(uid,))
self.__safe_remove_from_group(uid, group_dn)
def __safe_remove_from_group(self, uid, group_dn):
"""Remove user from group, deleting group if user is last member"""
# FIXME(vish): what if deleted user is a project manager?
attr = [(self.ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))]
try:
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
logging.debug("Attempted to remove the last member of a group. "
"Deleting the group at %s instead." % group_dn )
self.__delete_group(group_dn)
def __remove_from_all(self, uid):
"""Remove user from all roles and projects"""
if not self.__user_exists(uid):
raise exception.NotFound("User %s can't be removed from all "
"because the user doesn't exist" % (uid,))
dn = self.__uid_to_dn(uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
self.__safe_remove_from_group(uid, role_dn)
project_dns = self.__find_group_dns_with_member(
FLAGS.ldap_project_subtree, uid)
for project_dn in project_dns:
self.__safe_remove_from_group(uid, project_dn)
def __delete_group(self, group_dn):
"""Delete Group"""
if not self.__group_exists(group_dn):
raise exception.NotFound("Group at dn %s doesn't exist" % group_dn)
self.conn.delete_s(group_dn)
def __delete_roles(self, project_dn):
"""Delete all roles for project"""
for role_dn in self.__find_role_dns(project_dn):
self.__delete_group(role_dn)
def __to_user(self, attr):
"""Convert ldap attributes to User object"""
if attr == None:
return None
return {
'id': attr['uid'][0],
'name': attr['cn'][0],
'access': attr['accessKey'][0],
'secret': attr['secretKey'][0],
'admin': (attr['isAdmin'][0] == 'TRUE')
}
def __to_key_pair(self, owner, attr):
"""Convert ldap attributes to KeyPair object"""
if attr == None:
return None
return {
'id': attr['cn'][0],
'name': attr['cn'][0],
'owner_id': owner,
'public_key': attr['sshPublicKey'][0],
'fingerprint': attr['keyFingerprint'][0],
}
def __to_project(self, attr):
"""Convert ldap attributes to Project object"""
if attr == None:
return None
member_dns = attr.get('member', [])
return {
'id': attr['cn'][0],
'name': attr['cn'][0],
'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]),
'description': attr.get('description', [None])[0],
'member_ids': [self.__dn_to_uid(x) for x in member_dns]
}
def __dn_to_uid(self, dn):
"""Convert user dn to uid"""
return dn.split(',')[0].split('=')[1]
def __uid_to_dn(self, dn):
"""Convert uid to dn"""
return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree)
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
def __init__(self):
__import__('nova.auth.fakeldap')
self.ldap = sys.modules['nova.auth.fakeldap']

807
nova/auth/manager.py Normal file
View File

@@ -0,0 +1,807 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Nova authentication management
"""
import logging
import os
import shutil
import string
import sys
import tempfile
import uuid
import zipfile
from nova import crypto
from nova import datastore
from nova import exception
from nova import flags
from nova import objectstore # for flags
from nova import utils
from nova.auth import ldapdriver # for flags
from nova.auth import signer
FLAGS = flags.FLAGS
# NOTE(vish): a user with one of these roles will be a superuser and
# have access to all api commands
flags.DEFINE_list('superuser_roles', ['cloudadmin'],
'Roles that ignore rbac checking completely')
# NOTE(vish): a user with one of these roles will have it for every
# project, even if he or she is not a member of the project
flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
'Roles that apply to all projects')
flags.DEFINE_bool('use_vpn', True, 'Support per-project vpns')
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
flags.DEFINE_string('vpn_client_template',
utils.abspath('cloudpipe/client.ovpn.template'),
'Template for creating users vpn file')
flags.DEFINE_string('credential_key_file', 'pk.pem',
'Filename of private key in credentials zip')
flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
flags.DEFINE_integer('vpn_start_port', 1000,
'Start port for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_end_port', 2000,
'End port for the cloudpipe VPN servers')
flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
flags.DEFINE_string('vpn_ip', '127.0.0.1',
'Public IP for the cloudpipe VPN servers')
flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
'Driver that auth manager uses')
class AuthBase(object):
"""Base class for objects relating to auth
Objects derived from this class should be stupid data objects with
an id member. They may optionally contain methods that delegate to
AuthManager, but should not implement logic themselves.
"""
@classmethod
def safe_id(cls, obj):
"""Safe get object id
This method will return the id of the object if the object
is of this class, otherwise it will return the original object.
This allows methods to accept objects or ids as paramaters.
"""
if isinstance(obj, cls):
return obj.id
else:
return obj
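# Example of safe_id in use (values are illustrative): both calls yield the
# same id, which is what lets manager methods accept either objects or ids.
#   User.safe_id(User('u-1', 'u-1', 'access', 'secret', False)) -> 'u-1'
#   User.safe_id('u-1')                                         -> 'u-1'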
class User(AuthBase):
"""Object representing a user"""
def __init__(self, id, name, access, secret, admin):
self.id = id
self.name = name
self.access = access
self.secret = secret
self.admin = admin
def is_superuser(self):
return AuthManager().is_superuser(self)
def is_admin(self):
return AuthManager().is_admin(self)
def has_role(self, role):
return AuthManager().has_role(self, role)
def add_role(self, role):
return AuthManager().add_role(self, role)
def remove_role(self, role):
return AuthManager().remove_role(self, role)
def is_project_member(self, project):
return AuthManager().is_project_member(self, project)
def is_project_manager(self, project):
return AuthManager().is_project_manager(self, project)
def generate_key_pair(self, name):
return AuthManager().generate_key_pair(self.id, name)
def create_key_pair(self, name, public_key, fingerprint):
return AuthManager().create_key_pair(self.id,
name,
public_key,
fingerprint)
def get_key_pair(self, name):
return AuthManager().get_key_pair(self.id, name)
def delete_key_pair(self, name):
return AuthManager().delete_key_pair(self.id, name)
def get_key_pairs(self):
return AuthManager().get_key_pairs(self.id)
def __repr__(self):
return "User('%s', '%s', '%s', '%s', %s)" % (self.id,
self.name,
self.access,
self.secret,
self.admin)
class KeyPair(AuthBase):
"""Represents an ssh key returned from the datastore
Even though this object is named KeyPair, only the public key and
fingerprint are stored. The user's private key is not saved.
"""
def __init__(self, id, name, owner_id, public_key, fingerprint):
self.id = id
self.name = name
self.owner_id = owner_id
self.public_key = public_key
self.fingerprint = fingerprint
def __repr__(self):
return "KeyPair('%s', '%s', '%s', '%s', '%s')" % (self.id,
self.name,
self.owner_id,
self.public_key,
self.fingerprint)
class Project(AuthBase):
"""Represents a Project returned from the datastore"""
def __init__(self, id, name, project_manager_id, description, member_ids):
self.id = id
self.name = name
self.project_manager_id = project_manager_id
self.description = description
self.member_ids = member_ids
@property
def project_manager(self):
return AuthManager().get_user(self.project_manager_id)
@property
def vpn_ip(self):
ip, port = AuthManager().get_project_vpn_data(self)
return ip
@property
def vpn_port(self):
ip, port = AuthManager().get_project_vpn_data(self)
return port
def has_manager(self, user):
return AuthManager().is_project_manager(user, self)
def has_member(self, user):
return AuthManager().is_project_member(user, self)
def add_role(self, user, role):
return AuthManager().add_role(user, role, self)
def remove_role(self, user, role):
return AuthManager().remove_role(user, role, self)
def has_role(self, user, role):
return AuthManager().has_role(user, role, self)
def get_credentials(self, user):
return AuthManager().get_credentials(user, self)
def __repr__(self):
return "Project('%s', '%s', '%s', '%s', %s)" % (self.id,
self.name,
self.project_manager_id,
self.description,
self.member_ids)
class NoMorePorts(exception.Error):
pass
class Vpn(datastore.BasicModel):
"""Manages vpn ips and ports for projects"""
def __init__(self, project_id):
self.project_id = project_id
super(Vpn, self).__init__()
@property
def identifier(self):
"""Identifier used for key in redis"""
return self.project_id
@classmethod
def create(cls, project_id):
"""Creates a vpn for project
This method finds a free ip and port and stores the associated
values in the datastore.
"""
# TODO(vish): get list of vpn ips from redis
port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
vpn = cls(project_id)
# save ip for project
vpn['project'] = project_id
vpn['ip'] = FLAGS.vpn_ip
vpn['port'] = port
vpn.save()
return vpn
@classmethod
def find_free_port_for_ip(cls, ip):
"""Finds a free port for a given ip from the redis set"""
# TODO(vish): these redis commands should be generalized and
# placed into a base class. Conceptually, it is
# similar to an association, but we are just
# storing a set of values instead of keys that
# should be turned into objects.
redis = datastore.Redis.instance()
key = 'ip:%s:ports' % ip
# TODO(vish): these ports should be allocated through an admin
# command instead of a flag
if (not redis.exists(key) and
not redis.exists(cls._redis_association_name('ip', ip))):
for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
redis.sadd(key, i)
port = redis.spop(key)
if not port:
raise NoMorePorts()
return port
@classmethod
def num_ports_for_ip(cls, ip):
"""Calculates the number of free ports for a given ip"""
return datastore.Redis.instance().scard('ip:%s:ports' % ip)
@property
def ip(self):
"""The ip assigned to the project"""
return self['ip']
@property
def port(self):
"""The port assigned to the project"""
return int(self['port'])
def save(self):
"""Saves the association to the given ip"""
self.associate_with('ip', self.ip)
super(Vpn, self).save()
def destroy(self):
"""Cleans up datastore and adds port back to pool"""
self.unassociate_with('ip', self.ip)
datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
super(Vpn, self).destroy()
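# Illustrative sketch of the port-pool pattern used by Vpn above, with an
# in-memory set standing in for the redis set 'ip:<vpn_ip>:ports' (the real
# code seeds the set once per ip, SPOPs a port to allocate, and SADDs it
# back in destroy()). The port range mirrors the default flags.
port_pool = set(range(1000, 2000 + 1))

def allocate_port(pool):
    if not pool:
        raise NoMorePorts()
    return pool.pop()          # analogous to redis SPOP

def release_port(pool, port):
    pool.add(port)             # analogous to redis SADD

allocated = allocate_port(port_pool)
release_port(port_pool, allocated)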
class AuthManager(object):
"""Manager Singleton for dealing with Users, Projects, and Keypairs
Methods accept objects or ids.
AuthManager uses a driver object to make requests to the data backend.
See ldapdriver for reference.
AuthManager also manages associated data related to Auth objects that
need to be more accessible, such as vpn ips and ports.
"""
_instance=None
def __new__(cls, *args, **kwargs):
"""Returns the AuthManager singleton"""
if not cls._instance:
cls._instance = super(AuthManager, cls).__new__(
cls, *args, **kwargs)
return cls._instance
def __init__(self, driver=None, *args, **kwargs):
"""Inits the driver from parameter or flag
__init__ is run every time AuthManager() is called, so we only
reset the driver if it is not set or a new driver is specified.
"""
if driver or not getattr(self, 'driver', None):
self.driver = utils.import_class(driver or FLAGS.auth_driver)
def authenticate(self, access, signature, params, verb='GET',
server_string='127.0.0.1:8773', path='/',
check_type='ec2', headers=None):
"""Authenticates AWS request using access key and signature
If the project is not specified, attempts to authenticate to
a project with the same name as the user. This way, older tools
that have no project knowledge will still work.
@type access: str
@param access: Access key for user in the form "access:project".
@type signature: str
@param signature: Signature of the request.
@type params: list of str
@param params: Web parameters used for the signature.
@type verb: str
@param verb: Web request verb ('GET' or 'POST').
@type server_string: str
@param server_string: Web request server string.
@type path: str
@param path: Web request path.
@type check_type: str
@param check_type: Type of signature to check. 'ec2' for EC2, 's3' for
S3. Any other value will cause signature not to be
checked.
@type headers: list
@param headers: HTTP headers passed with the request (only needed for
s3 signature checks)
@rtype: tuple (User, Project)
@return: User and project that the request represents.
"""
# TODO(vish): check for valid timestamp
(access_key, sep, project_id) = access.partition(':')
logging.info('Looking up user: %r', access_key)
user = self.get_user_from_access_key(access_key)
logging.info('user: %r', user)
if user == None:
raise exception.NotFound('No user found for access key %s' %
access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
if project_id is '':
project_id = user.name
project = self.get_project(project_id)
if project == None:
raise exception.NotFound('No project called %s could be found' %
project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
raise exception.NotFound('User %s is not a member of project %s' %
(user.id, project.id))
if check_type == 's3':
expected_signature = signer.Signer(user.secret.encode()).s3_authorization(headers, verb, path)
logging.debug('user.secret: %s', user.secret)
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
if signature != expected_signature:
raise exception.NotAuthorized('Signature does not match')
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(
params, verb, server_string, path)
logging.debug('user.secret: %s', user.secret)
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
if signature != expected_signature:
raise exception.NotAuthorized('Signature does not match')
return (user, project)
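# Example of the access-key parsing above (key and project are invented):
#   'access-key:myproject'.partition(':') -> ('access-key', ':', 'myproject')
#   'access-key'.partition(':')           -> ('access-key', '', '')
# An empty project id falls back to the user's name, so older tools that
# know nothing about projects keep working.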
def is_superuser(self, user):
"""Checks for superuser status, allowing user to bypass rbac
@type user: User or uid
@param user: User to check.
@rtype: bool
@return: True for superuser.
"""
if not isinstance(user, User):
user = self.get_user(user)
# NOTE(vish): admin flag on user represents superuser
if user.admin:
return True
for role in FLAGS.superuser_roles:
if self.has_role(user, role):
return True
def is_admin(self, user):
"""Checks for admin status, allowing user to access all projects
@type user: User or uid
@param user: User to check.
@rtype: bool
@return: True for admin.
"""
if not isinstance(user, User):
user = self.get_user(user)
if self.is_superuser(user):
return True
for role in FLAGS.global_roles:
if self.has_role(user, role):
return True
def has_role(self, user, role, project=None):
"""Checks existence of role for user
If project is not specified, checks for a global role. If project
is specified, the user must hold the role globally as well as within
the project (unless the role is one of the global roles).
Role 'projectmanager' only works for projects and simply checks to
see if the user is the project_manager of the specified project. It
is the same as calling is_project_manager(user, project).
@type user: User or uid
@param user: User to check.
@type role: str
@param role: Role to check.
@type project: Project or project_id
@param project: Project in which to look for local role.
@rtype: bool
@return: True if the user has the role.
"""
with self.driver() as drv:
if role == 'projectmanager':
if not project:
raise exception.Error("Must specify project")
return self.is_project_manager(user, project)
global_role = drv.has_role(User.safe_id(user),
role,
None)
if not global_role:
return global_role
if not project or role in FLAGS.global_roles:
return global_role
return drv.has_role(User.safe_id(user),
role,
Project.safe_id(project))
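# Worked example of the checks above (roles and projects invented): a user
# granted 'sysadmin' globally and on project 'p1' passes
# has_role(user, 'sysadmin', 'p1'); the same user without the global grant
# fails even if the project grant exists; and a global 'cloudadmin' grant
# passes for any project because it is listed in FLAGS.global_roles.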
def add_role(self, user, role, project=None):
"""Adds role for user
If project is not specified, adds a global role. If project
is specified, adds a local role.
The 'projectmanager' role is special and can't be added or removed.
@type user: User or uid
@param user: User to which to add role.
@type role: str
@param role: Role to add.
@type project: Project or project_id
@param project: Project in which to add local role.
"""
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
def remove_role(self, user, role, project=None):
"""Removes role for user
If project is not specified, removes a global role. If project
is specified, removes a local role.
The 'projectmanager' role is special and can't be added or removed.
@type user: User or uid
@param user: User from which to remove role.
@type role: str
@param role: Role to remove.
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
with self.driver() as drv:
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
def get_project(self, pid):
"""Get project object by id"""
with self.driver() as drv:
project_dict = drv.get_project(pid)
if project_dict:
return Project(**project_dict)
def get_projects(self, user=None):
"""Retrieves list of projects, optionally filtered by user"""
with self.driver() as drv:
project_list = drv.get_projects(User.safe_id(user))
if not project_list:
return []
return [Project(**project_dict) for project_dict in project_list]
def create_project(self, name, manager_user,
description=None, member_users=None):
"""Create a project
@type name: str
@param name: Name of the project to create. The name will also be
used as the project id.
@type manager_user: User or uid
@param manager_user: This user will be the project manager.
@type description: str
@param description: Description of the project. If no description is
specified, the name of the project will be used.
@type member_users: list of User or uid
@param member_users: Initial project members. The project manager will always be
added as a member, even if he isn't specified in this list.
@rtype: Project
@return: The new project.
"""
if member_users:
member_users = [User.safe_id(u) for u in member_users]
with self.driver() as drv:
project_dict = drv.create_project(name,
User.safe_id(manager_user),
description,
member_users)
if project_dict:
if FLAGS.use_vpn:
Vpn.create(project_dict['id'])
return Project(**project_dict)
def add_to_project(self, user, project):
"""Add user to project"""
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
Project.safe_id(project))
def is_project_manager(self, user, project):
"""Checks if user is project manager"""
if not isinstance(project, Project):
project = self.get_project(project)
return User.safe_id(user) == project.project_manager_id
def is_project_member(self, user, project):
"""Checks to see if user is a member of project"""
if not isinstance(project, Project):
project = self.get_project(project)
return User.safe_id(user) in project.member_ids
def remove_from_project(self, user, project):
"""Removes a user from a project"""
with self.driver() as drv:
return drv.remove_from_project(User.safe_id(user),
Project.safe_id(project))
def get_project_vpn_data(self, project):
"""Gets vpn ip and port for project
@type project: Project or project_id
@param project: Project from which to get associated vpn data
@rtype: tuple (str, str)
@return: A tuple containing (ip, port) or (None, None) if a vpn has
not been allocated for the project.
"""
vpn = Vpn.lookup(Project.safe_id(project))
if not vpn:
return None, None
return (vpn.ip, vpn.port)
def delete_project(self, project):
"""Deletes a project"""
with self.driver() as drv:
return drv.delete_project(Project.safe_id(project))
def get_user(self, uid):
"""Retrieves a user by id"""
with self.driver() as drv:
user_dict = drv.get_user(uid)
if user_dict:
return User(**user_dict)
def get_user_from_access_key(self, access_key):
"""Retrieves a user by access key"""
with self.driver() as drv:
user_dict = drv.get_user_from_access_key(access_key)
if user_dict:
return User(**user_dict)
def get_users(self):
"""Retrieves a list of all users"""
with self.driver() as drv:
user_list = drv.get_users()
if not user_list:
return []
return [User(**user_dict) for user_dict in user_list]
def create_user(self, name, access=None, secret=None, admin=False):
"""Creates a user
@type name: str
@param name: Name of the user to create.
@type access: str
@param access: Access Key (defaults to a random uuid)
@type secret: str
@param secret: Secret Key (defaults to a random uuid)
@type admin: bool
@param admin: Whether to set the admin flag. The admin flag gives
superuser status regardless of roles specified for the user.
@rtype: User
@return: The new user.
"""
if access == None: access = str(uuid.uuid4())
if secret == None: secret = str(uuid.uuid4())
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
return User(**user_dict)
def delete_user(self, user):
"""Deletes a user"""
with self.driver() as drv:
drv.delete_user(User.safe_id(user))
def generate_key_pair(self, user, key_name):
"""Generates a key pair for a user
Generates a public and private key, stores the public key using the
key_name, and returns the private key and fingerprint.
@type user: User or uid
@param user: User for which to create key pair.
@type key_name: str
@param key_name: Name to use for the generated KeyPair.
@rtype: tuple (private_key, fingerprint)
@return: A tuple containing the private_key and fingerprint.
"""
# NOTE(vish): generating key pair is slow so check for legal
# creation before creating keypair
uid = User.safe_id(user)
with self.driver() as drv:
if not drv.get_user(uid):
raise exception.NotFound("User %s doesn't exist" % user)
if drv.get_key_pair(uid, key_name):
raise exception.Duplicate("The keypair %s already exists"
% key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
self.create_key_pair(uid, key_name, public_key, fingerprint)
return private_key, fingerprint
def create_key_pair(self, user, key_name, public_key, fingerprint):
"""Creates a key pair for user"""
with self.driver() as drv:
kp_dict = drv.create_key_pair(User.safe_id(user),
key_name,
public_key,
fingerprint)
if kp_dict:
return KeyPair(**kp_dict)
def get_key_pair(self, user, key_name):
"""Retrieves a key pair for user"""
with self.driver() as drv:
kp_dict = drv.get_key_pair(User.safe_id(user), key_name)
if kp_dict:
return KeyPair(**kp_dict)
def get_key_pairs(self, user):
"""Retrieves all key pairs for user"""
with self.driver() as drv:
kp_list = drv.get_key_pairs(User.safe_id(user))
if not kp_list:
return []
return [KeyPair(**kp_dict) for kp_dict in kp_list]
def delete_key_pair(self, user, key_name):
"""Deletes a key pair for user"""
with self.driver() as drv:
drv.delete_key_pair(User.safe_id(user), key_name)
def get_credentials(self, user, project=None):
"""Get credential zip for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:
project = user.id
pid = Project.safe_id(project)
rc = self.__generate_rc(user.access, user.secret, pid)
private_key, signed_cert = self._generate_x509_cert(user.id, pid)
vpn = Vpn.lookup(pid)
if not vpn:
raise exception.Error("No vpn data allocated for project %s" %
project.name)
configfile = open(FLAGS.vpn_client_template,"r")
s = string.Template(configfile.read())
configfile.close()
config = s.substitute(keyfile=FLAGS.credential_key_file,
certfile=FLAGS.credential_cert_file,
ip=vpn.ip,
port=vpn.port)
tmpdir = tempfile.mkdtemp()
zf = os.path.join(tmpdir, "temp.zip")
zippy = zipfile.ZipFile(zf, 'w')
zippy.writestr(FLAGS.credential_rc_file, rc)
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
zippy.writestr("nebula-client.conf", config)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
zippy.close()
with open(zf, 'rb') as f:
buffer = f.read()
shutil.rmtree(tmpdir)
return buffer
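# The zip above could equally be assembled in memory (sketch only, using the
# same writestr calls but an io.BytesIO buffer in place of the temp directory):
#     buf = io.BytesIO()
#     with zipfile.ZipFile(buf, 'w') as zippy:
#         zippy.writestr(FLAGS.credential_rc_file, rc)
#         ...
#     return buf.getvalue()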
def __generate_rc(self, access, secret, pid):
"""Generate rc file for user"""
rc = open(FLAGS.credentials_template).read()
rc = rc % { 'access': access,
'project': pid,
'secret': secret,
'ec2': FLAGS.ec2_url,
's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file,
}
return rc
def _generate_x509_cert(self, uid, pid):
"""Generate x509 cert for user"""
(private_key, csr) = crypto.generate_x509_cert(
self.__cert_subject(uid))
# TODO(joshua): This should be async call back to the cloud controller
signed_cert = crypto.sign_csr(csr, pid)
return (private_key, signed_cert)
def __cert_subject(self, uid):
"""Helper to generate cert subject"""
return FLAGS.credential_cert_subject % (uid, utils.isotime())
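# Self-contained illustration of the singleton behaviour of AuthManager above
# (the class below is a stand-in, not Nova code): __new__ hands back the one
# cached instance, and __init__ only swaps the driver when a new one is
# explicitly passed.
class _SingletonExample(object):
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(_SingletonExample, cls).__new__(cls)
        return cls._instance

    def __init__(self, driver=None):
        if driver or not getattr(self, 'driver', None):
            self.driver = driver or 'default'

first = _SingletonExample()
second = _SingletonExample(driver='ldap')
assert first is second         # one shared instance
assert first.driver == 'ldap'  # the later call replaced the driver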

View File

@@ -17,7 +17,7 @@
# under the License.
from nova import exception
from nova.auth import users
from nova.auth import manager
def allow(*roles):

View File

@@ -48,6 +48,7 @@ import hashlib
import hmac
import logging
import urllib
import boto.utils
from nova.exception import Error
@@ -59,6 +60,13 @@ class Signer(object):
if hashlib.sha256:
self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256)
def s3_authorization(self, headers, verb, path):
c_string = boto.utils.canonical_string(verb, path, headers)
hmac = self.hmac.copy()
hmac.update(c_string)
b64_hmac = base64.encodestring(hmac.digest()).strip()
return b64_hmac
def generate(self, params, verb, server_string, path):
if params['SignatureVersion'] == '0':
return self._calc_signature_0(params)
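# Minimal sketch of the flow in s3_authorization above, with a hand-written
# canonical string standing in for boto.utils.canonical_string and SHA-1
# assumed as the digest (secret and request values are invented).
import base64
import hashlib
import hmac

secret_key = 'secret-key'
c_string = 'GET\n\n\nThu, 29 Jul 2010 09:04:54 GMT\n/bucket/object'
mac = hmac.new(secret_key.encode(), c_string.encode(), hashlib.sha1)
b64_hmac = base64.b64encode(mac.digest()).strip()
print(b64_hmac)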

View File

@@ -1,974 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Nova users and user management, including RBAC hooks.
"""
import datetime
import logging
import os
import shutil
import signer
import string
import tempfile
import uuid
import zipfile
try:
import ldap
except Exception, e:
import fakeldap as ldap
import fakeldap
# TODO(termie): clean up these imports
from nova import datastore
from nova import exception
from nova import flags
from nova import crypto
from nova import utils
from nova import objectstore # for flags
FLAGS = flags.FLAGS
flags.DEFINE_string('ldap_url', 'ldap://localhost',
'Point this at your ldap server')
flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
flags.DEFINE_string('user_dn', 'cn=Manager,dc=example,dc=com',
'DN of admin user')
flags.DEFINE_string('user_unit', 'Users', 'OID for Users')
flags.DEFINE_string('user_ldap_subtree', 'ou=Users,dc=example,dc=com',
'OU for Users')
flags.DEFINE_string('project_ldap_subtree', 'ou=Groups,dc=example,dc=com',
'OU for Projects')
flags.DEFINE_string('role_ldap_subtree', 'ou=Groups,dc=example,dc=com',
'OU for Roles')
# NOTE(vish): mapping with these flags is necessary because we're going
# to tie in to an existing ldap schema
flags.DEFINE_string('ldap_cloudadmin',
'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'cn for Cloud Admins')
flags.DEFINE_string('ldap_itsec',
'cn=itsec,ou=Groups,dc=example,dc=com', 'cn for ItSec')
flags.DEFINE_string('ldap_sysadmin',
'cn=sysadmins,ou=Groups,dc=example,dc=com', 'cn for Sysadmins')
flags.DEFINE_string('ldap_netadmin',
'cn=netadmins,ou=Groups,dc=example,dc=com', 'cn for NetAdmins')
flags.DEFINE_string('ldap_developer',
'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
# NOTE(vish): a user with one of these roles will be a superuser and
# have access to all api commands
flags.DEFINE_list('superuser_roles', ['cloudadmin'],
'roles that ignore rbac checking completely')
# NOTE(vish): a user with one of these roles will have it for every
# project, even if he or she is not a member of the project
flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
'roles that apply to all projects')
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
flags.DEFINE_string('vpn_client_template',
utils.abspath('cloudpipe/client.ovpn.template'),
'Template for creating users vpn file')
flags.DEFINE_string('credential_key_file', 'pk.pem',
'Filename of private key in credentials zip')
flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
flags.DEFINE_integer('vpn_start_port', 1000,
'Start port for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_end_port', 2000,
'End port for the cloudpipe VPN servers')
flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
flags.DEFINE_string('vpn_ip', '127.0.0.1',
'Public IP for the cloudpipe VPN servers')
class AuthBase(object):
@classmethod
def safe_id(cls, obj):
"""Safe get object id.
This method will return the id of the object if the object
is of this class, otherwise it will return the original object.
This allows methods to accept objects or ids as parameters.
"""
if isinstance(obj, cls):
return obj.id
else:
return obj
class User(AuthBase):
"""id and name are currently the same"""
def __init__(self, id, name, access, secret, admin):
self.id = id
self.name = name
self.access = access
self.secret = secret
self.admin = admin
def is_superuser(self):
"""allows user to bypass rbac completely"""
if self.admin:
return True
for role in FLAGS.superuser_roles:
if self.has_role(role):
return True
def is_admin(self):
"""allows user to see objects from all projects"""
if self.is_superuser():
return True
for role in FLAGS.global_roles:
if self.has_role(role):
return True
def has_role(self, role):
return UserManager.instance().has_role(self, role)
def add_role(self, role):
return UserManager.instance().add_role(self, role)
def remove_role(self, role):
return UserManager.instance().remove_role(self, role)
def is_project_member(self, project):
return UserManager.instance().is_project_member(self, project)
def is_project_manager(self, project):
return UserManager.instance().is_project_manager(self, project)
def generate_rc(self, project=None):
if project is None:
project = self.id
rc = open(FLAGS.credentials_template).read()
rc = rc % { 'access': self.access,
'project': project,
'secret': self.secret,
'ec2': FLAGS.ec2_url,
's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file,
}
return rc
def generate_key_pair(self, name):
return UserManager.instance().generate_key_pair(self.id, name)
def create_key_pair(self, name, public_key, fingerprint):
return UserManager.instance().create_key_pair(self.id,
name,
public_key,
fingerprint)
def get_key_pair(self, name):
return UserManager.instance().get_key_pair(self.id, name)
def delete_key_pair(self, name):
return UserManager.instance().delete_key_pair(self.id, name)
def get_key_pairs(self):
return UserManager.instance().get_key_pairs(self.id)
def __repr__(self):
return "User('%s', '%s', '%s', '%s', %s)" % (
self.id, self.name, self.access, self.secret, self.admin)
class KeyPair(AuthBase):
def __init__(self, id, owner_id, public_key, fingerprint):
self.id = id
self.name = id
self.owner_id = owner_id
self.public_key = public_key
self.fingerprint = fingerprint
def delete(self):
return UserManager.instance().delete_key_pair(self.owner, self.name)
def __repr__(self):
return "KeyPair('%s', '%s', '%s', '%s')" % (
self.id, self.owner_id, self.public_key, self.fingerprint)
class Group(AuthBase):
"""id and name are currently the same"""
def __init__(self, id, description = None, member_ids = None):
self.id = id
self.name = id
self.description = description
self.member_ids = member_ids
def has_member(self, user):
return User.safe_id(user) in self.member_ids
def __repr__(self):
return "Group('%s', '%s', %s)" % (
self.id, self.description, self.member_ids)
class Project(Group):
def __init__(self, id, project_manager_id, description, member_ids):
self.project_manager_id = project_manager_id
super(Project, self).__init__(id, description, member_ids)
@property
def project_manager(self):
return UserManager.instance().get_user(self.project_manager_id)
def has_manager(self, user):
return User.safe_id(user) == self.project_manager_id
def add_role(self, user, role):
return UserManager.instance().add_role(user, role, self)
def remove_role(self, user, role):
return UserManager.instance().remove_role(user, role, self)
def has_role(self, user, role):
return UserManager.instance().has_role(user, role, self)
@property
def vpn_ip(self):
return Vpn(self.id).ip
@property
def vpn_port(self):
return Vpn(self.id).port
def get_credentials(self, user):
if not isinstance(user, User):
user = UserManager.instance().get_user(user)
rc = user.generate_rc(self.id)
private_key, signed_cert = self.generate_x509_cert(user)
configfile = open(FLAGS.vpn_client_template,"r")
s = string.Template(configfile.read())
configfile.close()
config = s.substitute(keyfile=FLAGS.credential_key_file,
certfile=FLAGS.credential_cert_file,
ip=self.vpn_ip,
port=self.vpn_port)
tmpdir = tempfile.mkdtemp()
zf = os.path.join(tmpdir, "temp.zip")
zippy = zipfile.ZipFile(zf, 'w')
zippy.writestr(FLAGS.credential_rc_file, rc)
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
zippy.writestr("nebula-client.conf", config)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(self.id))
zippy.close()
with open(zf, 'rb') as f:
buffer = f.read()
shutil.rmtree(tmpdir)
return buffer
def generate_x509_cert(self, user):
return UserManager.instance().generate_x509_cert(user, self)
def __repr__(self):
return "Project('%s', '%s', '%s', %s)" % (
self.id, self.project_manager_id,
self.description, self.member_ids)
class NoMorePorts(exception.Error):
pass
class Vpn(datastore.BasicModel):
def __init__(self, project_id):
self.project_id = project_id
super(Vpn, self).__init__()
@property
def identifier(self):
return self.project_id
@classmethod
def create(cls, project_id):
# TODO(vish): get list of vpn ips from redis
port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
vpn = cls(project_id)
# save ip for project
vpn['project'] = project_id
vpn['ip'] = FLAGS.vpn_ip
vpn['port'] = port
vpn.save()
return vpn
@classmethod
def find_free_port_for_ip(cls, ip):
# TODO(vish): these redis commands should be generalized and
# placed into a base class. Conceptually, it is
# similar to an association, but we are just
# storing a set of values instead of keys that
# should be turned into objects.
redis = datastore.Redis.instance()
key = 'ip:%s:ports' % ip
# TODO(vish): these ports should be allocated through an admin
# command instead of a flag
if (not redis.exists(key) and
not redis.exists(cls._redis_association_name('ip', ip))):
for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
redis.sadd(key, i)
port = redis.spop(key)
if not port:
raise NoMorePorts()
return port
@classmethod
def num_ports_for_ip(cls, ip):
return datastore.Redis.instance().scard('ip:%s:ports' % ip)
@property
def ip(self):
return self['ip']
@property
def port(self):
return int(self['port'])
def save(self):
self.associate_with('ip', self.ip)
super(Vpn, self).save()
def destroy(self):
self.unassociate_with('ip', self.ip)
datastore.Redis.instance().sadd('ip:%s:ports' % self.ip, self.port)
super(Vpn, self).destroy()
class UserManager(object):
def __init__(self):
if hasattr(self.__class__, '_instance'):
raise Exception('Attempted to instantiate singleton')
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
inst = UserManager()
cls._instance = inst
if FLAGS.fake_users:
try:
inst.create_user('fake', 'fake', 'fake')
except: pass
try:
inst.create_user('user', 'user', 'user')
except: pass
try:
inst.create_user('admin', 'admin', 'admin', True)
except: pass
return cls._instance
def authenticate(self, access, signature, params, verb='GET',
server_string='127.0.0.1:8773', path='/',
verify_signature=True):
# TODO: Check for valid timestamp
(access_key, sep, project_name) = access.partition(':')
user = self.get_user_from_access_key(access_key)
if user == None:
raise exception.NotFound('No user found for access key %s' %
access_key)
if project_name is '':
project_name = user.name
project = self.get_project(project_name)
if project == None:
raise exception.NotFound('No project called %s could be found' %
project_name)
if not user.is_admin() and not project.has_member(user):
raise exception.NotFound('User %s is not a member of project %s' %
(user.id, project.id))
if verify_signature:
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(
params, verb, server_string, path)
logging.debug('user.secret: %s', user.secret)
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
if signature != expected_signature:
raise exception.NotAuthorized('Signature does not match')
return (user, project)
def has_role(self, user, role, project=None):
with LDAPWrapper() as conn:
if role == 'projectmanager':
if not project:
raise exception.Error("Must specify project")
return self.is_project_manager(user, project)
global_role = conn.has_role(User.safe_id(user),
role,
None)
if not global_role:
return global_role
if not project or role in FLAGS.global_roles:
return global_role
return conn.has_role(User.safe_id(user),
role,
Project.safe_id(project))
def add_role(self, user, role, project=None):
with LDAPWrapper() as conn:
return conn.add_role(User.safe_id(user), role,
Project.safe_id(project))
def remove_role(self, user, role, project=None):
with LDAPWrapper() as conn:
return conn.remove_role(User.safe_id(user), role,
Project.safe_id(project))
def create_project(self, name, manager_user,
description=None, member_users=None):
if member_users:
member_users = [User.safe_id(u) for u in member_users]
# NOTE(vish): try to associate a vpn ip and port first because
# if it throws an exception, we save having to
# create and destroy a project
Vpn.create(name)
with LDAPWrapper() as conn:
return conn.create_project(name,
User.safe_id(manager_user),
description,
member_users)
def get_projects(self):
with LDAPWrapper() as conn:
return conn.find_projects()
def get_project(self, project):
with LDAPWrapper() as conn:
return conn.find_project(Project.safe_id(project))
def add_to_project(self, user, project):
with LDAPWrapper() as conn:
return conn.add_to_project(User.safe_id(user),
Project.safe_id(project))
def is_project_manager(self, user, project):
if not isinstance(project, Project):
project = self.get_project(project)
return project.has_manager(user)
def is_project_member(self, user, project):
if isinstance(project, Project):
return project.has_member(user)
else:
with LDAPWrapper() as conn:
return conn.is_in_project(User.safe_id(user), project)
def remove_from_project(self, user, project):
with LDAPWrapper() as conn:
return conn.remove_from_project(User.safe_id(user),
Project.safe_id(project))
def delete_project(self, project):
with LDAPWrapper() as conn:
return conn.delete_project(Project.safe_id(project))
def get_user(self, uid):
with LDAPWrapper() as conn:
return conn.find_user(uid)
def get_user_from_access_key(self, access_key):
with LDAPWrapper() as conn:
return conn.find_user_by_access_key(access_key)
def get_users(self):
with LDAPWrapper() as conn:
return conn.find_users()
def create_user(self, user, access=None, secret=None,
admin=False, create_project=True):
if access == None: access = str(uuid.uuid4())
if secret == None: secret = str(uuid.uuid4())
with LDAPWrapper() as conn:
user = User.safe_id(user)
result = conn.create_user(user, access, secret, admin)
if create_project:
# NOTE(vish): if the project creation fails, we delete
# the user and return an exception
try:
conn.create_project(user, user, user)
except Exception:
with LDAPWrapper() as conn:
conn.delete_user(user)
raise
return result
def delete_user(self, user, delete_project=True):
with LDAPWrapper() as conn:
user = User.safe_id(user)
if delete_project:
try:
conn.delete_project(user)
except exception.NotFound:
pass
conn.delete_user(user)
def generate_key_pair(self, user, key_name):
# generating key pair is slow so delay generation
# until after check
user = User.safe_id(user)
with LDAPWrapper() as conn:
if not conn.user_exists(user):
raise exception.NotFound("User %s doesn't exist" % user)
if conn.key_pair_exists(user, key_name):
raise exception.Duplicate("The keypair %s already exists"
% key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
self.create_key_pair(User.safe_id(user), key_name,
public_key, fingerprint)
return private_key, fingerprint
def create_key_pair(self, user, key_name, public_key, fingerprint):
with LDAPWrapper() as conn:
return conn.create_key_pair(User.safe_id(user), key_name,
public_key, fingerprint)
def get_key_pair(self, user, key_name):
with LDAPWrapper() as conn:
return conn.find_key_pair(User.safe_id(user), key_name)
def get_key_pairs(self, user):
with LDAPWrapper() as conn:
return conn.find_key_pairs(User.safe_id(user))
def delete_key_pair(self, user, key_name):
with LDAPWrapper() as conn:
conn.delete_key_pair(User.safe_id(user), key_name)
def generate_x509_cert(self, user, project):
(private_key, csr) = crypto.generate_x509_cert(
self.__cert_subject(User.safe_id(user)))
# TODO - This should be async call back to the cloud controller
signed_cert = crypto.sign_csr(csr, Project.safe_id(project))
return (private_key, signed_cert)
def __cert_subject(self, uid):
# FIXME(ja) - this should be pulled from a global configuration
return FLAGS.credential_cert_subject % (uid, utils.isotime())
class LDAPWrapper(object):
def __init__(self):
self.user = FLAGS.user_dn
self.passwd = FLAGS.ldap_password
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.conn.unbind_s()
return False
def connect(self):
""" connect to ldap as admin user """
if FLAGS.fake_users:
self.NO_SUCH_OBJECT = fakeldap.NO_SUCH_OBJECT
self.OBJECT_CLASS_VIOLATION = fakeldap.OBJECT_CLASS_VIOLATION
self.conn = fakeldap.initialize(FLAGS.ldap_url)
else:
self.NO_SUCH_OBJECT = ldap.NO_SUCH_OBJECT
self.OBJECT_CLASS_VIOLATION = ldap.OBJECT_CLASS_VIOLATION
self.conn = ldap.initialize(FLAGS.ldap_url)
self.conn.simple_bind_s(self.user, self.passwd)
def find_object(self, dn, query = None):
objects = self.find_objects(dn, query)
if len(objects) == 0:
return None
return objects[0]
def find_dns(self, dn, query=None):
try:
res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query)
except self.NO_SUCH_OBJECT:
return []
# just return the DNs
return [dn for dn, attributes in res]
def find_objects(self, dn, query = None):
try:
res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query)
except self.NO_SUCH_OBJECT:
return []
# just return the attributes
return [attributes for dn, attributes in res]
def find_users(self):
attrs = self.find_objects(FLAGS.user_ldap_subtree,
'(objectclass=novaUser)')
return [self.__to_user(attr) for attr in attrs]
def find_key_pairs(self, uid):
attrs = self.find_objects(self.__uid_to_dn(uid),
'(objectclass=novaKeyPair)')
return [self.__to_key_pair(uid, attr) for attr in attrs]
def find_projects(self):
attrs = self.find_objects(FLAGS.project_ldap_subtree,
'(objectclass=novaProject)')
return [self.__to_project(attr) for attr in attrs]
def find_roles(self, tree):
attrs = self.find_objects(tree,
'(&(objectclass=groupOfNames)(!(objectclass=novaProject)))')
return [self.__to_group(attr) for attr in attrs]
def find_group_dns_with_member(self, tree, uid):
dns = self.find_dns(tree,
'(&(objectclass=groupOfNames)(member=%s))' %
self.__uid_to_dn(uid))
return dns
def find_user(self, uid):
attr = self.find_object(self.__uid_to_dn(uid),
'(objectclass=novaUser)')
return self.__to_user(attr)
def find_key_pair(self, uid, key_name):
dn = 'cn=%s,%s' % (key_name,
self.__uid_to_dn(uid))
attr = self.find_object(dn, '(objectclass=novaKeyPair)')
return self.__to_key_pair(uid, attr)
def find_group(self, dn):
"""uses dn directly instead of custructing it from name"""
attr = self.find_object(dn, '(objectclass=groupOfNames)')
return self.__to_group(attr)
def find_project(self, name):
dn = 'cn=%s,%s' % (name,
FLAGS.project_ldap_subtree)
attr = self.find_object(dn, '(objectclass=novaProject)')
return self.__to_project(attr)
def user_exists(self, name):
return self.find_user(name) != None
def key_pair_exists(self, uid, key_name):
return self.find_key_pair(uid, key_name) != None
def project_exists(self, name):
return self.find_project(name) != None
def group_exists(self, dn):
return self.find_group(dn) != None
def delete_key_pairs(self, uid):
keys = self.find_key_pairs(uid)
if keys != None:
for key in keys:
self.delete_key_pair(uid, key.name)
def create_user(self, name, access_key, secret_key, is_admin):
if self.user_exists(name):
raise exception.Duplicate("LDAP user %s already exists" % name)
attr = [
('objectclass', ['person',
'organizationalPerson',
'inetOrgPerson',
'novaUser']),
('ou', [FLAGS.user_unit]),
('uid', [name]),
('sn', [name]),
('cn', [name]),
('secretKey', [secret_key]),
('accessKey', [access_key]),
('isAdmin', [str(is_admin).upper()]),
]
self.conn.add_s(self.__uid_to_dn(name), attr)
return self.__to_user(dict(attr))
def create_project(self, name, manager_uid,
description=None, member_uids=None):
if self.project_exists(name):
raise exception.Duplicate("Project can't be created because "
"project %s already exists" % name)
if not self.user_exists(manager_uid):
raise exception.NotFound("Project can't be created because "
"manager %s doesn't exist" % manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
# description is a required attribute
if description is None:
description = name
members = []
if member_uids != None:
for member_uid in member_uids:
if not self.user_exists(member_uid):
raise exception.NotFound("Project can't be created "
"because user %s doesn't exist" % member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
if not manager_dn in members:
members.append(manager_dn)
attr = [
('objectclass', ['novaProject']),
('cn', [name]),
('description', [description]),
('projectManager', [manager_dn]),
('member', members)
]
self.conn.add_s('cn=%s,%s' % (name, FLAGS.project_ldap_subtree), attr)
return self.__to_project(dict(attr))
def add_to_project(self, uid, project_id):
dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree)
return self.add_to_group(uid, dn)
def remove_from_project(self, uid, project_id):
dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree)
return self.remove_from_group(uid, dn)
def is_in_project(self, uid, project_id):
dn = 'cn=%s,%s' % (project_id, FLAGS.project_ldap_subtree)
return self.is_in_group(uid, dn)
def __role_to_dn(self, role, project_id=None):
if project_id == None:
return FLAGS.__getitem__("ldap_%s" % role).value
else:
return 'cn=%s,cn=%s,%s' % (role,
project_id,
FLAGS.project_ldap_subtree)
def __create_group(self, group_dn, name, uid,
description, member_uids = None):
if self.group_exists(group_dn):
raise exception.Duplicate("Group can't be created because "
"group %s already exists" % name)
members = []
if member_uids != None:
for member_uid in member_uids:
if not self.user_exists(member_uid):
raise exception.NotFound("Group can't be created "
"because user %s doesn't exist" % member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
members.append(dn)
attr = [
('objectclass', ['groupOfNames']),
('cn', [name]),
('description', [description]),
('member', members)
]
self.conn.add_s(group_dn, attr)
return self.__to_group(dict(attr))
def has_role(self, uid, role, project_id=None):
role_dn = self.__role_to_dn(role, project_id)
return self.is_in_group(uid, role_dn)
def add_role(self, uid, role, project_id=None):
role_dn = self.__role_to_dn(role, project_id)
if not self.group_exists(role_dn):
# create the role if it doesn't exist
description = '%s role for %s' % (role, project_id)
self.__create_group(role_dn, role, uid, description)
else:
return self.add_to_group(uid, role_dn)
def remove_role(self, uid, role, project_id=None):
role_dn = self.__role_to_dn(role, project_id)
return self.remove_from_group(uid, role_dn)
def is_in_group(self, uid, group_dn):
if not self.user_exists(uid):
raise exception.NotFound("User %s can't be searched in group "
"becuase the user doesn't exist" % (uid,))
if not self.group_exists(group_dn):
return False
res = self.find_object(group_dn,
'(member=%s)' % self.__uid_to_dn(uid))
return res != None
def add_to_group(self, uid, group_dn):
if not self.user_exists(uid):
raise exception.NotFound("User %s can't be added to the group "
"becuase the user doesn't exist" % (uid,))
if not self.group_exists(group_dn):
raise exception.NotFound("The group at dn %s doesn't exist" %
(group_dn,))
if self.is_in_group(uid, group_dn):
raise exception.Duplicate("User %s is already a member of "
"the group %s" % (uid, group_dn))
attr = [
(ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))
]
self.conn.modify_s(group_dn, attr)
def remove_from_group(self, uid, group_dn):
if not self.group_exists(group_dn):
raise exception.NotFound("The group at dn %s doesn't exist" %
(group_dn,))
if not self.user_exists(uid):
raise exception.NotFound("User %s can't be removed from the "
"group because the user doesn't exist" % (uid,))
if not self.is_in_group(uid, group_dn):
raise exception.NotFound("User %s is not a member of the group" %
(uid,))
self._safe_remove_from_group(group_dn, uid)
def _safe_remove_from_group(self, group_dn, uid):
# FIXME(vish): what if deleted user is a project manager?
attr = [(ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))]
try:
self.conn.modify_s(group_dn, attr)
except self.OBJECT_CLASS_VIOLATION:
logging.debug("Attempted to remove the last member of a group. "
"Deleting the group at %s instead." % group_dn )
self.delete_group(group_dn)
def remove_from_all(self, uid):
if not self.user_exists(uid):
raise exception.NotFound("User %s can't be removed from all "
"because the user doesn't exist" % (uid,))
dn = self.__uid_to_dn(uid)
role_dns = self.find_group_dns_with_member(
FLAGS.role_ldap_subtree, uid)
for role_dn in role_dns:
self._safe_remove_from_group(role_dn, uid)
project_dns = self.find_group_dns_with_member(
FLAGS.project_ldap_subtree, uid)
for project_dn in project_dns:
self._safe_remove_from_group(project_dn, uid)
def create_key_pair(self, uid, key_name, public_key, fingerprint):
"""create's a public key in the directory underneath the user"""
# TODO(vish): possibly refactor this to store keys in their own ou
# and put dn reference in the user object
attr = [
('objectclass', ['novaKeyPair']),
('cn', [key_name]),
('sshPublicKey', [public_key]),
('keyFingerprint', [fingerprint]),
]
self.conn.add_s('cn=%s,%s' % (key_name,
self.__uid_to_dn(uid)),
attr)
return self.__to_key_pair(uid, dict(attr))
def find_user_by_access_key(self, access):
query = '(accessKey=%s)' % access
dn = FLAGS.user_ldap_subtree
return self.__to_user(self.find_object(dn, query))
def delete_user(self, uid):
if not self.user_exists(uid):
raise exception.NotFound("User %s doesn't exist" % uid)
self.delete_key_pairs(uid)
self.remove_from_all(uid)
self.conn.delete_s('uid=%s,%s' % (uid,
FLAGS.user_ldap_subtree))
def delete_key_pair(self, uid, key_name):
if not self.key_pair_exists(uid, key_name):
raise exception.NotFound("Key Pair %s doesn't exist for user %s" %
(key_name, uid))
self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
FLAGS.user_ldap_subtree))
def delete_group(self, group_dn):
if not self.group_exists(group_dn):
raise exception.NotFound("Group at dn %s doesn't exist" % group_dn)
self.conn.delete_s(group_dn)
def delete_roles(self, project_dn):
roles = self.find_roles(project_dn)
for role in roles:
self.delete_group('cn=%s,%s' % (role.id, project_dn))
def delete_project(self, name):
project_dn = 'cn=%s,%s' % (name, FLAGS.project_ldap_subtree)
self.delete_roles(project_dn)
self.delete_group(project_dn)
def __to_user(self, attr):
if attr == None:
return None
return User(
id = attr['uid'][0],
name = attr['cn'][0],
access = attr['accessKey'][0],
secret = attr['secretKey'][0],
admin = (attr['isAdmin'][0] == 'TRUE')
)
def __to_key_pair(self, owner, attr):
if attr == None:
return None
return KeyPair(
id = attr['cn'][0],
owner_id = owner,
public_key = attr['sshPublicKey'][0],
fingerprint = attr['keyFingerprint'][0],
)
def __to_group(self, attr):
if attr == None:
return None
member_dns = attr.get('member', [])
return Group(
id = attr['cn'][0],
description = attr.get('description', [None])[0],
member_ids = [self.__dn_to_uid(x) for x in member_dns]
)
def __to_project(self, attr):
if attr == None:
return None
member_dns = attr.get('member', [])
return Project(
id = attr['cn'][0],
project_manager_id = self.__dn_to_uid(attr['projectManager'][0]),
description = attr.get('description', [None])[0],
member_ids = [self.__dn_to_uid(x) for x in member_dns]
)
def __dn_to_uid(self, dn):
return dn.split(',')[0].split('=')[1]
def __uid_to_dn(self, dn):
return 'uid=%s,%s' % (dn, FLAGS.user_ldap_subtree)

View File

@@ -103,7 +103,7 @@ class BasicModel(object):
@classmethod
def _redis_name(cls):
return cls.override_type or cls.__name__
return cls.override_type or cls.__name__.lower()
@classmethod
def lookup(cls, identifier):
@@ -184,21 +184,19 @@ class BasicModel(object):
@absorb_connection_error
def add_to_index(self):
"""Each insance of Foo has its id tracked int the set named Foos"""
set_name = self.__class__._redis_set_name(self.__class__.__name__)
Redis.instance().sadd(set_name, self.identifier)
@absorb_connection_error
def remove_from_index(self):
set_name = self.__class__._redis_set_name(self.__class__.__name__)
Redis.instance().srem(set_name, self.identifier)
@absorb_connection_error
def remove_from_index(self):
"""Remove id of this instance from the set tracking ids of this type"""
set_name = self.__class__._redis_set_name(self.__class__.__name__)
Redis.instance().srem(set_name, self.identifier)
@absorb_connection_error
def associate_with(self, foreign_type, foreign_id):
"""Add this class id into the set foreign_type:foreign_id:this_types"""
# note the extra 's' on the end is for plurality
# to match the old data without requiring a migration of any sort
self.add_associated_model_to_its_set(foreign_type, foreign_id)
@@ -208,21 +206,24 @@ class BasicModel(object):
@absorb_connection_error
def unassociate_with(self, foreign_type, foreign_id):
"""Delete from foreign_type:foreign_id:this_types set"""
redis_set = self.__class__._redis_association_name(foreign_type,
foreign_id)
Redis.instance().srem(redis_set, self.identifier)
def add_associated_model_to_its_set(self, my_type, my_id):
def add_associated_model_to_its_set(self, model_type, model_id):
"""
When associating an X to a Y, save Y so it picks up a newer timestamp,
etc., and to make sure Y is saved if it is a new record.
If the model_type isn't found as a usable class, ignore it, this can
happen when associating to things stored in LDAP (user, project, ...).
"""
table = globals()
klsname = my_type.capitalize()
klsname = model_type.capitalize()
if table.has_key(klsname):
my_class = table[klsname]
my_inst = my_class(my_id)
my_inst.save()
else:
logging.warning("no model class for %s when building"
" association from %s",
klsname, self)
model_class = table[klsname]
model_inst = model_class(model_id)
model_inst.save()
@absorb_connection_error
def save(self):

View File

@@ -22,9 +22,10 @@ Admin API controller, exposed through http via the api worker.
import base64
from nova.auth import users
from nova.auth import manager
from nova.compute import model
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
@@ -32,8 +33,17 @@ def user_dict(user, base64_file=None):
'username': user.id,
'accesskey': user.access,
'secretkey': user.secret,
'file': base64_file,
}
'file': base64_file}
else:
return {}
def project_dict(project):
"""Convert the project object to a result dict"""
if project:
return {
'projectname': project.id,
'project_manager_id': project.project_manager_id,
'description': project.description}
else:
return {}
@@ -69,18 +79,18 @@ class AdminController(object):
@admin_only
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
return user_dict(users.UserManager.instance().get_user(name))
return user_dict(manager.AuthManager().get_user(name))
@admin_only
def describe_users(self, _context, **_kwargs):
"""Returns all users - should be changed to deal with a list."""
return {'userSet':
[user_dict(u) for u in users.UserManager.instance().get_users()] }
[user_dict(u) for u in manager.AuthManager().get_users()] }
@admin_only
def register_user(self, _context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
return user_dict(users.UserManager.instance().create_user(name))
return user_dict(manager.AuthManager().create_user(name))
@admin_only
def deregister_user(self, _context, name, **_kwargs):
@@ -88,7 +98,20 @@ class AdminController(object):
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
users.UserManager.instance().delete_user(name)
manager.AuthManager().delete_user(name)
return True
@admin_only
def modify_user_role(self, context, user, role, project=None,
operation='add', **kwargs):
"""Add or remove a role for a user and project."""
if operation == 'add':
manager.AuthManager().add_role(user, role, project)
elif operation == 'remove':
manager.AuthManager().remove_role(user, role, project)
else:
raise exception.ApiError('operation must be add or remove')
return True
@@ -100,10 +123,57 @@ class AdminController(object):
"""
if project is None:
project = name
project = users.UserManager.instance().get_project(project)
user = users.UserManager.instance().get_user(name)
project = manager.AuthManager().get_project(project)
user = manager.AuthManager().get_user(name)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
@admin_only
def describe_project(self, context, name, **kwargs):
"""Returns project data, including member ids."""
return project_dict(manager.AuthManager().get_project(name))
@admin_only
def describe_projects(self, context, user=None, **kwargs):
"""Returns all projects - should be changed to deal with a list."""
return {'projectSet':
[project_dict(u) for u in
manager.AuthManager().get_projects(user=user)]}
@admin_only
def register_project(self, context, name, manager_user, description=None,
member_users=None, **kwargs):
"""Creates a new project"""
return project_dict(
manager.AuthManager().create_project(
name,
manager_user,
description=description,
member_users=member_users))
@admin_only
def deregister_project(self, context, name):
"""Permanently deletes a project."""
manager.AuthManager().delete_project(name)
return True
@admin_only
def describe_project_members(self, context, name, **kwargs):
project = manager.AuthManager().get_project(name)
result = {
'members': [{'member': m} for m in project.member_ids]}
return result
@admin_only
def modify_project_member(self, context, user, project, operation, **kwargs):
"""Add or remove a user from a project."""
if operation == 'add':
manager.AuthManager().add_to_project(user, project)
elif operation == 'remove':
manager.AuthManager().remove_from_project(user, project)
else:
raise exception.ApiError('operation must be add or remove')
return True
@admin_only
def describe_hosts(self, _context, **_kwargs):
"""Returns status info for all nodes. Includes:

View File

@@ -35,7 +35,7 @@ from nova import crypto
from nova import exception
from nova import flags
from nova import utils
from nova.auth import users
from nova.auth import manager
import nova.cloudpipe.api
from nova.endpoint import cloud
@@ -266,7 +266,7 @@ class APIRequestHandler(tornado.web.RequestHandler):
# Authenticate the request.
try:
(user, project) = users.UserManager.instance().authenticate(
(user, project) = manager.AuthManager().authenticate(
access,
signature,
auth_params,

View File

@@ -23,7 +23,6 @@ datastore.
"""
import base64
import json
import logging
import os
import time
@@ -35,12 +34,13 @@ from nova import flags
from nova import rpc
from nova import utils
from nova.auth import rbac
from nova.auth import users
from nova.auth import manager
from nova.compute import model
from nova.compute import network
from nova.compute import node
from nova.compute.instance_types import INSTANCE_TYPES
from nova.compute import service as compute_service
from nova.endpoint import images
from nova.volume import storage
from nova.volume import service as volume_service
FLAGS = flags.FLAGS
@@ -48,10 +48,10 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
def _gen_key(user_id, key_name):
""" Tuck this into UserManager """
""" Tuck this into AuthManager """
try:
manager = users.UserManager.instance()
private_key, fingerprint = manager.generate_key_pair(user_id, key_name)
mgr = manager.AuthManager()
private_key, fingerprint = mgr.generate_key_pair(user_id, key_name)
except Exception as ex:
return {'exception': ex}
return {'private_key': private_key, 'fingerprint': fingerprint}
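
For reference, the same key-pair generation can be driven directly from AuthManager; 'keyuser' and 'mykey' below are illustrative names only:

    from nova.auth import manager

    mgr = manager.AuthManager()
    user = mgr.create_user('keyuser')
    private_key, fingerprint = mgr.generate_key_pair(user.id, 'mykey')
    print fingerprint
    mgr.delete_user(user)
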
@@ -76,7 +76,7 @@ class CloudController(object):
def volumes(self):
""" returns a list of all volumes """
for volume_id in datastore.Redis.instance().smembers("volumes"):
volume = storage.get_volume(volume_id)
volume = volume_service.get_volume(volume_id)
yield volume
def __str__(self):
@@ -103,7 +103,7 @@ class CloudController(object):
result = {}
for instance in self.instdir.all:
if instance['project_id'] == project_id:
line = '%s slots=%d' % (instance['private_dns_name'], node.INSTANCE_TYPES[instance['instance_type']]['vcpus'])
line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus'])
if instance['key_name'] in result:
result[instance['key_name']].append(line)
else:
@@ -296,8 +296,8 @@ class CloudController(object):
@rbac.allow('projectmanager', 'sysadmin')
def create_volume(self, context, size, **kwargs):
# TODO(vish): refactor this to create the volume object here and tell storage to create it
res = rpc.call(FLAGS.storage_topic, {"method": "create_volume",
# TODO(vish): refactor this to create the volume object here and tell service to create it
res = rpc.call(FLAGS.volume_topic, {"method": "create_volume",
"args" : {"size": size,
"user_id": context.user.id,
"project_id": context.project.id}})
@@ -331,7 +331,7 @@ class CloudController(object):
raise exception.NotFound('Instance %s could not be found' % instance_id)
def _get_volume(self, context, volume_id):
volume = storage.get_volume(volume_id)
volume = volume_service.get_volume(volume_id)
if context.user.is_admin() or volume['project_id'] == context.project.id:
return volume
raise exception.NotFound('Volume %s could not be found' % volume_id)
@@ -516,13 +516,17 @@ class CloudController(object):
# get defaults from imagestore
image_id = image['imageId']
kernel_id = image.get('kernelId', None)
ramdisk_id = image.get('ramdiskId', None)
kernel_id = image.get('kernelId', FLAGS.default_kernel)
ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
# API parameters overrides of defaults
kernel_id = kwargs.get('kernel_id', kernel_id)
ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
# make sure we have access to kernel and ramdisk
self._get_image(context, kernel_id)
self._get_image(context, ramdisk_id)
logging.debug("Going to run instances...")
reservation_id = utils.generate_uid('r')
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
@@ -578,7 +582,7 @@ class CloudController(object):
"args": {"instance_id" : inst.instance_id}})
logging.debug("Casting to node for %s's instance with IP of %s" %
(context.user.name, inst['private_dns_name']))
# TODO: Make the NetworkComputeNode figure out the network name from ip.
# TODO: Make Network figure out the network name from ip.
return defer.succeed(self._format_instances(
context, reservation_id))
@@ -628,8 +632,8 @@ class CloudController(object):
def delete_volume(self, context, volume_id, **kwargs):
# TODO: return error if not authorized
volume = self._get_volume(context, volume_id)
storage_node = volume['node_name']
rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node),
volume_node = volume['node_name']
rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node),
{"method": "delete_volume",
"args" : {"volume_id": volume_id}})
return defer.succeed(True)
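
Deletes, by contrast, are cast fire-and-forget to the specific worker that owns the volume, by appending the node name to the topic. A sketch with placeholder identifiers:

    from nova import flags
    from nova import rpc

    FLAGS = flags.FLAGS

    # Routes to 'volume.<node_name>' rather than the shared 'volume' topic.
    rpc.cast('%s.%s' % (FLAGS.volume_topic, 'volume-node-1'),
             {"method": "delete_volume",
              "args": {"volume_id": 'vol-placeholder'}})
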

View File

@@ -34,12 +34,11 @@ from nova import exception
from nova import flags
from nova import rpc
from nova import utils
from nova.auth import users
from nova.auth import manager
from nova.compute import model
from nova.compute import network
from nova.endpoint import images
from nova.endpoint import wsgi
from nova.volume import storage
FLAGS = flags.FLAGS
@@ -78,11 +77,11 @@ class Api(object):
def build_context(self, env):
rv = {}
if env.has_key("HTTP_X_AUTH_TOKEN"):
rv['user'] = users.UserManager.instance().get_user_from_access_key(
rv['user'] = manager.AuthManager().get_user_from_access_key(
env['HTTP_X_AUTH_TOKEN']
)
if rv['user']:
rv['project'] = users.UserManager.instance().get_project(
rv['project'] = manager.AuthManager().get_project(
rv['user'].name
)
return rv

View File

@@ -44,6 +44,12 @@ class Duplicate(Error):
class NotAuthorized(Error):
pass
class NotEmpty(Error):
pass
class Invalid(Error):
pass
def wrap_exception(f):
def _wrap(*args, **kw):
try:

View File

@@ -1,112 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake (in-memory) hypervisor+api. Allows nova testing w/o KVM and libvirt.
"""
import StringIO
from xml.etree import ElementTree
class FakeVirtConnection(object):
# FIXME: networkCreateXML, listNetworks don't do anything since
# they aren't exercised in tests yet
def __init__(self):
self.next_index = 0
self.instances = {}
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def lookupByID(self, i):
return self.instances[str(i)]
def listDomainsID(self):
return self.instances.keys()
def listNetworks(self):
return []
def lookupByName(self, instance_id):
for x in self.instances.values():
if x.name() == instance_id:
return x
raise Exception('no instance found for instance_id: %s' % instance_id)
def networkCreateXML(self, xml):
pass
def createXML(self, xml, flags):
# parse the xml :(
xml_stringio = StringIO.StringIO(xml)
my_xml = ElementTree.parse(xml_stringio)
name = my_xml.find('name').text
fake_instance = FakeVirtInstance(conn=self,
index=str(self.next_index),
name=name,
xml=my_xml)
self.instances[str(self.next_index)] = fake_instance
self.next_index += 1
def _removeInstance(self, i):
self.instances.pop(str(i))
class FakeVirtInstance(object):
NOSTATE = 0x00
RUNNING = 0x01
BLOCKED = 0x02
PAUSED = 0x03
SHUTDOWN = 0x04
SHUTOFF = 0x05
CRASHED = 0x06
def __init__(self, conn, index, name, xml):
self._conn = conn
self._destroyed = False
self._name = name
self._index = index
self._state = self.RUNNING
def name(self):
return self._name
def destroy(self):
if self._state == self.SHUTOFF:
raise Exception('instance already destroyed: %s' % self.name())
self._state = self.SHUTDOWN
self._conn._removeInstance(self._index)
def info(self):
return [self._state, 0, 2, 0, 0]
def XMLDesc(self, flags):
return open('fakevirtinstance.xml', 'r').read()
def blockStats(self, disk):
return [0L, 0L, 0L, 0L, None]
def interfaceStats(self, iface):
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]

View File

@@ -36,17 +36,17 @@ DEFINE_bool = DEFINE_bool
# Define any app-specific flags in their own files, docs at:
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
#DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
DEFINE_string('storage_topic', 'storage', 'the topic storage nodes listen on')
DEFINE_bool('fake_libvirt', False,
'whether to use a fake libvirt or not')
DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses')
DEFINE_bool('fake_users', False, 'use fake users')
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
@@ -75,6 +75,8 @@ DEFINE_string('vpn_key_suffix',
'-key',
'Suffix to add to project name for vpn key')
DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
# UNUSED
DEFINE_string('node_availability_zone',
'nova',

View File

@@ -151,6 +151,7 @@ class TopicPublisher(Publisher):
def __init__(self, connection=None, topic="broadcast"):
self.routing_key = topic
self.exchange = FLAGS.control_exchange
self.durable = False
super(TopicPublisher, self).__init__(connection=connection)
@@ -242,7 +243,7 @@ def send_message(topic, message, wait=True):
consumer.register_callback(generic_response)
publisher = messaging.Publisher(connection=Connection.instance(),
exchange="nova",
exchange=FLAGS.control_exchange,
exchange_type="topic",
routing_key=topic)
publisher.send(message)
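
With both publishers now using FLAGS.control_exchange, a one-off publish can be sketched as below; the 'compute' routing key and the payload are examples only:

    from carrot import messaging

    from nova import flags
    from nova import rpc

    FLAGS = flags.FLAGS

    publisher = messaging.Publisher(connection=rpc.Connection.instance(),
                                    exchange=FLAGS.control_exchange,
                                    exchange_type="topic",
                                    routing_key='compute')
    publisher.send({"method": "describe_instances", "args": {}})
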

103
nova/service.py Normal file
View File

@@ -0,0 +1,103 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Generic Node baseclass for all workers that run on hosts
"""
import inspect
import logging
import os
from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
from nova import datastore
from nova import flags
from nova import rpc
from nova.compute import model
FLAGS = flags.FLAGS
flags.DEFINE_integer('report_interval', 10,
'seconds between nodes reporting state to cloud',
lower_bound=1)
class Service(object, service.Service):
"""Base class for workers that run on hosts"""
@classmethod
def create(cls,
report_interval=None, # defaults to flag
bin_name=None, # defaults to basename of executable
topic=None): # defaults to basename - "nova-" part
"""Instantiates class and passes back application object"""
if not report_interval:
# NOTE(vish): set here because if it is set to flag in the
# parameter list, it wrongly uses the default
report_interval = FLAGS.report_interval
# NOTE(vish): magic to automatically determine bin_name and topic
if not bin_name:
bin_name = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = bin_name.rpartition("nova-")[2]
logging.warn("Starting %s node" % topic)
node_instance = cls()
conn = rpc.Connection.instance()
consumer_all = rpc.AdapterConsumer(
connection=conn,
topic='%s' % topic,
proxy=node_instance)
consumer_node = rpc.AdapterConsumer(
connection=conn,
topic='%s.%s' % (topic, FLAGS.node_name),
proxy=node_instance)
pulse = task.LoopingCall(node_instance.report_state,
FLAGS.node_name,
bin_name)
pulse.start(interval=report_interval, now=False)
consumer_all.attach_to_twisted()
consumer_node.attach_to_twisted()
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals below
application = service.Application(bin_name)
node_instance.setServiceParent(application)
return application
@defer.inlineCallbacks
def report_state(self, nodename, daemon):
# TODO(termie): make this pattern be more elegant. -todd
try:
record = model.Daemon(nodename, daemon)
record.heartbeat()
if getattr(self, "model_disconnected", False):
self.model_disconnected = False
logging.error("Recovered model server connection!")
except datastore.ConnectionError, ex:
if not getattr(self, "model_disconnected", False):
self.model_disconnected = True
logging.exception("model server went away")
yield
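
A worker script would typically just subclass Service and hand the resulting application to twistd. The EchoService below and the 'nova-echo' name are made up for illustration, and the sketch assumes the usual flags (node_name, rabbit settings) have already been parsed:

    from nova import service

    class EchoService(service.Service):
        """Toy worker; real workers expose methods like run_instance."""
        def echo(self, value):
            return value

    # twistd looks for a module-level 'application' when it parses the file.
    application = EchoService.create(bin_name='nova-echo', topic='echo')
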

View File

@@ -156,9 +156,9 @@ class BaseTestCase(TrialTestCase):
Example (callback chain, ugly):
d = self.node.terminate_instance(instance_id) # a Deferred instance
d = self.compute.terminate_instance(instance_id) # a Deferred instance
def _describe(_):
d_desc = self.node.describe_instances() # another Deferred instance
d_desc = self.compute.describe_instances() # another Deferred instance
return d_desc
def _checkDescribe(rv):
self.assertEqual(rv, [])
@@ -169,8 +169,8 @@ class BaseTestCase(TrialTestCase):
Example (inline callbacks! yay!):
yield self.node.terminate_instance(instance_id)
rv = yield self.node.describe_instances()
yield self.compute.terminate_instance(instance_id)
rv = yield self.compute.describe_instances()
self.assertEqual(rv, [])
If the test fits the Inline Callbacks pattern we will automatically

View File

@@ -22,7 +22,7 @@ import logging
from nova import exception
from nova import flags
from nova import test
from nova.auth.users import UserManager
from nova.auth import manager
from nova.auth import rbac
@@ -33,9 +33,9 @@ class Context(object):
class AccessTestCase(test.BaseTestCase):
def setUp(self):
super(AccessTestCase, self).setUp()
FLAGS.fake_libvirt = True
FLAGS.connection_type = 'fake'
FLAGS.fake_storage = True
um = UserManager.instance()
um = manager.AuthManager()
# Make test users
try:
self.testadmin = um.create_user('testadmin')
@@ -79,7 +79,7 @@ class AccessTestCase(test.BaseTestCase):
#user is set in each test
def tearDown(self):
um = UserManager.instance()
um = manager.AuthManager()
# Delete the test project
um.delete_project('testproj')
# Delete the test user

View File

@@ -26,7 +26,7 @@ from twisted.internet import defer
from nova import flags
from nova import test
from nova.auth import users
from nova.auth import manager
from nova.endpoint import api
from nova.endpoint import cloud
@@ -43,7 +43,11 @@ def boto_to_tornado(method, path, headers, data, host, connection=None):
connection should be a FakeTornadoHttpConnection instance
"""
headers = httpserver.HTTPHeaders()
try:
headers = httpserver.HTTPHeaders()
except AttributeError:
from tornado import httputil
headers = httputil.HTTPHeaders()
for k, v in headers.iteritems():
headers[k] = v
@@ -150,7 +154,7 @@ class ApiEc2TestCase(test.BaseTestCase):
def setUp(self):
super(ApiEc2TestCase, self).setUp()
self.users = users.UserManager.instance()
self.manager = manager.AuthManager()
self.cloud = cloud.CloudController()
self.host = '127.0.0.1'
@@ -175,25 +179,22 @@ class ApiEc2TestCase(test.BaseTestCase):
def test_describe_instances(self):
self.expect_http()
self.mox.ReplayAll()
try:
self.users.create_user('fake', 'fake', 'fake')
except Exception, _err:
pass # User may already exist
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
self.assertEqual(self.ec2.get_all_instances(), [])
self.users.delete_user('fake')
self.manager.delete_project(project)
self.manager.delete_user(user)
def test_get_all_key_pairs(self):
self.expect_http()
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8)))
try:
self.users.create_user('fake', 'fake', 'fake')
except Exception, _err:
pass # User may already exist
self.users.generate_key_pair('fake', keyname)
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
self.manager.generate_key_pair(user.id, keyname)
rv = self.ec2.get_all_key_pairs()
self.assertTrue(filter(lambda k: k.name == keyname, rv))
self.users.delete_user('fake')
self.manager.delete_project(project)
self.manager.delete_user(user)

View File

@@ -25,41 +25,41 @@ import unittest
from nova import crypto
from nova import flags
from nova import test
from nova.auth import users
from nova.auth import manager
from nova.endpoint import cloud
FLAGS = flags.FLAGS
class UserTestCase(test.BaseTestCase):
class AuthTestCase(test.BaseTestCase):
flush_db = False
def setUp(self):
super(UserTestCase, self).setUp()
self.flags(fake_libvirt=True,
super(AuthTestCase, self).setUp()
self.flags(connection_type='fake',
fake_storage=True)
self.users = users.UserManager.instance()
self.manager = manager.AuthManager()
def test_001_can_create_users(self):
self.users.create_user('test1', 'access', 'secret')
self.users.create_user('test2')
self.manager.create_user('test1', 'access', 'secret')
self.manager.create_user('test2')
def test_002_can_get_user(self):
user = self.users.get_user('test1')
user = self.manager.get_user('test1')
def test_003_can_retreive_properties(self):
user = self.users.get_user('test1')
user = self.manager.get_user('test1')
self.assertEqual('test1', user.id)
self.assertEqual('access', user.access)
self.assertEqual('secret', user.secret)
def test_004_signature_is_valid(self):
#self.assertTrue(self.users.authenticate( **boto.generate_url ... ? ? ? ))
#self.assertTrue(self.manager.authenticate( **boto.generate_url ... ? ? ? ))
pass
#raise NotImplementedError
def test_005_can_get_credentials(self):
return
credentials = self.users.get_user('test1').get_credentials()
credentials = self.manager.get_user('test1').get_credentials()
self.assertEqual(credentials,
'export EC2_ACCESS_KEY="access"\n' +
'export EC2_SECRET_KEY="secret"\n' +
@@ -68,14 +68,14 @@ class UserTestCase(test.BaseTestCase):
'export EC2_USER_ID="test1"\n')
def test_006_test_key_storage(self):
user = self.users.get_user('test1')
user = self.manager.get_user('test1')
user.create_key_pair('public', 'key', 'fingerprint')
key = user.get_key_pair('public')
self.assertEqual('key', key.public_key)
self.assertEqual('fingerprint', key.fingerprint)
def test_007_test_key_generation(self):
user = self.users.get_user('test1')
user = self.manager.get_user('test1')
private_key, fingerprint = user.generate_key_pair('public2')
key = RSA.load_key_string(private_key, callback=lambda: None)
bio = BIO.MemoryBuffer()
@@ -87,71 +87,71 @@ class UserTestCase(test.BaseTestCase):
converted.split(" ")[1].strip())
def test_008_can_list_key_pairs(self):
keys = self.users.get_user('test1').get_key_pairs()
keys = self.manager.get_user('test1').get_key_pairs()
self.assertTrue(filter(lambda k: k.name == 'public', keys))
self.assertTrue(filter(lambda k: k.name == 'public2', keys))
def test_009_can_delete_key_pair(self):
self.users.get_user('test1').delete_key_pair('public')
keys = self.users.get_user('test1').get_key_pairs()
self.manager.get_user('test1').delete_key_pair('public')
keys = self.manager.get_user('test1').get_key_pairs()
self.assertFalse(filter(lambda k: k.name == 'public', keys))
def test_010_can_list_users(self):
users = self.users.get_users()
users = self.manager.get_users()
logging.warn(users)
self.assertTrue(filter(lambda u: u.id == 'test1', users))
def test_101_can_add_user_role(self):
self.assertFalse(self.users.has_role('test1', 'itsec'))
self.users.add_role('test1', 'itsec')
self.assertTrue(self.users.has_role('test1', 'itsec'))
self.assertFalse(self.manager.has_role('test1', 'itsec'))
self.manager.add_role('test1', 'itsec')
self.assertTrue(self.manager.has_role('test1', 'itsec'))
def test_199_can_remove_user_role(self):
self.assertTrue(self.users.has_role('test1', 'itsec'))
self.users.remove_role('test1', 'itsec')
self.assertFalse(self.users.has_role('test1', 'itsec'))
self.assertTrue(self.manager.has_role('test1', 'itsec'))
self.manager.remove_role('test1', 'itsec')
self.assertFalse(self.manager.has_role('test1', 'itsec'))
def test_201_can_create_project(self):
project = self.users.create_project('testproj', 'test1', 'A test project', ['test1'])
self.assertTrue(filter(lambda p: p.name == 'testproj', self.users.get_projects()))
project = self.manager.create_project('testproj', 'test1', 'A test project', ['test1'])
self.assertTrue(filter(lambda p: p.name == 'testproj', self.manager.get_projects()))
self.assertEqual(project.name, 'testproj')
self.assertEqual(project.description, 'A test project')
self.assertEqual(project.project_manager_id, 'test1')
self.assertTrue(project.has_member('test1'))
def test_202_user1_is_project_member(self):
self.assertTrue(self.users.get_user('test1').is_project_member('testproj'))
self.assertTrue(self.manager.get_user('test1').is_project_member('testproj'))
def test_203_user2_is_not_project_member(self):
self.assertFalse(self.users.get_user('test2').is_project_member('testproj'))
self.assertFalse(self.manager.get_user('test2').is_project_member('testproj'))
def test_204_user1_is_project_manager(self):
self.assertTrue(self.users.get_user('test1').is_project_manager('testproj'))
self.assertTrue(self.manager.get_user('test1').is_project_manager('testproj'))
def test_205_user2_is_not_project_manager(self):
self.assertFalse(self.users.get_user('test2').is_project_manager('testproj'))
self.assertFalse(self.manager.get_user('test2').is_project_manager('testproj'))
def test_206_can_add_user_to_project(self):
self.users.add_to_project('test2', 'testproj')
self.assertTrue(self.users.get_project('testproj').has_member('test2'))
self.manager.add_to_project('test2', 'testproj')
self.assertTrue(self.manager.get_project('testproj').has_member('test2'))
def test_208_can_remove_user_from_project(self):
self.users.remove_from_project('test2', 'testproj')
self.assertFalse(self.users.get_project('testproj').has_member('test2'))
self.manager.remove_from_project('test2', 'testproj')
self.assertFalse(self.manager.get_project('testproj').has_member('test2'))
def test_209_can_generate_x509(self):
# MUST HAVE RUN CLOUD SETUP BY NOW
self.cloud = cloud.CloudController()
self.cloud.setup()
private_key, signed_cert_string = self.users.get_project('testproj').generate_x509_cert('test1')
logging.debug(signed_cert_string)
_key, cert_str = self.manager._generate_x509_cert('test1', 'testproj')
logging.debug(cert_str)
# Need to verify that it's signed by the right intermediate CA
full_chain = crypto.fetch_ca(project_id='testproj', chain=True)
int_cert = crypto.fetch_ca(project_id='testproj', chain=False)
cloud_cert = crypto.fetch_ca()
logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain)
signed_cert = X509.load_cert_string(signed_cert_string)
signed_cert = X509.load_cert_string(cert_str)
chain_cert = X509.load_cert_string(full_chain)
int_cert = X509.load_cert_string(int_cert)
cloud_cert = X509.load_cert_string(cloud_cert)
@@ -164,42 +164,51 @@ class UserTestCase(test.BaseTestCase):
self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey()))
def test_210_can_add_project_role(self):
project = self.users.get_project('testproj')
project = self.manager.get_project('testproj')
self.assertFalse(project.has_role('test1', 'sysadmin'))
self.users.add_role('test1', 'sysadmin')
self.manager.add_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
project.add_role('test1', 'sysadmin')
self.assertTrue(project.has_role('test1', 'sysadmin'))
def test_211_can_remove_project_role(self):
project = self.users.get_project('testproj')
project = self.manager.get_project('testproj')
self.assertTrue(project.has_role('test1', 'sysadmin'))
project.remove_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
self.users.remove_role('test1', 'sysadmin')
self.manager.remove_role('test1', 'sysadmin')
self.assertFalse(project.has_role('test1', 'sysadmin'))
def test_212_vpn_ip_and_port_looks_valid(self):
project = self.users.get_project('testproj')
project = self.manager.get_project('testproj')
self.assert_(project.vpn_ip)
self.assert_(project.vpn_port >= FLAGS.vpn_start_port)
self.assert_(project.vpn_port <= FLAGS.vpn_end_port)
def test_213_too_many_vpns(self):
for i in xrange(users.Vpn.num_ports_for_ip(FLAGS.vpn_ip)):
users.Vpn.create("vpnuser%s" % i)
self.assertRaises(users.NoMorePorts, users.Vpn.create, "boom")
vpns = []
for i in xrange(manager.Vpn.num_ports_for_ip(FLAGS.vpn_ip)):
vpns.append(manager.Vpn.create("vpnuser%s" % i))
self.assertRaises(manager.NoMorePorts, manager.Vpn.create, "boom")
for vpn in vpns:
vpn.destroy()
def test_214_can_retrieve_project_by_user(self):
project = self.manager.create_project('testproj2', 'test2', 'Another test project', ['test2'])
self.assert_(len(self.manager.get_projects()) > 1)
self.assertEqual(len(self.manager.get_projects('test2')), 1)
def test_299_can_delete_project(self):
self.users.delete_project('testproj')
self.assertFalse(filter(lambda p: p.name == 'testproj', self.users.get_projects()))
self.manager.delete_project('testproj')
self.assertFalse(filter(lambda p: p.name == 'testproj', self.manager.get_projects()))
self.manager.delete_project('testproj2')
def test_999_can_delete_users(self):
self.users.delete_user('test1')
users = self.users.get_users()
self.manager.delete_user('test1')
users = self.manager.get_users()
self.assertFalse(filter(lambda u: u.id == 'test1', users))
self.users.delete_user('test2')
self.assertEqual(self.users.get_user('test2'), None)
self.manager.delete_user('test2')
self.assertEqual(self.manager.get_user('test2'), None)
if __name__ == "__main__":
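
The port-exhaustion behaviour checked in test_213 condenses to the following sketch; the vpnuser names are placeholders:

    from nova import flags
    from nova.auth import manager

    FLAGS = flags.FLAGS

    vpns = [manager.Vpn.create("vpnuser%s" % i)
            for i in xrange(manager.Vpn.num_ports_for_ip(FLAGS.vpn_ip))]
    try:
        manager.Vpn.create("one-too-many")   # no ports left on vpn_ip
    except manager.NoMorePorts:
        pass
    for vpn in vpns:
        vpn.destroy()
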

View File

@@ -27,8 +27,8 @@ from xml.etree import ElementTree
from nova import flags
from nova import rpc
from nova import test
from nova.auth import users
from nova.compute import node
from nova.auth import manager
from nova.compute import service
from nova.endpoint import api
from nova.endpoint import cloud
@@ -39,9 +39,8 @@ FLAGS = flags.FLAGS
class CloudTestCase(test.BaseTestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_storage=True,
fake_users=True)
self.flags(connection_type='fake',
fake_storage=True)
self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
@@ -53,37 +52,37 @@ class CloudTestCase(test.BaseTestCase):
proxy=self.cloud)
self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
# set up a node
self.node = node.Node()
self.node_consumer = rpc.AdapterConsumer(connection=self.conn,
# set up a service
self.compute = service.ComputeService()
self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
proxy=self.node)
self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop))
proxy=self.compute)
self.injected.append(self.compute_consumer.attach_to_tornado(self.ioloop))
try:
users.UserManager.instance().create_user('admin', 'admin', 'admin')
manager.AuthManager().create_user('admin', 'admin', 'admin')
except: pass
admin = users.UserManager.instance().get_user('admin')
project = users.UserManager.instance().create_project('proj', 'admin', 'proj')
admin = manager.AuthManager().get_user('admin')
project = manager.AuthManager().create_project('proj', 'admin', 'proj')
self.context = api.APIRequestContext(handler=None,project=project,user=admin)
def tearDown(self):
users.UserManager.instance().delete_project('proj')
users.UserManager.instance().delete_user('admin')
manager.AuthManager().delete_project('proj')
manager.AuthManager().delete_user('admin')
def test_console_output(self):
if FLAGS.fake_libvirt:
if FLAGS.connection_type == 'fake':
logging.debug("Can't test instances without a real virtual env.")
return
instance_id = 'foo'
inst = yield self.node.run_instance(instance_id)
inst = yield self.compute.run_instance(instance_id)
output = yield self.cloud.get_console_output(self.context, [instance_id])
logging.debug(output)
self.assert_(output)
rv = yield self.node.terminate_instance(instance_id)
rv = yield self.compute.terminate_instance(instance_id)
def test_run_instances(self):
if FLAGS.fake_libvirt:
if FLAGS.connection_type == 'fake':
logging.debug("Can't test instances without a real virtual env.")
return
image_id = FLAGS.default_image
@@ -104,7 +103,7 @@ class CloudTestCase(test.BaseTestCase):
break
self.assert_(rv)
if not FLAGS.fake_libvirt:
if FLAGS.connection_type != 'fake':
time.sleep(45) # Should use boto for polling here
for reservations in rv['reservationSet']:
# for res_id in reservations.keys():
@@ -112,7 +111,7 @@ class CloudTestCase(test.BaseTestCase):
# for instance in reservations[res_id]:
for instance in reservations[reservations.keys()[0]]:
logging.debug("Terminating instance %s" % instance['instance_id'])
rv = yield self.node.terminate_instance(instance['instance_id'])
rv = yield self.compute.terminate_instance(instance['instance_id'])
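
The consumer wiring in setUp is the same pattern the daemons use. A bare sketch outside the test harness, assuming the tornado IOLoop singleton and a reachable message broker:

    from tornado import ioloop

    from nova import flags
    from nova import rpc
    from nova.compute import service

    FLAGS = flags.FLAGS

    compute = service.ComputeService()
    consumer = rpc.AdapterConsumer(connection=rpc.Connection.instance(),
                                   topic=FLAGS.compute_topic,
                                   proxy=compute)
    # Messages on the 'compute' topic are dispatched to methods on the proxy
    # (run_instance, terminate_instance, ...).
    consumer.attach_to_tornado(ioloop.IOLoop.instance())
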
def test_instance_update_state(self):
def instance(num):

View File

@@ -26,7 +26,7 @@ from nova import flags
from nova import test
from nova import utils
from nova.compute import model
from nova.compute import node
from nova.compute import service
FLAGS = flags.FLAGS
@@ -53,14 +53,13 @@ class InstanceXmlTestCase(test.TrialTestCase):
# rv = yield first_node.terminate_instance(instance_id)
class NodeConnectionTestCase(test.TrialTestCase):
class ComputeConnectionTestCase(test.TrialTestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(NodeConnectionTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_storage=True,
fake_users=True)
self.node = node.Node()
super(ComputeConnectionTestCase, self).setUp()
self.flags(connection_type='fake',
fake_storage=True)
self.compute = service.ComputeService()
def create_instance(self):
instdir = model.InstanceDirectory()
@@ -81,48 +80,48 @@ class NodeConnectionTestCase(test.TrialTestCase):
def test_run_describe_terminate(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
rv = yield self.compute.run_instance(instance_id)
rv = yield self.node.describe_instances()
rv = yield self.compute.describe_instances()
logging.info("Running instances: %s", rv)
self.assertEqual(rv[instance_id].name, instance_id)
rv = yield self.node.terminate_instance(instance_id)
rv = yield self.compute.terminate_instance(instance_id)
rv = yield self.node.describe_instances()
rv = yield self.compute.describe_instances()
logging.info("After terminating instances: %s", rv)
self.assertEqual(rv, {})
@defer.inlineCallbacks
def test_reboot(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
rv = yield self.compute.run_instance(instance_id)
rv = yield self.node.describe_instances()
rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
yield self.node.reboot_instance(instance_id)
yield self.compute.reboot_instance(instance_id)
rv = yield self.node.describe_instances()
rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
rv = yield self.node.terminate_instance(instance_id)
rv = yield self.compute.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_console_output(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
rv = yield self.compute.run_instance(instance_id)
console = yield self.node.get_console_output(instance_id)
console = yield self.compute.get_console_output(instance_id)
self.assert_(console)
rv = yield self.node.terminate_instance(instance_id)
rv = yield self.compute.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_run_instance_existing(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
rv = yield self.compute.run_instance(instance_id)
rv = yield self.node.describe_instances()
rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
self.assertRaises(exception.Error, self.node.run_instance, instance_id)
rv = yield self.node.terminate_instance(instance_id)
self.assertRaises(exception.Error, self.compute.run_instance, instance_id)
rv = yield self.compute.terminate_instance(instance_id)

View File

@@ -20,9 +20,9 @@ from nova import flags
FLAGS = flags.FLAGS
FLAGS.fake_libvirt = True
FLAGS.connection_type = 'fake'
FLAGS.fake_storage = True
FLAGS.fake_rabbit = True
FLAGS.fake_network = True
FLAGS.fake_users = True
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
FLAGS.verbose = True

View File

@@ -1,75 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import mox
import StringIO
import time
from tornado import ioloop
from twisted.internet import defer
import unittest
from xml.etree import ElementTree
from nova import cloud
from nova import exception
from nova import flags
from nova import node
from nova import rpc
from nova import test
FLAGS = flags.FLAGS
class AdminTestCase(test.BaseTestCase):
def setUp(self):
super(AdminTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_rabbit=True)
self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.INFO)
# set up our cloud
self.cloud = cloud.CloudController()
self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.cloud_topic,
proxy=self.cloud)
self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
# set up a node
self.node = node.Node()
self.node_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
proxy=self.node)
self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop))
def test_flush_terminated(self):
# Launch an instance
# Wait until it's running
# Terminate it
# Wait until it's terminated
# Flush terminated nodes
# ASSERT that it's gone
pass

View File

@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime, timedelta
import logging
import time
from twisted.internet import defer
@@ -25,7 +26,6 @@ from nova import flags
from nova import test
from nova import utils
from nova.compute import model
from nova.compute import node
FLAGS = flags.FLAGS
@@ -34,9 +34,8 @@ FLAGS = flags.FLAGS
class ModelTestCase(test.TrialTestCase):
def setUp(self):
super(ModelTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_storage=True,
fake_users=True)
self.flags(connection_type='fake',
fake_storage=True)
def tearDown(self):
model.Instance('i-test').destroy()
@@ -66,6 +65,12 @@ class ModelTestCase(test.TrialTestCase):
daemon.save()
return daemon
def create_session_token(self):
session_token = model.SessionToken('tk12341234')
session_token['user'] = 'testuser'
session_token.save()
return session_token
@defer.inlineCallbacks
def test_create_instance(self):
"""store with create_instace, then test that a load finds it"""
@@ -204,3 +209,91 @@ class ModelTestCase(test.TrialTestCase):
if x.identifier == 'testhost:nova-testdaemon':
found = True
self.assertTrue(found)
@defer.inlineCallbacks
def test_create_session_token(self):
"""create"""
d = yield self.create_session_token()
d = model.SessionToken(d.token)
self.assertFalse(d.is_new_record())
@defer.inlineCallbacks
def test_delete_session_token(self):
"""create, then destroy, then make sure loads a new record"""
instance = yield self.create_session_token()
yield instance.destroy()
newinst = yield model.SessionToken(instance.token)
self.assertTrue(newinst.is_new_record())
@defer.inlineCallbacks
def test_session_token_added_to_set(self):
"""create, then check that it is included in list"""
instance = yield self.create_session_token()
found = False
for x in model.SessionToken.all():
if x.identifier == instance.token:
found = True
self.assert_(found)
@defer.inlineCallbacks
def test_session_token_associates_user(self):
"""create, then check that it is listed for the user"""
instance = yield self.create_session_token()
found = False
for x in model.SessionToken.associated_to('user', 'testuser'):
if x.identifier == instance.identifier:
found = True
self.assertTrue(found)
@defer.inlineCallbacks
def test_session_token_generation(self):
instance = yield model.SessionToken.generate('username', 'TokenType')
self.assertFalse(instance.is_new_record())
@defer.inlineCallbacks
def test_find_generated_session_token(self):
instance = yield model.SessionToken.generate('username', 'TokenType')
found = yield model.SessionToken.lookup(instance.identifier)
self.assert_(found)
def test_update_session_token_expiry(self):
instance = model.SessionToken('tk12341234')
oldtime = datetime.utcnow()
instance['expiry'] = oldtime.strftime(utils.TIME_FORMAT)
instance.update_expiry()
expiry = utils.parse_isotime(instance['expiry'])
self.assert_(expiry > datetime.utcnow())
@defer.inlineCallbacks
def test_session_token_lookup_when_expired(self):
instance = yield model.SessionToken.generate("testuser")
instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
instance.save()
inst = model.SessionToken.lookup(instance.identifier)
self.assertFalse(inst)
@defer.inlineCallbacks
def test_session_token_lookup_when_not_expired(self):
instance = yield model.SessionToken.generate("testuser")
inst = model.SessionToken.lookup(instance.identifier)
self.assert_(inst)
@defer.inlineCallbacks
def test_session_token_is_expired_when_expired(self):
instance = yield model.SessionToken.generate("testuser")
instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
self.assert_(instance.is_expired())
@defer.inlineCallbacks
def test_session_token_is_expired_when_not_expired(self):
instance = yield model.SessionToken.generate("testuser")
self.assertFalse(instance.is_expired())
@defer.inlineCallbacks
def test_session_token_ttl(self):
instance = yield model.SessionToken.generate("testuser")
now = datetime.utcnow()
delta = timedelta(hours=1)
instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT)
# give 5 seconds of fuzziness
self.assert_(abs(instance.ttl() - FLAGS.auth_token_ttl) < 5)
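
A condensed sketch of the token lifecycle exercised above; 'demo-user' and 'APIToken' are placeholder arguments, and generate/destroy are yielded because the tests treat them as deferreds:

    from twisted.internet import defer

    from nova.compute import model

    @defer.inlineCallbacks
    def demo_session_token():
        token = yield model.SessionToken.generate('demo-user', 'APIToken')
        assert model.SessionToken.lookup(token.identifier)  # falsy once expired
        token.update_expiry()    # push the expiry timestamp forward
        yield token.destroy()
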

View File

@@ -23,7 +23,7 @@ import logging
from nova import flags
from nova import test
from nova import utils
from nova.auth import users
from nova.auth import manager
from nova.compute import network
from nova.compute.exception import NoMoreAddresses
@@ -32,66 +32,71 @@ FLAGS = flags.FLAGS
class NetworkTestCase(test.TrialTestCase):
def setUp(self):
super(NetworkTestCase, self).setUp()
self.flags(fake_libvirt=True,
# NOTE(vish): if you change these flags, make sure to change the
# flags in the corresponding section in nova-dhcpbridge
self.flags(connection_type='fake',
fake_storage=True,
fake_network=True,
auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
network_size=32)
logging.getLogger().setLevel(logging.DEBUG)
self.manager = users.UserManager.instance()
self.manager = manager.AuthManager()
self.dnsmasq = FakeDNSMasq()
try:
self.manager.create_user('netuser', 'netuser', 'netuser')
except: pass
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
self.projects.append(self.manager.create_project('netuser',
'netuser',
'netuser'))
for i in range(0, 6):
name = 'project%s' % i
if not self.manager.get_project(name):
self.manager.create_project(name, 'netuser', name)
self.projects.append(self.manager.create_project(name,
'netuser',
name))
self.network = network.PublicNetworkController()
def tearDown(self):
super(NetworkTestCase, self).tearDown()
for i in range(0, 6):
name = 'project%s' % i
self.manager.delete_project(name)
self.manager.delete_user('netuser')
for project in self.projects:
self.manager.delete_project(project)
self.manager.delete_user(self.user)
def test_public_network_allocation(self):
pubnet = IPy.IP(flags.FLAGS.public_range)
address = self.network.allocate_ip("netuser", "project0", "public")
address = self.network.allocate_ip(self.user.id, self.projects[0].id, "public")
self.assertTrue(IPy.IP(address) in pubnet)
self.assertTrue(IPy.IP(address) in self.network.network)
def test_allocate_deallocate_ip(self):
address = network.allocate_ip(
"netuser", "project0", utils.generate_mac())
self.user.id, self.projects[0].id, utils.generate_mac())
logging.debug("Was allocated %s" % (address))
net = network.get_project_network("project0", "default")
self.assertEqual(True, is_in_project(address, "project0"))
net = network.get_project_network(self.projects[0].id, "default")
self.assertEqual(True, is_in_project(address, self.projects[0].id))
mac = utils.generate_mac()
hostname = "test-host"
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
rv = network.deallocate_ip(address)
# Doesn't go away until it's dhcp released
self.assertEqual(True, is_in_project(address, "project0"))
self.assertEqual(True, is_in_project(address, self.projects[0].id))
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address, "project0"))
self.assertEqual(False, is_in_project(address, self.projects[0].id))
def test_range_allocation(self):
mac = utils.generate_mac()
secondmac = utils.generate_mac()
hostname = "test-host"
address = network.allocate_ip(
"netuser", "project0", mac)
self.user.id, self.projects[0].id, mac)
secondaddress = network.allocate_ip(
"netuser", "project1", secondmac)
net = network.get_project_network("project0", "default")
secondnet = network.get_project_network("project1", "default")
self.user, self.projects[1].id, secondmac)
net = network.get_project_network(self.projects[0].id, "default")
secondnet = network.get_project_network(self.projects[1].id, "default")
self.assertEqual(True, is_in_project(address, "project0"))
self.assertEqual(True, is_in_project(secondaddress, "project1"))
self.assertEqual(False, is_in_project(address, "project1"))
self.assertEqual(True, is_in_project(address, self.projects[0].id))
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
self.assertEqual(False, is_in_project(address, self.projects[1].id))
# Addresses are allocated before they're issued
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
@@ -100,34 +105,34 @@ class NetworkTestCase(test.TrialTestCase):
rv = network.deallocate_ip(address)
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address, "project0"))
self.assertEqual(False, is_in_project(address, self.projects[0].id))
# First address release shouldn't affect the second
self.assertEqual(True, is_in_project(secondaddress, "project1"))
self.assertEqual(True, is_in_project(secondaddress, self.projects[1].id))
rv = network.deallocate_ip(secondaddress)
self.dnsmasq.release_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
self.assertEqual(False, is_in_project(secondaddress, "project1"))
self.assertEqual(False, is_in_project(secondaddress, self.projects[1].id))
def test_subnet_edge(self):
secondaddress = network.allocate_ip("netuser", "project0",
secondaddress = network.allocate_ip(self.user.id, self.projects[0].id,
utils.generate_mac())
hostname = "toomany-hosts"
for project in range(1,5):
project_id = "project%s" % (project)
for i in range(1,5):
project_id = self.projects[i].id
mac = utils.generate_mac()
mac2 = utils.generate_mac()
mac3 = utils.generate_mac()
address = network.allocate_ip(
"netuser", project_id, mac)
self.user, project_id, mac)
address2 = network.allocate_ip(
"netuser", project_id, mac2)
self.user, project_id, mac2)
address3 = network.allocate_ip(
"netuser", project_id, mac3)
self.assertEqual(False, is_in_project(address, "project0"))
self.assertEqual(False, is_in_project(address2, "project0"))
self.assertEqual(False, is_in_project(address3, "project0"))
self.user, project_id, mac3)
self.assertEqual(False, is_in_project(address, self.projects[0].id))
self.assertEqual(False, is_in_project(address2, self.projects[0].id))
self.assertEqual(False, is_in_project(address3, self.projects[0].id))
rv = network.deallocate_ip(address)
rv = network.deallocate_ip(address2)
rv = network.deallocate_ip(address3)
@@ -135,7 +140,7 @@ class NetworkTestCase(test.TrialTestCase):
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
net = network.get_project_network("project0", "default")
net = network.get_project_network(self.projects[0].id, "default")
rv = network.deallocate_ip(secondaddress)
self.dnsmasq.release_ip(mac, secondaddress, hostname, net.bridge_name)
@@ -153,34 +158,36 @@ class NetworkTestCase(test.TrialTestCase):
environment's setup.
Network size is set in test fixture's setUp method.
There are FLAGS.cnt_vpn_clients addresses reserved for VPN (NUM_RESERVED_VPN_IPS)
And there are NUM_STATIC_IPS that are always reserved by Nova for the necessary
services (gateway, CloudPipe, etc)
So we should get flags.network_size - (NUM_STATIC_IPS +
NUM_PREALLOCATED_IPS +
So we should get flags.network_size - (NUM_STATIC_IPS +
NUM_PREALLOCATED_IPS +
NUM_RESERVED_VPN_IPS)
usable addresses
"""
net = network.get_project_network("project0", "default")
net = network.get_project_network(self.projects[0].id, "default")
# Determine expected number of available IP addresses
num_static_ips = net.num_static_ips
num_preallocated_ips = len(net.hosts.keys())
num_reserved_vpn_ips = flags.FLAGS.cnt_vpn_clients
num_available_ips = flags.FLAGS.network_size - (num_static_ips + num_preallocated_ips + num_reserved_vpn_ips)
num_available_ips = flags.FLAGS.network_size - (num_static_ips +
num_preallocated_ips +
num_reserved_vpn_ips)
hostname = "toomany-hosts"
macs = {}
addresses = {}
for i in range(0, (num_available_ips - 1)):
macs[i] = utils.generate_mac()
addresses[i] = network.allocate_ip("netuser", "project0", macs[i])
addresses[i] = network.allocate_ip(self.user.id, self.projects[0].id, macs[i])
self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
self.assertRaises(NoMoreAddresses, network.allocate_ip, "netuser", "project0", utils.generate_mac())
self.assertRaises(NoMoreAddresses, network.allocate_ip, self.user.id, self.projects[0].id, utils.generate_mac())
for i in range(0, (num_available_ips - 1)):
rv = network.deallocate_ip(addresses[i])
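
The allocation dance the tests go through reduces to a few module-level calls; the user and project ids below are placeholders, and release only completes once dnsmasq reports the lease gone:

    from nova import utils
    from nova.compute import network

    mac = utils.generate_mac()
    address = network.allocate_ip('someuser', 'someproject', mac)
    net = network.get_project_network('someproject', 'default')
    print address, net.bridge_name
    network.deallocate_ip(address)
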

View File

@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import boto
import glob
import hashlib
import logging
@@ -25,9 +26,16 @@ import tempfile
from nova import flags
from nova import objectstore
from nova.objectstore import bucket # for buckets_path flag
from nova.objectstore import image # for images_path flag
from nova import test
from nova.auth import users
from nova.auth import manager
from nova.objectstore.handler import S3
from nova.exception import NotEmpty, NotFound, NotAuthorized
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
from twisted.internet import reactor, threads, defer
from twisted.web import http, server
FLAGS = flags.FLAGS
@@ -51,13 +59,12 @@ os.makedirs(os.path.join(oss_tempdir, 'buckets'))
class ObjectStoreTestCase(test.BaseTestCase):
def setUp(self):
super(ObjectStoreTestCase, self).setUp()
self.flags(fake_users=True,
buckets_path=os.path.join(oss_tempdir, 'buckets'),
self.flags(buckets_path=os.path.join(oss_tempdir, 'buckets'),
images_path=os.path.join(oss_tempdir, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
logging.getLogger().setLevel(logging.DEBUG)
self.um = users.UserManager.instance()
self.um = manager.AuthManager()
try:
self.um.create_user('user1')
except: pass
@@ -96,49 +103,37 @@ class ObjectStoreTestCase(test.BaseTestCase):
# another user is not authorized
self.context.user = self.um.get_user('user2')
self.context.project = self.um.get_project('proj2')
self.assert_(bucket.is_authorized(self.context) == False)
self.assertFalse(bucket.is_authorized(self.context))
# admin is authorized to use bucket
self.context.user = self.um.get_user('admin_user')
self.context.project = None
self.assert_(bucket.is_authorized(self.context))
self.assertTrue(bucket.is_authorized(self.context))
# new buckets are empty
self.assert_(bucket.list_keys()['Contents'] == [])
self.assertTrue(bucket.list_keys()['Contents'] == [])
# storing keys works
bucket['foo'] = "bar"
self.assert_(len(bucket.list_keys()['Contents']) == 1)
self.assertEquals(len(bucket.list_keys()['Contents']), 1)
self.assert_(bucket['foo'].read() == 'bar')
self.assertEquals(bucket['foo'].read(), 'bar')
# md5 of key works
self.assert_(bucket['foo'].md5 == hashlib.md5('bar').hexdigest())
self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest())
# deleting non-empty bucket throws exception
exception = False
try:
bucket.delete()
except:
exception = True
self.assert_(exception)
# deleting non-empty bucket should throw a NotEmpty exception
self.assertRaises(NotEmpty, bucket.delete)
# deleting key
del bucket['foo']
# deleting empty button
# deleting empty bucket
bucket.delete()
# accessing deleted bucket throws exception
exception = False
try:
objectstore.bucket.Bucket('new_bucket')
except:
exception = True
self.assert_(exception)
self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')
def test_images(self):
self.context.user = self.um.get_user('user1')
@@ -167,37 +162,108 @@ class ObjectStoreTestCase(test.BaseTestCase):
# verify image permissions
self.context.user = self.um.get_user('user2')
self.context.project = self.um.get_project('proj2')
self.assert_(my_img.is_authorized(self.context) == False)
self.assertFalse(my_img.is_authorized(self.context))
# class ApiObjectStoreTestCase(test.BaseTestCase):
# def setUp(self):
# super(ApiObjectStoreTestCase, self).setUp()
# FLAGS.fake_users = True
# FLAGS.buckets_path = os.path.join(tempdir, 'buckets')
# FLAGS.images_path = os.path.join(tempdir, 'images')
# FLAGS.ca_path = os.path.join(os.path.dirname(__file__), 'CA')
#
# self.users = users.UserManager.instance()
# self.app = handler.Application(self.users)
#
# self.host = '127.0.0.1'
#
# self.conn = boto.s3.connection.S3Connection(
# aws_access_key_id=user.access,
# aws_secret_access_key=user.secret,
# is_secure=False,
# calling_format=boto.s3.connection.OrdinaryCallingFormat(),
# port=FLAGS.s3_port,
# host=FLAGS.s3_host)
#
# self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
#
# def tearDown(self):
# FLAGS.Reset()
# super(ApiObjectStoreTestCase, self).tearDown()
#
# def test_describe_instances(self):
# self.expect_http()
# self.mox.ReplayAll()
#
# self.assertEqual(self.ec2.get_all_instances(), [])
class TestHTTPChannel(http.HTTPChannel):
# Otherwise we end up with an unclean reactor
def checkPersistence(self, _, __):
return False
class TestSite(server.Site):
protocol = TestHTTPChannel
class S3APITestCase(test.TrialTestCase):
def setUp(self):
super(S3APITestCase, self).setUp()
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
FLAGS.buckets_path = os.path.join(oss_tempdir, 'buckets')
self.um = manager.AuthManager()
self.admin_user = self.um.create_user('admin', admin=True)
self.admin_project = self.um.create_project('admin', self.admin_user)
shutil.rmtree(FLAGS.buckets_path)
os.mkdir(FLAGS.buckets_path)
root = S3()
self.site = TestSite(root)
self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1')
self.tcp_port = self.listening_port.getHost().port
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.set('Boto', 'num_retries', '0')
self.conn = S3Connection(aws_access_key_id=self.admin_user.access,
aws_secret_access_key=self.admin_user.secret,
host='127.0.0.1',
port=self.tcp_port,
is_secure=False,
calling_format=OrdinaryCallingFormat())
# Don't attempt to reuse connections
def get_http_connection(host, is_secure):
return self.conn.new_http_connection(host, is_secure)
self.conn.get_http_connection = get_http_connection
def _ensure_empty_list(self, l):
self.assertEquals(len(l), 0, "List was not empty")
return True
def _ensure_only_bucket(self, l, name):
self.assertEquals(len(l), 1, "List didn't have exactly one element in it")
self.assertEquals(l[0].name, name, "Wrong name")
def test_000_list_buckets(self):
d = threads.deferToThread(self.conn.get_all_buckets)
d.addCallback(self._ensure_empty_list)
return d
def test_001_create_and_delete_bucket(self):
bucket_name = 'testbucket'
d = threads.deferToThread(self.conn.create_bucket, bucket_name)
d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets))
def ensure_only_bucket(l, name):
self.assertEquals(len(l), 1, "List didn't have exactly one element in it")
self.assertEquals(l[0].name, name, "Wrong name")
d.addCallback(ensure_only_bucket, bucket_name)
d.addCallback(lambda _:threads.deferToThread(self.conn.delete_bucket, bucket_name))
d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets))
d.addCallback(self._ensure_empty_list)
return d
def test_002_create_bucket_and_key_and_delete_key_again(self):
bucket_name = 'testbucket'
key_name = 'somekey'
key_contents = 'somekey'
d = threads.deferToThread(self.conn.create_bucket, bucket_name)
d.addCallback(lambda b:threads.deferToThread(b.new_key, key_name))
d.addCallback(lambda k:threads.deferToThread(k.set_contents_from_string, key_contents))
def ensure_key_contents(bucket_name, key_name, contents):
bucket = self.conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
self.assertEquals(key.get_contents_as_string(), contents, "Bad contents")
d.addCallback(lambda _:threads.deferToThread(ensure_key_contents, bucket_name, key_name, key_contents))
def delete_key(bucket_name, key_name):
bucket = self.conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.delete()
d.addCallback(lambda _:threads.deferToThread(delete_key, bucket_name, key_name))
d.addCallback(lambda _:threads.deferToThread(self.conn.get_bucket, bucket_name))
d.addCallback(lambda b:threads.deferToThread(b.get_all_keys))
d.addCallback(self._ensure_empty_list)
return d
def tearDown(self):
self.um.delete_user('admin')
self.um.delete_project('admin')
return defer.DeferredList([defer.maybeDeferred(self.listening_port.stopListening)])
super(S3APITestCase, self).tearDown()
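
Talking to the objectstore from plain boto follows the same setup as the fixture above; the credentials below are placeholders and 3333 is simply the default s3_port flag:

    from boto.s3.connection import S3Connection, OrdinaryCallingFormat

    conn = S3Connection(aws_access_key_id='admin-access',
                        aws_secret_access_key='admin-secret',
                        host='127.0.0.1', port=3333, is_secure=False,
                        calling_format=OrdinaryCallingFormat())
    bucket = conn.create_bucket('demobucket')
    bucket.new_key('hello').set_contents_from_string('world')
    print bucket.get_key('hello').get_contents_as_string()
    bucket.get_key('hello').delete()
    conn.delete_bucket('demobucket')
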

View File

@@ -120,10 +120,3 @@ class ProcessTestCase(test.TrialTestCase):
pool2 = process.SharedPool()
self.assertEqual(id(pool1._instance), id(pool2._instance))
def test_shared_pool_works_as_singleton(self):
d1 = process.simple_execute('sleep 1')
d2 = process.simple_execute('sleep 0.005')
# lp609749: would have failed with
# exceptions.AssertionError: Someone released me too many times:
# too many tokens!
return d1

View File

@@ -20,9 +20,8 @@ from nova import flags
FLAGS = flags.FLAGS
FLAGS.fake_libvirt = False
FLAGS.connection_type = 'libvirt'
FLAGS.fake_storage = False
FLAGS.fake_rabbit = False
FLAGS.fake_network = False
FLAGS.fake_users = False
FLAGS.verbose = False

View File

@@ -34,7 +34,7 @@ class StorageTestCase(test.TrialTestCase):
super(StorageTestCase, self).setUp()
self.mynode = node.Node()
self.mystorage = None
self.flags(fake_libvirt=True,
self.flags(connection_type='fake',
fake_storage=True)
self.mystorage = storage.BlockStore()

View File

@@ -0,0 +1,115 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from nova import compute
from nova import exception
from nova import flags
from nova import test
from nova.volume import service as volume_service
FLAGS = flags.FLAGS
class VolumeTestCase(test.TrialTestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
self.compute = compute.service.ComputeService()
self.volume = None
self.flags(connection_type='fake',
fake_storage=True)
self.volume = volume_service.VolumeService()
def test_run_create_volume(self):
vol_size = '0'
user_id = 'fake'
project_id = 'fake'
volume_id = self.volume.create_volume(vol_size, user_id, project_id)
# TODO(termie): get_volume returns differently than create_volume
self.assertEqual(volume_id,
volume_service.get_volume(volume_id)['volume_id'])
rv = self.volume.delete_volume(volume_id)
self.assertRaises(exception.Error,
volume_service.get_volume,
volume_id)
def test_too_big_volume(self):
vol_size = '1001'
user_id = 'fake'
project_id = 'fake'
self.assertRaises(TypeError,
self.volume.create_volume,
vol_size, user_id, project_id)
def test_too_many_volumes(self):
vol_size = '1'
user_id = 'fake'
project_id = 'fake'
num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
total_slots = FLAGS.slots_per_shelf * num_shelves
vols = []
for i in xrange(total_slots):
vid = self.volume.create_volume(vol_size, user_id, project_id)
vols.append(vid)
self.assertRaises(volume_service.NoMoreVolumes,
self.volume.create_volume,
vol_size, user_id, project_id)
for id in vols:
self.volume.delete_volume(id)
# The yields below need inlineCallbacks so trial waits on each Deferred.
@defer.inlineCallbacks
def test_run_attach_detach_volume(self):
# Create one volume and one compute to test with
instance_id = "storage-test"
vol_size = "5"
user_id = "fake"
project_id = 'fake'
mountpoint = "/dev/sdf"
volume_id = self.volume.create_volume(vol_size, user_id, project_id)
volume_obj = volume_service.get_volume(volume_id)
volume_obj.start_attach(instance_id, mountpoint)
rv = yield self.compute.attach_volume(volume_id,
instance_id,
mountpoint)
self.assertEqual(volume_obj['status'], "in-use")
self.assertEqual(volume_obj['attachStatus'], "attached")
self.assertEqual(volume_obj['instance_id'], instance_id)
self.assertEqual(volume_obj['mountpoint'], mountpoint)
self.assertRaises(exception.Error,
self.volume.delete_volume,
volume_id)
rv = yield self.volume.detach_volume(volume_id)
volume_obj = volume_service.get_volume(volume_id)
self.assertEqual(volume_obj['status'], "available")
rv = self.volume.delete_volume(volume_id)
self.assertRaises(exception.Error,
volume_service.get_volume,
volume_id)
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
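The attach/detach test above is written in Twisted's inlineCallbacks style: each yield hands a Deferred back to trial and resumes with its result once it fires. A stripped-down sketch of that control flow, with a hypothetical stub in place of the real compute service:

from twisted.internet import defer

def fake_attach_volume(volume_id, instance_id, mountpoint):
    # Stand-in for compute.attach_volume: returns an already-fired Deferred.
    return defer.succeed({'volume_id': volume_id,
                          'instance_id': instance_id,
                          'mountpoint': mountpoint,
                          'status': 'in-use'})

@defer.inlineCallbacks
def attach_then_check(volume_id):
    # Each yield suspends here until the Deferred fires, then resumes
    # with its result -- which is why the test method needs the decorator.
    result = yield fake_attach_volume(volume_id, 'storage-test', '/dev/sdf')
    assert result['status'] == 'in-use'
    defer.returnValue(result)

if __name__ == '__main__':
    def show(result):
        print('attached at %s' % result['mountpoint'])
    attach_then_check('vol-00000001').addCallback(show)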

View File

@@ -22,7 +22,6 @@ manage pid files and support syslogging.
"""
import logging
import logging.handlers
import os
import signal
import sys
@@ -32,7 +31,6 @@ from twisted.python import log
from twisted.python import reflect
from twisted.python import runtime
from twisted.python import usage
import UserDict
from nova import flags
@@ -161,6 +159,13 @@ def WrapTwistedOptions(wrapped):
except (AttributeError, KeyError):
self._data[key] = value
def get(self, key, default):
key = key.replace('-', '_')
try:
return getattr(FLAGS, key)
except (AttributeError, KeyError):
return self._data.get(key, default)
return TwistedOptionsToFlags
@@ -209,9 +214,14 @@ def serve(filename):
FLAGS.pidfile = '%s.pid' % name
elif FLAGS.pidfile.endswith('twistd.pid'):
FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
if not FLAGS.logfile:
FLAGS.logfile = '%s.log' % name
elif FLAGS.logfile.endswith('twistd.log'):
FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name)
if not FLAGS.prefix:
FLAGS.prefix = name
elif FLAGS.prefix.endswith('twisted'):
FLAGS.prefix = FLAGS.prefix.replace('twisted', name)
action = 'start'
if len(argv) > 1:
@@ -228,8 +238,16 @@ def serve(filename):
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
formatter = logging.Formatter(
name + '(%(name)s): %(levelname)s %(message)s')
class NoNewlineFormatter(logging.Formatter):
"""Strips newlines from default formatter"""
def format(self, record):
"""Grabs default formatter's output and strips newlines"""
data = logging.Formatter.format(self, record)
return data.replace("\n", "--")
# NOTE(vish): syslog-ng doesn't handle newlines from tracebacks very well
formatter = NoNewlineFormatter(
'(%(name)s): %(levelname)s %(message)s')
handler = logging.StreamHandler(log.StdioOnnaStick())
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
@@ -239,11 +257,6 @@ def serve(filename):
else:
logging.getLogger().setLevel(logging.WARNING)
if FLAGS.syslog:
syslog = logging.handlers.SysLogHandler(address='/dev/log')
syslog.setFormatter(formatter)
logging.getLogger().addHandler(syslog)
logging.debug("Full set of FLAGS:")
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))

View File

@@ -20,7 +20,7 @@
System-level utilities and helper functions.
"""
from datetime import datetime
from datetime import datetime, timedelta
import inspect
import logging
import os
@@ -29,10 +29,20 @@ import subprocess
import socket
import sys
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, AttributeError):
raise exception.NotFound('Class %s cannot be found' % class_str)
def fetchfile(url, target):
logging.debug("Fetching %s" % url)
@@ -118,4 +128,7 @@ def get_my_ip():
def isotime(at=None):
if not at:
at = datetime.utcnow()
return at.strftime("%Y-%m-%dT%H:%M:%SZ")
return at.strftime(TIME_FORMAT)
def parse_isotime(timestr):
return datetime.strptime(timestr, TIME_FORMAT)
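Rough usage of the two helpers this hunk introduces: import_class resolves a dotted path to a class object, and parse_isotime is the inverse of isotime. The sketch below repeats their logic against stdlib names only, so it runs without the rest of nova:

import sys
from datetime import datetime

TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"

def import_class(import_str):
    # Same idea as nova.utils.import_class: 'pkg.module.Class' -> Class.
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)
    return getattr(sys.modules[mod_str], class_str)

def isotime(at=None):
    if not at:
        at = datetime.utcnow()
    return at.strftime(TIME_FORMAT)

def parse_isotime(timestr):
    return datetime.strptime(timestr, TIME_FORMAT)

if __name__ == '__main__':
    OrderedDict = import_class('collections.OrderedDict')
    print(OrderedDict([('a', 1)]))
    stamp = isotime()
    # Round-trip: parse_isotime(isotime(t)) loses only sub-second precision.
    print(parse_isotime(stamp))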

View File

@@ -50,16 +50,16 @@ from nova import flags
from nova import twistd
from nova.tests.access_unittest import *
from nova.tests.auth_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.compute_unittest import *
from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.node_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
from nova.tests.storage_unittest import *
from nova.tests.users_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.volume_unittest import *
FLAGS = flags.FLAGS

13
run_tests.sh Executable file
View File

@@ -0,0 +1,13 @@
#!/bin/bash
venv=.nova-venv
with_venv=tools/with_venv.sh
if [ -e ${venv} ]; then
${with_venv} python run_tests.py
else
echo "You need to install the Nova virtualenv before you can run this."
echo ""
echo "Please run tools/install_venv.py"
exit 1
fi
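For comparison only (this file is not part of the tree), the same gate expressed in Python: run the suite through tools/with_venv.sh when .nova-venv exists, otherwise point at the installer.

import os
import subprocess
import sys

VENV = '.nova-venv'
WITH_VENV = os.path.join('tools', 'with_venv.sh')

def main(argv):
    if os.path.exists(VENV):
        # Hand off to the wrapper so run_tests.py sees the virtualenv's python.
        return subprocess.call([WITH_VENV, 'python', 'run_tests.py'] + argv)
    print('You need to install the Nova virtualenv before you can run this.')
    print('Please run tools/install_venv.py')
    return 1

if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))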

View File

@@ -1,4 +1,10 @@
[build_sphinx]
source-dir = docs
build-dir = docs/_build
all_files = 1
all_files = 1
build-dir = doc/build
source-dir = doc/source
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

View File

@@ -16,20 +16,41 @@
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.sdist import sdist
srcdir = os.path.join(os.path.dirname(sys.argv[0]), 'src')
import os
import subprocess
class local_sdist(sdist):
"""Customized sdist hook - builds the ChangeLog file from VC first"""
def run(self):
if os.path.isdir('.bzr'):
# We're in a bzr branch
log_cmd = subprocess.Popen(["bzr", "log", "--gnu"],
stdout=subprocess.PIPE)
changelog = log_cmd.communicate()[0]
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(changelog)
sdist.run(self)
setup(name='nova',
version='0.3.0',
description='None Other, Vaguely Awesome',
author='nova-core',
author_email='nova-core@googlegroups.com',
url='http://novacc.org/',
packages = find_packages(),
)
version='0.9.1',
description='cloud computing fabric controller',
author='OpenStack',
author_email='nova@lists.launchpad.net',
url='http://www.openstack.org/',
cmdclass={'sdist': local_sdist},
packages=find_packages(exclude=['bin', 'smoketests']),
scripts=['bin/nova-api',
'bin/nova-compute',
'bin/nova-dhcpbridge',
'bin/nova-import-canonical-imagestore',
'bin/nova-instancemonitor',
'bin/nova-manage',
'bin/nova-network',
'bin/nova-objectstore',
'bin/nova-rsapi',
'bin/nova-volume'])
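The local_sdist hook above regenerates ChangeLog from bzr metadata before the tarball is rolled. Ported to a git checkout the same hook would look roughly like this; the package name and the choice of git log flags are assumptions for illustration, not part of this commit:

import os
import subprocess

from setuptools import setup, find_packages
from setuptools.command.sdist import sdist

class local_sdist(sdist):
    """Rebuild ChangeLog from version control before packaging."""
    def run(self):
        if os.path.isdir('.git'):
            # Capture the history and write it out as the ChangeLog file.
            log_cmd = subprocess.Popen(['git', 'log'],
                                       stdout=subprocess.PIPE)
            changelog = log_cmd.communicate()[0]
            with open('ChangeLog', 'wb') as changelog_file:
                changelog_file.write(changelog)
        sdist.run(self)

setup(name='example-fabric-controller',
      version='0.0.1',
      cmdclass={'sdist': local_sdist},
      packages=find_packages())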