fix merge errors

This commit is contained in:
Todd Willey
2010-07-15 00:21:17 -04:00
41 changed files with 847 additions and 190 deletions

3
README
View File

@@ -15,3 +15,6 @@ To dissect it in detail: visit http://github.com/nova/cc
To taunt it with its weaknesses: use http://github.com/nova/cc/issues
To hack at it: read HACKING
To watch it: http://test.novacc.org/waterfall

92
bin/dhcpleasor.py Executable file
View File

@@ -0,0 +1,92 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 Anso Labs, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
dhcpleasor.py
Handle lease database updates from DHCP servers.
"""
import sys
import os
import logging
sys.path.append(os.path.abspath(os.path.join(__file__, "../../")))
logging.debug(sys.path)
import getopt
from os import environ
from nova.compute import linux_net
from nova.compute import network
from nova import rpc
from nova import flags
FLAGS = flags.FLAGS
def add_lease(mac, ip, hostname, interface):
    """Record a newly issued DHCP lease for *ip*.

    When running with a fake rabbit (test mode) the lease is applied
    directly via the network module; otherwise the request is cast over
    RPC to the cloud topic for the controller to handle.
    """
    if not FLAGS.fake_rabbit:
        rpc.cast(FLAGS.cloud_topic,
                 {"method": "lease_ip",
                  "args": {"address": ip}})
    else:
        network.lease_ip(ip)
def old_lease(mac, ip, hostname, interface):
    """Handle dnsmasq re-reporting an existing lease; nothing to update."""
    logging.debug("Adopted old lease or got a change of mac/hostname")
def del_lease(mac, ip, hostname, interface):
    """Release a DHCP lease that dnsmasq has expired or removed.

    Mirrors add_lease: applied directly in fake-rabbit (test) mode,
    otherwise cast over RPC to the cloud topic.
    """
    if not FLAGS.fake_rabbit:
        rpc.cast(FLAGS.cloud_topic,
                 {"method": "release_ip",
                  "args": {"address": ip}})
    else:
        network.release_ip(ip)
def init_leases(interface):
    """Build the initial dnsmasq host-configuration block for *interface*.

    Looks up the network attached to the interface and returns one
    hostDHCP line per known host, each newline-terminated.
    """
    net = network.get_network_by_interface(interface)
    # join is linear; the previous += loop re-copied the string each pass
    return "".join("%s\n" % linux_net.hostDHCP(net, host_name,
                                               net.hosts[host_name])
                   for host_name in net.hosts)
def main(argv=None):
if argv is None:
argv = sys.argv
interface = environ.get('DNSMASQ_INTERFACE', 'br0')
if int(environ.get('TESTING', '0')):
FLAGS.fake_rabbit = True
FLAGS.redis_db = 8
FLAGS.network_size = 32
FLAGS.fake_libvirt=True
FLAGS.fake_network=True
FLAGS.fake_users = True
action = argv[1]
if action in ['add','del','old']:
mac = argv[2]
ip = argv[3]
hostname = argv[4]
logging.debug("Called %s for mac %s with ip %s and hostname %s on interface %s" % (action, mac, ip, hostname, interface))
globals()[action+'_lease'](mac, ip, hostname, interface)
else:
print init_leases(interface)
exit(0)
if __name__ == "__main__":
sys.exit(main())

View File

@@ -20,7 +20,7 @@
# under the License.
"""
Tornado daemon for the main API endpoint.
Tornado daemon for the main API endpoint.
"""
import logging
@@ -43,13 +43,11 @@ FLAGS = flags.FLAGS
def main(_argv):
user_manager = users.UserManager()
host_manager = model.Host
controllers = {
'Cloud': cloud.CloudController(),
'Admin': admin.AdminController(user_manager, host_manager)
'Admin': admin.AdminController()
}
_app = api.APIServerApplication(user_manager, controllers)
_app = api.APIServerApplication(controllers)
conn = rpc.Connection.instance()
consumer = rpc.AdapterConsumer(connection=conn,

View File

@@ -75,7 +75,8 @@ def main():
topic='%s.%s' % (FLAGS.compute_topic, FLAGS.node_name),
proxy=n)
pulse = task.LoopingCall(n.report_state, FLAGS.node_name, 'nova-compute')
bin_name = os.path.basename(__file__)
pulse = task.LoopingCall(n.report_state, FLAGS.node_name, bin_name)
pulse.start(interval=FLAGS.node_report_state_interval, now=False)
injected = consumer_all.attach_to_twisted()
@@ -83,7 +84,7 @@ def main():
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals below
application = service.Application('nova-compute')
application = service.Application(bin_name)
n.setServiceParent(application)
return application

View File

@@ -62,6 +62,8 @@ class VpnCommands(object):
net = 'up'
else:
net = 'down'
print vpn['private_dns_name'],
print vpn['node_name'],
print vpn['instance_id'],
print vpn['state_description'],
print net
@@ -218,7 +220,7 @@ def methods_of(obj):
if __name__ == '__main__':
utils.default_flagfile()
utils.default_flagfile('/etc/nova/nova-manage.conf')
argv = FLAGS(sys.argv)
script_name = argv.pop(0)
if len(argv) < 1:

51
debian/changelog vendored
View File

@@ -1,3 +1,54 @@
nova (0.2.3-1) UNRELEASED; urgency=low
* Relax the Twisted dependency to python-twisted-core (rather than the
full stack).
* Move nova related configuration files into /etc/nova/.
* Add a dependency on nginx from nova-objectsstore and install a
suitable configuration file.
* Ship the CA directory in nova-common.
* Add a default flag file for nova-manage to help it find the CA.
* If set, pass KernelId and RamdiskId from RunInstances call to the
target compute node.
* Added --network_path setting to nova-compute's flagfile.
* Move templates from python directories to /usr/share/nova.
* Add debian/nova-common.dirs to create
var/lib/nova/{buckets,CA,images,instances,keys,networks}
* Don't pass --daemonize=1 to nova-compute. It's already daemonising
by default.
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 14 Jul 2010 12:00:00 -0700
nova (0.2.2-10) UNRELEASED; urgency=low
* Fixed extra space in vblade-persist
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 13 Jul 2010 19:00:00 -0700
nova (0.2.2-9) UNRELEASED; urgency=low
* Fixed invalid dn bug in ldap for adding roles
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 12 Jul 2010 15:20:00 -0700
nova (0.2.2-8) UNRELEASED; urgency=low
* Added a missing comma
-- Vishvananda Ishaya <vishvananda@gmail.com> Mon, 08 Jul 2010 10:05:00 -0700
nova (0.2.2-7) UNRELEASED; urgency=low
* Missing files from twisted patch
* License updates
* Reformatting/cleanup
* Users/ldap bugfixes
* Merge fixes
* Documentation updates
* Vpn key creation fix
* Multiple shelves for volumes
-- Vishvananda Ishaya <vishvananda@gmail.com> Wed, 07 Jul 2010 18:45:00 -0700
nova (0.2.2-6) UNRELEASED; urgency=low
* Fix to make Key Injection work again

113
debian/control vendored
View File

@@ -3,36 +3,112 @@ Section: net
Priority: extra
Maintainer: Jesse Andrews <jesse@ansolabs.com>
Build-Depends: debhelper (>= 7)
Build-Depends-Indep: python-support
Build-Depends-Indep: python-support, python-setuptools
Standards-Version: 3.8.4
XS-Python-Version: 2.6
Package: nova-common
Architecture: all
Depends: ${python:Depends}, aoetools, vlan, python-ipy, python-boto, python-m2crypto, python-pycurl, python-twisted (>= 10.0.0-2ubuntu2nebula1), python-daemon, python-redis, python-carrot, python-lockfile, python-gflags, python-tornado, ${misc:Depends}
Depends: ${python:Depends}, aoetools, vlan, python-ipy, python-boto, python-m2crypto, python-pycurl, python-twisted-core, python-daemon, python-redis, python-carrot, python-lockfile, python-gflags, python-tornado, ${misc:Depends}
Provides: ${python:Provides}
Conflicts: nova
Description: Nova is a cloud
Description: Nova Cloud Computing - common files
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This package contains things that are needed by all parts of Nova.
Package: nova-compute
Architecture: all
Depends: nova-common (= ${binary:Version}), kpartx, kvm, python-libvirt, libvirt-bin (>= 0.8.1), ${python:Depends}, ${misc:Depends}
Description: Nova compute
Depends: nova-common (= ${binary:Version}), kpartx, kvm, python-libvirt, libvirt-bin (>= 0.7.5), curl, ${python:Depends}, ${misc:Depends}
Description: Nova Cloud Computing - compute node
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This is the package you will install on the nodes that will run your
virtual machines.
Package: nova-volume
Architecture: all
Depends: nova-common (= ${binary:Version}), vblade, vblade-persist, ${python:Depends}, ${misc:Depends}
Description: Nova volume
Description: Nova Cloud Computing - storage
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This is the package you will install on your storage nodes.
Package: nova-api
Architecture: all
Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
Description: Nova api
Description: Nova Cloud Computing - API frontend
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This package provides the API frontend.
Package: nova-objectstore
Architecture: all
Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
Description: Nova object store
Depends: nova-common (= ${binary:Version}), nginx, ${python:Depends}, ${misc:Depends}
Description: Nova Cloud Computing - object store
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This is the package you will install on the nodes that will contain your
object store.
Package: nova-instancemonitor
Architecture: all
@@ -42,4 +118,19 @@ Description: Nova instance monitor
Package: nova-tools
Architecture: all
Depends: python-boto, ${python:Depends}, ${misc:Depends}
Description: CLI tools to access nova
Description: Nova Cloud Computing - management tools
Nova is a cloud computing fabric controller (the main part of an IaaS
system) built to match the popular AWS EC2 and S3 APIs. It is written in
Python, using the Tornado and Twisted frameworks, and relies on the
standard AMQP messaging protocol, and the Redis distributed KVS.
.
Nova is intended to be easy to extend, and adapt. For example, it
currently uses an LDAP server for users and groups, but also includes a
fake LDAP server, that stores data in Redis. It has extensive test
coverage, and uses the Sphinx toolkit (the same as Python itself) for code
and user documentation.
.
While Nova is currently in Beta use within several organizations, the
codebase is very much under active development.
.
This package contains admin tools for Nova.

5
debian/nova-api.conf vendored Normal file
View File

@@ -0,0 +1,5 @@
--daemonize=1
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--fake_users=1
--datastore_path=/var/lib/nova/keeper

View File

@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-api
DAEMON_ARGS="--flagfile=/etc/nova.conf"
DAEMON_ARGS="--flagfile=/etc/nova/nova-api.conf"
PIDFILE=/var/run/nova-api.pid
ENABLED=false
ENABLED=true
if test -f /etc/default/nova-api; then
. /etc/default/nova-api

View File

@@ -1 +1,2 @@
bin/nova-api usr/bin
debian/nova-api.conf etc/nova

11
debian/nova-common.dirs vendored Normal file
View File

@@ -0,0 +1,11 @@
etc/nova
var/lib/nova/buckets
var/lib/nova/CA
var/lib/nova/CA/INTER
var/lib/nova/CA/newcerts
var/lib/nova/CA/private
var/lib/nova/CA/reqs
var/lib/nova/images
var/lib/nova/instances
var/lib/nova/keys
var/lib/nova/networks

View File

@@ -1,5 +1,10 @@
bin/nova-manage usr/bin
nova/auth/novarc.template usr/lib/pymodules/python2.6/nova/auth
nova/cloudpipe/client.ovpn.template usr/lib/pymodules/python2.6/nova/cloudpipe
nova/compute/libvirt.xml.template usr/lib/pymodules/python2.6/nova/compute
debian/nova-manage.conf etc/nova
nova/auth/novarc.template usr/share/nova
nova/cloudpipe/client.ovpn.template usr/share/nova
nova/compute/libvirt.xml.template usr/share/nova
nova/compute/interfaces.template usr/share/nova
usr/lib/python*/*-packages/nova/*
CA/openssl.cnf.tmpl var/lib/nova/CA
CA/geninter.sh var/lib/nova/CA
CA/genrootca.sh var/lib/nova/CA

10
debian/nova-compute.conf vendored Normal file
View File

@@ -0,0 +1,10 @@
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--datastore_path=/var/lib/nova/keeper
--instances_path=/var/lib/nova/instances
--networks_path=/var/lib/nova/networks
--simple_network_template=/usr/share/nova/interfaces.template
--libvirt_xml_template=/usr/share/nova/libvirt.xml.template
--vpn_client_template=/usr/share/nova/client.ovpn.template
--credentials_template=/usr/share/nova/novarc.template
--fake_users=1

View File

@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-compute
DAEMON_ARGS="--flagfile=/etc/nova.conf"
DAEMON_ARGS="--flagfile=/etc/nova/nova-compute.conf"
PIDFILE=/var/run/nova-compute.pid
ENABLED=false
ENABLED=true
if test -f /etc/default/nova-compute; then
. /etc/default/nova-compute

View File

@@ -1 +1,2 @@
bin/nova-compute usr/bin
debian/nova-compute.conf etc/nova

4
debian/nova-manage.conf vendored Normal file
View File

@@ -0,0 +1,4 @@
--ca_path=/var/lib/nova/CA
--credentials_template=/usr/share/nova/novarc.template
--keys_path=/var/lib/nova/keys
--vpn_client_template=/usr/share/nova/client.ovpn.template

7
debian/nova-objectstore.conf vendored Normal file
View File

@@ -0,0 +1,7 @@
--daemonize=1
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--datastore_path=/var/lib/nova/keeper
--fake_users=1
--images_path=/var/lib/nova/images
--buckets_path=/var/lib/nova/buckets

View File

@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-objectstore
DAEMON_ARGS="--flagfile=/etc/nova.conf"
DAEMON_ARGS="--flagfile=/etc/nova/nova-objectstore.conf"
PIDFILE=/var/run/nova-objectstore.pid
ENABLED=false
ENABLED=true
if test -f /etc/default/nova-objectstore; then
. /etc/default/nova-objectstore

View File

@@ -1 +1,3 @@
bin/nova-objectstore usr/bin
debian/nova-objectstore.conf etc/nova
debian/nova-objectstore.nginx.conf etc/nginx/sites-available

1
debian/nova-objectstore.links vendored Normal file
View File

@@ -0,0 +1 @@
/etc/nginx/sites-available/nova-objectstore.nginx.conf /etc/nginx/sites-enabled/nova-objectstore.nginx.conf

17
debian/nova-objectstore.nginx.conf vendored Normal file
View File

@@ -0,0 +1,17 @@
server {
listen 3333 default;
server_name localhost;
client_max_body_size 10m;
access_log /var/log/nginx/localhost.access.log;
location ~ /_images/.+ {
root /var/lib/nova/images;
rewrite ^/_images/(.*)$ /$1 break;
}
location / {
proxy_pass http://localhost:3334/;
}
}

7
debian/nova-volume.conf vendored Normal file
View File

@@ -0,0 +1,7 @@
--daemonize=1
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--datastore_path=/var/lib/nova/keeper
--fake_users=1
--images_path=/var/lib/nova/images
--buckets_path=/var/lib/nova/buckets

View File

@@ -13,10 +13,10 @@
set -e
DAEMON=/usr/bin/nova-volume
DAEMON_ARGS="--flagfile=/etc/nova.conf"
DAEMON_ARGS="--flagfile=/etc/nova/nova-volume.conf"
PIDFILE=/var/run/nova-volume.pid
ENABLED=false
ENABLED=true
if test -f /etc/default/nova-volume; then
. /etc/default/nova-volume

View File

@@ -1 +1,2 @@
bin/nova-volume usr/bin
debian/nova-volume.conf etc/nova

View File

@@ -16,7 +16,7 @@ import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('/Users/jmckenty/Projects/cc'))
sys.path.append([os.path.abspath('../nova'),os.path.abspath('../'),os.path.abspath('../vendor')])
from nova import vendor

View File

@@ -1,22 +1,3 @@
..
Copyright 2010 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
Copyright 2010 Anso Labs, LLC
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Getting Started with Nova
=========================
@@ -62,7 +43,7 @@ Installation
::
# system libraries and tools
apt-get install -y aoetools vlan
apt-get install -y aoetools vlan curl
modprobe aoe
# python libraries
@@ -81,6 +62,7 @@ Installation
# ON THE COMPUTE NODE:
apt-get install -y python-libvirt
apt-get install -y kpartx kvm libvirt-bin
modprobe kvm
# optional packages
apt-get install -y euca2ools
@@ -111,7 +93,7 @@ ON CLOUD CONTROLLER
location ~ /_images/.+ {
root NOVA_PATH/images;
rewrite ^/_images/(.*)\$ /\$1 break;
rewrite ^/_images/(.*)$ /$1 break;
}
location / {
@@ -128,6 +110,7 @@ ON VOLUME NODE
# This creates a 1GB file to create volumes out of
dd if=/dev/zero of=MY_FILE_PATH bs=100M count=10
losetup --show -f MY_FILE_PATH
# replace loop0 below with whatever losetup returns
echo "--storage_dev=/dev/loop0" >> NOVA_PATH/bin/nova.conf
Running

View File

@@ -28,7 +28,8 @@ import boto
from boto.ec2.regioninfo import RegionInfo
class UserInfo(object):
""" Information about a Nova user
"""
Information about a Nova user, as parsed through SAX
fields include:
username
accesskey
@@ -46,11 +47,9 @@ class UserInfo(object):
def __repr__(self):
return 'UserInfo:%s' % self.username
# this is needed by the sax parser, so ignore the ugly name
def startElement(self, name, attrs, connection):
return None
# this is needed by the sax parser, so ignore the ugly name
def endElement(self, name, value, connection):
if name == 'username':
self.username = str(value)
@@ -63,7 +62,7 @@ class UserInfo(object):
class HostInfo(object):
"""
Information about a Nova Host:
Information about a Nova Host, as parsed through SAX:
Disk stats
Running Instances
Memory stats

View File

@@ -34,15 +34,19 @@ SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
class NO_SUCH_OBJECT(Exception):
pass
class OBJECT_CLASS_VIOLATION(Exception):
pass
def initialize(uri):
return FakeLDAP()
def _match_query(query, attrs):
"""Match an ldap query to an attribute dictionary.
@@ -67,6 +71,7 @@ def _match_query(query, attrs):
(k, sep, v) = inner.partition('=')
return _match(k, v, attrs)
def _paren_groups(source):
"""Split a string into parenthesized groups."""
count = 0
@@ -83,6 +88,7 @@ def _paren_groups(source):
result.append(source[start:pos+1])
return result
def _match(k, v, attrs):
"""Match a given key and value against an attribute list."""
if k not in attrs:
@@ -96,6 +102,7 @@ def _match(k, v, attrs):
return True
return False
def _subs(value):
"""Returns a list of subclass strings.
@@ -109,6 +116,32 @@ def _subs(value):
return [value] + subs[value]
return [value]
def _from_json(encoded):
"""Convert attribute values from json representation.
Args:
encoded -- a json encoded string
Returns a list of strings
"""
return [str(x) for x in json.loads(encoded)]
def _to_json(unencoded):
"""Convert attribute values into json representation.
Args:
unencoded -- an unencoded string or list of strings. If it
is a single string, it will be converted into a list.
Returns a json string
"""
return json.dumps(list(unencoded))
class FakeLDAP(object):
#TODO(vish): refactor this class to use a wrapper instead of accessing
# redis directly
@@ -125,7 +158,7 @@ class FakeLDAP(object):
"""Add an object with the specified attributes at dn."""
key = "%s%s" % (self.__redis_prefix, dn)
value_dict = dict([(k, self.__to_json(v)) for k, v in attr])
value_dict = dict([(k, _to_json(v)) for k, v in attr])
datastore.Redis.instance().hmset(key, value_dict)
def delete_s(self, dn):
@@ -145,12 +178,12 @@ class FakeLDAP(object):
key = "%s%s" % (self.__redis_prefix, dn)
for cmd, k, v in attrs:
values = self.__from_json(redis.hget(key, k))
values = _from_json(redis.hget(key, k))
if cmd == MOD_ADD:
values.append(v)
else:
values.remove(v)
values = redis.hset(key, k, self.__to_json(values))
values = redis.hset(key, k, _to_json(values))
def search_s(self, dn, scope, query=None, fields=None):
"""Search for all matching objects under dn using the query.
@@ -171,7 +204,7 @@ class FakeLDAP(object):
# get the attributes from redis
attrs = redis.hgetall(key)
# turn the values from redis into lists
attrs = dict([(k, self.__from_json(v))
attrs = dict([(k, _from_json(v))
for k, v in attrs.iteritems()])
# filter the objects by query
if not query or _match_query(query, attrs):
@@ -188,25 +221,4 @@ class FakeLDAP(object):
def __redis_prefix(self):
return 'ldap:'
def __from_json(self, encoded):
"""Convert attribute values from json representation.
Args:
encoded -- a json encoded string
Returns a list of strings
"""
return [str(x) for x in json.loads(encoded)]
def __to_json(self, unencoded):
"""Convert attribute values into json representation.
Args:
unencoded -- an unencoded string or list of strings. If it
is a single string, it will be converted into a list.
Returns a json string
"""
return json.dumps(list(unencoded))

View File

@@ -27,6 +27,7 @@ import datetime
import logging
import os
import shutil
import signer
import string
from string import Template
import tempfile
@@ -39,15 +40,14 @@ except Exception, e:
import fakeldap as ldap
import fakeldap
from nova import datastore
# TODO(termie): clean up these imports
import signer
from nova import datastore
from nova import exception
from nova import flags
from nova import crypto
from nova import utils
from nova.compute import model
from nova import objectstore # for flags
@@ -101,10 +101,17 @@ flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
flags.DEFINE_integer('vpn_start_port', 8000,
flags.DEFINE_integer('vpn_start_port', 1000,
'Start port for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_end_port', 9999,
flags.DEFINE_integer('vpn_end_port', 2000,
'End port for the cloudpipe VPN servers')
flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
flags.DEFINE_string('vpn_ip', '127.0.0.1',
'Public IP for the cloudpipe VPN servers')
@@ -306,7 +313,7 @@ class NoMorePorts(exception.Error):
pass
class Vpn(model.BasicModel):
class Vpn(datastore.BasicModel):
def __init__(self, project_id):
self.project_id = project_id
super(Vpn, self).__init__()
@@ -317,27 +324,25 @@ class Vpn(model.BasicModel):
@classmethod
def create(cls, project_id):
# TODO (vish): get list of vpn ips from redis
for ip in [FLAGS.vpn_ip]:
try:
port = cls.find_free_port_for_ip(ip)
vpn = cls(project_id)
# save ip for project
vpn['project'] = project_id
vpn['ip'] = ip
vpn['port'] = port
vpn.save()
return vpn
except NoMorePorts:
pass
raise NoMorePorts()
# TODO(vish): get list of vpn ips from redis
port = cls.find_free_port_for_ip(FLAGS.vpn_ip)
vpn = cls(project_id)
# save ip for project
vpn['project'] = project_id
vpn['ip'] = FLAGS.vpn_ip
vpn['port'] = port
vpn.save()
return vpn
@classmethod
def find_free_port_for_ip(cls, ip):
# TODO(vish): the redis access should be refactored into a
# base class
# TODO(vish): these redis commands should be generalized and
# placed into a base class. Conceptually, it is
# similar to an association, but we are just
# storing a set of values instead of keys that
# should be turned into objects.
redis = datastore.Redis.instance()
key = 'ip:%s:ports'
key = 'ip:%s:ports' % ip
# TODO(vish): these ports should be allocated through an admin
# command instead of a flag
if (not redis.exists(key) and
@@ -345,14 +350,14 @@ class Vpn(model.BasicModel):
for i in range(FLAGS.vpn_start_port, FLAGS.vpn_end_port + 1):
redis.sadd(key, i)
port = datastore.Redis.instance().spop(key)
port = redis.spop(key)
if not port:
raise NoMorePorts()
return port
@classmethod
def num_ports_for_ip(cls, ip):
return datastore.Redis.instance().scard('ip:%s:ports')
return datastore.Redis.instance().scard('ip:%s:ports' % ip)
@property
def ip(self):
@@ -466,7 +471,9 @@ class UserManager(object):
# create and destroy a project
Vpn.create(name)
return conn.create_project(name,
User.safe_id(manager_user), description, member_users)
User.safe_id(manager_user),
description,
member_users)
def get_projects(self):
@@ -584,7 +591,7 @@ class UserManager(object):
def __cert_subject(self, uid):
# FIXME(ja) - this should be pulled from a global configuration
return "/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=%s-%s" % (uid, str(datetime.datetime.utcnow().isoformat()))
return FLAGS.credential_cert_subject % (uid, utils.isotime())
class LDAPWrapper(object):
@@ -773,7 +780,7 @@ class LDAPWrapper(object):
def __create_group(self, group_dn, name, uid,
description, member_uids = None):
if self.group_exists(name):
if self.group_exists(group_dn):
raise exception.Duplicate("Group can't be created because "
"group %s already exists" % name)
members = []

View File

@@ -28,6 +28,7 @@ before trying to run this.
from nova import vendor
import redis
from nova import exception
from nova import flags
from nova import utils
@@ -54,3 +55,200 @@ class Redis(object):
cls._instance = inst
return cls._instance
class ConnectionError(exception.Error):
pass
def absorb_connection_error(fn):
def _wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except redis.exceptions.ConnectionError, ce:
raise ConnectionError(str(ce))
return _wrapper
class BasicModel(object):
"""
All Redis-backed data derives from this class.
You MUST specify an identifier() property that returns a unique string
per instance.
You MUST have an initializer that takes a single argument that is a value
returned by identifier() to load a new class with.
You may want to specify a dictionary for default_state().
You may also specify override_type at the class level to use a key other
than __class__.__name__.
You override save and destroy calls to automatically build and destroy
associations.
"""
override_type = None
@absorb_connection_error
def __init__(self):
self.initial_state = {}
self.state = Redis.instance().hgetall(self.__redis_key)
if self.state:
self.initial_state = self.state
else:
self.state = self.default_state()
def default_state(self):
"""You probably want to define this in your subclass"""
return {}
@classmethod
def _redis_name(cls):
return self.override_type or cls.__name__
@classmethod
def lookup(cls, identifier):
rv = cls(identifier)
if rv.is_new_record():
return None
else:
return rv
@classmethod
@absorb_connection_error
def all(cls):
"""yields all objects in the store"""
redis_set = cls._redis_set_name(cls.__name__)
for identifier in Redis.instance().smembers(redis_set):
yield cls(identifier)
@classmethod
@absorb_connection_error
def associated_to(cls, foreign_type, foreign_id):
redis_set = cls._redis_association_name(foreign_type, foreign_id)
for identifier in Redis.instance().smembers(redis_set):
yield cls(identifier)
@classmethod
def _redis_set_name(cls, kls_name):
# stupidly pluralize (for compatibility with previous codebase)
return kls_name.lower() + "s"
@classmethod
def _redis_association_name(cls, foreign_type, foreign_id):
return cls._redis_set_name("%s:%s:%s" %
(foreign_type, foreign_id, cls.__name__))
@property
def identifier(self):
"""You DEFINITELY want to define this in your subclass"""
raise NotImplementedError("Your subclass should define identifier")
@property
def __redis_key(self):
return '%s:%s' % (self.__class__.__name__.lower(), self.identifier)
def __repr__(self):
return "<%s:%s>" % (self.__class__.__name__, self.identifier)
def keys(self):
return self.state.keys()
def copy(self):
copyDict = {}
for item in self.keys():
copyDict[item] = self[item]
return copyDict
def get(self, item, default):
return self.state.get(item, default)
def update(self, update_dict):
return self.state.update(update_dict)
def setdefault(self, item, default):
return self.state.setdefault(item, default)
def __getitem__(self, item):
return self.state[item]
def __setitem__(self, item, val):
self.state[item] = val
return self.state[item]
def __delitem__(self, item):
"""We don't support this"""
raise Exception("Silly monkey, models NEED all their properties.")
def is_new_record(self):
return self.initial_state == {}
@absorb_connection_error
def add_to_index(self):
set_name = self.__class__._redis_set_name(self.__class__.__name__)
Redis.instance().sadd(set_name, self.identifier)
@absorb_connection_error
def remove_from_index(self):
set_name = self.__class__._redis_set_name(self.__class__.__name__)
Redis.instance().srem(set_name, self.identifier)
@absorb_connection_error
def remove_from_index(self):
set_name = self.__class__._redis_set_name(self.__class__.__name__)
Redis.instance().srem(set_name, self.identifier)
@absorb_connection_error
def associate_with(self, foreign_type, foreign_id):
# note the extra 's' on the end is for plurality
# to match the old data without requiring a migration of any sort
self.add_associated_model_to_its_set(foreign_type, foreign_id)
redis_set = self.__class__._redis_association_name(foreign_type,
foreign_id)
Redis.instance().sadd(redis_set, self.identifier)
@absorb_connection_error
def unassociate_with(self, foreign_type, foreign_id):
    """Remove the redis link between this record and a foreign object."""
    assoc_set = self.__class__._redis_association_name(foreign_type,
                                                       foreign_id)
    Redis.instance().srem(assoc_set, self.identifier)
def add_associated_model_to_its_set(self, my_type, my_id):
    """Ensure the model on the other side of an association is saved.

    Looks up the class named after my_type (capitalized) in this
    module's globals; when found, instantiates it with my_id and
    saves it, otherwise logs a warning and does nothing.
    """
    table = globals()
    klsname = my_type.capitalize()
    # dict.has_key() is deprecated (and gone in Python 3); use 'in'.
    if klsname in table:
        my_class = table[klsname]
        my_inst = my_class(my_id)
        my_inst.save()
    else:
        logging.warning("no model class for %s when building"
                        " association from %s",
                        klsname, self)
@absorb_connection_error
def save(self):
    """Persist this model's state to redis.

    Writes every field of state into this record's redis hash, adds
    the record to the index of items of the same type, then resets
    initial_state to a snapshot of state so later modifications can
    be detected.  Returns True on success (connection errors may be
    absorbed by the decorator).
    """
    # TODO(ja): implement hmset in redis-py and use it
    # instead of multiple calls to hset
    if self.is_new_record():
        self["create_time"] = utils.isotime()
    for key, val in self.state.items():
        Redis.instance().hset(self.__redis_key, key, val)
    self.add_to_index()
    # Snapshot (not alias) the dict: assigning self.state directly
    # would make initial_state track every later change, defeating
    # the change-tracking intent stated above.
    self.initial_state = self.state.copy()
    return True
@absorb_connection_error
def destroy(self):
    """Delete this record's redis hash and drop it from the class index."""
    logging.info("Destroying datamodel for %s %s",
                 self.__class__.__name__, self.identifier)
    record_key = self.__redis_key
    Redis.instance().delete(record_key)
    self.remove_from_index()
    return True

View File

@@ -24,6 +24,9 @@ Admin API controller, exposed through http via the api worker.
import base64
from nova.auth import users
from nova.compute import model
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
@@ -62,28 +65,24 @@ class AdminController(object):
allowing project managers to administer project users.
"""
def __init__(self, user_manager, host_manager):
self.user_manager = user_manager
self.host_manager = host_manager
def __str__(self):
return 'AdminController'
@admin_only
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
return user_dict(self.user_manager.get_user(name))
return user_dict(users.UserManager.instance().get_user(name))
@admin_only
def describe_users(self, _context, **_kwargs):
"""Returns all users - should be changed to deal with a list."""
return {'userSet':
[user_dict(u) for u in self.user_manager.get_users()] }
[user_dict(u) for u in users.UserManager.instance().get_users()] }
@admin_only
def register_user(self, _context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
return user_dict(self.user_manager.create_user(name))
return user_dict(users.UserManager.instance().create_user(name))
@admin_only
def deregister_user(self, _context, name, **_kwargs):
@@ -91,7 +90,7 @@ class AdminController(object):
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
self.user_manager.delete_user(name)
users.UserManager.instance().delete_user(name)
return True
@@ -103,8 +102,8 @@ class AdminController(object):
"""
if project is None:
project = name
project = self.user_manager.get_project(project)
user = self.user_manager.get_user(name)
project = users.UserManager.instance().get_project(project)
user = users.UserManager.instance().get_user(name)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
@admin_only
@@ -117,9 +116,9 @@ class AdminController(object):
* DHCP servers running
* Iptables / bridges
"""
return {'hostSet': [host_dict(h) for h in self.host_manager.all()]}
return {'hostSet': [host_dict(h) for h in model.Host.all()]}
@admin_only
def describe_host(self, _context, name, **_kwargs):
"""Returns status info for single node."""
return host_dict(self.host_manager.lookup(name))
return host_dict(model.Host.lookup(name))

View File

@@ -324,7 +324,7 @@ class APIRequestHandler(tornado.web.RequestHandler):
class APIServerApplication(tornado.web.Application):
def __init__(self, user_manager, controllers):
def __init__(self, controllers):
tornado.web.Application.__init__(self, [
(r'/', RootRequestHandler),
(r'/cloudpipe/(.*)', nova.cloudpipe.api.CloudPipeRequestHandler),
@@ -341,5 +341,4 @@ class APIServerApplication(tornado.web.Application):
(r'/2007-01-19/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/1.0/([-A-Za-z0-9/]*)', MetadataRequestHandler),
], pool=multiprocessing.Pool(4))
self.user_manager = user_manager
self.controllers = controllers

View File

@@ -115,9 +115,9 @@ class CloudController(object):
def get_metadata(self, ip):
i = self.get_instance_by_ip(ip)
mpi = self._get_mpi_data(i['project_id'])
if i is None:
return None
mpi = self._get_mpi_data(i['project_id'])
if i['key_name']:
keys = {
'0': {
@@ -169,6 +169,28 @@ class CloudController(object):
return {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
@rbac.allow('all')
def describe_regions(self, context, region_name=None, **kwargs):
    """Return the single static region this deployment exposes.

    The result is hard-coded: one region named 'nova' whose endpoint
    is FLAGS.ec2_url.  region_name is currently ignored.
    """
    # TODO(vish): region_name is an array. Support filtering
    return {'regionInfo': [{'regionName': 'nova',
                            'regionUrl': FLAGS.ec2_url}]}
@rbac.allow('all')
def describe_snapshots(self,
                       context,
                       snapshot_id=None,
                       owner=None,
                       restorable_by=None,
                       **kwargs):
    """Stub for the EC2 DescribeSnapshots call.

    Snapshots are not implemented yet: every field of the single
    returned entry is a hard-coded placeholder ('fixme' / 0), and
    all filter arguments are ignored.
    """
    return {'snapshotSet': [{'snapshotId': 'fixme',
                             'volumeId': 'fixme',
                             'status': 'fixme',
                             'startTime': 'fixme',
                             'progress': 'fixme',
                             'ownerId': 'fixme',
                             'volumeSize': 0,
                             'description': 'fixme'}]}
@rbac.allow('all')
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = context.user.get_key_pairs()
@@ -178,7 +200,8 @@ class CloudController(object):
result = []
for key_pair in key_pairs:
# filter out the vpn keys
if context.user.is_admin() or not key_pair.name.endswith('-key'):
suffix = FLAGS.vpn_key_suffix
if context.user.is_admin() or not key_pair.name.endswith(suffix):
result.append({
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint,
@@ -380,15 +403,17 @@ class CloudController(object):
def _format_instances(self, context, reservation_id = None):
reservations = {}
for instance in self.instdir.all:
if context.user.is_admin():
instgenerator = self.instdir.all
else:
instgenerator = self.instdir.by_project(context.project.id)
for instance in instgenerator:
res_id = instance.get('reservation_id', 'Unknown')
if reservation_id != None and reservation_id != res_id:
continue
if not context.user.is_admin():
if instance['image_id'] == FLAGS.vpn_image_id:
continue
if context.project.id != instance['project_id']:
continue
i = {}
i['instance_id'] = instance.get('instance_id', None)
i['image_id'] = instance.get('image_id', None)
@@ -475,6 +500,14 @@ class CloudController(object):
# TODO - Strip the IP from the instance
return defer.succeed({'disassociateResponse': ["Address disassociated."]})
def release_ip(self, context, private_ip, **kwargs):
    """Release the DHCP lease for private_ip via the network service.

    NOTE(review): unlike the neighboring API methods there is no
    @rbac decorator visible here -- confirm this is only reachable
    from trusted callers (the dhcp lease hook).
    """
    self.network.release_ip(private_ip)
    return defer.succeed({'releaseResponse': ["Address released."]})
def lease_ip(self, context, private_ip, **kwargs):
    """Record a DHCP lease of private_ip via the network service.

    NOTE(review): no @rbac decorator visible here -- confirm callers
    are trusted (the dhcp lease hook), as with release_ip.
    """
    self.network.lease_ip(private_ip)
    return defer.succeed({'leaseResponse': ["Address leased."]})
@rbac.allow('projectmanager', 'sysadmin')
def run_instances(self, context, **kwargs):
# make sure user can access the image
@@ -493,11 +526,20 @@ class CloudController(object):
key_data = key_pair.public_key
# TODO: Get the real security group of launch in here
security_group = "default"
bridge_name = network.BridgedNetwork.get_network_for_project(context.user.id, context.project.id, security_group)['bridge_name']
if FLAGS.simple_network:
bridge_name = FLAGS.simple_network_bridge
else:
net = network.BridgedNetwork.get_network_for_project(
context.user.id, context.project.id, security_group)
bridge_name = net['bridge_name']
for num in range(int(kwargs['max_count'])):
inst = self.instdir.new()
# TODO(ja): add ari, aki
inst['image_id'] = kwargs['image_id']
if 'kernel_id' in kwargs:
inst['kernel_id'] = kwargs['kernel_id']
if 'ramdisk_id' in kwargs:
inst['ramdisk_id'] = kwargs['ramdisk_id']
inst['user_data'] = kwargs.get('user_data', '')
inst['instance_type'] = kwargs.get('instance_type', 'm1.small')
inst['reservation_id'] = reservation_id
@@ -509,12 +551,19 @@ class CloudController(object):
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = num
inst['bridge_name'] = bridge_name
if inst['image_id'] == FLAGS.vpn_image_id:
address = network.allocate_vpn_ip(
inst['user_id'], inst['project_id'], mac=inst['mac_address'])
if FLAGS.simple_network:
address = network.allocate_simple_ip()
else:
address = network.allocate_ip(
inst['user_id'], inst['project_id'], mac=inst['mac_address'])
if inst['image_id'] == FLAGS.vpn_image_id:
address = network.allocate_vpn_ip(
inst['user_id'],
inst['project_id'],
mac=inst['mac_address'])
else:
address = network.allocate_ip(
inst['user_id'],
inst['project_id'],
mac=inst['mac_address'])
inst['private_dns_name'] = str(address)
# TODO: allocate expresses on the router node
inst.save()
@@ -544,10 +593,13 @@ class CloudController(object):
pass
if instance.get('private_dns_name', None):
logging.debug("Deallocating address %s" % instance.get('private_dns_name', None))
try:
self.network.deallocate_ip(instance.get('private_dns_name', None))
except Exception, _err:
pass
if FLAGS.simple_network:
network.deallocate_simple_ip(instance.get('private_dns_name', None))
else:
try:
self.network.deallocate_ip(instance.get('private_dns_name', None))
except Exception, _err:
pass
if instance.get('node_name', 'unassigned') != 'unassigned': #It's also internal default
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "terminate_instance",
@@ -609,9 +661,8 @@ class CloudController(object):
result = { 'image_id': image_id, 'launchPermission': [] }
if image['isPublic']:
result['launchPermission'].append({ 'group': 'all' })
return defer.succeed(result)
@rbac.allow('projectmanager', 'sysadmin')
def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.

View File

@@ -74,6 +74,9 @@ DEFINE_string('default_instance_type',
'default instance type to use, testing only')
DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE', 'AMI for cloudpipe vpn server')
DEFINE_string('vpn_key_suffix',
'-key',
'Suffix to add to project name for vpn key')
# UNUSED
DEFINE_string('node_availability_zone',

View File

@@ -63,6 +63,10 @@ class Connection(connection.BrokerConnection):
cls._instance = cls(**params)
return cls._instance
@classmethod
def recreate(cls):
    """Throw away the cached connection singleton and build a fresh one.

    NOTE(review): 'del cls._instance' raises AttributeError if
    instance() was never called -- confirm callers only invoke this
    when reconnecting after a failed connection.
    """
    del cls._instance
    return cls.instance()
class Consumer(messaging.Consumer):
# TODO(termie): it would be nice to give these some way of automatically
@@ -79,9 +83,22 @@ class Consumer(messaging.Consumer):
attachToTornado = attach_to_tornado
@exception.wrap_exception
def fetch(self, *args, **kwargs):
super(Consumer, self).fetch(*args, **kwargs)
# TODO(vish): the logic for failed connections and logging should be
# refactored into some sort of connection manager object
try:
if getattr(self, 'failed_connection', False):
# attempt to reconnect
self.conn = Connection.recreate()
self.backend = self.conn.create_backend()
super(Consumer, self).fetch(*args, **kwargs)
if getattr(self, 'failed_connection', False):
logging.error("Reconnected to queue")
self.failed_connection = False
except Exception, ex:
if not getattr(self, 'failed_connection', False):
logging.exception("Failed to fetch message from queue")
self.failed_connection = True
def attach_to_twisted(self):
loop = task.LoopingCall(self.fetch, enable_callbacks=True)
@@ -115,9 +132,10 @@ class AdapterConsumer(TopicConsumer):
args = message_data.get('args', {})
message.ack()
if not method:
# vish: we may not want to ack here, but that means that bad messages
# stay in the queue indefinitely, so for now we just log the
# message and send an error string back to the caller
# NOTE(vish): we may not want to ack here, but that means that bad
# messages stay in the queue indefinitely, so for now
# we just log the message and send an error string
# back to the caller
_log.warn('no method for message: %s' % (message_data))
msg_reply(msg_id, 'No method for message: %s' % message_data)
return

View File

@@ -159,7 +159,7 @@ class ApiEc2TestCase(test.BaseTestCase):
self.host = '127.0.0.1'
self.app = api.APIServerApplication(self.users, {'Cloud': self.cloud})
self.app = api.APIServerApplication({'Cloud': self.cloud})
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',

View File

@@ -18,6 +18,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import unittest
@@ -26,6 +27,8 @@ import IPy
from nova import flags
from nova import test
from nova import exception
from nova.compute.exception import NoMoreAddresses
from nova.compute import network
from nova.auth import users
from nova import utils
@@ -40,6 +43,7 @@ class NetworkTestCase(test.TrialTestCase):
network_size=32)
logging.getLogger().setLevel(logging.DEBUG)
self.manager = users.UserManager.instance()
self.dnsmasq = FakeDNSMasq()
try:
self.manager.create_user('netuser', 'netuser', 'netuser')
except: pass
@@ -66,59 +70,128 @@ class NetworkTestCase(test.TrialTestCase):
address = network.allocate_ip(
"netuser", "project0", utils.generate_mac())
logging.debug("Was allocated %s" % (address))
self.assertEqual(True, address in self._get_project_addresses("project0"))
net = network.get_project_network("project0", "default")
self.assertEqual(True, is_in_project(address, "project0"))
mac = utils.generate_mac()
hostname = "test-host"
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
rv = network.deallocate_ip(address)
self.assertEqual(False, address in self._get_project_addresses("project0"))
# Doesn't go away until it's dhcp released
self.assertEqual(True, is_in_project(address, "project0"))
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address, "project0"))
def test_range_allocation(self):
mac = utils.generate_mac()
secondmac = utils.generate_mac()
hostname = "test-host"
address = network.allocate_ip(
"netuser", "project0", utils.generate_mac())
"netuser", "project0", mac)
secondaddress = network.allocate_ip(
"netuser", "project1", utils.generate_mac())
self.assertEqual(True,
address in self._get_project_addresses("project0"))
self.assertEqual(True,
secondaddress in self._get_project_addresses("project1"))
self.assertEqual(False, address in self._get_project_addresses("project1"))
"netuser", "project1", secondmac)
net = network.get_project_network("project0", "default")
secondnet = network.get_project_network("project1", "default")
self.assertEqual(True, is_in_project(address, "project0"))
self.assertEqual(True, is_in_project(secondaddress, "project1"))
self.assertEqual(False, is_in_project(address, "project1"))
# Addresses are allocated before they're issued
self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
self.dnsmasq.issue_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
rv = network.deallocate_ip(address)
self.assertEqual(False, address in self._get_project_addresses("project0"))
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.assertEqual(False, is_in_project(address, "project0"))
# First address release shouldn't affect the second
self.assertEqual(True, is_in_project(secondaddress, "project1"))
rv = network.deallocate_ip(secondaddress)
self.assertEqual(False,
secondaddress in self._get_project_addresses("project1"))
self.dnsmasq.release_ip(secondmac, secondaddress,
hostname, secondnet.bridge_name)
self.assertEqual(False, is_in_project(secondaddress, "project1"))
def test_subnet_edge(self):
secondaddress = network.allocate_ip("netuser", "project0",
utils.generate_mac())
hostname = "toomany-hosts"
for project in range(1,5):
project_id = "project%s" % (project)
mac = utils.generate_mac()
mac2 = utils.generate_mac()
mac3 = utils.generate_mac()
address = network.allocate_ip(
"netuser", project_id, utils.generate_mac())
"netuser", project_id, mac)
address2 = network.allocate_ip(
"netuser", project_id, utils.generate_mac())
"netuser", project_id, mac2)
address3 = network.allocate_ip(
"netuser", project_id, utils.generate_mac())
self.assertEqual(False,
address in self._get_project_addresses("project0"))
self.assertEqual(False,
address2 in self._get_project_addresses("project0"))
self.assertEqual(False,
address3 in self._get_project_addresses("project0"))
"netuser", project_id, mac3)
self.assertEqual(False, is_in_project(address, "project0"))
self.assertEqual(False, is_in_project(address2, "project0"))
self.assertEqual(False, is_in_project(address3, "project0"))
rv = network.deallocate_ip(address)
rv = network.deallocate_ip(address2)
rv = network.deallocate_ip(address3)
net = network.get_project_network(project_id, "default")
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
net = network.get_project_network("project0", "default")
rv = network.deallocate_ip(secondaddress)
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
def test_too_many_projects(self):
for i in range(0, 30):
name = 'toomany-project%s' % i
self.manager.create_project(name, 'netuser', name)
address = network.allocate_ip(
"netuser", name, utils.generate_mac())
rv = network.deallocate_ip(address)
self.manager.delete_project(name)
def test_release_before_deallocate(self):
pass
def test_deallocate_before_issued(self):
pass
def test_too_many_addresses(self):
"""
Network size is 32, there are 5 addresses reserved for VPN.
So we should get 23 usable addresses
"""
net = network.get_project_network("project0", "default")
hostname = "toomany-hosts"
macs = {}
addresses = {}
for i in range(0, 22):
macs[i] = utils.generate_mac()
addresses[i] = network.allocate_ip("netuser", "project0", macs[i])
self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
self.assertRaises(NoMoreAddresses, network.allocate_ip, "netuser", "project0", utils.generate_mac())
for i in range(0, 22):
rv = network.deallocate_ip(addresses[i])
self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name)
def is_in_project(address, project_id):
    """True when address is currently listed in the project's network."""
    addresses = network.get_project_network(project_id).list_addresses()
    return address in addresses
def _get_project_addresses(project_id):
    """Collect every address registered to the project's network."""
    net = network.get_project_network(project_id)
    return [addr for addr in net.list_addresses()]
def binpath(script):
    """Absolute path of a script in the project's bin/ directory."""
    relative = os.path.join(__file__, "../../../bin", script)
    return os.path.abspath(relative)
class FakeDNSMasq(object):
    """Test double for dnsmasq: drives the dhcpleasor.py hook by hand.

    Instead of running a real DHCP server, these helpers invoke the
    lease hook script directly with the environment variables dnsmasq
    would provide.
    """

    def issue_ip(self, mac, ip, hostname, interface):
        """Simulate dnsmasq handing out a lease for ip to mac."""
        env = {'DNSMASQ_INTERFACE': interface, 'TESTING': '1'}
        cmd = "%s add %s %s %s" % (binpath('dhcpleasor.py'), mac, ip, hostname)
        (out, err) = utils.execute(cmd, addl_env=env)
        logging.debug("ISSUE_IP: %s, %s " % (out, err))

    def release_ip(self, mac, ip, hostname, interface):
        """Simulate dnsmasq releasing/expiring the lease for ip."""
        env = {'DNSMASQ_INTERFACE': interface, 'TESTING': '1'}
        cmd = "%s del %s %s %s" % (binpath('dhcpleasor.py'), mac, ip, hostname)
        (out, err) = utils.execute(cmd, addl_env=env)
        logging.debug("RELEASE_IP: %s, %s " % (out, err))

    def _get_project_addresses(self, project_id):
        """Collect every address registered to the project's network."""
        net = network.get_project_network(project_id)
        return [addr for addr in net.list_addresses()]

View File

@@ -28,7 +28,6 @@ import tempfile
from nova import vendor
from nova import flags
from nova import rpc
from nova import objectstore
from nova import test
from nova.auth import users
@@ -57,7 +56,6 @@ class ObjectStoreTestCase(test.BaseTestCase):
buckets_path=os.path.join(oss_tempdir, 'buckets'),
images_path=os.path.join(oss_tempdir, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
self.um = users.UserManager.instance()

View File

@@ -38,10 +38,7 @@ class StorageTestCase(test.TrialTestCase):
self.mystorage = None
self.flags(fake_libvirt=True,
fake_storage=True)
if FLAGS.fake_storage:
self.mystorage = storage.FakeBlockStore()
else:
self.mystorage = storage.BlockStore()
self.mystorage = storage.BlockStore()
def test_run_create_volume(self):
vol_size = '0'
@@ -65,6 +62,18 @@ class StorageTestCase(test.TrialTestCase):
self.mystorage.create_volume,
vol_size, user_id, project_id)
def test_too_many_volumes(self):
vol_size = '1'
user_id = 'fake'
project_id = 'fake'
num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
total_slots = FLAGS.slots_per_shelf * num_shelves
for i in xrange(total_slots):
self.mystorage.create_volume(vol_size, user_id, project_id)
self.assertRaises(storage.NoMoreVolumes,
self.mystorage.create_volume,
vol_size, user_id, project_id)
def test_run_attach_detach_volume(self):
# Create one volume and one node to test with
instance_id = "storage-test"

View File

@@ -42,5 +42,4 @@ class ValidationTestCase(test.TrialTestCase):
@validate.typetest(instanceid=str, size=int, number_of_instances=int)
def type_case(instanceid, size, number_of_instances):
print ("type_case was successfully executed")
return True
return True

View File

@@ -46,13 +46,13 @@ import sys
from nova import vendor
from twisted.scripts import trial as trial_script
from nova import datastore
from nova import flags
from nova import twistd
from nova.tests.access_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.keeper_unittest import *
from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.node_unittest import *
@@ -60,7 +60,6 @@ from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
from nova.tests.storage_unittest import *
from nova.tests.users_unittest import *
from nova.tests.datastore_unittest import *
from nova.tests.validator_unittest import *