Merged trunk; fixed an error with releasing an IP in nova-dhcpbridge.
@@ -29,8 +29,6 @@ from nova import flags
 from nova import rpc
 from nova import server
 from nova import utils
 from nova.auth import manager
 from nova.compute import model
 from nova.endpoint import admin
 from nova.endpoint import api
 from nova.endpoint import cloud
@@ -39,10 +37,10 @@ FLAGS = flags.FLAGS


 def main(_argv):
     """Load the controllers and start the tornado I/O loop."""
     controllers = {
         'Cloud': cloud.CloudController(),
-        'Admin': admin.AdminController()
-    }
+        'Admin': admin.AdminController()}
     _app = api.APIServerApplication(controllers)

     conn = rpc.Connection.instance()
@@ -18,8 +18,6 @@
 # under the License.

 """
 nova-dhcpbridge

 Handle lease database updates from DHCP servers.
 """

@@ -42,34 +40,43 @@ from nova.network import service
 FLAGS = flags.FLAGS


-def add_lease(mac, ip, hostname, interface):
+def add_lease(_mac, ip, _hostname, _interface):
     """Set the IP that was assigned by the DHCP server."""
     if FLAGS.fake_rabbit:
         service.VlanNetworkService().lease_ip(ip)
     else:
-        rpc.cast("%s.%s" (FLAGS.network_topic, FLAGS.node_name),
+        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
                  {"method": "lease_ip",
-                  "args" : {"fixed_ip": ip}})
+                  "args": {"fixed_ip": ip}})


-def old_lease(mac, ip, hostname, interface):
+def old_lease(_mac, _ip, _hostname, _interface):
     """Do nothing, just an old lease update."""
     logging.debug("Adopted old lease or got a change of mac/hostname")


-def del_lease(mac, ip, hostname, interface):
+def del_lease(_mac, ip, _hostname, _interface):
     """Called when a lease expires."""
     if FLAGS.fake_rabbit:
         service.VlanNetworkService().release_ip(ip)
     else:
-        rpc.cast("%s.%s" (FLAGS.network_topic, FLAGS.node_name),
+        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
                  {"method": "release_ip",
-                  "args" : {"fixed_ip": ip}})

 def init_leases(interface):
     """Get the list of hosts for an interface."""
     net = model.get_network_by_interface(interface)
     res = ""
     for host_name in net.hosts:
-        res += "%s\n" % linux_net.hostDHCP(net, host_name, net.hosts[host_name])
+        res += "%s\n" % linux_net.hostDHCP(net, host_name,
+                                           net.hosts[host_name])
     return res


 def main():
     """Parse environment and arguments and call the approproate action."""
     flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
     utils.default_flagfile(flagfile)
     argv = FLAGS(sys.argv)
@@ -79,18 +86,19 @@ def main():
         FLAGS.redis_db = 8
         FLAGS.network_size = 32
         FLAGS.connection_type = 'fake'
-        FLAGS.fake_network=True
-        FLAGS.auth_driver='nova.auth.ldapdriver.FakeLdapDriver'
+        FLAGS.fake_network = True
+        FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
     action = argv[1]
-    if action in ['add','del','old']:
+    if action in ['add', 'del', 'old']:
         mac = argv[2]
         ip = argv[3]
         hostname = argv[4]
-        logging.debug("Called %s for mac %s with ip %s and hostname %s on interface %s" % (action, mac, ip, hostname, interface))
-        globals()[action+'_lease'](mac, ip, hostname, interface)
+        logging.debug("Called %s for mac %s with ip %s and "
+                      "hostname %s on interface %s",
+                      action, mac, ip, hostname, interface)
+        globals()[action + '_lease'](mac, ip, hostname, interface)
     else:
         print init_leases(interface)
     exit(0)


 if __name__ == "__main__":
-    sys.exit(main())
+    main()
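The "add", "del" and "old" command-line actions are mapped straight onto the *_lease functions above through globals(). A stripped-down, standalone sketch of that dispatch with dummy handlers (illustrative only, not the Nova code):

    def add_lease(mac, ip, hostname, interface):
        print("add %s -> %s" % (mac, ip))

    def del_lease(mac, ip, hostname, interface):
        print("del %s -> %s" % (mac, ip))

    def dispatch(action, mac, ip, hostname, interface):
        # same idea as globals()[action + '_lease'](...) in main() above
        handler = globals()[action + '_lease']
        handler(mac, ip, hostname, interface)

    dispatch('add', '02:16:3e:00:00:01', '10.0.0.5', 'vm-1', 'br100')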
@@ -37,20 +37,17 @@ FLAGS = flags.FLAGS
|
||||
|
||||
api_url = 'https://imagestore.canonical.com/api/dashboard'
|
||||
|
||||
image_cache = None
|
||||
def images():
|
||||
global image_cache
|
||||
if not image_cache:
|
||||
try:
|
||||
images = json.load(urllib2.urlopen(api_url))['images']
|
||||
image_cache = [i for i in images if i['title'].find('amd64') > -1]
|
||||
except Exception:
|
||||
print 'unable to download canonical image list'
|
||||
sys.exit(1)
|
||||
return image_cache
|
||||
|
||||
# FIXME(ja): add checksum/signature checks
|
||||
def get_images():
|
||||
"""Get a list of the images from the imagestore URL."""
|
||||
images = json.load(urllib2.urlopen(api_url))['images']
|
||||
images = [img for img in images if img['title'].find('amd64') > -1]
|
||||
return images
|
||||
|
||||
|
||||
def download(img):
|
||||
"""Download an image to the local filesystem."""
|
||||
# FIXME(ja): add checksum/signature checks
|
||||
tempdir = tempfile.mkdtemp(prefix='cis-')
|
||||
|
||||
kernel_id = None
|
||||
@@ -79,20 +76,22 @@ def download(img):
|
||||
|
||||
shutil.rmtree(tempdir)
|
||||
|
||||
|
||||
def main():
|
||||
"""Main entry point."""
|
||||
utils.default_flagfile()
|
||||
argv = FLAGS(sys.argv)
|
||||
images = get_images()
|
||||
|
||||
if len(argv) == 2:
|
||||
for img in images():
|
||||
for img in images:
|
||||
if argv[1] == 'all' or argv[1] == img['title']:
|
||||
download(img)
|
||||
else:
|
||||
print 'usage: %s (title|all)'
|
||||
print 'available images:'
|
||||
for image in images():
|
||||
print image['title']
|
||||
for img in images:
|
||||
print img['title']
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
|
@@ -22,7 +22,6 @@
 """

 import logging
 from twisted.internet import task
 from twisted.application import service

 from nova import twistd
@@ -30,7 +29,11 @@ from nova.compute import monitor

 logging.getLogger('boto').setLevel(logging.WARN)


-def main():
+if __name__ == '__main__':
+    twistd.serve(__file__)
+
+if __name__ == '__builtin__':
     logging.warn('Starting instance monitor')
     m = monitor.InstanceMonitor()

@@ -38,14 +41,3 @@ def main():
     # parses this file, return it so that we can get it into globals below
     application = service.Application('nova-instancemonitor')
     m.setServiceParent(application)
-    return application
-
-
-if __name__ == '__main__':
-    twistd.serve(__file__)
-
-if __name__ == '__builtin__':
-    application = main()
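The two __name__ checks are the twistd launch pattern used throughout these scripts: run directly, the script hands itself to twistd.serve(); when twistd then executes the same file as a code object, __name__ appears as '__builtin__' (Python 2), and the module-level code builds the service.Application that twistd looks for. A tiny standalone illustration of that mechanism (illustrative, not the Nova wrapper itself):

    snippet = "print('running with __name__ = %r' % __name__)"
    # Executing a file the way a twistd-style loader does gives it a custom
    # __name__ instead of '__main__'.
    exec(compile(snippet, '<service file>', 'exec'), {'__name__': '__builtin__'})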
bin/nova-manage
@@ -37,12 +37,15 @@ FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class VpnCommands(object):
|
||||
"""Class for managing VPNs."""
|
||||
|
||||
def __init__(self):
|
||||
self.manager = manager.AuthManager()
|
||||
self.instdir = model.InstanceDirectory()
|
||||
self.pipe = pipelib.CloudPipe(cloud.CloudController())
|
||||
|
||||
def list(self):
|
||||
"""Print a listing of the VPNs for all projects."""
|
||||
print "%-12s\t" % 'project',
|
||||
print "%-12s\t" % 'ip:port',
|
||||
print "%s" % 'state'
|
||||
@@ -50,9 +53,10 @@ class VpnCommands(object):
|
||||
print "%-12s\t" % project.name,
|
||||
print "%s:%s\t" % (project.vpn_ip, project.vpn_port),
|
||||
|
||||
vpn = self.__vpn_for(project.id)
|
||||
vpn = self._vpn_for(project.id)
|
||||
if vpn:
|
||||
out, err = utils.execute("ping -c1 -w1 %s > /dev/null; echo $?" % vpn['private_dns_name'])
|
||||
command = "ping -c1 -w1 %s > /dev/null; echo $?"
|
||||
out, _err = utils.execute(command % vpn['private_dns_name'])
|
||||
if out.strip() == '0':
|
||||
net = 'up'
|
||||
else:
|
||||
@@ -66,25 +70,32 @@ class VpnCommands(object):
|
||||
else:
|
||||
print None
|
||||
|
||||
def __vpn_for(self, project_id):
|
||||
def _vpn_for(self, project_id):
|
||||
"""Get the VPN instance for a project ID."""
|
||||
for instance in self.instdir.all:
|
||||
if (instance.state.has_key('image_id')
|
||||
if ('image_id' in instance.state
|
||||
and instance['image_id'] == FLAGS.vpn_image_id
|
||||
and not instance['state_description'] in ['shutting_down', 'shutdown']
|
||||
and not instance['state_description'] in
|
||||
['shutting_down', 'shutdown']
|
||||
and instance['project_id'] == project_id):
|
||||
return instance
|
||||
|
||||
def spawn(self):
|
||||
"""Run all VPNs."""
|
||||
for p in reversed(self.manager.get_projects()):
|
||||
if not self.__vpn_for(p.id):
|
||||
print 'spawning %s' % p.id
|
||||
self.pipe.launch_vpn_instance(p.id)
|
||||
time.sleep(10)
|
||||
if not self._vpn_for(p.id):
|
||||
print 'spawning %s' % p.id
|
||||
self.pipe.launch_vpn_instance(p.id)
|
||||
time.sleep(10)
|
||||
|
||||
def run(self, project_id):
|
||||
"""Start the VPN for a given project."""
|
||||
self.pipe.launch_vpn_instance(project_id)
|
||||
|
||||
|
||||
class RoleCommands(object):
|
||||
"""Class for managing roles."""
|
||||
|
||||
def __init__(self):
|
||||
self.manager = manager.AuthManager()
|
||||
|
||||
@@ -107,25 +118,24 @@ class RoleCommands(object):
|
||||
arguments: user, role [project]"""
|
||||
self.manager.remove_role(user, role, project)
|
||||
|
||||
|
||||
class UserCommands(object):
|
||||
"""Class for managing users."""
|
||||
|
||||
def __init__(self):
|
||||
self.manager = manager.AuthManager()
|
||||
|
||||
def __print_export(self, user):
|
||||
print 'export EC2_ACCESS_KEY=%s' % user.access
|
||||
print 'export EC2_SECRET_KEY=%s' % user.secret
|
||||
|
||||
def admin(self, name, access=None, secret=None):
|
||||
"""creates a new admin and prints exports
|
||||
arguments: name [access] [secret]"""
|
||||
user = self.manager.create_user(name, access, secret, True)
|
||||
self.__print_export(user)
|
||||
print_export(user)
|
||||
|
||||
def create(self, name, access=None, secret=None):
|
||||
"""creates a new user and prints exports
|
||||
arguments: name [access] [secret]"""
|
||||
user = self.manager.create_user(name, access, secret, False)
|
||||
self.__print_export(user)
|
||||
print_export(user)
|
||||
|
||||
def delete(self, name):
|
||||
"""deletes an existing user
|
||||
@@ -137,7 +147,7 @@ class UserCommands(object):
|
||||
arguments: name"""
|
||||
user = self.manager.get_user(name)
|
||||
if user:
|
||||
self.__print_export(user)
|
||||
print_export(user)
|
||||
else:
|
||||
print "User %s doesn't exist" % name
|
||||
|
||||
@@ -147,53 +157,58 @@ class UserCommands(object):
|
||||
for user in self.manager.get_users():
|
||||
print user.name
|
||||
|
||||
|
||||
def print_export(user):
|
||||
"""Print export variables to use with API."""
|
||||
print 'export EC2_ACCESS_KEY=%s' % user.access
|
||||
print 'export EC2_SECRET_KEY=%s' % user.secret
|
||||
|
||||
|
||||
class ProjectCommands(object):
|
||||
"""Class for managing projects."""
|
||||
|
||||
def __init__(self):
|
||||
self.manager = manager.AuthManager()
|
||||
|
||||
def add(self, project, user):
|
||||
"""adds user to project
|
||||
"""Adds user to project
|
||||
arguments: project user"""
|
||||
self.manager.add_to_project(user, project)
|
||||
|
||||
def create(self, name, project_manager, description=None):
|
||||
"""creates a new project
|
||||
"""Creates a new project
|
||||
arguments: name project_manager [description]"""
|
||||
user = self.manager.create_project(name, project_manager, description)
|
||||
self.manager.create_project(name, project_manager, description)
|
||||
|
||||
def delete(self, name):
|
||||
"""deletes an existing project
|
||||
"""Deletes an existing project
|
||||
arguments: name"""
|
||||
self.manager.delete_project(name)
|
||||
|
||||
def environment(self, project_id, user_id, filename='novarc'):
|
||||
"""exports environment variables to an sourcable file
|
||||
"""Exports environment variables to an sourcable file
|
||||
arguments: project_id user_id [filename='novarc]"""
|
||||
rc = self.manager.get_environment_rc(project_id, user_id)
|
||||
with open(filename, 'w') as f:
|
||||
f.write(rc)
|
||||
|
||||
def list(self):
|
||||
"""lists all projects
|
||||
"""Lists all projects
|
||||
arguments: <none>"""
|
||||
for project in self.manager.get_projects():
|
||||
print project.name
|
||||
|
||||
def remove(self, project, user):
|
||||
"""removes user from project
|
||||
"""Removes user from project
|
||||
arguments: project user"""
|
||||
self.manager.remove_from_project(user, project)
|
||||
|
||||
def zip(self, project_id, user_id, filename='nova.zip'):
|
||||
"""exports credentials for project to a zip file
|
||||
def create_zip(self, project_id, user_id, filename='nova.zip'):
|
||||
"""Exports credentials for project to a zip file
|
||||
arguments: project_id user_id [filename='nova.zip]"""
|
||||
zip = self.manager.get_credentials(project_id, user_id)
|
||||
zip_file = self.manager.get_credentials(project_id, user_id)
|
||||
with open(filename, 'w') as f:
|
||||
f.write(zip)
|
||||
|
||||
|
||||
def usage(script_name):
|
||||
print script_name + " category action [<args>]"
|
||||
f.write(zip_file)
|
||||
|
||||
|
||||
categories = [
|
||||
@@ -205,62 +220,61 @@ categories = [


 def lazy_match(name, key_value_tuples):
-    """finds all objects that have a key that case insensitively contains [name]
-    key_value_tuples is a list of tuples of the form (key, value)
+    """Finds all objects that have a key that case insensitively contains
+    [name] key_value_tuples is a list of tuples of the form (key, value)
     returns a list of tuples of the form (key, value)"""
-    return [(k, v) for (k, v) in key_value_tuples if k.lower().find(name.lower()) == 0]
+    result = []
+    for (k, v) in key_value_tuples:
+        if k.lower().find(name.lower()) == 0:
+            result.append((k, v))
+    if len(result) == 0:
+        print "%s does not match any options:" % name
+        for k, _v in key_value_tuples:
+            print "\t%s" % k
+        sys.exit(2)
+    if len(result) > 1:
+        print "%s matched multiple options:" % name
+        for k, _v in result:
+            print "\t%s" % k
+        sys.exit(2)
+    return result
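lazy_match does case-insensitive prefix matching against the (name, constructor) pairs in the categories list, so a unique prefix resolves to its command class while an ambiguous or unknown prefix aborts with a listing. A standalone sketch of the same matching rule (printing and sys.exit omitted; the category names here are illustrative):

    def prefix_match(name, key_value_tuples):
        # keep every (key, value) whose key starts with name, ignoring case
        return [(k, v) for (k, v) in key_value_tuples
                if k.lower().startswith(name.lower())]

    categories = [('user', 'UserCommands'), ('project', 'ProjectCommands'),
                  ('vpn', 'VpnCommands'), ('role', 'RoleCommands')]
    print(prefix_match('pro', categories))   # [('project', 'ProjectCommands')]
    print(prefix_match('r', categories))     # [('role', 'RoleCommands')]
    print(prefix_match('x', categories))     # [] -> nova-manage would exit(2)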


 def methods_of(obj):
-    """get all callable methods of an object that don't start with underscore
+    """Get all callable methods of an object that don't start with underscore
     returns a list of tuples of the form (method_name, method)"""
-    return [(i, getattr(obj, i)) for i in dir(obj) if callable(getattr(obj, i)) and not i.startswith('_')]
+    result = []
+    for i in dir(obj):
+        if callable(getattr(obj, i)) and not i.startswith('_'):
+            result.append((i, getattr(obj, i)))
+    return result
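methods_of is what turns a command class into its list of sub-commands: every public callable becomes an action name. A minimal runnable illustration with a dummy class (not the Nova command classes):

    class DemoCommands(object):
        def list(self):
            return 'listing'
        def create(self, name):
            return 'creating %s' % name
        def _helper(self):              # leading underscore: not exposed
            return 'hidden'

    def methods_of(obj):
        return [(i, getattr(obj, i)) for i in dir(obj)
                if callable(getattr(obj, i)) and not i.startswith('_')]

    print([name for name, _fn in methods_of(DemoCommands())])
    # ['create', 'list']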
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
def main():
|
||||
"""Parse options and call the appropriate class/method."""
|
||||
utils.default_flagfile('/etc/nova/nova-manage.conf')
|
||||
argv = FLAGS(sys.argv)
|
||||
script_name = argv.pop(0)
|
||||
if len(argv) < 1:
|
||||
usage(script_name)
|
||||
print script_name + " category action [<args>]"
|
||||
print "Available categories:"
|
||||
for k, v in categories:
|
||||
for k, _ in categories:
|
||||
print "\t%s" % k
|
||||
sys.exit(2)
|
||||
category = argv.pop(0)
|
||||
matches = lazy_match(category, categories)
|
||||
if len(matches) == 0:
|
||||
print "%s does not match any categories:" % category
|
||||
for k, v in categories:
|
||||
print "\t%s" % k
|
||||
sys.exit(2)
|
||||
if len(matches) > 1:
|
||||
print "%s matched multiple categories:" % category
|
||||
for k, v in matches:
|
||||
print "\t%s" % k
|
||||
sys.exit(2)
|
||||
# instantiate the command group object
|
||||
category, fn = matches[0]
|
||||
command_object = fn()
|
||||
actions = methods_of(command_object)
|
||||
if len(argv) < 1:
|
||||
usage(script_name)
|
||||
print script_name + " category action [<args>]"
|
||||
print "Available actions for %s category:" % category
|
||||
for k, v in actions:
|
||||
for k, _v in actions:
|
||||
print "\t%s" % k
|
||||
sys.exit(2)
|
||||
action = argv.pop(0)
|
||||
matches = lazy_match(action, actions)
|
||||
if len(matches) == 0:
|
||||
print "%s does not match any actions" % action
|
||||
for k, v in actions:
|
||||
print "\t%s" % k
|
||||
sys.exit(2)
|
||||
if len(matches) > 1:
|
||||
print "%s matched multiple actions:" % action
|
||||
for k, v in matches:
|
||||
print "\t%s" % k
|
||||
sys.exit(2)
|
||||
action, fn = matches[0]
|
||||
# call the action with the remaining arguments
|
||||
try:
|
||||
@@ -271,3 +285,5 @@ if __name__ == '__main__':
|
||||
print "%s %s: %s" % (category, action, fn.__doc__)
|
||||
sys.exit(2)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
@@ -30,15 +30,9 @@ from nova.objectstore import handler
 FLAGS = flags.FLAGS


-def main():
-    app = handler.get_application()
-    print app
-    return app
-
 # NOTE(soren): Stolen from nova-compute
 if __name__ == '__main__':
     twistd.serve(__file__)

 if __name__ == '__builtin__':
     utils.default_flagfile()
-    application = main()
+    application = handler.get_application()
|
||||
|
@@ -1,4 +1,5 @@
|
||||
#!/usr/bin/env python
|
||||
# pylint: disable-msg=C0103
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
@@ -17,42 +18,17 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
WSGI daemon for the main API endpoint.
|
||||
Daemon for the Rackspace API endpoint.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from tornado import ioloop
|
||||
from wsgiref import simple_server
|
||||
|
||||
from nova import flags
|
||||
from nova import rpc
|
||||
from nova import server
|
||||
from nova import utils
|
||||
from nova.auth import manager
|
||||
from nova import wsgi
|
||||
from nova.endpoint import rackspace
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
|
||||
|
||||
def main(_argv):
|
||||
user_manager = manager.AuthManager()
|
||||
api_instance = rackspace.Api(user_manager)
|
||||
conn = rpc.Connection.instance()
|
||||
rpc_consumer = rpc.AdapterConsumer(connection=conn,
|
||||
topic=FLAGS.cloud_topic,
|
||||
proxy=api_instance)
|
||||
|
||||
# TODO: fire rpc response listener (without attach to tornado)
|
||||
# io_inst = ioloop.IOLoop.instance()
|
||||
# _injected = consumer.attach_to_tornado(io_inst)
|
||||
|
||||
http_server = simple_server.WSGIServer(('0.0.0.0', FLAGS.cc_port), simple_server.WSGIRequestHandler)
|
||||
http_server.set_app(api_instance.handler)
|
||||
logging.debug('Started HTTP server on port %i' % FLAGS.cc_port)
|
||||
while True:
|
||||
http_server.handle_request()
|
||||
# io_inst.start()
|
||||
|
||||
if __name__ == '__main__':
|
||||
utils.default_flagfile()
|
||||
server.serve('nova-rsapi', main)
|
||||
wsgi.run_server(rackspace.API(), FLAGS.cc_port)
|
||||
|
@@ -28,6 +28,8 @@ import json
 from nova import datastore


+SCOPE_BASE = 0
+SCOPE_ONELEVEL = 1 # not implemented
 SCOPE_SUBTREE = 2
 MOD_ADD = 0
 MOD_DELETE = 1
@@ -188,15 +190,18 @@ class FakeLDAP(object):

         Args:
             dn -- dn to search under
-            scope -- only SCOPE_SUBTREE is supported
+            scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported
             query -- query to filter objects by
             fields -- fields to return. Returns all fields if not specified

         """
-        if scope != SCOPE_SUBTREE:
+        if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
             raise NotImplementedError(str(scope))
         redis = datastore.Redis.instance()
-        keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
+        if scope == SCOPE_BASE:
+            keys = ["%s%s" % (self.__redis_prefix, dn)]
+        else:
+            keys = redis.keys("%s*%s" % (self.__redis_prefix, dn))
         objects = []
         for key in keys:
             # get the attributes from redis
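With SCOPE_BASE the fake driver now reads exactly one key instead of pattern-matching every key that ends with the dn, which is what lets the LDAP driver below check membership of a single group entry. A small standalone sketch of the two lookup modes against a plain dict standing in for the datastore (the "ldap:" prefix and the data are made up for illustration):

    store = {"ldap:cn=devs,ou=groups": {"member": ["uid=alice"]},
             "ldap:ou=groups": {"objectclass": ["organizationalUnit"]}}

    def keys_for(dn, scope, prefix="ldap:"):
        if scope == 0:                                  # SCOPE_BASE: this dn only
            return [prefix + dn]
        return [k for k in store if k.endswith(dn)]     # SCOPE_SUBTREE-style scan

    print(keys_for("cn=devs,ou=groups", 0))   # ['ldap:cn=devs,ou=groups']
    print(sorted(keys_for("ou=groups", 2)))   # both entries match the subtree scan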
@@ -272,26 +272,30 @@ class LdapDriver(object):
         """Check if project exists"""
         return self.get_project(name) != None

-    def __find_object(self, dn, query = None):
+    def __find_object(self, dn, query=None, scope=None):
         """Find an object by dn and query"""
-        objects = self.__find_objects(dn, query)
+        objects = self.__find_objects(dn, query, scope)
         if len(objects) == 0:
             return None
         return objects[0]

-    def __find_dns(self, dn, query=None):
+    def __find_dns(self, dn, query=None, scope=None):
         """Find dns by query"""
+        if scope is None: # one of the flags is 0!!
+            scope = self.ldap.SCOPE_SUBTREE
         try:
-            res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+            res = self.conn.search_s(dn, scope, query)
         except self.ldap.NO_SUCH_OBJECT:
             return []
         # just return the DNs
         return [dn for dn, attributes in res]

-    def __find_objects(self, dn, query = None):
+    def __find_objects(self, dn, query=None, scope=None):
         """Find objects by query"""
+        if scope is None: # one of the flags is 0!!
+            scope = self.ldap.SCOPE_SUBTREE
         try:
-            res = self.conn.search_s(dn, self.ldap.SCOPE_SUBTREE, query)
+            res = self.conn.search_s(dn, scope, query)
         except self.ldap.NO_SUCH_OBJECT:
             return []
         # just return the attributes
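The "one of the flags is 0!!" comment is why the default is applied with "is None" rather than a truthiness test: SCOPE_BASE is 0, so "if not scope" would silently turn an explicit base-scoped search into a subtree search. A quick standalone demonstration of the pitfall:

    SCOPE_BASE, SCOPE_SUBTREE = 0, 2          # mirrors the fakeldap constants

    def pick_scope_wrong(scope=None):
        if not scope:                         # 0 is falsy, so BASE is clobbered
            scope = SCOPE_SUBTREE
        return scope

    def pick_scope_right(scope=None):
        if scope is None:                     # only replace a missing value
            scope = SCOPE_SUBTREE
        return scope

    print(pick_scope_wrong(SCOPE_BASE))       # 2  (wrong)
    print(pick_scope_right(SCOPE_BASE))       # 0  (intended)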
|
||||
@@ -361,7 +365,8 @@ class LdapDriver(object):
|
||||
if not self.__group_exists(group_dn):
|
||||
return False
|
||||
res = self.__find_object(group_dn,
|
||||
'(member=%s)' % self.__uid_to_dn(uid))
|
||||
'(member=%s)' % self.__uid_to_dn(uid),
|
||||
self.ldap.SCOPE_BASE)
|
||||
return res != None
|
||||
|
||||
def __add_to_group(self, uid, group_dn):
|
||||
@@ -391,7 +396,11 @@ class LdapDriver(object):
|
||||
if not self.__is_in_group(uid, group_dn):
|
||||
raise exception.NotFound("User %s is not a member of the group" %
|
||||
(uid,))
|
||||
self.__safe_remove_from_group(uid, group_dn)
|
||||
# NOTE(vish): remove user from group and any sub_groups
|
||||
sub_dns = self.__find_group_dns_with_member(
|
||||
group_dn, uid)
|
||||
for sub_dn in sub_dns:
|
||||
self.__safe_remove_from_group(uid, sub_dn)
|
||||
|
||||
def __safe_remove_from_group(self, uid, group_dn):
|
||||
"""Remove user from group, deleting group if user is last member"""
|
||||
|
@@ -90,13 +90,15 @@ class BasicModel(object):

     @absorb_connection_error
     def __init__(self):
-        self.initial_state = {}
-        self.state = Redis.instance().hgetall(self.__redis_key)
-        if self.state:
-            self.initial_state = self.state
+        state = Redis.instance().hgetall(self.__redis_key)
+        if state:
+            self.initial_state = state
+            self.state = dict(self.initial_state)
         else:
+            self.initial_state = {}
             self.state = self.default_state()


     def default_state(self):
         """You probably want to define this in your subclass"""
         return {}
@@ -239,7 +241,7 @@ class BasicModel(object):
         for key, val in self.state.iteritems():
             Redis.instance().hset(self.__redis_key, key, val)
         self.add_to_index()
-        self.initial_state = self.state
+        self.initial_state = dict(self.state)
         return True
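Both hunks fix the same aliasing bug: assigning self.initial_state = self.state made the two names refer to one dict, so later mutations of state also changed initial_state and the model could no longer tell what had been modified since the last save. dict(...) takes a shallow copy instead. A standalone illustration:

    state = {"ip": "10.0.0.5"}

    snapshot_alias = state              # old behaviour: same dict object
    snapshot_copy = dict(state)         # new behaviour: independent copy

    state["ip"] = "10.0.0.9"
    print(snapshot_alias["ip"])         # 10.0.0.9 -> change detection is broken
    print(snapshot_copy["ip"])          # 10.0.0.5 -> original value preserved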
|
||||
|
||||
@absorb_connection_error
|
||||
|
@@ -699,6 +699,8 @@ class CloudController(object):
|
||||
# TODO(devcamcar): Support users and groups other than 'all'.
|
||||
if attribute != 'launchPermission':
|
||||
raise exception.ApiError('attribute not supported: %s' % attribute)
|
||||
if not 'user_group' in kwargs:
|
||||
raise exception.ApiError('user or group not specified')
|
||||
if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
|
||||
raise exception.ApiError('only group "all" is supported')
|
||||
if not operation_type in ['add', 'remove']:
|
||||
|
@@ -17,153 +17,95 @@
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Rackspace API
|
||||
Rackspace API Endpoint
|
||||
"""
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
import multiprocessing
|
||||
import os
|
||||
import time
|
||||
import tornado.web
|
||||
from twisted.internet import defer
|
||||
|
||||
from nova import datastore
|
||||
from nova import exception
|
||||
import webob.dec
|
||||
import webob.exc
|
||||
|
||||
from nova import flags
|
||||
from nova import rpc
|
||||
from nova import utils
|
||||
from nova import wsgi
|
||||
from nova.auth import manager
|
||||
from nova.compute import model
|
||||
from nova.compute import network
|
||||
from nova.endpoint import images
|
||||
from nova.endpoint import wsgi
|
||||
from nova.compute import model as compute
|
||||
from nova.network import model as network
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
|
||||
|
||||
|
||||
# TODO(todd): subclass Exception so we can bubble meaningful errors
|
||||
|
||||
|
||||
class Api(object):
|
||||
|
||||
def __init__(self, rpc_mechanism):
|
||||
self.controllers = {
|
||||
"v1.0": RackspaceAuthenticationApi(),
|
||||
"servers": RackspaceCloudServerApi()
|
||||
}
|
||||
self.rpc_mechanism = rpc_mechanism
|
||||
|
||||
def handler(self, environ, responder):
|
||||
environ['nova.context'] = self.build_context(environ)
|
||||
controller, path = wsgi.Util.route(
|
||||
environ['PATH_INFO'],
|
||||
self.controllers
|
||||
)
|
||||
if not controller:
|
||||
# TODO(todd): Exception (404)
|
||||
raise Exception("Missing Controller")
|
||||
rv = controller.process(path, environ)
|
||||
if type(rv) is tuple:
|
||||
responder(rv[0], rv[1])
|
||||
rv = rv[2]
|
||||
else:
|
||||
responder("200 OK", [])
|
||||
return rv
|
||||
|
||||
def build_context(self, env):
|
||||
rv = {}
|
||||
if env.has_key("HTTP_X_AUTH_TOKEN"):
|
||||
rv['user'] = manager.AuthManager().get_user_from_access_key(
|
||||
env['HTTP_X_AUTH_TOKEN']
|
||||
)
|
||||
if rv['user']:
|
||||
rv['project'] = manager.AuthManager().get_project(
|
||||
rv['user'].name
|
||||
)
|
||||
return rv
|
||||
|
||||
|
||||
class RackspaceApiEndpoint(object):
|
||||
def process(self, path, env):
|
||||
if not self.check_authentication(env):
|
||||
# TODO(todd): Exception (Unauthorized)
|
||||
raise Exception("Unable to authenticate")
|
||||
|
||||
if len(path) == 0:
|
||||
return self.index(env)
|
||||
|
||||
action = path.pop(0)
|
||||
if hasattr(self, action):
|
||||
method = getattr(self, action)
|
||||
return method(path, env)
|
||||
else:
|
||||
# TODO(todd): Exception (404)
|
||||
raise Exception("Missing method %s" % path[0])
|
||||
|
||||
def check_authentication(self, env):
|
||||
if hasattr(self, "process_without_authentication") \
|
||||
and getattr(self, "process_without_authentication"):
|
||||
return True
|
||||
if not env['nova.context']['user']:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class RackspaceAuthenticationApi(RackspaceApiEndpoint):
|
||||
class API(wsgi.Middleware):
|
||||
"""Entry point for all requests."""
|
||||
|
||||
def __init__(self):
|
||||
self.process_without_authentication = True
|
||||
super(API, self).__init__(Router(webob.exc.HTTPNotFound()))
|
||||
|
||||
# TODO(todd): make a actual session with a unique token
|
||||
# just pass the auth key back through for now
|
||||
def index(self, env):
|
||||
response = '204 No Content'
|
||||
headers = [
|
||||
('X-Server-Management-Url', 'http://%s' % env['HTTP_HOST']),
|
||||
('X-Storage-Url', 'http://%s' % env['HTTP_HOST']),
|
||||
('X-CDN-Managment-Url', 'http://%s' % env['HTTP_HOST']),
|
||||
('X-Auth-Token', env['HTTP_X_AUTH_KEY'])
|
||||
]
|
||||
body = ""
|
||||
return (response, headers, body)
|
||||
def __call__(self, environ, start_response):
|
||||
context = {}
|
||||
if "HTTP_X_AUTH_TOKEN" in environ:
|
||||
context['user'] = manager.AuthManager().get_user_from_access_key(
|
||||
environ['HTTP_X_AUTH_TOKEN'])
|
||||
if context['user']:
|
||||
context['project'] = manager.AuthManager().get_project(
|
||||
context['user'].name)
|
||||
if "user" not in context:
|
||||
return webob.exc.HTTPForbidden()(environ, start_response)
|
||||
environ['nova.context'] = context
|
||||
return self.application(environ, start_response)
|
||||
|
||||
|
||||
class RackspaceCloudServerApi(RackspaceApiEndpoint):
|
||||
class Router(wsgi.Router):
|
||||
"""Route requests to the next WSGI application."""
|
||||
|
||||
def _build_map(self):
|
||||
"""Build routing map for authentication and cloud."""
|
||||
self._connect("/v1.0", controller=AuthenticationAPI())
|
||||
cloud = CloudServerAPI()
|
||||
self._connect("/servers", controller=cloud.launch_server,
|
||||
conditions={"method": ["POST"]})
|
||||
self._connect("/servers/{server_id}", controller=cloud.delete_server,
|
||||
conditions={'method': ["DELETE"]})
|
||||
self._connect("/servers", controller=cloud)
|
||||
|
||||
|
||||
class AuthenticationAPI(wsgi.Application):
|
||||
"""Handle all authorization requests through WSGI applications."""
|
||||
|
||||
@webob.dec.wsgify
|
||||
def __call__(self, req): # pylint: disable-msg=W0221
|
||||
# TODO(todd): make a actual session with a unique token
|
||||
# just pass the auth key back through for now
|
||||
res = webob.Response()
|
||||
res.status = '204 No Content'
|
||||
res.headers.add('X-Server-Management-Url', req.host_url)
|
||||
res.headers.add('X-Storage-Url', req.host_url)
|
||||
res.headers.add('X-CDN-Managment-Url', req.host_url)
|
||||
res.headers.add('X-Auth-Token', req.headers['X-Auth-Key'])
|
||||
return res
|
||||
|
||||
|
||||
class CloudServerAPI(wsgi.Application):
|
||||
"""Handle all server requests through WSGI applications."""
|
||||
|
||||
def __init__(self):
|
||||
self.instdir = model.InstanceDirectory()
|
||||
super(CloudServerAPI, self).__init__()
|
||||
self.instdir = compute.InstanceDirectory()
|
||||
self.network = network.PublicNetworkController()
|
||||
|
||||
def index(self, env):
|
||||
if env['REQUEST_METHOD'] == 'GET':
|
||||
return self.detail(env)
|
||||
elif env['REQUEST_METHOD'] == 'POST':
|
||||
return self.launch_server(env)
|
||||
|
||||
def detail(self, args, env):
|
||||
value = {
|
||||
"servers":
|
||||
[]
|
||||
}
|
||||
@webob.dec.wsgify
|
||||
def __call__(self, req): # pylint: disable-msg=W0221
|
||||
value = {"servers": []}
|
||||
for inst in self.instdir.all:
|
||||
value["servers"].append(self.instance_details(inst))
|
||||
|
||||
return json.dumps(value)
|
||||
|
||||
##
|
||||
##
|
||||
|
||||
def launch_server(self, env):
|
||||
data = json.loads(env['wsgi.input'].read(int(env['CONTENT_LENGTH'])))
|
||||
inst = self.build_server_instance(data, env['nova.context'])
|
||||
self.schedule_launch_of_instance(inst)
|
||||
return json.dumps({"server": self.instance_details(inst)})
|
||||
|
||||
def instance_details(self, inst):
|
||||
def instance_details(self, inst): # pylint: disable-msg=R0201
|
||||
"""Build the data structure to represent details for an instance."""
|
||||
return {
|
||||
"id": inst.get("instance_id", None),
|
||||
"imageId": inst.get("image_id", None),
|
||||
@@ -171,11 +113,9 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint):
|
||||
"hostId": inst.get("node_name", None),
|
||||
"status": inst.get("state", "pending"),
|
||||
"addresses": {
|
||||
"public": [self.network.get_public_ip_for_instance(
|
||||
inst.get("instance_id", None)
|
||||
)],
|
||||
"private": [inst.get("private_dns_name", None)]
|
||||
},
|
||||
"public": [network.get_public_ip_for_instance(
|
||||
inst.get("instance_id", None))],
|
||||
"private": [inst.get("private_dns_name", None)]},
|
||||
|
||||
# implemented only by Rackspace, not AWS
|
||||
"name": inst.get("name", "Not-Specified"),
|
||||
@@ -184,11 +124,22 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint):
|
||||
"progress": "Not-Supported",
|
||||
"metadata": {
|
||||
"Server Label": "Not-Supported",
|
||||
"Image Version": "Not-Supported"
|
||||
}
|
||||
}
|
||||
"Image Version": "Not-Supported"}}
|
||||
|
||||
@webob.dec.wsgify
|
||||
def launch_server(self, req):
|
||||
"""Launch a new instance."""
|
||||
data = json.loads(req.body)
|
||||
inst = self.build_server_instance(data, req.environ['nova.context'])
|
||||
rpc.cast(
|
||||
FLAGS.compute_topic, {
|
||||
"method": "run_instance",
|
||||
"args": {"instance_id": inst.instance_id}})
|
||||
|
||||
return json.dumps({"server": self.instance_details(inst)})
|
||||
|
||||
def build_server_instance(self, env, context):
|
||||
"""Build instance data structure and save it to the data store."""
|
||||
reservation = utils.generate_uid('r')
|
||||
ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
|
||||
inst = self.instdir.new()
|
||||
@@ -200,27 +151,33 @@ class RackspaceCloudServerApi(RackspaceApiEndpoint):
|
||||
inst['reservation_id'] = reservation
|
||||
inst['launch_time'] = ltime
|
||||
inst['mac_address'] = utils.generate_mac()
|
||||
address = network.allocate_ip(
|
||||
address = self.network.allocate_ip(
|
||||
inst['user_id'],
|
||||
inst['project_id'],
|
||||
mac=inst['mac_address']
|
||||
)
|
||||
mac=inst['mac_address'])
|
||||
inst['private_dns_name'] = str(address)
|
||||
inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
|
||||
inst['user_id'],
|
||||
inst['project_id'],
|
||||
'default' # security group
|
||||
)['bridge_name']
|
||||
'default')['bridge_name']
|
||||
# key_data, key_name, ami_launch_index
|
||||
# TODO(todd): key data or root password
|
||||
inst.save()
|
||||
return inst
|
||||
|
||||
def schedule_launch_of_instance(self, inst):
|
||||
rpc.cast(
|
||||
FLAGS.compute_topic,
|
||||
{
|
||||
"method": "run_instance",
|
||||
"args": {"instance_id": inst.instance_id}
|
||||
}
|
||||
)
|
||||
@webob.dec.wsgify
|
||||
@wsgi.route_args
|
||||
def delete_server(self, req, route_args): # pylint: disable-msg=R0201
|
||||
"""Delete an instance."""
|
||||
owner_hostname = None
|
||||
instance = compute.Instance.lookup(route_args['server_id'])
|
||||
if instance:
|
||||
owner_hostname = instance["node_name"]
|
||||
if not owner_hostname:
|
||||
return webob.exc.HTTPNotFound("Did not find image, or it was "
|
||||
"not in a running state.")
|
||||
rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname)
|
||||
rpc.cast(rpc_transport,
|
||||
{"method": "reboot_instance",
|
||||
"args": {"instance_id": route_args['server_id']}})
|
||||
req.status = "202 Accepted"
|
||||
|
nova/flags.py
@@ -21,16 +21,145 @@ Package-level global flags are defined here, the rest are defined
|
||||
where they're used.
|
||||
"""
|
||||
|
||||
import getopt
|
||||
import socket
|
||||
import sys
|
||||
|
||||
import gflags
|
||||
|
||||
|
||||
from gflags import *
|
||||
class FlagValues(gflags.FlagValues):
|
||||
"""Extension of gflags.FlagValues that allows undefined and runtime flags.
|
||||
|
||||
Unknown flags will be ignored when parsing the command line, but the
|
||||
command line will be kept so that it can be replayed if new flags are
|
||||
defined after the initial parsing.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
gflags.FlagValues.__init__(self)
|
||||
self.__dict__['__dirty'] = []
|
||||
self.__dict__['__was_already_parsed'] = False
|
||||
self.__dict__['__stored_argv'] = []
|
||||
|
||||
def __call__(self, argv):
|
||||
# We're doing some hacky stuff here so that we don't have to copy
|
||||
# out all the code of the original verbatim and then tweak a few lines.
|
||||
# We're hijacking the output of getopt so we can still return the
|
||||
# leftover args at the end
|
||||
sneaky_unparsed_args = {"value": None}
|
||||
original_argv = list(argv)
|
||||
|
||||
if self.IsGnuGetOpt():
|
||||
orig_getopt = getattr(getopt, 'gnu_getopt')
|
||||
orig_name = 'gnu_getopt'
|
||||
else:
|
||||
orig_getopt = getattr(getopt, 'getopt')
|
||||
orig_name = 'getopt'
|
||||
|
||||
def _sneaky(*args, **kw):
|
||||
optlist, unparsed_args = orig_getopt(*args, **kw)
|
||||
sneaky_unparsed_args['value'] = unparsed_args
|
||||
return optlist, unparsed_args
|
||||
|
||||
try:
|
||||
setattr(getopt, orig_name, _sneaky)
|
||||
args = gflags.FlagValues.__call__(self, argv)
|
||||
except gflags.UnrecognizedFlagError:
|
||||
# Undefined args were found, for now we don't care so just
|
||||
# act like everything went well
|
||||
# (these three lines are copied pretty much verbatim from the end
|
||||
# of the __call__ function we are wrapping)
|
||||
unparsed_args = sneaky_unparsed_args['value']
|
||||
if unparsed_args:
|
||||
if self.IsGnuGetOpt():
|
||||
args = argv[:1] + unparsed
|
||||
else:
|
||||
args = argv[:1] + original_argv[-len(unparsed_args):]
|
||||
else:
|
||||
args = argv[:1]
|
||||
finally:
|
||||
setattr(getopt, orig_name, orig_getopt)
|
||||
|
||||
# Store the arguments for later, we'll need them for new flags
|
||||
# added at runtime
|
||||
self.__dict__['__stored_argv'] = original_argv
|
||||
self.__dict__['__was_already_parsed'] = True
|
||||
self.ClearDirty()
|
||||
return args
|
||||
|
||||
def SetDirty(self, name):
|
||||
"""Mark a flag as dirty so that accessing it will case a reparse."""
|
||||
self.__dict__['__dirty'].append(name)
|
||||
|
||||
def IsDirty(self, name):
|
||||
return name in self.__dict__['__dirty']
|
||||
|
||||
def ClearDirty(self):
|
||||
self.__dict__['__is_dirty'] = []
|
||||
|
||||
def WasAlreadyParsed(self):
|
||||
return self.__dict__['__was_already_parsed']
|
||||
|
||||
def ParseNewFlags(self):
|
||||
if '__stored_argv' not in self.__dict__:
|
||||
return
|
||||
new_flags = FlagValues()
|
||||
for k in self.__dict__['__dirty']:
|
||||
new_flags[k] = gflags.FlagValues.__getitem__(self, k)
|
||||
|
||||
new_flags(self.__dict__['__stored_argv'])
|
||||
for k in self.__dict__['__dirty']:
|
||||
setattr(self, k, getattr(new_flags, k))
|
||||
self.ClearDirty()
|
||||
|
||||
def __setitem__(self, name, flag):
|
||||
gflags.FlagValues.__setitem__(self, name, flag)
|
||||
if self.WasAlreadyParsed():
|
||||
self.SetDirty(name)
|
||||
|
||||
def __getitem__(self, name):
|
||||
if self.IsDirty(name):
|
||||
self.ParseNewFlags()
|
||||
return gflags.FlagValues.__getitem__(self, name)
|
||||
|
||||
def __getattr__(self, name):
|
||||
if self.IsDirty(name):
|
||||
self.ParseNewFlags()
|
||||
return gflags.FlagValues.__getattr__(self, name)
|
||||
|
||||
|
||||
FLAGS = FlagValues()
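The behaviour promised in the class docstring, keeping the original argv so it can be replayed when a flag is defined after parsing, is what the dirty-flag bookkeeping above implements, and the new nova/tests/flags_unittest.py later in this commit exercises it. In outline it is used like this (a sketch of intended behaviour, not standalone: it assumes nova.flags and python-gflags are importable):

    from nova import flags

    FLAGS = flags.FLAGS
    FLAGS(['prog', '--runtime_answer=60'])    # unknown flag: ignored, argv kept

    # Defining the flag afterwards marks it dirty; the next access replays the
    # stored argv, so the value given on the command line wins.
    flags.DEFINE_integer('runtime_answer', 0, 'demo flag')
    print(FLAGS.runtime_answer)               # 60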
|
||||
|
||||
|
||||
def _wrapper(func):
|
||||
def _wrapped(*args, **kw):
|
||||
kw.setdefault('flag_values', FLAGS)
|
||||
func(*args, **kw)
|
||||
_wrapped.func_name = func.func_name
|
||||
return _wrapped
|
||||
|
||||
|
||||
DEFINE_string = _wrapper(gflags.DEFINE_string)
|
||||
DEFINE_integer = _wrapper(gflags.DEFINE_integer)
|
||||
DEFINE_bool = _wrapper(gflags.DEFINE_bool)
|
||||
DEFINE_boolean = _wrapper(gflags.DEFINE_boolean)
|
||||
DEFINE_float = _wrapper(gflags.DEFINE_float)
|
||||
DEFINE_enum = _wrapper(gflags.DEFINE_enum)
|
||||
DEFINE_list = _wrapper(gflags.DEFINE_list)
|
||||
DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist)
|
||||
DEFINE_multistring = _wrapper(gflags.DEFINE_multistring)
|
||||
DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int)
|
||||
|
||||
|
||||
def DECLARE(name, module_string, flag_values=FLAGS):
|
||||
if module_string not in sys.modules:
|
||||
__import__(module_string, globals(), locals())
|
||||
if name not in flag_values:
|
||||
raise gflags.UnrecognizedFlag(
|
||||
"%s not defined by %s" % (name, module_string))
|
||||
|
||||
# This keeps pylint from barfing on the imports
|
||||
FLAGS = FLAGS
|
||||
DEFINE_string = DEFINE_string
|
||||
DEFINE_integer = DEFINE_integer
|
||||
DEFINE_bool = DEFINE_bool
|
||||
|
||||
# __GLOBAL FLAGS ONLY__
|
||||
# Define any app-specific flags in their own files, docs at:
|
||||
|
@@ -52,13 +52,8 @@ def stop(pidfile):
|
||||
"""
|
||||
# Get the pid from the pidfile
|
||||
try:
|
||||
pf = file(pidfile,'r')
|
||||
pid = int(pf.read().strip())
|
||||
pf.close()
|
||||
pid = int(open(pidfile,'r').read().strip())
|
||||
except IOError:
|
||||
pid = None
|
||||
|
||||
if not pid:
|
||||
message = "pidfile %s does not exist. Daemon not running?\n"
|
||||
sys.stderr.write(message % pidfile)
|
||||
return # not an error in a restart
|
||||
@@ -79,14 +74,15 @@ def stop(pidfile):
|
||||
|
||||
|
||||
def serve(name, main):
|
||||
"""Controller for server"""
|
||||
argv = FLAGS(sys.argv)
|
||||
|
||||
if not FLAGS.pidfile:
|
||||
FLAGS.pidfile = '%s.pid' % name
|
||||
|
||||
logging.debug("Full set of FLAGS: \n\n\n" )
|
||||
logging.debug("Full set of FLAGS: \n\n\n")
|
||||
for flag in FLAGS:
|
||||
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None) ))
|
||||
logging.debug("%s : %s", flag, FLAGS.get(flag, None))
|
||||
|
||||
action = 'start'
|
||||
if len(argv) > 1:
|
||||
@@ -102,7 +98,11 @@ def serve(name, main):
|
||||
else:
|
||||
print 'usage: %s [options] [start|stop|restart]' % argv[0]
|
||||
sys.exit(1)
|
||||
daemonize(argv, name, main)
|
||||
|
||||
|
||||
def daemonize(args, name, main):
|
||||
"""Does the work of daemonizing the process"""
|
||||
logging.getLogger('amqplib').setLevel(logging.WARN)
|
||||
if FLAGS.daemonize:
|
||||
logger = logging.getLogger()
|
||||
@@ -115,7 +115,7 @@ def serve(name, main):
|
||||
else:
|
||||
if not FLAGS.logfile:
|
||||
FLAGS.logfile = '%s.log' % name
|
||||
logfile = logging.handlers.FileHandler(FLAGS.logfile)
|
||||
logfile = logging.FileHandler(FLAGS.logfile)
|
||||
logfile.setFormatter(formatter)
|
||||
logger.addHandler(logfile)
|
||||
stdin, stdout, stderr = None, None, None
|
||||
@@ -137,4 +137,4 @@ def serve(name, main):
|
||||
stdout=stdout,
|
||||
stderr=stderr
|
||||
):
|
||||
main(argv)
|
||||
main(args)
|
||||
|
@@ -135,10 +135,18 @@ class AuthTestCase(test.BaseTestCase):
|
||||
self.manager.add_to_project('test2', 'testproj')
|
||||
self.assertTrue(self.manager.get_project('testproj').has_member('test2'))
|
||||
|
||||
def test_208_can_remove_user_from_project(self):
|
||||
def test_207_can_remove_user_from_project(self):
|
||||
self.manager.remove_from_project('test2', 'testproj')
|
||||
self.assertFalse(self.manager.get_project('testproj').has_member('test2'))
|
||||
|
||||
def test_208_can_remove_add_user_with_role(self):
|
||||
self.manager.add_to_project('test2', 'testproj')
|
||||
self.manager.add_role('test2', 'developer', 'testproj')
|
||||
self.manager.remove_from_project('test2', 'testproj')
|
||||
self.assertFalse(self.manager.has_role('test2', 'developer', 'testproj'))
|
||||
self.manager.add_to_project('test2', 'testproj')
|
||||
self.manager.remove_from_project('test2', 'testproj')
|
||||
|
||||
def test_209_can_generate_x509(self):
|
||||
# MUST HAVE RUN CLOUD SETUP BY NOW
|
||||
self.cloud = cloud.CloudController()
|
||||
|
@@ -16,25 +16,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
'''
|
||||
Utility methods for working with WSGI servers
|
||||
'''
|
||||
from nova import flags
|
||||
|
||||
class Util(object):
|
||||
|
||||
@staticmethod
|
||||
def route(reqstr, controllers):
|
||||
if len(reqstr) == 0:
|
||||
return Util.select_root_controller(controllers), []
|
||||
parts = [x for x in reqstr.split("/") if len(x) > 0]
|
||||
if len(parts) == 0:
|
||||
return Util.select_root_controller(controllers), []
|
||||
return controllers[parts[0]], parts[1:]
|
||||
|
||||
@staticmethod
|
||||
def select_root_controller(controllers):
|
||||
if '' in controllers:
|
||||
return controllers['']
|
||||
else:
|
||||
return None
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
flags.DEFINE_integer('answer', 42, 'test flag')
|
nova/tests/flags_unittest.py (new file)
@@ -0,0 +1,87 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import test
|
||||
|
||||
|
||||
class FlagsTestCase(test.TrialTestCase):
|
||||
def setUp(self):
|
||||
super(FlagsTestCase, self).setUp()
|
||||
self.FLAGS = flags.FlagValues()
|
||||
self.global_FLAGS = flags.FLAGS
|
||||
|
||||
def test_define(self):
|
||||
self.assert_('string' not in self.FLAGS)
|
||||
self.assert_('int' not in self.FLAGS)
|
||||
self.assert_('false' not in self.FLAGS)
|
||||
self.assert_('true' not in self.FLAGS)
|
||||
|
||||
flags.DEFINE_string('string', 'default', 'desc', flag_values=self.FLAGS)
|
||||
flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS)
|
||||
flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS)
|
||||
flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS)
|
||||
|
||||
self.assert_(self.FLAGS['string'])
|
||||
self.assert_(self.FLAGS['int'])
|
||||
self.assert_(self.FLAGS['false'])
|
||||
self.assert_(self.FLAGS['true'])
|
||||
self.assertEqual(self.FLAGS.string, 'default')
|
||||
self.assertEqual(self.FLAGS.int, 1)
|
||||
self.assertEqual(self.FLAGS.false, False)
|
||||
self.assertEqual(self.FLAGS.true, True)
|
||||
|
||||
argv = ['flags_test',
|
||||
'--string', 'foo',
|
||||
'--int', '2',
|
||||
'--false',
|
||||
'--notrue']
|
||||
|
||||
self.FLAGS(argv)
|
||||
self.assertEqual(self.FLAGS.string, 'foo')
|
||||
self.assertEqual(self.FLAGS.int, 2)
|
||||
self.assertEqual(self.FLAGS.false, True)
|
||||
self.assertEqual(self.FLAGS.true, False)
|
||||
|
||||
def test_declare(self):
|
||||
self.assert_('answer' not in self.global_FLAGS)
|
||||
flags.DECLARE('answer', 'nova.tests.declare_flags')
|
||||
self.assert_('answer' in self.global_FLAGS)
|
||||
self.assertEqual(self.global_FLAGS.answer, 42)
|
||||
|
||||
# Make sure we don't overwrite anything
|
||||
self.global_FLAGS.answer = 256
|
||||
self.assertEqual(self.global_FLAGS.answer, 256)
|
||||
flags.DECLARE('answer', 'nova.tests.declare_flags')
|
||||
self.assertEqual(self.global_FLAGS.answer, 256)
|
||||
|
||||
def test_runtime_and_unknown_flags(self):
|
||||
self.assert_('runtime_answer' not in self.global_FLAGS)
|
||||
|
||||
argv = ['flags_test', '--runtime_answer=60', 'extra_arg']
|
||||
args = self.global_FLAGS(argv)
|
||||
self.assertEqual(len(args), 2)
|
||||
self.assertEqual(args[1], 'extra_arg')
|
||||
|
||||
self.assert_('runtime_answer' not in self.global_FLAGS)
|
||||
|
||||
import nova.tests.runtime_flags
|
||||
|
||||
self.assert_('runtime_answer' in self.global_FLAGS)
|
||||
self.assertEqual(self.global_FLAGS.runtime_answer, 60)
|
@@ -19,9 +19,7 @@
|
||||
from datetime import datetime, timedelta
|
||||
import logging
|
||||
import time
|
||||
from twisted.internet import defer
|
||||
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import test
|
||||
from nova import utils
|
||||
@@ -49,9 +47,9 @@ class ModelTestCase(test.TrialTestCase):
|
||||
inst['user_id'] = 'fake'
|
||||
inst['project_id'] = 'fake'
|
||||
inst['instance_type'] = 'm1.tiny'
|
||||
inst['node_name'] = FLAGS.node_name
|
||||
inst['mac_address'] = utils.generate_mac()
|
||||
inst['ami_launch_index'] = 0
|
||||
inst['private_dns_name'] = '10.0.0.1'
|
||||
inst.save()
|
||||
return inst
|
||||
|
||||
@@ -71,118 +69,126 @@ class ModelTestCase(test.TrialTestCase):
|
||||
session_token.save()
|
||||
return session_token
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_create_instance(self):
|
||||
"""store with create_instace, then test that a load finds it"""
|
||||
instance = yield self.create_instance()
|
||||
old = yield model.Instance(instance.identifier)
|
||||
instance = self.create_instance()
|
||||
old = model.Instance(instance.identifier)
|
||||
self.assertFalse(old.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_delete_instance(self):
|
||||
"""create, then destroy, then make sure loads a new record"""
|
||||
instance = yield self.create_instance()
|
||||
yield instance.destroy()
|
||||
newinst = yield model.Instance('i-test')
|
||||
instance = self.create_instance()
|
||||
instance.destroy()
|
||||
newinst = model.Instance('i-test')
|
||||
self.assertTrue(newinst.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_instance_added_to_set(self):
|
||||
"""create, then check that it is listed for the project"""
|
||||
instance = yield self.create_instance()
|
||||
"""create, then check that it is listed in global set"""
|
||||
instance = self.create_instance()
|
||||
found = False
|
||||
for x in model.InstanceDirectory().all:
|
||||
if x.identifier == 'i-test':
|
||||
found = True
|
||||
self.assert_(found)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_instance_associates_project(self):
|
||||
"""create, then check that it is listed for the project"""
|
||||
instance = yield self.create_instance()
|
||||
instance = self.create_instance()
|
||||
found = False
|
||||
for x in model.InstanceDirectory().by_project(instance.project):
|
||||
if x.identifier == 'i-test':
|
||||
found = True
|
||||
self.assert_(found)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_instance_associates_ip(self):
|
||||
"""create, then check that it is listed for the ip"""
|
||||
instance = self.create_instance()
|
||||
found = False
|
||||
x = model.InstanceDirectory().by_ip(instance['private_dns_name'])
|
||||
self.assertEqual(x.identifier, 'i-test')
|
||||
|
||||
def test_instance_associates_node(self):
|
||||
"""create, then check that it is listed for the node_name"""
|
||||
instance = self.create_instance()
|
||||
found = False
|
||||
for x in model.InstanceDirectory().by_node(FLAGS.node_name):
|
||||
if x.identifier == 'i-test':
|
||||
found = True
|
||||
self.assertFalse(found)
|
||||
instance['node_name'] = 'test_node'
|
||||
instance.save()
|
||||
for x in model.InstanceDirectory().by_node('test_node'):
|
||||
if x.identifier == 'i-test':
|
||||
found = True
|
||||
self.assert_(found)
|
||||
|
||||
|
||||
def test_host_class_finds_hosts(self):
|
||||
host = yield self.create_host()
|
||||
host = self.create_host()
|
||||
self.assertEqual('testhost', model.Host.lookup('testhost').identifier)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_host_class_doesnt_find_missing_hosts(self):
|
||||
rv = yield model.Host.lookup('woahnelly')
|
||||
rv = model.Host.lookup('woahnelly')
|
||||
self.assertEqual(None, rv)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_create_host(self):
|
||||
"""store with create_host, then test that a load finds it"""
|
||||
host = yield self.create_host()
|
||||
old = yield model.Host(host.identifier)
|
||||
host = self.create_host()
|
||||
old = model.Host(host.identifier)
|
||||
self.assertFalse(old.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_delete_host(self):
|
||||
"""create, then destroy, then make sure loads a new record"""
|
||||
instance = yield self.create_host()
|
||||
yield instance.destroy()
|
||||
newinst = yield model.Host('testhost')
|
||||
instance = self.create_host()
|
||||
instance.destroy()
|
||||
newinst = model.Host('testhost')
|
||||
self.assertTrue(newinst.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_host_added_to_set(self):
|
||||
"""create, then check that it is included in list"""
|
||||
instance = yield self.create_host()
|
||||
instance = self.create_host()
|
||||
found = False
|
||||
for x in model.Host.all():
|
||||
if x.identifier == 'testhost':
|
||||
found = True
|
||||
self.assert_(found)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_create_daemon_two_args(self):
|
||||
"""create a daemon with two arguments"""
|
||||
d = yield self.create_daemon()
|
||||
d = self.create_daemon()
|
||||
d = model.Daemon('testhost', 'nova-testdaemon')
|
||||
self.assertFalse(d.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_create_daemon_single_arg(self):
|
||||
"""Create a daemon using the combined host:bin format"""
|
||||
d = yield model.Daemon("testhost:nova-testdaemon")
|
||||
d = model.Daemon("testhost:nova-testdaemon")
|
||||
d.save()
|
||||
d = model.Daemon('testhost:nova-testdaemon')
|
||||
self.assertFalse(d.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_equality_of_daemon_single_and_double_args(self):
|
||||
"""Create a daemon using the combined host:bin arg, find with 2"""
|
||||
d = yield model.Daemon("testhost:nova-testdaemon")
|
||||
d = model.Daemon("testhost:nova-testdaemon")
|
||||
d.save()
|
||||
d = model.Daemon('testhost', 'nova-testdaemon')
|
||||
self.assertFalse(d.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_equality_daemon_of_double_and_single_args(self):
|
||||
"""Create a daemon using the combined host:bin arg, find with 2"""
|
||||
d = yield self.create_daemon()
|
||||
d = self.create_daemon()
|
||||
d = model.Daemon('testhost:nova-testdaemon')
|
||||
self.assertFalse(d.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_delete_daemon(self):
|
||||
"""create, then destroy, then make sure loads a new record"""
|
||||
instance = yield self.create_daemon()
|
||||
yield instance.destroy()
|
||||
newinst = yield model.Daemon('testhost', 'nova-testdaemon')
|
||||
instance = self.create_daemon()
|
||||
instance.destroy()
|
||||
newinst = model.Daemon('testhost', 'nova-testdaemon')
|
||||
self.assertTrue(newinst.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_daemon_heartbeat(self):
|
||||
"""Create a daemon, sleep, heartbeat, check for update"""
|
||||
d = yield self.create_daemon()
|
||||
d = self.create_daemon()
|
||||
ts = d['updated_at']
|
||||
time.sleep(2)
|
||||
d.heartbeat()
|
||||
@@ -190,70 +196,62 @@ class ModelTestCase(test.TrialTestCase):
|
||||
ts2 = d2['updated_at']
|
||||
self.assert_(ts2 > ts)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_daemon_added_to_set(self):
|
||||
"""create, then check that it is included in list"""
|
||||
instance = yield self.create_daemon()
|
||||
instance = self.create_daemon()
|
||||
found = False
|
||||
for x in model.Daemon.all():
|
||||
if x.identifier == 'testhost:nova-testdaemon':
|
||||
found = True
|
||||
self.assert_(found)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_daemon_associates_host(self):
|
||||
"""create, then check that it is listed for the host"""
|
||||
instance = yield self.create_daemon()
|
||||
instance = self.create_daemon()
|
||||
found = False
|
||||
for x in model.Daemon.by_host('testhost'):
|
||||
if x.identifier == 'testhost:nova-testdaemon':
|
||||
found = True
|
||||
self.assertTrue(found)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_create_session_token(self):
|
||||
"""create"""
|
||||
d = yield self.create_session_token()
|
||||
d = self.create_session_token()
|
||||
d = model.SessionToken(d.token)
|
||||
self.assertFalse(d.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_delete_session_token(self):
|
||||
"""create, then destroy, then make sure loads a new record"""
|
||||
instance = yield self.create_session_token()
|
||||
yield instance.destroy()
|
||||
newinst = yield model.SessionToken(instance.token)
|
||||
instance = self.create_session_token()
|
||||
instance.destroy()
|
||||
newinst = model.SessionToken(instance.token)
|
||||
self.assertTrue(newinst.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_session_token_added_to_set(self):
|
||||
"""create, then check that it is included in list"""
|
||||
instance = yield self.create_session_token()
|
||||
instance = self.create_session_token()
|
||||
found = False
|
||||
for x in model.SessionToken.all():
|
||||
if x.identifier == instance.token:
|
||||
found = True
|
||||
self.assert_(found)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_session_token_associates_user(self):
|
||||
"""create, then check that it is listed for the user"""
|
||||
instance = yield self.create_session_token()
|
||||
instance = self.create_session_token()
|
||||
found = False
|
||||
for x in model.SessionToken.associated_to('user', 'testuser'):
|
||||
if x.identifier == instance.identifier:
|
||||
found = True
|
||||
self.assertTrue(found)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_session_token_generation(self):
|
||||
instance = yield model.SessionToken.generate('username', 'TokenType')
|
||||
instance = model.SessionToken.generate('username', 'TokenType')
|
||||
self.assertFalse(instance.is_new_record())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_find_generated_session_token(self):
|
||||
instance = yield model.SessionToken.generate('username', 'TokenType')
|
||||
found = yield model.SessionToken.lookup(instance.identifier)
|
||||
instance = model.SessionToken.generate('username', 'TokenType')
|
||||
found = model.SessionToken.lookup(instance.identifier)
|
||||
self.assert_(found)
|
||||
|
||||
def test_update_session_token_expiry(self):
|
||||
@@ -264,34 +262,29 @@ class ModelTestCase(test.TrialTestCase):
|
||||
expiry = utils.parse_isotime(instance['expiry'])
|
||||
self.assert_(expiry > datetime.utcnow())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_session_token_lookup_when_expired(self):
|
||||
instance = yield model.SessionToken.generate("testuser")
|
||||
instance = model.SessionToken.generate("testuser")
|
||||
instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
|
||||
instance.save()
|
||||
inst = model.SessionToken.lookup(instance.identifier)
|
||||
self.assertFalse(inst)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_session_token_lookup_when_not_expired(self):
|
||||
instance = yield model.SessionToken.generate("testuser")
|
||||
instance = model.SessionToken.generate("testuser")
|
||||
inst = model.SessionToken.lookup(instance.identifier)
|
||||
self.assert_(inst)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_session_token_is_expired_when_expired(self):
|
||||
instance = yield model.SessionToken.generate("testuser")
|
||||
instance = model.SessionToken.generate("testuser")
|
||||
instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
|
||||
self.assert_(instance.is_expired())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_session_token_is_expired_when_not_expired(self):
|
||||
instance = yield model.SessionToken.generate("testuser")
|
||||
instance = model.SessionToken.generate("testuser")
|
||||
self.assertFalse(instance.is_expired())
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def test_session_token_ttl(self):
|
||||
instance = yield model.SessionToken.generate("testuser")
|
||||
instance = model.SessionToken.generate("testuser")
|
||||
now = datetime.utcnow()
|
||||
delta = timedelta(hours=1)
|
||||
instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT)
|
||||
|
@@ -16,6 +16,10 @@
# License for the specific language governing permissions and limitations
# under the License.

"""
Unittests for S3 objectstore clone.
"""

import boto
import glob
import hashlib
@@ -24,76 +28,69 @@ import os
import shutil
import tempfile

from nova import flags
from nova import objectstore
from nova.objectstore import bucket # for buckets_path flag
from nova.objectstore import image # for images_path flag
from nova import test
from nova.auth import manager
from nova.objectstore.handler import S3
from nova.exception import NotEmpty, NotFound, NotAuthorized

from boto.s3.connection import S3Connection, OrdinaryCallingFormat
from twisted.internet import reactor, threads, defer
from twisted.web import http, server

from nova import flags
from nova import objectstore
from nova import test
from nova.auth import manager
from nova.exception import NotEmpty, NotFound
from nova.objectstore import image
from nova.objectstore.handler import S3


FLAGS = flags.FLAGS

oss_tempdir = tempfile.mkdtemp(prefix='test_oss-')
# Create a unique temporary directory. We don't delete after test to
# allow checking the contents after running tests. Users and/or tools
# running the tests need to remove the tests directories.
OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')

# Create bucket/images path
os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))

# delete tempdirs from previous runs (we don't delete after test to allow
# checking the contents after running tests)
# TODO: This fails on the test box with a permission denied error
# Also, doing these things in a global tempdir means that different runs of
# the test suite on the same box could clobber each other.
#for path in glob.glob(os.path.abspath(os.path.join(oss_tempdir, '../test_oss-*'))):
#    if path != oss_tempdir:
#        shutil.rmtree(path)


# create bucket/images path
os.makedirs(os.path.join(oss_tempdir, 'images'))
os.makedirs(os.path.join(oss_tempdir, 'buckets'))

class ObjectStoreTestCase(test.BaseTestCase):
def setUp(self):
"""Test objectstore API directly."""

def setUp(self): # pylint: disable-msg=C0103
"""Setup users and projects."""
super(ObjectStoreTestCase, self).setUp()
self.flags(buckets_path=os.path.join(oss_tempdir, 'buckets'),
images_path=os.path.join(oss_tempdir, 'images'),
self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
images_path=os.path.join(OSS_TEMPDIR, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
logging.getLogger().setLevel(logging.DEBUG)

self.um = manager.AuthManager()
try:
self.um.create_user('user1')
except: pass
try:
self.um.create_user('user2')
except: pass
try:
self.um.create_user('admin_user', admin=True)
except: pass
try:
self.um.create_project('proj1', 'user1', 'a proj', ['user1'])
except: pass
try:
self.um.create_project('proj2', 'user2', 'a proj', ['user2'])
except: pass
class Context(object): pass
self.auth_manager = manager.AuthManager()
self.auth_manager.create_user('user1')
self.auth_manager.create_user('user2')
self.auth_manager.create_user('admin_user', admin=True)
self.auth_manager.create_project('proj1', 'user1', 'a proj', ['user1'])
self.auth_manager.create_project('proj2', 'user2', 'a proj', ['user2'])

class Context(object):
"""Dummy context for running tests."""
user = None
project = None

self.context = Context()

def tearDown(self):
self.um.delete_project('proj1')
self.um.delete_project('proj2')
self.um.delete_user('user1')
self.um.delete_user('user2')
self.um.delete_user('admin_user')
def tearDown(self): # pylint: disable-msg=C0103
"""Tear down users and projects."""
self.auth_manager.delete_project('proj1')
self.auth_manager.delete_project('proj2')
self.auth_manager.delete_user('user1')
self.auth_manager.delete_user('user2')
self.auth_manager.delete_user('admin_user')
super(ObjectStoreTestCase, self).tearDown()

def test_buckets(self):
self.context.user = self.um.get_user('user1')
self.context.project = self.um.get_project('proj1')
"""Test the bucket API."""
self.context.user = self.auth_manager.get_user('user1')
self.context.project = self.auth_manager.get_project('proj1')
objectstore.bucket.Bucket.create('new_bucket', self.context)
bucket = objectstore.bucket.Bucket('new_bucket')

@@ -101,12 +98,12 @@ class ObjectStoreTestCase(test.BaseTestCase):
self.assert_(bucket.is_authorized(self.context))

# another user is not authorized
self.context.user = self.um.get_user('user2')
self.context.project = self.um.get_project('proj2')
self.context.user = self.auth_manager.get_user('user2')
self.context.project = self.auth_manager.get_project('proj2')
self.assertFalse(bucket.is_authorized(self.context))

# admin is authorized to use bucket
self.context.user = self.um.get_user('admin_user')
self.context.user = self.auth_manager.get_user('admin_user')
self.context.project = None
self.assertTrue(bucket.is_authorized(self.context))

@@ -136,8 +133,9 @@ class ObjectStoreTestCase(test.BaseTestCase):
self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')

def test_images(self):
self.context.user = self.um.get_user('user1')
self.context.project = self.um.get_project('proj1')
"Test the image API."
self.context.user = self.auth_manager.get_user('user1')
self.context.project = self.auth_manager.get_project('proj1')

# create a bucket for our bundle
objectstore.bucket.Bucket.create('image_bucket', self.context)
@@ -149,10 +147,12 @@ class ObjectStoreTestCase(test.BaseTestCase):
bucket[os.path.basename(path)] = open(path, 'rb').read()

# register an image
objectstore.image.Image.register_aws_image('i-testing', 'image_bucket/1mb.manifest.xml', self.context)
image.Image.register_aws_image('i-testing',
'image_bucket/1mb.manifest.xml',
self.context)

# verify image
my_img = objectstore.image.Image('i-testing')
my_img = image.Image('i-testing')
result_image_file = os.path.join(my_img.path, 'image')
self.assertEqual(os.stat(result_image_file).st_size, 1048576)

@@ -160,38 +160,48 @@ class ObjectStoreTestCase(test.BaseTestCase):
self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3')

# verify image permissions
self.context.user = self.um.get_user('user2')
self.context.project = self.um.get_project('proj2')
self.context.user = self.auth_manager.get_user('user2')
self.context.project = self.auth_manager.get_project('proj2')
self.assertFalse(my_img.is_authorized(self.context))


class TestHTTPChannel(http.HTTPChannel):
# Otherwise we end up with an unclean reactor
def checkPersistence(self, _, __):
"""Dummy site required for twisted.web"""

def checkPersistence(self, _, __): # pylint: disable-msg=C0103
"""Otherwise we end up with an unclean reactor."""
return False


class TestSite(server.Site):
"""Dummy site required for twisted.web"""
protocol = TestHTTPChannel


class S3APITestCase(test.TrialTestCase):
def setUp(self):
"""Test objectstore through S3 API."""

def setUp(self): # pylint: disable-msg=C0103
"""Setup users, projects, and start a test server."""
super(S3APITestCase, self).setUp()

FLAGS.auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
FLAGS.buckets_path = os.path.join(oss_tempdir, 'buckets')
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver',
FLAGS.buckets_path = os.path.join(OSS_TEMPDIR, 'buckets')

self.um = manager.AuthManager()
self.admin_user = self.um.create_user('admin', admin=True)
self.admin_project = self.um.create_project('admin', self.admin_user)
self.auth_manager = manager.AuthManager()
self.admin_user = self.auth_manager.create_user('admin', admin=True)
self.admin_project = self.auth_manager.create_project('admin',
self.admin_user)

shutil.rmtree(FLAGS.buckets_path)
os.mkdir(FLAGS.buckets_path)

root = S3()
self.site = TestSite(root)
self.listening_port = reactor.listenTCP(0, self.site, interface='127.0.0.1')
# pylint: disable-msg=E1101
self.listening_port = reactor.listenTCP(0, self.site,
interface='127.0.0.1')
# pylint: enable-msg=E1101
self.tcp_port = self.listening_port.getHost().port


@@ -205,65 +215,90 @@ class S3APITestCase(test.TrialTestCase):
is_secure=False,
calling_format=OrdinaryCallingFormat())

# Don't attempt to reuse connections
def get_http_connection(host, is_secure):
"""Get a new S3 connection, don't attempt to reuse connections."""
return self.conn.new_http_connection(host, is_secure)

self.conn.get_http_connection = get_http_connection

def _ensure_empty_list(self, l):
self.assertEquals(len(l), 0, "List was not empty")
def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111
self.assertEquals(len(buckets), 0, "Bucket list was not empty")
return True

def _ensure_only_bucket(self, l, name):
self.assertEquals(len(l), 1, "List didn't have exactly one element in it")
self.assertEquals(l[0].name, name, "Wrong name")
def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111
self.assertEquals(len(buckets), 1,
"Bucket list didn't have exactly one element in it")
self.assertEquals(buckets[0].name, name, "Wrong name")
return True

def test_000_list_buckets(self):
d = threads.deferToThread(self.conn.get_all_buckets)
d.addCallback(self._ensure_empty_list)
return d
"""Make sure we are starting with no buckets."""
deferred = threads.deferToThread(self.conn.get_all_buckets)
deferred.addCallback(self._ensure_no_buckets)
return deferred

def test_001_create_and_delete_bucket(self):
"""Test bucket creation and deletion."""
bucket_name = 'testbucket'

d = threads.deferToThread(self.conn.create_bucket, bucket_name)
d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets))
deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
deferred.addCallback(lambda _:
threads.deferToThread(self.conn.get_all_buckets))

def ensure_only_bucket(l, name):
self.assertEquals(len(l), 1, "List didn't have exactly one element in it")
self.assertEquals(l[0].name, name, "Wrong name")
d.addCallback(ensure_only_bucket, bucket_name)
deferred.addCallback(self._ensure_one_bucket, bucket_name)

d.addCallback(lambda _:threads.deferToThread(self.conn.delete_bucket, bucket_name))
d.addCallback(lambda _:threads.deferToThread(self.conn.get_all_buckets))
d.addCallback(self._ensure_empty_list)
return d
deferred.addCallback(lambda _:
threads.deferToThread(self.conn.delete_bucket,
bucket_name))
deferred.addCallback(lambda _:
threads.deferToThread(self.conn.get_all_buckets))
deferred.addCallback(self._ensure_no_buckets)
return deferred

def test_002_create_bucket_and_key_and_delete_key_again(self):
"""Test key operations on buckets."""
bucket_name = 'testbucket'
key_name = 'somekey'
key_contents = 'somekey'

d = threads.deferToThread(self.conn.create_bucket, bucket_name)
d.addCallback(lambda b:threads.deferToThread(b.new_key, key_name))
d.addCallback(lambda k:threads.deferToThread(k.set_contents_from_string, key_contents))
deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
deferred.addCallback(lambda b:
threads.deferToThread(b.new_key, key_name))
deferred.addCallback(lambda k:
threads.deferToThread(k.set_contents_from_string,
key_contents))

def ensure_key_contents(bucket_name, key_name, contents):
"""Verify contents for a key in the given bucket."""
bucket = self.conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
self.assertEquals(key.get_contents_as_string(), contents, "Bad contents")
d.addCallback(lambda _:threads.deferToThread(ensure_key_contents, bucket_name, key_name, key_contents))
self.assertEquals(key.get_contents_as_string(), contents,
"Bad contents")

deferred.addCallback(lambda _:
threads.deferToThread(ensure_key_contents,
bucket_name, key_name,
key_contents))

def delete_key(bucket_name, key_name):
"""Delete a key for the given bucket."""
bucket = self.conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.delete()
d.addCallback(lambda _:threads.deferToThread(delete_key, bucket_name, key_name))
d.addCallback(lambda _:threads.deferToThread(self.conn.get_bucket, bucket_name))
d.addCallback(lambda b:threads.deferToThread(b.get_all_keys))
d.addCallback(self._ensure_empty_list)
return d

def tearDown(self):
self.um.delete_user('admin')
self.um.delete_project('admin')
return defer.DeferredList([defer.maybeDeferred(self.listening_port.stopListening)])
super(S3APITestCase, self).tearDown()
deferred.addCallback(lambda _:
threads.deferToThread(delete_key, bucket_name,
key_name))
deferred.addCallback(lambda _:
threads.deferToThread(self.conn.get_bucket,
bucket_name))
deferred.addCallback(lambda b: threads.deferToThread(b.get_all_keys))
deferred.addCallback(self._ensure_no_buckets)
return deferred

def tearDown(self): # pylint: disable-msg=C0103
"""Tear down auth and test server."""
self.auth_manager.delete_user('admin')
self.auth_manager.delete_project('admin')
stop_listening = defer.maybeDeferred(self.listening_port.stopListening)
return defer.DeferredList([stop_listening])
@@ -16,36 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.

import cloudservers
from nova import flags

class IdFake:
def __init__(self, id):
self.id = id
FLAGS = flags.FLAGS

# to get your access key:
# from nova.auth import users
# users.UserManager.instance().get_users()[0].access
rscloud = cloudservers.CloudServers(
'admin',
'6cca875e-5ab3-4c60-9852-abf5c5c60cc6'
)
rscloud.client.AUTH_URL = 'http://localhost:8773/v1.0'


rv = rscloud.servers.list()
print "SERVERS: %s" % rv

if len(rv) == 0:
server = rscloud.servers.create(
"test-server",
IdFake("ami-tiny"),
IdFake("m1.tiny")
)
print "LAUNCH: %s" % server
else:
server = rv[0]
print "Server to kill: %s" % server

raw_input("press enter key to kill the server")

server.delete()
flags.DEFINE_integer('runtime_answer', 54, 'test flag')
@@ -54,6 +54,7 @@ from nova.tests.auth_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.compute_unittest import *
from nova.tests.flags_unittest import *
from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.objectstore_unittest import *
@@ -68,7 +69,8 @@ flags.DEFINE_bool('flush_db', True,
'Flush the database before running fake tests')

flags.DEFINE_string('tests_stderr', 'run_tests.err.log',
'Path to where to pipe STDERR during test runs. Default = "run_tests.err.log"')
'Path to where to pipe STDERR during test runs. '
'Default = "run_tests.err.log"')

if __name__ == '__main__':
OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)