merged trunk and fixed errors

This commit is contained in:
Vishvananda Ishaya
2010-09-21 15:57:03 -07:00
28 changed files with 891 additions and 1989 deletions

View File

@@ -44,18 +44,20 @@ flags.DECLARE('auth_driver', 'nova.auth.manager')
flags.DECLARE('redis_db', 'nova.datastore')
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
def add_lease(_mac, ip_address, _hostname, _interface):
def add_lease(mac, ip_address, _hostname, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
logging.debug("leasing ip")
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.lease_fixed_ip(None, ip_address)
network_manager.lease_fixed_ip(None, mac, ip_address)
else:
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host),
{"method": "lease_fixed_ip",
"args": {"context": None,
"mac": mac,
"address": ip_address}})
@@ -64,16 +66,17 @@ def old_lease(_mac, _ip_address, _hostname, _interface):
logging.debug("Adopted old lease or got a change of mac/hostname")
def del_lease(_mac, ip_address, _hostname, _interface):
def del_lease(mac, ip_address, _hostname, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
logging.debug("releasing ip")
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.release_fixed_ip(None, ip_address)
network_manager.release_fixed_ip(None, mac, ip_address)
else:
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host),
{"method": "release_fixed_ip",
"args": {"context": None,
"mac": mac,
"address": ip_address}})

View File

@@ -17,15 +17,47 @@
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for nova management.
Connects to the running ADMIN api in the api daemon.
"""
import os
import sys
import time
import IPy
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -35,7 +67,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
from nova import db
from nova import exception
from nova import flags
from nova import quota
from nova import utils
from nova.auth import manager
from nova.cloudpipe import pipelib
@@ -101,6 +135,29 @@ class VpnCommands(object):
self.pipe.launch_vpn_instance(project_id)
class ShellCommands(object):
def run(self):
"Runs a Python interactive interpreter. Tries to use IPython, if it's available."
try:
import IPython
# Explicitly pass an empty list as arguments, because otherwise IPython
# would use sys.argv from this script.
shell = IPython.Shell.IPShell(argv=[])
shell.mainloop()
except ImportError:
import code
try: # Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.parse_and_bind("tab:complete")
code.interact()
class RoleCommands(object):
"""Class for managing roles."""
@@ -130,6 +187,13 @@ class RoleCommands(object):
class UserCommands(object):
"""Class for managing users."""
@staticmethod
def _print_export(user):
"""Print export variables to use with API."""
print 'export EC2_ACCESS_KEY=%s' % user.access
print 'export EC2_SECRET_KEY=%s' % user.secret
def __init__(self):
self.manager = manager.AuthManager()
@@ -137,13 +201,13 @@ class UserCommands(object):
"""creates a new admin and prints exports
arguments: name [access] [secret]"""
user = self.manager.create_user(name, access, secret, True)
print_export(user)
self._print_export(user)
def create(self, name, access=None, secret=None):
"""creates a new user and prints exports
arguments: name [access] [secret]"""
user = self.manager.create_user(name, access, secret, False)
print_export(user)
self._print_export(user)
def delete(self, name):
"""deletes an existing user
@@ -155,7 +219,7 @@ class UserCommands(object):
arguments: name"""
user = self.manager.get_user(name)
if user:
print_export(user)
self._print_export(user)
else:
print "User %s doesn't exist" % name
@@ -166,12 +230,6 @@ class UserCommands(object):
print user.name
def print_export(user):
"""Print export variables to use with API."""
print 'export EC2_ACCESS_KEY=%s' % user.access
print 'export EC2_SECRET_KEY=%s' % user.secret
class ProjectCommands(object):
"""Class for managing projects."""
@@ -206,6 +264,19 @@ class ProjectCommands(object):
for project in self.manager.get_projects():
print project.name
def quota(self, project_id, key=None, value=None):
"""Set or display quotas for project
arguments: project_id [key] [value]"""
if key:
quo = {'project_id': project_id, key: value}
try:
db.quota_update(None, project_id, quo)
except exception.NotFound:
db.quota_create(None, quo)
project_quota = quota.get_quota(None, project_id)
for key, value in project_quota.iteritems():
print '%s: %s' % (key, value)
def remove(self, project, user):
"""Removes user from project
arguments: project user"""
@@ -219,11 +290,46 @@ class ProjectCommands(object):
f.write(zip_file)
class FloatingIpCommands(object):
"""Class for managing floating ip."""
def create(self, host, range):
"""Creates floating ips for host by range
arguments: host ip_range"""
for address in IPy.IP(range):
db.floating_ip_create(None, {'address': str(address),
'host': host})
def delete(self, ip_range):
"""Deletes floating ips by range
arguments: range"""
for address in IPy.IP(ip_range):
db.floating_ip_destroy(None, str(address))
def list(self, host=None):
"""Lists all floating ips (optionally by host)
arguments: [host]"""
if host == None:
floating_ips = db.floating_ip_get_all(None)
else:
floating_ips = db.floating_ip_get_all_by_host(None, host)
for floating_ip in floating_ips:
instance = None
if floating_ip['fixed_ip']:
instance = floating_ip['fixed_ip']['instance']['str_id']
print "%s\t%s\t%s" % (floating_ip['host'],
floating_ip['address'],
instance)
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
('role', RoleCommands),
('shell', ShellCommands),
('vpn', VpnCommands),
('floating', FloatingIpCommands)
]
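A brief sketch of the IPy iteration FloatingIpCommands relies on (hedged: requires the IPy package; values illustrative). Iterating a CIDR block yields every address in it, including the network and broadcast addresses, so ranges should be chosen accordingly:

import IPy

for address in IPy.IP('10.1.0.0/30'):
    print str(address)   # 10.1.0.0, 10.1.0.1, 10.1.0.2, 10.1.0.3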

bin/nova-scheduler (new executable file, 43 additions)
View File

@@ -0,0 +1,43 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Twistd daemon for the nova scheduler nodes.
"""
import os
import sys
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
from nova import service
from nova import twistd
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
application = service.Service.create()
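A note on the two __name__ checks above (hedged, though this matches standard Twisted behavior): running the script directly takes the '__main__' branch, and twistd.serve(__file__) re-invokes twistd on this same file as a .tac-style application file. Twisted executes such files in a fresh namespace whose __name__ resolves to '__builtin__', so the second branch runs and the module-level application object is what twistd actually serves.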

View File

@@ -33,6 +33,7 @@ SCOPE_ONELEVEL = 1 # not implemented
SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
MOD_REPLACE = 2
class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103
@@ -175,7 +176,7 @@ class FakeLDAP(object):
Args:
dn -- a dn
attrs -- a list of tuples in the following form:
([MOD_ADD | MOD_DELETE], attribute, value)
([MOD_ADD | MOD_DELETE | MOD_REPLACE], attribute, value)
"""
redis = datastore.Redis.instance()
@@ -185,6 +186,8 @@ class FakeLDAP(object):
values = _from_json(redis.hget(key, k))
if cmd == MOD_ADD:
values.append(v)
elif cmd == MOD_REPLACE:
values = [v]
else:
values.remove(v)
values = redis.hset(key, k, _to_json(values))
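A hedged usage sketch of the new MOD_REPLACE path (dn and attribute are hypothetical; the tuple format mirrors python-ldap's modify_s):

conn = FakeLDAP()
conn.modify_s('uid=someuser,ou=Users,dc=example,dc=com',
              [(MOD_REPLACE, 'description', 'replacement value')])
# the stored 'description' list is now exactly ['replacement value'],
# regardless of what it held before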

View File

@@ -99,13 +99,6 @@ class LdapDriver(object):
dn = FLAGS.ldap_user_subtree
return self.__to_user(self.__find_object(dn, query))
def get_key_pair(self, uid, key_name):
"""Retrieve key pair by uid and key name"""
dn = 'cn=%s,%s' % (key_name,
self.__uid_to_dn(uid))
attr = self.__find_object(dn, '(objectclass=novaKeyPair)')
return self.__to_key_pair(uid, attr)
def get_project(self, pid):
"""Retrieve project by id"""
dn = 'cn=%s,%s' % (pid,
@@ -119,12 +112,6 @@ class LdapDriver(object):
'(objectclass=novaUser)')
return [self.__to_user(attr) for attr in attrs]
def get_key_pairs(self, uid):
"""Retrieve list of key pairs"""
attrs = self.__find_objects(self.__uid_to_dn(uid),
'(objectclass=novaKeyPair)')
return [self.__to_key_pair(uid, attr) for attr in attrs]
def get_projects(self, uid=None):
"""Retrieve list of projects"""
pattern = '(objectclass=novaProject)'
@@ -154,21 +141,6 @@ class LdapDriver(object):
self.conn.add_s(self.__uid_to_dn(name), attr)
return self.__to_user(dict(attr))
def create_key_pair(self, uid, key_name, public_key, fingerprint):
"""Create a key pair"""
# TODO(vish): possibly refactor this to store keys in their own ou
# and put dn reference in the user object
attr = [
('objectclass', ['novaKeyPair']),
('cn', [key_name]),
('sshPublicKey', [public_key]),
('keyFingerprint', [fingerprint]),
]
self.conn.add_s('cn=%s,%s' % (key_name,
self.__uid_to_dn(uid)),
attr)
return self.__to_key_pair(uid, dict(attr))
def create_project(self, name, manager_uid,
description=None, member_uids=None):
"""Create a project"""
@@ -202,6 +174,24 @@ class LdapDriver(object):
self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr)
return self.__to_project(dict(attr))
def modify_project(self, project_id, manager_uid=None, description=None):
"""Modify an existing project"""
if not manager_uid and not description:
return
attr = []
if manager_uid:
if not self.__user_exists(manager_uid):
raise exception.NotFound("Project can't be modified because "
"manager %s doesn't exist" %
manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn))
if description:
attr.append((self.ldap.MOD_REPLACE, 'description', description))
self.conn.modify_s('cn=%s,%s' % (project_id,
FLAGS.ldap_project_subtree),
attr)
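For illustration, a change of both manager and description would hand modify_s an attribute list roughly like this (hedged; the dns are hypothetical and depend on the subtree flags):

attr = [(self.ldap.MOD_REPLACE, 'projectManager',
         'uid=newmanager,ou=Users,dc=example,dc=com'),
        (self.ldap.MOD_REPLACE, 'description', 'an updated description')]
self.conn.modify_s('cn=myproject,ou=Groups,dc=example,dc=com', attr)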
def add_to_project(self, uid, project_id):
"""Add user to project"""
dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
@@ -265,19 +255,10 @@ class LdapDriver(object):
"""Delete a user"""
if not self.__user_exists(uid):
raise exception.NotFound("User %s doesn't exist" % uid)
self.__delete_key_pairs(uid)
self.__remove_from_all(uid)
self.conn.delete_s('uid=%s,%s' % (uid,
FLAGS.ldap_user_subtree))
def delete_key_pair(self, uid, key_name):
"""Delete a key pair"""
if not self.__key_pair_exists(uid, key_name):
raise exception.NotFound("Key Pair %s doesn't exist for user %s" %
(key_name, uid))
self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
FLAGS.ldap_user_subtree))
def delete_project(self, project_id):
"""Delete a project"""
project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree)
@@ -288,10 +269,6 @@ class LdapDriver(object):
"""Check if user exists"""
return self.get_user(uid) != None
def __key_pair_exists(self, uid, key_name):
"""Check if key pair exists"""
return self.get_key_pair(uid, key_name) != None
def __project_exists(self, project_id):
"""Check if project exists"""
return self.get_project(project_id) != None
@@ -341,13 +318,6 @@ class LdapDriver(object):
"""Check if group exists"""
return self.__find_object(dn, '(objectclass=groupOfNames)') != None
def __delete_key_pairs(self, uid):
"""Delete all key pairs for user"""
keys = self.get_key_pairs(uid)
if keys != None:
for key in keys:
self.delete_key_pair(uid, key['name'])
@staticmethod
def __role_to_dn(role, project_id=None):
"""Convert role to corresponding dn"""
@@ -472,18 +442,6 @@ class LdapDriver(object):
'secret': attr['secretKey'][0],
'admin': (attr['isAdmin'][0] == 'TRUE')}
@staticmethod
def __to_key_pair(owner, attr):
"""Convert ldap attributes to KeyPair object"""
if attr == None:
return None
return {
'id': attr['cn'][0],
'name': attr['cn'][0],
'owner_id': owner,
'public_key': attr['sshPublicKey'][0],
'fingerprint': attr['keyFingerprint'][0]}
def __to_project(self, attr):
"""Convert ldap attributes to Project object"""
if attr == None:

View File

@@ -128,24 +128,6 @@ class User(AuthBase):
def is_project_manager(self, project):
return AuthManager().is_project_manager(self, project)
def generate_key_pair(self, name):
return AuthManager().generate_key_pair(self.id, name)
def create_key_pair(self, name, public_key, fingerprint):
return AuthManager().create_key_pair(self.id,
name,
public_key,
fingerprint)
def get_key_pair(self, name):
return AuthManager().get_key_pair(self.id, name)
def delete_key_pair(self, name):
return AuthManager().delete_key_pair(self.id, name)
def get_key_pairs(self):
return AuthManager().get_key_pairs(self.id)
def __repr__(self):
return "User('%s', '%s', '%s', '%s', %s)" % (self.id,
self.name,
@@ -154,29 +136,6 @@ class User(AuthBase):
self.admin)
class KeyPair(AuthBase):
"""Represents an ssh key returned from the datastore
Even though this object is named KeyPair, only the public key and
fingerprint is stored. The user's private key is not saved.
"""
def __init__(self, id, name, owner_id, public_key, fingerprint):
AuthBase.__init__(self)
self.id = id
self.name = name
self.owner_id = owner_id
self.public_key = public_key
self.fingerprint = fingerprint
def __repr__(self):
return "KeyPair('%s', '%s', '%s', '%s', '%s')" % (self.id,
self.name,
self.owner_id,
self.public_key,
self.fingerprint)
class Project(AuthBase):
"""Represents a Project returned from the datastore"""
@@ -539,6 +498,26 @@ class AuthManager(object):
db.security_group_create({}, values)
return project
def modify_project(self, project, manager_user=None, description=None):
"""Modify a project
@type project: Project or project_id
@param project: The project to modify.
@type manager_user: User or uid
@param manager_user: This user will be the new project manager.
@type description: str
@param description: This will be the new description of the project.
"""
if manager_user:
manager_user = User.safe_id(manager_user)
with self.driver() as drv:
drv.modify_project(Project.safe_id(project),
manager_user,
description)
def add_to_project(self, user, project):
"""Add user to project"""
with self.driver() as drv:
@@ -659,67 +638,13 @@ class AuthManager(object):
return User(**user_dict)
def delete_user(self, user):
"""Deletes a user"""
with self.driver() as drv:
drv.delete_user(User.safe_id(user))
def delete_user(self, user):
"""Deletes a user
Additionally deletes all of the user's key_pairs"""
uid = User.safe_id(user)
db.key_pair_destroy_all_by_user(None, uid)
with self.driver() as drv:
drv.delete_user(uid)
def generate_key_pair(self, user, key_name):
"""Generates a key pair for a user
Generates a public and private key, stores the public key using the
key_name, and returns the private key and fingerprint.
@type user: User or uid
@param user: User for which to create key pair.
@type key_name: str
@param key_name: Name to use for the generated KeyPair.
@rtype: tuple (private_key, fingerprint)
@return: A tuple containing the private_key and fingerprint.
"""
# NOTE(vish): generating key pair is slow so check for legal
# creation before creating keypair
uid = User.safe_id(user)
with self.driver() as drv:
if not drv.get_user(uid):
raise exception.NotFound("User %s doesn't exist" % user)
if drv.get_key_pair(uid, key_name):
raise exception.Duplicate("The keypair %s already exists"
% key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
self.create_key_pair(uid, key_name, public_key, fingerprint)
return private_key, fingerprint
def create_key_pair(self, user, key_name, public_key, fingerprint):
"""Creates a key pair for user"""
with self.driver() as drv:
kp_dict = drv.create_key_pair(User.safe_id(user),
key_name,
public_key,
fingerprint)
if kp_dict:
return KeyPair(**kp_dict)
def get_key_pair(self, user, key_name):
"""Retrieves a key pair for user"""
with self.driver() as drv:
kp_dict = drv.get_key_pair(User.safe_id(user), key_name)
if kp_dict:
return KeyPair(**kp_dict)
def get_key_pairs(self, user):
"""Retrieves all key pairs for user"""
with self.driver() as drv:
kp_list = drv.get_key_pairs(User.safe_id(user))
if not kp_list:
return []
return [KeyPair(**kp_dict) for kp_dict in kp_list]
def delete_key_pair(self, user, key_name):
"""Deletes a key pair for user"""
with self.driver() as drv:
drv.delete_key_pair(User.safe_id(user), key_name)
def get_credentials(self, user, project=None):
"""Get credential zip for user in project"""

View File

@@ -1,69 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Role-based access control decorators to use fpr wrapping other
methods with."""
from nova import exception
def allow(*roles):
"""Allow the given roles access the wrapped function."""
def wrap(func): # pylint: disable-msg=C0111
def wrapped_func(self, context, *args,
**kwargs): # pylint: disable-msg=C0111
if context.user.is_superuser():
return func(self, context, *args, **kwargs)
for role in roles:
if __matches_role(context, role):
return func(self, context, *args, **kwargs)
raise exception.NotAuthorized()
return wrapped_func
return wrap
def deny(*roles):
"""Deny the given roles access the wrapped function."""
def wrap(func): # pylint: disable-msg=C0111
def wrapped_func(self, context, *args,
**kwargs): # pylint: disable-msg=C0111
if context.user.is_superuser():
return func(self, context, *args, **kwargs)
for role in roles:
if __matches_role(context, role):
raise exception.NotAuthorized()
return func(self, context, *args, **kwargs)
return wrapped_func
return wrap
def __matches_role(context, role):
"""Check if a role is allowed."""
if role == 'all':
return True
if role == 'none':
return False
return context.project.has_role(context.user.id, role)
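A hedged usage sketch of the removed decorators (controller and roles illustrative, mirroring how cloud.py used them): allow short-circuits for superusers, then grants access only if one of the listed roles matches.

class ExampleController(object):
    @allow('projectmanager', 'sysadmin')
    def reboot_instances(self, context, instance_id, **kwargs):
        # only superusers, projectmanagers, or sysadmins reach this body;
        # anyone else gets exception.NotAuthorized from the wrapper
        return True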

View File

@@ -1,214 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin API controller, exposed through http via the api worker.
"""
import base64
from nova import db
from nova import exception
from nova.auth import manager
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
return {
'username': user.id,
'accesskey': user.access,
'secretkey': user.secret,
'file': base64_file}
else:
return {}
def project_dict(project):
"""Convert the project object to a result dict"""
if project:
return {
'projectname': project.id,
'project_manager_id': project.project_manager_id,
'description': project.description}
else:
return {}
def host_dict(host):
"""Convert a host model object to a result dict"""
if host:
return host.state
else:
return {}
def admin_only(target):
"""Decorator for admin-only API calls"""
def wrapper(*args, **kwargs):
"""Internal wrapper method for admin-only API calls"""
context = args[1]
if context.user.is_admin():
return target(*args, **kwargs)
else:
return {}
return wrapper
class AdminController(object):
"""
API Controller for users, hosts, nodes, and workers.
Trivial admin_only wrapper will be replaced with RBAC,
allowing project managers to administer project users.
"""
def __str__(self):
return 'AdminController'
@admin_only
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
return user_dict(manager.AuthManager().get_user(name))
@admin_only
def describe_users(self, _context, **_kwargs):
"""Returns all users - should be changed to deal with a list."""
return {'userSet':
[user_dict(u) for u in manager.AuthManager().get_users()] }
@admin_only
def register_user(self, _context, name, **_kwargs):
"""Creates a new user, and returns generated credentials."""
return user_dict(manager.AuthManager().create_user(name))
@admin_only
def deregister_user(self, _context, name, **_kwargs):
"""Deletes a single user (NOT undoable.)
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
manager.AuthManager().delete_user(name)
return True
@admin_only
def describe_roles(self, context, project_roles=True, **kwargs):
"""Returns a list of allowed roles."""
roles = manager.AuthManager().get_roles(project_roles)
return { 'roles': [{'role': r} for r in roles]}
@admin_only
def describe_user_roles(self, context, user, project=None, **kwargs):
"""Returns a list of roles for the given user.
Omitting project will return any global roles that the user has.
Specifying project will return only project specific roles.
"""
roles = manager.AuthManager().get_user_roles(user, project=project)
return { 'roles': [{'role': r} for r in roles]}
@admin_only
def modify_user_role(self, context, user, role, project=None,
operation='add', **kwargs):
"""Add or remove a role for a user and project."""
if operation == 'add':
manager.AuthManager().add_role(user, role, project)
elif operation == 'remove':
manager.AuthManager().remove_role(user, role, project)
else:
raise exception.ApiError('operation must be add or remove')
return True
@admin_only
def generate_x509_for_user(self, _context, name, project=None, **kwargs):
"""Generates and returns an x509 certificate for a single user.
Is usually called from a client that will wrap this with
access and secret key info, and return a zip file.
"""
if project is None:
project = name
project = manager.AuthManager().get_project(project)
user = manager.AuthManager().get_user(name)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
@admin_only
def describe_project(self, context, name, **kwargs):
"""Returns project data, including member ids."""
return project_dict(manager.AuthManager().get_project(name))
@admin_only
def describe_projects(self, context, user=None, **kwargs):
"""Returns all projects - should be changed to deal with a list."""
return {'projectSet':
[project_dict(u) for u in
manager.AuthManager().get_projects(user=user)]}
@admin_only
def register_project(self, context, name, manager_user, description=None,
member_users=None, **kwargs):
"""Creates a new project"""
return project_dict(
manager.AuthManager().create_project(
name,
manager_user,
description=description,
member_users=member_users))
@admin_only
def deregister_project(self, context, name):
"""Permanently deletes a project."""
manager.AuthManager().delete_project(name)
return True
@admin_only
def describe_project_members(self, context, name, **kwargs):
project = manager.AuthManager().get_project(name)
result = {
'members': [{'member': m} for m in project.member_ids]}
return result
@admin_only
def modify_project_member(self, context, user, project, operation, **kwargs):
"""Add or remove a user from a project."""
if operation == 'add':
manager.AuthManager().add_to_project(user, project)
elif operation == 'remove':
manager.AuthManager().remove_from_project(user, project)
else:
raise exception.ApiError('operation must be add or remove')
return True
# FIXME(vish): these host commands don't work yet, perhaps some of the
# required data can be retrieved from service objects?
@admin_only
def describe_hosts(self, _context, **_kwargs):
"""Returns status info for all nodes. Includes:
* Disk Space
* Instance List
* RAM used
* CPU used
* DHCP servers running
* Iptables / bridges
"""
return {'hostSet': [host_dict(h) for h in db.host_get_all()]}
@admin_only
def describe_host(self, _context, name, **_kwargs):
"""Returns status info for single node."""
return host_dict(db.host_get(name))
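A minimal sketch of the admin_only behavior (stub classes are hypothetical): non-admin callers silently receive an empty dict rather than an error.

class StubUser(object):
    def __init__(self, admin):
        self._admin = admin

    def is_admin(self):
        return self._admin

class StubContext(object):
    def __init__(self, admin):
        self.user = StubUser(admin)

@admin_only
def probe(_self, context):
    return {'ok': True}

print probe(None, StubContext(True))    # {'ok': True}
print probe(None, StubContext(False))   # {}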

View File

@@ -1,344 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tornado REST API Request Handlers for Nova functions
Most calls are proxied into the responsible controller.
"""
import logging
import multiprocessing
import random
import re
import urllib
# TODO(termie): replace minidom with etree
from xml.dom import minidom
import tornado.web
from twisted.internet import defer
from nova import crypto
from nova import exception
from nova import flags
from nova import utils
from nova.auth import manager
import nova.cloudpipe.api
from nova.endpoint import cloud
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
_log = logging.getLogger("api")
_log.setLevel(logging.DEBUG)
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def _camelcase_to_underscore(str):
return _c2u.sub(r'_\1', str).lower().strip('_')
def _underscore_to_camelcase(str):
return ''.join([x[:1].upper() + x[1:] for x in str.split('_')])
def _underscore_to_xmlcase(str):
res = _underscore_to_camelcase(str)
return res[:1].lower() + res[1:]
class APIRequestContext(object):
def __init__(self, handler, user, project):
self.handler = handler
self.user = user
self.project = project
self.request_id = ''.join(
[random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
for x in xrange(20)]
)
class APIRequest(object):
def __init__(self, controller, action):
self.controller = controller
self.action = action
def send(self, context, **kwargs):
try:
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
except AttributeError:
_error = ('Unsupported API request: controller = %s,'
'action = %s') % (self.controller, self.action)
_log.warning(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
raise Exception(_error)
args = {}
for key, value in kwargs.items():
parts = key.split(".")
key = _camelcase_to_underscore(parts[0])
if len(parts) > 1:
d = args.get(key, {})
d[parts[1]] = value[0]
value = d
else:
value = value[0]
args[key] = value
for key in args.keys():
if isinstance(args[key], dict):
if args[key] != {} and args[key].keys()[0].isdigit():
s = args[key].items()
s.sort()
args[key] = [v for k, v in s]
d = defer.maybeDeferred(method, context, **args)
d.addCallback(self._render_response, context.request_id)
return d
def _render_response(self, response_data, request_id):
xml = minidom.Document()
response_el = xml.createElement(self.action + 'Response')
response_el.setAttribute('xmlns',
'http://ec2.amazonaws.com/doc/2009-11-30/')
request_id_el = xml.createElement('requestId')
request_id_el.appendChild(xml.createTextNode(request_id))
response_el.appendChild(request_id_el)
if(response_data == True):
self._render_dict(xml, response_el, {'return': 'true'})
else:
self._render_dict(xml, response_el, response_data)
xml.appendChild(response_el)
response = xml.toxml()
xml.unlink()
_log.debug(response)
return response
def _render_dict(self, xml, el, data):
try:
for key in data.keys():
val = data[key]
el.appendChild(self._render_data(xml, key, val))
except:
_log.debug(data)
raise
def _render_data(self, xml, el_name, data):
el_name = _underscore_to_xmlcase(el_name)
data_el = xml.createElement(el_name)
if isinstance(data, list):
for item in data:
data_el.appendChild(self._render_data(xml, 'item', item))
elif isinstance(data, dict):
self._render_dict(xml, data_el, data)
elif hasattr(data, '__dict__'):
self._render_dict(xml, data_el, data.__dict__)
elif isinstance(data, bool):
data_el.appendChild(xml.createTextNode(str(data).lower()))
elif data != None:
data_el.appendChild(xml.createTextNode(str(data)))
return data_el
class RootRequestHandler(tornado.web.RequestHandler):
def get(self):
# available api versions
versions = [
'1.0',
'2007-01-19',
'2007-03-01',
'2007-08-29',
'2007-10-10',
'2007-12-15',
'2008-02-01',
'2008-09-01',
'2009-04-04',
]
for version in versions:
self.write('%s\n' % version)
self.finish()
class MetadataRequestHandler(tornado.web.RequestHandler):
def print_data(self, data):
if isinstance(data, dict):
output = ''
for key in data:
if key == '_name':
continue
output += key
if isinstance(data[key], dict):
if '_name' in data[key]:
output += '=' + str(data[key]['_name'])
else:
output += '/'
output += '\n'
self.write(output[:-1]) # cut off last \n
elif isinstance(data, list):
self.write('\n'.join(data))
else:
self.write(str(data))
def lookup(self, path, data):
items = path.split('/')
for item in items:
if item:
if not isinstance(data, dict):
return data
if not item in data:
return None
data = data[item]
return data
def get(self, path):
cc = self.application.controllers['Cloud']
meta_data = cc.get_metadata(self.request.remote_ip)
if meta_data is None:
_log.error('Failed to get metadata for ip: %s' %
self.request.remote_ip)
raise tornado.web.HTTPError(404)
data = self.lookup(path, meta_data)
if data is None:
raise tornado.web.HTTPError(404)
self.print_data(data)
self.finish()
class APIRequestHandler(tornado.web.RequestHandler):
def get(self, controller_name):
self.execute(controller_name)
@tornado.web.asynchronous
def execute(self, controller_name):
# Obtain the appropriate controller for this request.
try:
controller = self.application.controllers[controller_name]
except KeyError:
self._error('unhandled', 'no controller named %s' % controller_name)
return
args = self.request.arguments
# Read request signature.
try:
signature = args.pop('Signature')[0]
except:
raise tornado.web.HTTPError(400)
# Make a copy of args for authentication and signature verification.
auth_params = {}
for key, value in args.items():
auth_params[key] = value[0]
# Get requested action and remove authentication args for final request.
try:
action = args.pop('Action')[0]
access = args.pop('AWSAccessKeyId')[0]
args.pop('SignatureMethod')
args.pop('SignatureVersion')
args.pop('Version')
args.pop('Timestamp')
except:
raise tornado.web.HTTPError(400)
# Authenticate the request.
try:
(user, project) = manager.AuthManager().authenticate(
access,
signature,
auth_params,
self.request.method,
self.request.host,
self.request.path
)
except exception.Error, ex:
logging.debug("Authentication Failure: %s" % ex)
raise tornado.web.HTTPError(403)
_log.debug('action: %s' % action)
for key, value in args.items():
_log.debug('arg: %s\t\tval: %s' % (key, value))
request = APIRequest(controller, action)
context = APIRequestContext(self, user, project)
d = request.send(context, **args)
# d.addCallback(utils.debug)
# TODO: Wrap response in AWS XML format
d.addCallbacks(self._write_callback, self._error_callback)
def _write_callback(self, data):
self.set_header('Content-Type', 'text/xml')
self.write(data)
self.finish()
def _error_callback(self, failure):
try:
failure.raiseException()
except exception.ApiError as ex:
self._error(type(ex).__name__ + "." + ex.code, ex.message)
# TODO(vish): do something more useful with unknown exceptions
except Exception as ex:
self._error(type(ex).__name__, str(ex))
raise
def post(self, controller_name):
self.execute(controller_name)
def _error(self, code, message):
self._status_code = 400
self.set_header('Content-Type', 'text/xml')
self.write('<?xml version="1.0"?>\n')
self.write('<Response><Errors><Error><Code>%s</Code>'
'<Message>%s</Message></Error></Errors>'
'<RequestID>?</RequestID></Response>' % (code, message))
self.finish()
class APIServerApplication(tornado.web.Application):
def __init__(self, controllers):
tornado.web.Application.__init__(self, [
(r'/', RootRequestHandler),
(r'/cloudpipe/(.*)', nova.cloudpipe.api.CloudPipeRequestHandler),
(r'/cloudpipe', nova.cloudpipe.api.CloudPipeRequestHandler),
(r'/services/([A-Za-z0-9]+)/', APIRequestHandler),
(r'/latest/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2009-04-04/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2008-09-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2008-02-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-12-15/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-10-10/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-08-29/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-03-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-01-19/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/1.0/([-A-Za-z0-9/]*)', MetadataRequestHandler),
], pool=multiprocessing.Pool(4))
self.controllers = controllers
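A quick sketch of the case converters defined near the top of this file (outputs traced by hand from the regex above):

print _camelcase_to_underscore('DescribeInstances')   # describe_instances
print _underscore_to_camelcase('instance_id')         # InstanceId
print _underscore_to_xmlcase('instance_id')           # instanceId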

View File

@@ -1,870 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is via distributed
datastore.
"""
import base64
import logging
import os
import time
from twisted.internet import defer
from nova import db
from nova import exception
from nova import flags
from nova import rpc
from nova import utils
from nova.auth import rbac
from nova.auth import manager
from nova.compute.instance_types import INSTANCE_TYPES
from nova.endpoint import images
FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
def _gen_key(user_id, key_name):
""" Tuck this into AuthManager """
try:
mgr = manager.AuthManager()
private_key, fingerprint = mgr.generate_key_pair(user_id, key_name)
except Exception as ex:
return {'exception': ex}
return {'private_key': private_key, 'fingerprint': fingerprint}
class CloudController(object):
""" CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
self.network_manager = utils.import_object(FLAGS.network_manager)
self.setup()
def __str__(self):
return 'CloudController'
def setup(self):
""" Ensure the keychains and folders exist. """
# FIXME(ja): this should be moved to a nova-manage command,
# if not setup throw exceptions instead of running
# Create keys folder, if it doesn't exist
if not os.path.exists(FLAGS.keys_path):
os.makedirs(FLAGS.keys_path)
# Gen root CA, if we don't have one
root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file)
if not os.path.exists(root_ca_path):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
# TODO(vish): Do this with M2Crypto instead
utils.runthis("Generating root CA: %s", "sh genrootca.sh")
os.chdir(start)
def _get_mpi_data(self, project_id):
result = {}
for instance in db.instance_get_by_project(project_id):
line = '%s slots=%d' % (instance.fixed_ip['str_id'],
INSTANCE_TYPES[instance['instance_type']]['vcpus'])
if instance['key_name'] in result:
result[instance['key_name']].append(line)
else:
result[instance['key_name']] = [line]
return result
def _trigger_refresh_security_group(self, security_group):
nodes = set([instance.host for instance in security_group.instances])
for node in nodes:
rpc.call('%s.%s' % (FLAGS.compute_topic, node),
{ "method": "refresh_security_group",
"args": { "context": None,
"security_group_id": security_group.id}})
def get_metadata(self, address):
instance_ref = db.fixed_ip_get_instance(None, address)
if instance_ref is None:
return None
mpi = self._get_mpi_data(instance_ref['project_id'])
if instance_ref['key_name']:
keys = {
'0': {
'_name': instance_ref['key_name'],
'openssh-key': instance_ref['key_data']
}
}
else:
keys = ''
hostname = instance_ref['hostname']
floating_ip = db.instance_get_floating_ip_address(None,
instance_ref['id'])
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
'ami-id': instance_ref['image_id'],
'ami-launch-index': instance_ref['ami_launch_index'],
'ami-manifest-path': 'FIXME',
'block-device-mapping': { # TODO(vish): replace with real data
'ami': 'sda1',
'ephemeral0': 'sda2',
'root': '/dev/sda1',
'swap': 'sda3'
},
'hostname': hostname,
'instance-action': 'none',
'instance-id': instance_ref['str_id'],
'instance-type': instance_ref['instance_type'],
'local-hostname': hostname,
'local-ipv4': address,
'kernel-id': instance_ref['kernel_id'],
'placement': {
'availability-zone': instance_ref['availability_zone'],
},
'public-hostname': hostname,
'public-ipv4': floating_ip or '',
'public-keys': keys,
'ramdisk-id': instance_ref['ramdisk_id'],
'reservation-id': instance_ref['reservation_id'],
'security-groups': '',
'mpi': mpi
}
}
if False: # TODO(vish): store ancestor ids
data['ancestor-ami-ids'] = []
if False: # TODO(vish): store product codes
data['product-codes'] = []
return data
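A hedged sketch of consuming the structure returned above (address hypothetical):

cc = CloudController()
data = cc.get_metadata('10.0.0.5')
if data:
    print data['meta-data']['local-ipv4']                      # 10.0.0.5
    print data['meta-data']['placement']['availability-zone']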
@rbac.allow('all')
def describe_availability_zones(self, context, **kwargs):
return {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
@rbac.allow('all')
def describe_regions(self, context, region_name=None, **kwargs):
# TODO(vish): region_name is an array. Support filtering
return {'regionInfo': [{'regionName': 'nova',
'regionUrl': FLAGS.ec2_url}]}
@rbac.allow('all')
def describe_snapshots(self,
context,
snapshot_id=None,
owner=None,
restorable_by=None,
**kwargs):
return {'snapshotSet': [{'snapshotId': 'fixme',
'volumeId': 'fixme',
'status': 'fixme',
'startTime': 'fixme',
'progress': 'fixme',
'ownerId': 'fixme',
'volumeSize': 0,
'description': 'fixme'}]}
@rbac.allow('all')
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = context.user.get_key_pairs()
if not key_name is None:
key_pairs = [x for x in key_pairs if x.name in key_name]
result = []
for key_pair in key_pairs:
# filter out the vpn keys
suffix = FLAGS.vpn_key_suffix
if context.user.is_admin() or not key_pair.name.endswith(suffix):
result.append({
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint,
})
return {'keypairsSet': result}
@rbac.allow('all')
def create_key_pair(self, context, key_name, **kwargs):
dcall = defer.Deferred()
pool = context.handler.application.settings.get('pool')
def _complete(kwargs):
if 'exception' in kwargs:
dcall.errback(kwargs['exception'])
return
dcall.callback({'keyName': key_name,
'keyFingerprint': kwargs['fingerprint'],
'keyMaterial': kwargs['private_key']})
pool.apply_async(_gen_key, [context.user.id, key_name],
callback=_complete)
return dcall
@rbac.allow('all')
def delete_key_pair(self, context, key_name, **kwargs):
context.user.delete_key_pair(key_name)
# aws returns true even if the key doesn't exist
return True
@rbac.allow('all')
def describe_security_groups(self, context, group_name=None, **kwargs):
if context.user.is_admin():
groups = db.security_group_get_all(context)
else:
groups = db.security_group_get_by_project(context,
context.project.id)
groups = [self._format_security_group(context, g) for g in groups]
if not group_name is None:
groups = [g for g in groups if g.name in group_name]
return {'securityGroupInfo': groups }
def _format_security_group(self, context, group):
g = {}
g['groupDescription'] = group.description
g['groupName'] = group.name
g['ownerId'] = context.user.id
g['ipPermissions'] = []
for rule in group.rules:
r = {}
r['ipProtocol'] = rule.protocol
r['fromPort'] = rule.from_port
r['toPort'] = rule.to_port
r['groups'] = []
r['ipRanges'] = []
if rule.group_id:
source_group = db.security_group_get(context, rule.group_id)
r['groups'] += [{'groupName': source_group.name,
'userId': source_group.user_id}]
else:
r['ipRanges'] += [{'cidrIp': rule.cidr}]
g['ipPermissions'] += [r]
return g
@rbac.allow('netadmin')
def revoke_security_group_ingress(self, context, group_name,
to_port=None, from_port=None,
ip_protocol=None, cidr_ip=None,
user_id=None,
source_security_group_name=None,
source_security_group_owner_id=None):
security_group = db.security_group_get_by_name(context,
context.project.id,
group_name)
criteria = {}
if source_security_group_name:
source_project_id = self._get_source_project_id(context,
source_security_group_owner_id)
source_security_group = \
db.security_group_get_by_name(context,
source_project_id,
source_security_group_name)
criteria['group_id'] = source_security_group
elif cidr_ip:
criteria['cidr'] = cidr_ip
else:
return { 'return': False }
if ip_protocol and from_port and to_port:
criteria['protocol'] = ip_protocol
criteria['from_port'] = from_port
criteria['to_port'] = to_port
else:
# If cidr based filtering, protocol and ports are mandatory
if 'cidr' in criteria:
return { 'return': False }
for rule in security_group.rules:
for (k,v) in criteria.iteritems():
if getattr(rule, k, False) != v:
break
# If we make it here, we have a match
db.security_group_rule_destroy(context, rule.id)
self._trigger_refresh_security_group(security_group)
return True
# TODO(soren): Lots and lots of input validation. We're accepting
# strings here (such as ipProtocol), which are put into
# filter rules verbatim.
# TODO(soren): Dupe detection. Adding the same rule twice actually
# adds the same rule twice to the rule set, which is
# pointless.
# TODO(soren): This has only been tested with Boto as the client.
# Unfortunately, it seems Boto is using an old API
# for these operations, so support for newer API versions
# is sketchy.
# TODO(soren): De-duplicate the turning method arguments into dict stuff.
# revoke_security_group_ingress uses the exact same logic.
@rbac.allow('netadmin')
def authorize_security_group_ingress(self, context, group_name,
to_port=None, from_port=None,
ip_protocol=None, cidr_ip=None,
source_security_group_name=None,
source_security_group_owner_id=None):
security_group = db.security_group_get_by_name(context,
context.project.id,
group_name)
values = { 'parent_group_id' : security_group.id }
if source_security_group_name:
source_project_id = self._get_source_project_id(context,
source_security_group_owner_id)
source_security_group = \
db.security_group_get_by_name(context,
source_project_id,
source_security_group_name)
values['group_id'] = source_security_group.id
elif cidr_ip:
values['cidr'] = cidr_ip
else:
return { 'return': False }
if ip_protocol and from_port and to_port:
values['protocol'] = ip_protocol
values['from_port'] = from_port
values['to_port'] = to_port
else:
# If cidr based filtering, protocol and ports are mandatory
if 'cidr' in values:
return None
security_group_rule = db.security_group_rule_create(context, values)
self._trigger_refresh_security_group(security_group)
return True
def _get_source_project_id(self, context, source_security_group_owner_id):
if source_security_group_owner_id:
# Parse user:project for source group.
source_parts = source_security_group_owner_id.split(':')
# If no project name specified, assume it's same as user name.
# Since we're looking up by project name, the user name is not
# used here. It's only read for EC2 API compatibility.
if len(source_parts) == 2:
source_project_id = source_parts[1]
else:
source_project_id = source_parts[0]
else:
source_project_id = context.project.id
return source_project_id
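Two illustrative cases for the parsing above (values hypothetical):

# owner_id 'alice:projectA' -> source project 'projectA'
# owner_id 'alice'          -> source project 'alice'
# owner_id None             -> the caller's own project id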
@rbac.allow('netadmin')
def create_security_group(self, context, group_name, group_description):
if db.securitygroup_exists(context, context.project.id, group_name):
raise exception.ApiError('group %s already exists' % group_name)
group = {'user_id' : context.user.id,
'project_id': context.project.id,
'name': group_name,
'description': group_description}
group_ref = db.security_group_create(context, group)
return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
@rbac.allow('netadmin')
def delete_security_group(self, context, group_name, **kwargs):
security_group = db.security_group_get_by_name(context,
context.project.id,
group_name)
db.security_group_destroy(context, security_group.id)
return True
@rbac.allow('projectmanager', 'sysadmin')
def get_console_output(self, context, instance_id, **kwargs):
# instance_id is passed in as a list of instances
instance_ref = db.instance_get_by_str(context, instance_id[0])
return rpc.call('%s.%s' % (FLAGS.compute_topic,
instance_ref['host']),
{"method": "get_console_output",
"args": {"context": None,
"instance_id": instance_ref['id']}})
@rbac.allow('projectmanager', 'sysadmin')
def describe_volumes(self, context, **kwargs):
if context.user.is_admin():
volumes = db.volume_get_all(context)
else:
volumes = db.volume_get_by_project(context, context.project.id)
volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
v = {}
v['volumeId'] = volume['str_id']
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
v['createTime'] = volume['created_at']
if context.user.is_admin():
v['status'] = '%s (%s, %s, %s, %s)' % (
volume['status'],
volume['user_id'],
'host',
volume['instance_id'],
volume['mountpoint'])
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': False,
'device': volume['mountpoint'],
'instanceId': volume['instance_id'],
'status': 'attached',
'volume_id': volume['str_id']}]
else:
v['attachmentSet'] = [{}]
return v
@rbac.allow('projectmanager', 'sysadmin')
def create_volume(self, context, size, **kwargs):
vol = {}
vol['size'] = size
vol['user_id'] = context.user.id
vol['project_id'] = context.project.id
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
volume_ref = db.volume_create(context, vol)
rpc.cast(FLAGS.volume_topic, {"method": "create_volume",
"args": {"context": None,
"volume_id": volume_ref['id']}})
return {'volumeSet': [self._format_volume(context, volume_ref)]}
@rbac.allow('projectmanager', 'sysadmin')
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_ref = db.volume_get_by_str(context, volume_id)
# TODO(vish): abstract status checking?
if volume_ref['attach_status'] == "attached":
raise exception.ApiError("Volume is already attached")
instance_ref = db.instance_get_by_str(context, instance_id)
host = db.instance_get_host(context, instance_ref['id'])
rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "attach_volume",
"args": {"context": None,
"volume_id": volume_ref['id'],
"instance_id": instance_ref['id'],
"mountpoint": device}})
return defer.succeed({'attachTime': volume_ref['attach_time'],
'device': volume_ref['mountpoint'],
'instanceId': instance_ref['id'],
'requestId': context.request_id,
'status': volume_ref['attach_status'],
'volumeId': volume_ref['id']})
@rbac.allow('projectmanager', 'sysadmin')
def detach_volume(self, context, volume_id, **kwargs):
volume_ref = db.volume_get_by_str(context, volume_id)
instance_ref = db.volume_get_instance(context, volume_ref['id'])
if not instance_ref:
raise exception.Error("Volume isn't attached to anything!")
# TODO(vish): abstract status checking?
if volume_ref['status'] == "available":
raise exception.Error("Volume is already detached")
try:
host = db.instance_get_host(context, instance_ref['id'])
rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "detach_volume",
"args": {"context": None,
"instance_id": instance_ref['id'],
"volume_id": volume_ref['id']}})
except exception.NotFound:
# If the instance doesn't exist anymore,
# then we need to call detach blind
db.volume_detached(context)
return defer.succeed({'attachTime': volume_ref['attach_time'],
'device': volume_ref['mountpoint'],
'instanceId': instance_ref['str_id'],
'requestId': context.request_id,
'status': volume_ref['attach_status'],
'volumeId': volume_ref['id']})
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
return None
if not isinstance(lst, list):
lst = [lst]
return [{label: x} for x in lst]
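For example (illustrative values):

# self._convert_to_set(['a', 'b'], 'item')  -> [{'item': 'a'}, {'item': 'b'}]
# self._convert_to_set('a', 'item')         -> [{'item': 'a'}]
# self._convert_to_set([], 'item')          -> None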
@rbac.allow('all')
def describe_instances(self, context, **kwargs):
return defer.succeed(self._format_describe_instances(context))
def _format_describe_instances(self, context):
return { 'reservationSet': self._format_instances(context) }
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id)
assert len(i) == 1
return i[0]
def _format_instances(self, context, reservation_id=None):
reservations = {}
if reservation_id:
instances = db.instance_get_by_reservation(context, reservation_id)
else:
if context.user.is_admin():
instances = db.instance_get_all(context)
else:
instances = db.instance_get_by_project(context, context.project.id)
for instance in instances:
if not context.user.is_admin():
if instance['image_id'] == FLAGS.vpn_image_id:
continue
i = {}
i['instanceId'] = instance['str_id']
i['imageId'] = instance['image_id']
i['instanceState'] = {
'code': instance['state'],
'name': instance['state_description']
}
floating_addr = db.instance_get_floating_address(context,
instance['id'])
i['publicDnsName'] = floating_addr
fixed_addr = db.instance_get_fixed_address(context,
instance['id'])
i['privateDnsName'] = fixed_addr
if not i['publicDnsName']:
i['publicDnsName'] = i['privateDnsName']
i['dnsName'] = None
i['keyName'] = instance['key_name']
if context.user.is_admin():
i['keyName'] = '%s (%s, %s)' % (i['keyName'],
instance['project_id'],
instance['host'])
i['productCodesSet'] = self._convert_to_set([], 'product_codes')
i['instanceType'] = instance['instance_type']
i['launchTime'] = instance['created_at']
i['amiLaunchIndex'] = instance['launch_index']
if not reservations.has_key(instance['reservation_id']):
r = {}
r['reservationId'] = instance['reservation_id']
r['ownerId'] = instance['project_id']
r['groupSet'] = self._convert_to_set([], 'groups')
r['instancesSet'] = []
reservations[instance['reservation_id']] = r
reservations[instance['reservation_id']]['instancesSet'].append(i)
return list(reservations.values())
@rbac.allow('all')
def describe_addresses(self, context, **kwargs):
return self.format_addresses(context)
def format_addresses(self, context):
addresses = []
if context.user.is_admin():
iterator = db.floating_ip_get_all(context)
else:
iterator = db.floating_ip_get_by_project(context,
context.project.id)
for floating_ip_ref in iterator:
address = floating_ip_ref['str_id']
instance_ref = db.floating_ip_get_instance(address)
address_rv = {'public_ip': address,
'instance_id': instance_ref['str_id']}
if context.user.is_admin():
details = "%s (%s)" % (address_rv['instance_id'],
floating_ip_ref['project_id'])
address_rv['instance_id'] = details
addresses.append(address_rv)
return {'addressesSet': addresses}
@rbac.allow('netadmin')
@defer.inlineCallbacks
def allocate_address(self, context, **kwargs):
network_topic = yield self._get_network_topic(context)
public_ip = yield rpc.call(network_topic,
{"method": "allocate_floating_ip",
"args": {"context": None,
"project_id": context.project.id}})
defer.returnValue({'addressSet': [{'publicIp': public_ip}]})
@rbac.allow('netadmin')
@defer.inlineCallbacks
def release_address(self, context, public_ip, **kwargs):
# NOTE(vish): Should we make sure this works?
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
network_topic = yield self._get_network_topic(context)
rpc.cast(network_topic,
{"method": "deallocate_floating_ip",
"args": {"context": None,
"floating_ip": floating_ip_ref['str_id']}})
defer.returnValue({'releaseResponse': ["Address released."]})
@rbac.allow('netadmin')
@defer.inlineCallbacks
def associate_address(self, context, instance_id, public_ip, **kwargs):
instance_ref = db.instance_get_by_str(context, instance_id)
fixed_ip_ref = db.fixed_ip_get_by_instance(context, instance_ref['id'])
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
network_topic = yield self._get_network_topic(context)
rpc.cast(network_topic,
{"method": "associate_floating_ip",
"args": {"context": None,
"floating_ip": floating_ip_ref['str_id'],
"fixed_ip": fixed_ip_ref['str_id'],
"instance_id": instance_ref['id']}})
defer.returnValue({'associateResponse': ["Address associated."]})
@rbac.allow('netadmin')
@defer.inlineCallbacks
def disassociate_address(self, context, public_ip, **kwargs):
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
network_topic = yield self._get_network_topic(context)
rpc.cast(network_topic,
{"method": "disassociate_floating_ip",
"args": {"context": None,
"floating_ip": floating_ip_ref['str_id']}})
defer.returnValue({'disassociateResponse': ["Address disassociated."]})
@defer.inlineCallbacks
def _get_network_topic(self, context):
"""Retrieves the network host for a project"""
network_ref = db.project_get_network(context, context.project.id)
host = db.network_get_host(context, network_ref['id'])
if not host:
host = yield rpc.call(FLAGS.network_topic,
{"method": "set_network_host",
"args": {"context": None,
"project_id": context.project.id}})
defer.returnValue(db.queue_get_for(context, FLAGS.network_topic, host))
@rbac.allow('projectmanager', 'sysadmin')
@defer.inlineCallbacks
def run_instances(self, context, **kwargs):
# make sure user can access the image
# vpn image is private so it doesn't show up on lists
vpn = kwargs['image_id'] == FLAGS.vpn_image_id
if not vpn:
image = images.get(context, kwargs['image_id'])
# FIXME(ja): if image is vpn, this breaks
# get defaults from imagestore
image_id = image['imageId']
kernel_id = image.get('kernelId', FLAGS.default_kernel)
ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
# API parameters override the defaults
kernel_id = kwargs.get('kernel_id', kernel_id)
ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
# make sure we have access to kernel and ramdisk
images.get(context, kernel_id)
images.get(context, ramdisk_id)
logging.debug("Going to run instances...")
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
key_data = None
if kwargs.has_key('key_name'):
key_pair = context.user.get_key_pair(kwargs['key_name'])
if not key_pair:
raise exception.ApiError('Key Pair %s not found' %
kwargs['key_name'])
key_data = key_pair.public_key
security_group_arg = kwargs.get('security_group', ["default"])
if not isinstance(security_group_arg, list):
security_group_arg = [security_group_arg]
security_groups = []
for security_group_name in security_group_arg:
group = db.security_group_get_by_project(context,
context.project.id,
security_group_name)
security_groups.append(group['id'])
reservation_id = utils.generate_uid('r')
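# NOTE: base_options below is shared verbatim by every instance in this
# reservation; the per-instance fields (mac_address, launch_index,
# hostname) are filled in inside the loop further down.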
base_options = {}
base_options['image_id'] = image_id
base_options['kernel_id'] = kernel_id
base_options['ramdisk_id'] = ramdisk_id
base_options['reservation_id'] = reservation_id
base_options['key_data'] = key_data
base_options['key_name'] = kwargs.get('key_name', None)
base_options['user_id'] = context.user.id
base_options['project_id'] = context.project.id
base_options['user_data'] = kwargs.get('user_data', '')
base_options['instance_type'] = kwargs.get('instance_type', 'm1.small')
for num in range(int(kwargs['max_count'])):
inst_id = db.instance_create(context, base_options)
for security_group_id in security_groups:
db.instance_add_security_group(context, inst_id,
security_group_id)
inst = {}
inst['mac_address'] = utils.generate_mac()
inst['launch_index'] = num
inst['hostname'] = inst_id
db.instance_update(context, inst_id, inst)
address = self.network_manager.allocate_fixed_ip(context,
inst_id,
vpn)
# TODO(vish): This probably should be done in the scheduler
# network is set up when host is assigned
network_topic = yield self._get_network_topic(context)
rpc.call(network_topic,
{"method": "setup_fixed_ip",
"args": {"context": None,
"address": address}})
rpc.cast(FLAGS.compute_topic,
{"method": "run_instance",
"args": {"context": None,
"instance_id": inst_id}})
logging.debug("Casting to node for %s/%s's instance %s" %
(context.project.name, context.user.name, inst_id))
defer.returnValue(self._format_run_instances(context,
reservation_id))
@rbac.allow('projectmanager', 'sysadmin')
@defer.inlineCallbacks
def terminate_instances(self, context, instance_id, **kwargs):
logging.debug("Going to start terminating instances")
for id_str in instance_id:
logging.debug("Going to try and terminate %s" % id_str)
try:
instance_ref = db.instance_get_by_str(context, id_str)
except exception.NotFound:
logging.warning("Instance %s was not found during terminate"
% id_str)
continue
# FIXME(ja): where should network deallocate occur?
address = db.instance_get_floating_address(context,
instance_ref['id'])
if address:
logging.debug("Disassociating address %s" % address)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later. Perhaps in the scheduler?
network_topic = yield self._get_network_topic(context)
rpc.cast(network_topic,
{"method": "disassociate_floating_ip",
"args": {"context": None,
"address": address}})
address = db.instance_get_fixed_address(context,
instance_ref['id'])
if address:
logging.debug("Deallocating address %s" % address)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
db.fixed_ip_deallocate(context, address)
host = db.instance_get_host(context, instance_ref['id'])
if host:
rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "terminate_instance",
"args": {"context": None,
"instance_id": instance_ref['id']}})
else:
db.instance_destroy(context, instance_ref['id'])
defer.returnValue(True)
@rbac.allow('projectmanager', 'sysadmin')
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
for id_str in instance_id:
instance_ref = db.instance_get_by_str(context, id_str)
host = db.instance_get_host(context, instance_ref['id'])
rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
"args": {"context": None,
"instance_id": instance_ref['id']}})
return defer.succeed(True)
@rbac.allow('projectmanager', 'sysadmin')
def delete_volume(self, context, volume_id, **kwargs):
# TODO: return error if not authorized
volume_ref = db.volume_get_by_str(context, volume_id)
host = db.volume_get_host(context, volume_ref['id'])
rpc.cast(db.queue_get_for(context, FLAGS.volume_topic, host),
{"method": "delete_volume",
"args": {"context": None,
"volume_id": volume_id}})
return defer.succeed(True)
@rbac.allow('all')
def describe_images(self, context, image_id=None, **kwargs):
# The objectstore does its own authorization for describe
imageSet = images.list(context, image_id)
return defer.succeed({'imagesSet': imageSet})
@rbac.allow('projectmanager', 'sysadmin')
def deregister_image(self, context, image_id, **kwargs):
# FIXME: should the objectstore be doing these authorization checks?
images.deregister(context, image_id)
return defer.succeed({'imageId': image_id})
@rbac.allow('projectmanager', 'sysadmin')
def register_image(self, context, image_location=None, **kwargs):
# FIXME: should the objectstore be doing these authorization checks?
if image_location is None and kwargs.has_key('name'):
image_location = kwargs['name']
image_id = images.register(context, image_location)
logging.debug("Registered %s as %s" % (image_location, image_id))
return defer.succeed({'imageId': image_id})
@rbac.allow('all')
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
try:
image = images.list(context, image_id)[0]
except IndexError:
raise exception.ApiError('invalid id: %s' % image_id)
result = {'image_id': image_id, 'launchPermission': []}
if image['isPublic']:
result['launchPermission'].append({'group': 'all'})
return defer.succeed(result)
@rbac.allow('projectmanager', 'sysadmin')
def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
if not 'user_group' in kwargs:
raise exception.ApiError('user or group not specified')
if len(kwargs['user_group']) != 1 or kwargs['user_group'][0] != 'all':
raise exception.ApiError('only group "all" is supported')
if operation_type not in ['add', 'remove']:
raise exception.ApiError('operation_type must be add or remove')
result = images.modify(context, image_id, operation_type)
return defer.succeed(result)
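Every queue interaction above uses the same envelope: a topic string, optionally pinned to a host via db.queue_get_for, plus a dict of "method" and "args". A minimal sketch of the convention, with a made-up queue name and argument values for illustration only:

    from nova import rpc

    # 'network.host1' is hypothetical; the real queue name comes from
    # db.queue_get_for(context, FLAGS.network_topic, host).
    rpc.cast("network.host1",
             {"method": "associate_floating_ip",
              "args": {"context": None,
                       "floating_ip": "10.0.0.5",
                       "fixed_ip": "192.168.0.3",
                       "instance_id": 42}})
    # cast is fire-and-forget; rpc.call additionally waits for and
    # returns the remote method's result.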

View File

@@ -1,108 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Proxy AMI-related calls from the cloud controller, to the running
objectstore service.
"""
import json
import urllib
import boto.s3.connection
from nova import exception
from nova import flags
from nova import utils
from nova.auth import manager
FLAGS = flags.FLAGS
def modify(context, image_id, operation):
conn(context).make_request(
method='POST',
bucket='_images',
query_args=qs({'image_id': image_id, 'operation': operation}))
return True
def register(context, image_location):
""" rpc call to register a new image based from a manifest """
image_id = utils.generate_uid('ami')
conn(context).make_request(
method='PUT',
bucket='_images',
query_args=qs({'image_location': image_location,
'image_id': image_id}))
return image_id
def list(context, filter_list=[]):
""" return a list of all images that a user can see
optionally filtered by a list of image_id """
# FIXME: send along the list of only_images to check for
response = conn(context).make_request(
method='GET',
bucket='_images')
result = json.loads(response.read())
if filter_list:
return [i for i in result if i['imageId'] in filter_list]
return result
def get(context, image_id):
"""return a image object if the context has permissions"""
result = list(context, [image_id])
if not result:
raise exception.NotFound('Image %s could not be found' % image_id)
image = result[0]
return image
def deregister(context, image_id):
""" unregister an image """
conn(context).make_request(
method='DELETE',
bucket='_images',
query_args=qs({'image_id': image_id}))
def conn(context):
access = manager.AuthManager().get_access_key(context.user,
context.project)
secret = str(context.user.secret)
calling = boto.s3.connection.OrdinaryCallingFormat()
return boto.s3.connection.S3Connection(aws_access_key_id=access,
aws_secret_access_key=secret,
is_secure=False,
calling_format=calling,
port=FLAGS.s3_port,
host=FLAGS.s3_host)
def qs(params):
pairs = []
for key in params.keys():
pairs.append(key + '=' + urllib.quote(params[key]))
return '&'.join(pairs)
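The qs helper above URL-escapes each value and joins key=value pairs. A quick standalone check of the same logic (Python 2, where urllib.quote lives):

    import urllib

    params = {'image_id': 'ami-1234', 'operation': 'add'}
    pairs = [key + '=' + urllib.quote(params[key]) for key in params]
    # dict iteration order is unspecified, so accept either pairing
    assert '&'.join(pairs) in ('image_id=ami-1234&operation=add',
                               'operation=add&image_id=ami-1234')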

View File

@@ -167,10 +167,14 @@ def DECLARE(name, module_string, flag_values=FLAGS):
# Define any app-specific flags in their own files, docs at:
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
DEFINE_list('region_list',
[],
'list of region=url pairs separated by commas')
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on')
DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
@@ -213,6 +217,8 @@ DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
'Manager for network')
DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
'Manager for volume')
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
DEFINE_string('host', socket.gethostname(),
'name of this node')
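Each DEFINE_* call registers a default on the shared FLAGS object, and DECLARE (whose signature heads this hunk) pulls in a flag defined in another module. A hedged sketch of the read side, using only flags defined above:

    from nova import flags

    FLAGS = flags.FLAGS
    # after the definitions above:
    #   FLAGS.compute_topic == 'compute'
    #   FLAGS.s3_port == 3333
    queue = "%s.%s" % (FLAGS.network_topic, FLAGS.host)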

View File

@@ -30,7 +30,7 @@ from twisted.internet import protocol
from twisted.internet import reactor
from nova import flags
from nova.utils import ProcessExecutionError
from nova.exception import ProcessExecutionError
FLAGS = flags.FLAGS
flags.DEFINE_integer('process_pool_size', 4,
@@ -127,7 +127,7 @@ def get_process_output(executable, args=None, env=None, path=None,
deferred = defer.Deferred()
cmd = executable
if args:
cmd = cmd + " " + ' '.join(args)
cmd = " ".join([cmd] + args)
logging.debug("Running cmd: %s", cmd)
process_handler = BackRelayWithInput(
deferred,
@@ -141,8 +141,8 @@ def get_process_output(executable, args=None, env=None, path=None,
executable = str(executable)
if not args is None:
args = [str(x) for x in args]
process_reactor.spawnProcess( process_handler, executable,
(executable,)+tuple(args), env, path)
process_reactor.spawnProcess(process_handler, executable,
(executable,)+tuple(args), env, path)
return deferred
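get_process_output hands back a Twisted Deferred that fires with the command's output once the child exits. A minimal usage sketch, assuming the function is importable from nova.process as shown above:

    from twisted.internet import defer

    from nova import process

    @defer.inlineCallbacks
    def _list_tmp():
        # failures surface as ProcessExecutionError via the errback chain
        output = yield process.get_process_output('/bin/ls', ['-l', '/tmp'])
        defer.returnValue(output)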

View File

@@ -0,0 +1,25 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nova.scheduler` -- Scheduler Nodes
=====================================================
.. automodule:: nova.scheduler
:platform: Unix
:synopsis: Module that picks a compute node to run a VM instance.
.. moduleauthor:: Chris Behrens <cbehrens@codestud.com>
"""

nova/scheduler/simple.py Normal file
View File

@@ -0,0 +1,90 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple Scheduler
"""
import datetime
from nova import db
from nova import flags
from nova.scheduler import driver
from nova.scheduler import chance
FLAGS = flags.FLAGS
flags.DEFINE_integer("max_cores", 16,
"maximum number of instance cores to allow per host")
flags.DEFINE_integer("max_gigabytes", 10000,
"maximum number of volume gigabytes to allow per host")
flags.DEFINE_integer("max_networks", 1000,
"maximum number of networks to allow per host")
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
results = db.service_get_all_compute_sorted(context)
for result in results:
(service, instance_cores) = result
if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
raise driver.NoValidHost("All hosts have too many cores")
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = datetime.datetime.utcnow()
db.instance_update(context,
instance_id,
{'host': service['host'],
'scheduled_at': now})
return service['host']
raise driver.NoValidHost("No hosts found")
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id)
results = db.service_get_all_volume_sorted(context)
for result in results:
(service, volume_gigabytes) = result
if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
raise driver.NoValidHost("All hosts have too many gigabytes")
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = datetime.datetime.utcnow()
db.volume_update(context,
volume_id,
{'host': service['host'],
'scheduled_at': now})
return service['host']
raise driver.NoValidHost("No hosts found")
def schedule_set_network_host(self, context, *_args, **_kwargs):
"""Picks a host that is up and has the fewest networks."""
results = db.service_get_all_network_sorted(context)
for result in results:
(service, instance_count) = result
if instance_count >= FLAGS.max_networks:
raise driver.NoValidHost("All hosts have too many networks")
if self.service_is_up(service):
return service['host']
raise driver.NoValidHost("No hosts found")

View File

@@ -23,60 +23,12 @@ from boto.ec2 import regioninfo
import httplib
import random
import StringIO
from tornado import httpserver
from twisted.internet import defer
import webob
from nova import flags
from nova import test
from nova import api
from nova.api.ec2 import cloud
from nova.auth import manager
from nova.endpoint import api
from nova.endpoint import cloud
FLAGS = flags.FLAGS
# NOTE(termie): These are a bunch of helper methods and classes to short
# circuit boto calls and feed them into our tornado handlers,
# it's pretty damn circuitous so apologies if you have to fix
# a bug in it
# NOTE(jaypipes) The pylint disables here are for R0913 (too many args) which
# isn't controllable since boto's HTTPRequest needs that many
# args, and for the version-differentiated import of tornado's
# httputil.
# NOTE(jaypipes): The disable-msg=E1101 and E1103 below is because pylint is
# unable to introspect the deferred's return value properly
def boto_to_tornado(method, path, headers, data, # pylint: disable-msg=R0913
host, connection=None):
""" translate boto requests into tornado requests
connection should be a FakeTornadoHttpConnection instance
"""
try:
headers = httpserver.HTTPHeaders()
except AttributeError:
from tornado import httputil # pylint: disable-msg=E0611
headers = httputil.HTTPHeaders()
for k, v in headers.iteritems():
headers[k] = v
req = httpserver.HTTPRequest(method=method,
uri=path,
headers=headers,
body=data,
host=host,
remote_ip='127.0.0.1',
connection=connection)
return req
def raw_to_httpresponse(response_string):
"""translate a raw tornado http response into an httplib.HTTPResponse"""
sock = FakeHttplibSocket(response_string)
resp = httplib.HTTPResponse(sock)
resp.begin()
return resp
class FakeHttplibSocket(object):
@@ -89,85 +41,35 @@ class FakeHttplibSocket(object):
return self._buffer
class FakeTornadoStream(object):
"""a fake stream to satisfy tornado's assumptions, trivial"""
def set_close_callback(self, _func):
"""Dummy callback for stream"""
pass
class FakeTornadoConnection(object):
"""A fake connection object for tornado to pass to its handlers
web requests are expected to write to this as they get data and call
finish when they are done with the request, we buffer the writes and
kick off a callback when it is done so that we can feed the result back
into boto.
"""
def __init__(self, deferred):
self._deferred = deferred
self._buffer = StringIO.StringIO()
def write(self, chunk):
"""Writes a chunk of data to the internal buffer"""
self._buffer.write(chunk)
def finish(self):
"""Finalizes the connection and returns the buffered data via the
deferred callback.
"""
data = self._buffer.getvalue()
self._deferred.callback(data)
xheaders = None
@property
def stream(self): # pylint: disable-msg=R0201
"""Required property for interfacing with tornado"""
return FakeTornadoStream()
class FakeHttplibConnection(object):
"""A fake httplib.HTTPConnection for boto to use
requests made via this connection actually get translated and routed into
our tornado app, we then wait for the response and turn it back into
our WSGI app, we then wait for the response and turn it back into
the httplib.HTTPResponse that boto expects.
"""
def __init__(self, app, host, is_secure=False):
self.app = app
self.host = host
self.deferred = defer.Deferred()
def request(self, method, path, data, headers):
"""Creates a connection to a fake tornado and sets
up a deferred request with the supplied data and
headers"""
conn = FakeTornadoConnection(self.deferred)
request = boto_to_tornado(connection=conn,
method=method,
path=path,
headers=headers,
data=data,
host=self.host)
self.app(request)
self.deferred.addCallback(raw_to_httpresponse)
req = webob.Request.blank(path)
req.method = method
req.body = data
req.headers = headers
req.headers['Accept'] = 'text/html'
req.host = self.host
# Call the WSGI app, get the HTTP response
resp = str(req.get_response(self.app))
# For some reason, the response doesn't have "HTTP/1.0 " prepended; I
# guess that's a function the web server usually provides.
resp = "HTTP/1.0 %s" % resp
sock = FakeHttplibSocket(resp)
self.http_response = httplib.HTTPResponse(sock)
self.http_response.begin()
def getresponse(self):
"""A bit of deferred magic for catching the response
from the previously deferred request"""
@defer.inlineCallbacks
def _waiter():
"""Callback that simply yields the deferred's
return value."""
result = yield self.deferred
defer.returnValue(result)
d = _waiter()
# NOTE(termie): defer.returnValue above should ensure that
# this deferred has already been called by the time
# we get here, we are going to cheat and return
# the result of the callback
return d.result # pylint: disable-msg=E1101
return self.http_response
def close(self):
"""Required for compatibility with boto/tornado"""
@@ -180,7 +82,6 @@ class ApiEc2TestCase(test.BaseTestCase):
super(ApiEc2TestCase, self).setUp()
self.manager = manager.AuthManager()
self.cloud = cloud.CloudController()
self.host = '127.0.0.1'
@@ -193,12 +94,12 @@ class ApiEc2TestCase(test.BaseTestCase):
aws_secret_access_key='fake',
is_secure=False,
region=regioninfo.RegionInfo(None, 'test', self.host),
port=FLAGS.cc_port,
port=8773,
path='/services/Cloud')
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
http = FakeHttplibConnection(
self.app, '%s:%d' % (self.host, FLAGS.cc_port), False)
self.app, '%s:8773' % (self.host), False)
# pylint: disable-msg=E1103
self.ec2.new_http_connection(host, is_secure).AndReturn(http)
return http
@@ -224,7 +125,8 @@ class ApiEc2TestCase(test.BaseTestCase):
for x in range(random.randint(4, 8)))
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
self.manager.generate_key_pair(user.id, keyname)
# NOTE(vish): create depends on pool, so call helper directly
cloud._gen_key(None, user.id, keyname)
rv = self.ec2.get_all_key_pairs()
results = [k for k in rv if k.name == keyname]
@@ -300,7 +202,7 @@ class ApiEc2TestCase(test.BaseTestCase):
rv = self.ec2.get_all_security_groups()
# I don't bother checking that we actually find it here,
# because the create/delete unit test further up should
# be good enough for that.
for group in rv:
if group.name == security_group_name:
@@ -370,7 +272,7 @@ class ApiEc2TestCase(test.BaseTestCase):
rv = self.ec2.get_all_security_groups()
# I don't bother checking that we actually find it here,
# because the create/delete unit test further up should
# be good enough for that.
for group in rv:
if group.name == security_group_name:
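The rewritten FakeHttplibConnection swaps the tornado plumbing for a plain webob round trip: build a blank request, run it through the WSGI app, and re-wrap the text as an httplib response. The webob half in isolation, against a trivial stand-in app:

    import webob

    def trivial_app(environ, start_response):
        # stand-in for the real API WSGI app
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello']

    req = webob.Request.blank('/services/Cloud')
    resp = str(req.get_response(trivial_app))
    # as noted above, the status line must be prepended by hand
    raw = "HTTP/1.0 %s" % resp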

View File

@@ -17,8 +17,6 @@
# under the License.
import logging
from M2Crypto import BIO
from M2Crypto import RSA
from M2Crypto import X509
import unittest
@@ -26,7 +24,7 @@ from nova import crypto
from nova import flags
from nova import test
from nova.auth import manager
from nova.endpoint import cloud
from nova.api.ec2 import cloud
FLAGS = flags.FLAGS
@@ -65,35 +63,6 @@ class AuthTestCase(test.BaseTestCase):
'export S3_URL="http://127.0.0.1:3333/"\n' +
'export EC2_USER_ID="test1"\n')
def test_006_test_key_storage(self):
user = self.manager.get_user('test1')
user.create_key_pair('public', 'key', 'fingerprint')
key = user.get_key_pair('public')
self.assertEqual('key', key.public_key)
self.assertEqual('fingerprint', key.fingerprint)
def test_007_test_key_generation(self):
user = self.manager.get_user('test1')
private_key, fingerprint = user.generate_key_pair('public2')
key = RSA.load_key_string(private_key, callback=lambda: None)
bio = BIO.MemoryBuffer()
public_key = user.get_key_pair('public2').public_key
key.save_pub_key_bio(bio)
converted = crypto.ssl_pub_to_ssh_pub(bio.read())
# assert key fields are equal
self.assertEqual(public_key.split(" ")[1].strip(),
converted.split(" ")[1].strip())
def test_008_can_list_key_pairs(self):
keys = self.manager.get_user('test1').get_key_pairs()
self.assertTrue(filter(lambda k: k.name == 'public', keys))
self.assertTrue(filter(lambda k: k.name == 'public2', keys))
def test_009_can_delete_key_pair(self):
self.manager.get_user('test1').delete_key_pair('public')
keys = self.manager.get_user('test1').get_key_pairs()
self.assertFalse(filter(lambda k: k.name == 'public', keys))
def test_010_can_list_users(self):
users = self.manager.get_users()
logging.warn(users)
@@ -204,6 +173,12 @@ class AuthTestCase(test.BaseTestCase):
self.assert_(len(self.manager.get_projects()) > 1)
self.assertEqual(len(self.manager.get_projects('test2')), 1)
def test_220_can_modify_project(self):
self.manager.modify_project('testproj', 'test2', 'new description')
project = self.manager.get_project('testproj')
self.assertEqual(project.project_manager_id, 'test2')
self.assertEqual(project.description, 'new description')
def test_299_can_delete_project(self):
self.manager.delete_project('testproj')
self.assertFalse(filter(lambda p: p.name == 'testproj', self.manager.get_projects()))

View File

@@ -17,20 +17,26 @@
# under the License.
import logging
from M2Crypto import BIO
from M2Crypto import RSA
import StringIO
import time
from tornado import ioloop
from twisted.internet import defer
import unittest
from xml.etree import ElementTree
from nova import crypto
from nova import db
from nova import flags
from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
from nova.endpoint import api
from nova.endpoint import cloud
from nova.compute import power_state
from nova.api.ec2 import context
from nova.api.ec2 import cloud
FLAGS = flags.FLAGS
@@ -54,16 +60,20 @@ class CloudTestCase(test.BaseTestCase):
proxy=self.compute)
self.injected.append(self.compute_consumer.attach_to_tornado(self.ioloop))
try:
manager.AuthManager().create_user('admin', 'admin', 'admin')
except: pass
admin = manager.AuthManager().get_user('admin')
project = manager.AuthManager().create_project('proj', 'admin', 'proj')
self.context = api.APIRequestContext(handler=None,project=project,user=admin)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('proj', 'admin', 'proj')
self.context = context.APIRequestContext(user=self.user,
project=self.project)
def tearDown(self):
manager.AuthManager().delete_project('proj')
manager.AuthManager().delete_user('admin')
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(CloudTestCase, self).tearDown()
def _create_key(self, name):
# NOTE(vish): create depends on pool, so just call helper directly
return cloud._gen_key(self.context, self.context.user.id, name)
def test_console_output(self):
if FLAGS.connection_type == 'fake':
@@ -76,6 +86,33 @@ class CloudTestCase(test.BaseTestCase):
self.assert_(output)
rv = yield self.compute.terminate_instance(instance_id)
def test_key_generation(self):
result = self._create_key('test')
private_key = result['private_key']
key = RSA.load_key_string(private_key, callback=lambda: None)
bio = BIO.MemoryBuffer()
public_key = db.key_pair_get(self.context,
self.context.user.id,
'test')['public_key']
key.save_pub_key_bio(bio)
converted = crypto.ssl_pub_to_ssh_pub(bio.read())
# assert key fields are equal
self.assertEqual(public_key.split(" ")[1].strip(),
converted.split(" ")[1].strip())
def test_describe_key_pairs(self):
self._create_key('test1')
self._create_key('test2')
result = self.cloud.describe_key_pairs(self.context)
keys = result["keypairsSet"]
self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
def test_delete_key_pair(self):
self._create_key('test')
self.cloud.delete_key_pair(self.context, 'test')
def test_run_instances(self):
if FLAGS.connection_type == 'fake':
logging.debug("Can't test instances without a real virtual env.")
@@ -94,7 +131,7 @@ class CloudTestCase(test.BaseTestCase):
rv = yield defer.succeed(time.sleep(1))
info = self.cloud._get_instance(instance['instance_id'])
logging.debug(info['state'])
if info['state'] == node.Instance.RUNNING:
if info['state'] == power_state.RUNNING:
break
self.assert_(rv)

View File

@@ -18,6 +18,8 @@
"""
Tests For Compute
"""
import datetime
import logging
from twisted.internet import defer
@@ -48,6 +50,7 @@ class ComputeTestCase(test.TrialTestCase):
def tearDown(self): # pylint: disable-msg=C0103
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
super(ComputeTestCase, self).tearDown()
def _create_instance(self):
"""Create a test instance"""
@@ -60,7 +63,7 @@ class ComputeTestCase(test.TrialTestCase):
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)
return db.instance_create(self.context, inst)['id']
@defer.inlineCallbacks
def test_run_terminate(self):
@@ -79,6 +82,24 @@ class ComputeTestCase(test.TrialTestCase):
logging.info("After terminating instances: %s", instances)
self.assertEqual(len(instances), 0)
@defer.inlineCallbacks
def test_run_terminate_timestamps(self):
"""Make sure timestamps are set for launched and destroyed"""
instance_id = self._create_instance()
instance_ref = db.instance_get(self.context, instance_id)
self.assertEqual(instance_ref['launched_at'], None)
self.assertEqual(instance_ref['deleted_at'], None)
launch = datetime.datetime.utcnow()
yield self.compute.run_instance(self.context, instance_id)
instance_ref = db.instance_get(self.context, instance_id)
self.assert_(instance_ref['launched_at'] > launch)
self.assertEqual(instance_ref['deleted_at'], None)
terminate = datetime.datetime.utcnow()
yield self.compute.terminate_instance(self.context, instance_id)
instance_ref = db.instance_get({'deleted': True}, instance_id)
self.assert_(instance_ref['launched_at'] < terminate)
self.assert_(instance_ref['deleted_at'] > terminate)
@defer.inlineCallbacks
def test_reboot(self):
"""Ensure instance can be rebooted"""

View File

@@ -28,6 +28,7 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.api.ec2 import context
FLAGS = flags.FLAGS
@@ -48,7 +49,7 @@ class NetworkTestCase(test.TrialTestCase):
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
self.network = utils.import_object(FLAGS.network_manager)
self.context = None
self.context = context.APIRequestContext(project=None, user=self.user)
for i in range(5):
name = 'project%s' % i
self.projects.append(self.manager.create_project(name,
@@ -56,12 +57,12 @@ class NetworkTestCase(test.TrialTestCase):
name))
# create the necessary network data for the project
self.network.set_network_host(self.context, self.projects[i].id)
instance_id = db.instance_create(None,
instance_ref = db.instance_create(None,
{'mac_address': utils.generate_mac()})
self.instance_id = instance_id
instance_id = db.instance_create(None,
self.instance_id = instance_ref['id']
instance_ref = db.instance_create(None,
{'mac_address': utils.generate_mac()})
self.instance2_id = instance_id
self.instance2_id = instance_ref['id']
def tearDown(self): # pylint: disable-msg=C0103
super(NetworkTestCase, self).tearDown()
@@ -75,12 +76,10 @@ class NetworkTestCase(test.TrialTestCase):
def _create_address(self, project_num, instance_id=None):
"""Create an address in given project num"""
net = db.project_get_network(None, self.projects[project_num].id)
address = db.fixed_ip_allocate(None, net['id'])
if instance_id is None:
instance_id = self.instance_id
db.fixed_ip_instance_associate(None, address, instance_id)
return address
self.context.project = self.projects[project_num]
return self.network.allocate_fixed_ip(self.context, instance_id)
def test_public_network_association(self):
"""Makes sure that we can allocaate a public ip"""
@@ -103,14 +102,14 @@ class NetworkTestCase(test.TrialTestCase):
address = db.instance_get_floating_address(None, self.instance_id)
self.assertEqual(address, None)
self.network.deallocate_floating_ip(self.context, float_addr)
db.fixed_ip_deallocate(None, fix_addr)
self.network.deallocate_fixed_ip(self.context, fix_addr)
def test_allocate_deallocate_fixed_ip(self):
"""Makes sure that we can allocate and deallocate a fixed ip"""
address = self._create_address(0)
self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
lease_ip(address)
db.fixed_ip_deallocate(None, address)
self.network.deallocate_fixed_ip(self.context, address)
# Doesn't go away until it's dhcp released
self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
@@ -131,14 +130,14 @@ class NetworkTestCase(test.TrialTestCase):
lease_ip(address)
lease_ip(address2)
db.fixed_ip_deallocate(None, address)
self.network.deallocate_fixed_ip(self.context, address)
release_ip(address)
self.assertFalse(is_allocated_in_project(address, self.projects[0].id))
# First address release shouldn't affect the second
self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))
db.fixed_ip_deallocate(None, address2)
self.network.deallocate_fixed_ip(self.context, address2)
release_ip(address2)
self.assertFalse(is_allocated_in_project(address2,
self.projects[1].id))
@@ -147,10 +146,23 @@ class NetworkTestCase(test.TrialTestCase):
"""Makes sure that private ips don't overlap"""
first = self._create_address(0)
lease_ip(first)
instance_ids = []
for i in range(1, 5):
address = self._create_address(i)
address2 = self._create_address(i)
address3 = self._create_address(i)
mac = utils.generate_mac()
instance_ref = db.instance_create(None,
{'mac_address': mac})
instance_ids.append(instance_ref['id'])
address = self._create_address(i, instance_ref['id'])
mac = utils.generate_mac()
instance_ref = db.instance_create(None,
{'mac_address': mac})
instance_ids.append(instance_ref['id'])
address2 = self._create_address(i, instance_ref['id'])
mac = utils.generate_mac()
instance_ref = db.instance_create(None,
{'mac_address': mac})
instance_ids.append(instance_ref['id'])
address3 = self._create_address(i, instance_ref['id'])
lease_ip(address)
lease_ip(address2)
lease_ip(address3)
@@ -160,14 +172,16 @@ class NetworkTestCase(test.TrialTestCase):
self.projects[0].id))
self.assertFalse(is_allocated_in_project(address3,
self.projects[0].id))
db.fixed_ip_deallocate(None, address)
db.fixed_ip_deallocate(None, address2)
db.fixed_ip_deallocate(None, address3)
self.network.deallocate_fixed_ip(self.context, address)
self.network.deallocate_fixed_ip(self.context, address2)
self.network.deallocate_fixed_ip(self.context, address3)
release_ip(address)
release_ip(address2)
release_ip(address3)
for instance_id in instance_ids:
db.instance_destroy(None, instance_id)
release_ip(first)
db.fixed_ip_deallocate(None, first)
self.network.deallocate_fixed_ip(self.context, first)
def test_vpn_ip_and_port_looks_valid(self):
"""Ensure the vpn ip and port are reasonable"""
@@ -194,12 +208,12 @@ class NetworkTestCase(test.TrialTestCase):
"""Makes sure that ip addresses that are deallocated get reused"""
address = self._create_address(0)
lease_ip(address)
db.fixed_ip_deallocate(None, address)
self.network.deallocate_fixed_ip(self.context, address)
release_ip(address)
address2 = self._create_address(0)
self.assertEqual(address, address2)
db.fixed_ip_deallocate(None, address2)
self.network.deallocate_fixed_ip(self.context, address2)
def test_available_ips(self):
"""Make sure the number of available ips for the network is correct
@@ -226,21 +240,27 @@ class NetworkTestCase(test.TrialTestCase):
num_available_ips = db.network_count_available_ips(None,
network['id'])
addresses = []
instance_ids = []
for i in range(num_available_ips):
address = self._create_address(0)
mac = utils.generate_mac()
instance_ref = db.instance_create(None,
{'mac_address': mac})
instance_ids.append(instance_ref['id'])
address = self._create_address(0, instance_ref['id'])
addresses.append(address)
lease_ip(address)
self.assertEqual(db.network_count_available_ips(None,
network['id']), 0)
self.assertRaises(db.NoMoreAddresses,
db.fixed_ip_allocate,
None,
network['id'])
self.network.allocate_fixed_ip,
self.context,
'foo')
for i in range(len(addresses)):
db.fixed_ip_deallocate(None, addresses[i])
for i in range(num_available_ips):
self.network.deallocate_fixed_ip(self.context, addresses[i])
release_ip(addresses[i])
db.instance_destroy(None, instance_ids[i])
self.assertEqual(db.network_count_available_ips(None,
network['id']),
num_available_ips)
@@ -263,7 +283,10 @@ def binpath(script):
def lease_ip(private_ip):
"""Run add command on dhcpbridge"""
network_ref = db.fixed_ip_get_network(None, private_ip)
cmd = "%s add fake %s fake" % (binpath('nova-dhcpbridge'), private_ip)
instance_ref = db.fixed_ip_get_instance(None, private_ip)
cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
instance_ref['mac_address'],
private_ip)
env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
@@ -274,7 +297,10 @@ def lease_ip(private_ip):
def release_ip(private_ip):
"""Run del command on dhcpbridge"""
network_ref = db.fixed_ip_get_network(None, private_ip)
cmd = "%s del fake %s fake" % (binpath('nova-dhcpbridge'), private_ip)
instance_ref = db.fixed_ip_get_instance(None, private_ip)
cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
instance_ref['mac_address'],
private_ip)
env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
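lease_ip and release_ip drive the same interface dnsmasq would: the argv is (action, mac, ip, hostname) and configuration rides in the environment. One such invocation spelled out with hypothetical values (the tests pull the mac and ip from the database):

    cmd = "%s add %s %s fake" % ('bin/nova-dhcpbridge',
                                 '02:16:3e:00:00:01',
                                 '10.0.0.3')
    env = {'DNSMASQ_INTERFACE': 'br100',
           'TESTING': '1',
           'FLAGFILE': '/etc/nova/dhcpbridge.conf'}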

View File

@@ -185,7 +185,7 @@ class S3APITestCase(test.TrialTestCase):
"""Setup users, projects, and start a test server."""
super(S3APITestCase, self).setUp()
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver',
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
FLAGS.buckets_path = os.path.join(OSS_TEMPDIR, 'buckets')
self.auth_manager = manager.AuthManager()

View File

@@ -0,0 +1,152 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from nova import db
from nova import exception
from nova import flags
from nova import quota
from nova import test
from nova import utils
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.api.ec2 import context
FLAGS = flags.FLAGS
class QuotaTestCase(test.TrialTestCase):
def setUp(self): # pylint: disable-msg=C0103
logging.getLogger().setLevel(logging.DEBUG)
super(QuotaTestCase, self).setUp()
self.flags(connection_type='fake',
quota_instances=2,
quota_cores=4,
quota_volumes=2,
quota_gigabytes=20,
quota_floating_ips=1)
self.cloud = cloud.CloudController()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('admin', 'admin', 'admin')
self.network = utils.import_object(FLAGS.network_manager)
self.context = context.APIRequestContext(project=self.project,
user=self.user)
def tearDown(self): # pylint: disable-msg=C0103
manager.AuthManager().delete_project(self.project)
manager.AuthManager().delete_user(self.user)
super(QuotaTestCase, self).tearDown()
def _create_instance(self, cores=2):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.large'
inst['vcpus'] = cores
inst['mac_address'] = utils.generate_mac()
return db.instance_create(self.context, inst)['id']
def _create_volume(self, size=10):
"""Create a test volume"""
vol = {}
vol['user_id'] = self.user.id
vol['project_id'] = self.project.id
vol['size'] = size
return db.volume_create(self.context, vol)['id']
def test_quota_overrides(self):
"""Make sure overriding a projects quotas works"""
num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
self.assertEqual(num_instances, 2)
db.quota_create(self.context, {'project_id': self.project.id,
'instances': 10})
num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
self.assertEqual(num_instances, 4)
db.quota_update(self.context, self.project.id, {'cores': 100})
num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
self.assertEqual(num_instances, 10)
db.quota_destroy(self.context, self.project.id)
def test_too_many_instances(self):
instance_ids = []
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small')
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
def test_too_many_cores(self):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small')
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
def test_too_many_volumes(self):
volume_ids = []
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
self.assertRaises(cloud.QuotaError, self.cloud.create_volume,
self.context,
size=10)
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_gigabytes(self):
volume_ids = []
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
self.assertRaises(cloud.QuotaError,
self.cloud.create_volume,
self.context,
size=10)
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_addresses(self):
address = '192.168.0.100'
try:
db.floating_ip_get_by_address(None, address)
except exception.NotFound:
db.floating_ip_create(None, {'address': address,
'host': FLAGS.host})
float_addr = self.network.allocate_floating_ip(self.context,
self.project.id)
# NOTE(vish): This assert never fails. When cloud attempts to
# make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks
# that is breaking.
self.assertRaises(cloud.QuotaError, self.cloud.allocate_address, self.context)
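test_quota_overrides pins down the shape of the quota rule: the allowance is the smallest of the requested count, the instance quota, and what the core quota permits for the instance type. A hypothetical reduction of that rule (ignoring instances already running, and assuming m1.small uses one core), checked against the numbers in the test:

    def allowed(requested, instance_quota, core_quota, cores_per_instance):
        # hypothetical simplification of quota.allowed_instances
        return min(requested, instance_quota,
                   core_quota // cores_per_instance)

    assert allowed(100, 2, 4, 1) == 2      # default quota_instances wins
    assert allowed(100, 10, 4, 1) == 4     # cores become the binding limit
    assert allowed(100, 10, 100, 1) == 10  # raised cores quota frees it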

View File

@@ -0,0 +1,231 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
from nova import db
from nova import flags
from nova import service
from nova import test
from nova import rpc
from nova import utils
from nova.auth import manager as auth_manager
from nova.scheduler import manager
from nova.scheduler import driver
FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')
class TestDriver(driver.Scheduler):
"""Scheduler Driver for Tests"""
def schedule(self, context, topic, *args, **kwargs):
return 'fallback_host'
def schedule_named_method(self, context, topic, num):
return 'named_host'
class SchedulerTestCase(test.TrialTestCase):
"""Test case for scheduler"""
def setUp(self): # pylint: disable=C0103
super(SchedulerTestCase, self).setUp()
self.flags(scheduler_driver='nova.tests.scheduler_unittest.TestDriver')
def test_fallback(self):
scheduler = manager.SchedulerManager()
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
rpc.cast('topic.fallback_host',
{'method': 'noexist',
'args': {'context': None,
'num': 7}})
self.mox.ReplayAll()
scheduler.noexist(None, 'topic', num=7)
def test_named_method(self):
scheduler = manager.SchedulerManager()
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
rpc.cast('topic.named_host',
{'method': 'named_method',
'args': {'context': None,
'num': 7}})
self.mox.ReplayAll()
scheduler.named_method(None, 'topic', num=7)
class SimpleDriverTestCase(test.TrialTestCase):
"""Test case for simple driver"""
def setUp(self): # pylint: disable-msg=C0103
super(SimpleDriverTestCase, self).setUp()
self.flags(connection_type='fake',
max_cores=4,
max_gigabytes=4,
volume_driver='nova.volume.driver.FakeAOEDriver',
scheduler_driver='nova.scheduler.simple.SimpleScheduler')
self.scheduler = manager.SchedulerManager()
self.context = None
self.manager = auth_manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = None
def tearDown(self): # pylint: disable-msg=C0103
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
super(SimpleDriverTestCase, self).tearDown()
def _create_instance(self):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
inst['vcpus'] = 1
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
"""Create a test volume"""
vol = {}
vol['image_id'] = 'ami-test'
vol['reservation_id'] = 'r-fakeres'
vol['size'] = 1
return db.volume_create(self.context, vol)['id']
def test_hosts_are_up(self):
"""Ensures driver can find the hosts that are up"""
# NOTE(vish): constructing service without create method
# because we are going to use it without queue
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(len(hosts), 2)
compute1.kill()
compute2.kill()
def test_least_busy_host_gets_instance(self):
"""Ensures the host with less cores gets the next one"""
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
instance_id1 = self._create_instance()
compute1.run_instance(self.context, instance_id1)
instance_id2 = self._create_instance()
host = self.scheduler.driver.schedule_run_instance(self.context,
instance_id2)
self.assertEqual(host, 'host2')
compute1.terminate_instance(self.context, instance_id1)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
compute2.kill()
def test_too_many_cores(self):
"""Ensures we don't go over max cores"""
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
instance_ids1 = []
instance_ids2 = []
for index in xrange(FLAGS.max_cores):
instance_id = self._create_instance()
compute1.run_instance(self.context, instance_id)
instance_ids1.append(instance_id)
instance_id = self._create_instance()
compute2.run_instance(self.context, instance_id)
instance_ids2.append(instance_id)
instance_id = self._create_instance()
self.assertRaises(driver.NoValidHost,
self.scheduler.driver.schedule_run_instance,
self.context,
instance_id)
for instance_id in instance_ids1:
compute1.terminate_instance(self.context, instance_id)
for instance_id in instance_ids2:
compute2.terminate_instance(self.context, instance_id)
compute1.kill()
compute2.kill()
def test_least_busy_host_gets_volume(self):
"""Ensures the host with less gigabytes gets the next one"""
volume1 = service.Service('host1',
'nova-volume',
'volume',
FLAGS.volume_manager)
volume2 = service.Service('host2',
'nova-volume',
'volume',
FLAGS.volume_manager)
volume_id1 = self._create_volume()
volume1.create_volume(self.context, volume_id1)
volume_id2 = self._create_volume()
host = self.scheduler.driver.schedule_create_volume(self.context,
volume_id2)
self.assertEqual(host, 'host2')
volume1.delete_volume(self.context, volume_id1)
db.volume_destroy(self.context, volume_id2)
volume1.kill()
volume2.kill()
def test_too_many_gigabytes(self):
"""Ensures we don't go over max gigabytes"""
volume1 = service.Service('host1',
'nova-volume',
'volume',
FLAGS.volume_manager)
volume2 = service.Service('host2',
'nova-volume',
'volume',
FLAGS.volume_manager)
volume_ids1 = []
volume_ids2 = []
for index in xrange(FLAGS.max_gigabytes):
volume_id = self._create_volume()
volume1.create_volume(self.context, volume_id)
volume_ids1.append(volume_id)
volume_id = self._create_volume()
volume2.create_volume(self.context, volume_id)
volume_ids2.append(volume_id)
volume_id = self._create_volume()
self.assertRaises(driver.NoValidHost,
self.scheduler.driver.schedule_create_volume,
self.context,
volume_id)
for volume_id in volume_ids1:
volume1.delete_volume(self.context, volume_id)
for volume_id in volume_ids2:
volume2.delete_volume(self.context, volume_id)
volume1.kill()
volume2.kill()

View File

@@ -87,7 +87,7 @@ class ServiceTestCase(test.BaseTestCase):
host,
binary).AndRaise(exception.NotFound())
service.db.service_create(None,
service_create).AndReturn(service_ref['id'])
service_create).AndReturn(service_ref)
self.mox.ReplayAll()
app = service.Service.create(host=host, binary=binary)
@@ -131,7 +131,7 @@ class ServiceTestCase(test.BaseTestCase):
host,
binary).AndRaise(exception.NotFound())
service.db.service_create(None,
service_create).AndReturn(service_ref['id'])
service_create).AndReturn(service_ref)
service.db.service_get(None, service_ref['id']).AndReturn(service_ref)
service.db.service_update(None, service_ref['id'],
mox.ContainsKeyValue('report_count', 1))

View File

@@ -19,7 +19,7 @@ from xml.dom.minidom import parseString
from nova import db
from nova import flags
from nova import test
from nova.endpoint import cloud
from nova.api.ec2 import cloud
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
@@ -179,7 +179,8 @@ class NWFilterTestCase(test.TrialTestCase):
self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
inst_id = db.instance_create({}, { 'user_id' : 'fake', 'project_id' : 'fake' })
inst_id = db.instance_create({}, {'user_id': 'fake',
'project_id': 'fake'})['id']
security_group = self.setup_and_return_security_group()
db.instance_add_security_group({}, inst_id, security_group.id)

View File

@@ -108,7 +108,7 @@ class VolumeTestCase(test.TrialTestCase):
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
instance_id = db.instance_create(self.context, inst)
instance_id = db.instance_create(self.context, inst)['id']
mountpoint = "/dev/sdf"
volume_id = self._create_volume()
yield self.volume.create_volume(self.context, volume_id)

View File

@@ -49,7 +49,8 @@ from nova import datastore
from nova import flags
from nova import twistd
from nova.tests.access_unittest import *
#TODO(gundlach): rewrite and readd this after merge
#from nova.tests.access_unittest import *
from nova.tests.auth_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
@@ -58,7 +59,9 @@ from nova.tests.flags_unittest import *
from nova.tests.network_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
from nova.tests.quota_unittest import *
from nova.tests.rpc_unittest import *
from nova.tests.scheduler_unittest import *
from nova.tests.service_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.volume_unittest import *