Merge trunk.

This commit is contained in:
Soren Hansen
2010-09-22 21:36:15 +02:00
14 changed files with 89 additions and 1853 deletions

View File

@@ -50,7 +50,6 @@
"""
CLI interface for nova management.
Connects to the running ADMIN api in the api daemon.
"""
import os
@@ -68,7 +67,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
from nova import db
from nova import exception
from nova import flags
from nova import quota
from nova import utils
from nova.auth import manager
from nova.cloudpipe import pipelib
@@ -186,6 +187,13 @@ class RoleCommands(object):
class UserCommands(object):
"""Class for managing users."""
@staticmethod
def _print_export(user):
"""Print export variables to use with API."""
print 'export EC2_ACCESS_KEY=%s' % user.access
print 'export EC2_SECRET_KEY=%s' % user.secret
def __init__(self):
self.manager = manager.AuthManager()
@@ -193,13 +201,13 @@ class UserCommands(object):
"""creates a new admin and prints exports
arguments: name [access] [secret]"""
user = self.manager.create_user(name, access, secret, True)
print_export(user)
self._print_export(user)
def create(self, name, access=None, secret=None):
"""creates a new user and prints exports
arguments: name [access] [secret]"""
user = self.manager.create_user(name, access, secret, False)
print_export(user)
self._print_export(user)
def delete(self, name):
"""deletes an existing user
@@ -211,7 +219,7 @@ class UserCommands(object):
arguments: name"""
user = self.manager.get_user(name)
if user:
print_export(user)
self._print_export(user)
else:
print "User %s doesn't exist" % name
@@ -222,12 +230,6 @@ class UserCommands(object):
print user.name
def print_export(user):
"""Print export variables to use with API."""
print 'export EC2_ACCESS_KEY=%s' % user.access
print 'export EC2_SECRET_KEY=%s' % user.secret
class ProjectCommands(object):
"""Class for managing projects."""
@@ -262,6 +264,19 @@ class ProjectCommands(object):
for project in self.manager.get_projects():
print project.name
def quota(self, project_id, key=None, value=None):
"""Set or display quotas for project
arguments: project_id [key] [value]"""
if key:
quo = {'project_id': project_id, key: value}
try:
db.quota_update(None, project_id, quo)
except exception.NotFound:
db.quota_create(None, quo)
project_quota = quota.get_quota(None, project_id)
for key, value in project_quota.iteritems():
print '%s: %s' % (key, value)
def remove(self, project, user):
"""Removes user from project
arguments: project user"""
@@ -274,6 +289,7 @@ class ProjectCommands(object):
with open(filename, 'w') as f:
f.write(zip_file)
class FloatingIpCommands(object):
"""Class for managing floating ip."""
@@ -306,6 +322,7 @@ class FloatingIpCommands(object):
floating_ip['address'],
instance)
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),

View File

@@ -1,69 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Role-based access control decorators to use for wrapping other
methods with."""
from nova import exception
def allow(*roles):
    """Decorator factory: permit the wrapped method when the caller is a
    superuser or holds at least one of *roles*; otherwise raise
    NotAuthorized."""
    def wrap(func):  # pylint: disable-msg=C0111
        def wrapped_func(self, context, *args,
                         **kwargs):  # pylint: disable-msg=C0111
            # Superusers bypass role checks entirely; any() short-circuits
            # on the first matching role, like the original loop.
            permitted = context.user.is_superuser() or any(
                __matches_role(context, role) for role in roles)
            if not permitted:
                raise exception.NotAuthorized()
            return func(self, context, *args, **kwargs)
        return wrapped_func
    return wrap
def deny(*roles):
    """Decorator factory: reject the wrapped method when the caller holds
    any of *roles*. Superusers are never denied."""
    def wrap(func):  # pylint: disable-msg=C0111
        def wrapped_func(self, context, *args,
                         **kwargs):  # pylint: disable-msg=C0111
            if not context.user.is_superuser():
                for role in roles:
                    if __matches_role(context, role):
                        raise exception.NotAuthorized()
            return func(self, context, *args, **kwargs)
        return wrapped_func
    return wrap
def __matches_role(context, role):
    """Check if a role is allowed.

    'all' and 'none' are pseudo-roles that always/never match; any other
    role is resolved against the context's project membership."""
    special = {'all': True, 'none': False}
    if role in special:
        return special[role]
    return context.project.has_role(context.user.id, role)

View File

@@ -1,214 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin API controller, exposed through http via the api worker.
"""
import base64
from nova import db
from nova import exception
from nova.auth import manager
def user_dict(user, base64_file=None):
    """Serialize a user object into an API result dict.

    A falsy user yields an empty dict so callers can return the result
    directly without a None check."""
    if not user:
        return {}
    return {
        'username': user.id,
        'accesskey': user.access,
        'secretkey': user.secret,
        'file': base64_file,
    }
def project_dict(project):
    """Serialize a project object into an API result dict.

    A falsy project yields an empty dict."""
    if not project:
        return {}
    return {
        'projectname': project.id,
        'project_manager_id': project.project_manager_id,
        'description': project.description,
    }
def host_dict(host):
    """Convert a host model object to a result dict.

    The host's state attribute is the result; a falsy host yields {}."""
    return host.state if host else {}
def admin_only(target):
    """Decorator for admin-only API calls.

    Non-admin callers silently receive an empty dict instead of an
    error. The context is expected as the second positional argument
    (methods are invoked as target(self, context, ...))."""
    def wrapper(*args, **kwargs):
        """Internal wrapper method for admin-only API calls"""
        context = args[1]
        if not context.user.is_admin():
            return {}
        return target(*args, **kwargs)
    return wrapper
class AdminController(object):
    """
    API Controller for users, hosts, nodes, and workers.

    Trivial admin_only wrapper will be replaced with RBAC,
    allowing project managers to administer project users.
    """

    def __str__(self):
        return 'AdminController'

    @admin_only
    def describe_user(self, _context, name, **_kwargs):
        """Returns user data, including access and secret keys."""
        return user_dict(manager.AuthManager().get_user(name))

    @admin_only
    def describe_users(self, _context, **_kwargs):
        """Returns all users - should be changed to deal with a list."""
        return {'userSet':
                [user_dict(u) for u in manager.AuthManager().get_users()]}

    @admin_only
    def register_user(self, _context, name, **_kwargs):
        """Creates a new user, and returns generated credentials."""
        return user_dict(manager.AuthManager().create_user(name))

    @admin_only
    def deregister_user(self, _context, name, **_kwargs):
        """Deletes a single user (NOT undoable.)

        Should throw an exception if the user has instances,
        volumes, or buckets remaining.
        """
        manager.AuthManager().delete_user(name)
        return True

    @admin_only
    def describe_roles(self, context, project_roles=True, **kwargs):
        """Returns a list of allowed roles."""
        roles = manager.AuthManager().get_roles(project_roles)
        return {'roles': [{'role': r} for r in roles]}

    @admin_only
    def describe_user_roles(self, context, user, project=None, **kwargs):
        """Returns a list of roles for the given user.

        Omitting project will return any global roles that the user has.
        Specifying project will return only project specific roles.
        """
        roles = manager.AuthManager().get_user_roles(user, project=project)
        return {'roles': [{'role': r} for r in roles]}

    @admin_only
    def modify_user_role(self, context, user, role, project=None,
                         operation='add', **kwargs):
        """Add or remove a role for a user and project."""
        if operation == 'add':
            manager.AuthManager().add_role(user, role, project)
        elif operation == 'remove':
            manager.AuthManager().remove_role(user, role, project)
        else:
            raise exception.ApiError('operation must be add or remove')
        return True

    @admin_only
    def generate_x509_for_user(self, _context, name, project=None, **kwargs):
        """Generates and returns an x509 certificate for a single user.

        Is usually called from a client that will wrap this with
        access and secret key info, and return a zip file.
        """
        # A user's default project shares the user's name.
        if project is None:
            project = name
        project = manager.AuthManager().get_project(project)
        user = manager.AuthManager().get_user(name)
        return user_dict(user, base64.b64encode(project.get_credentials(user)))

    @admin_only
    def describe_project(self, context, name, **kwargs):
        """Returns project data, including member ids."""
        return project_dict(manager.AuthManager().get_project(name))

    @admin_only
    def describe_projects(self, context, user=None, **kwargs):
        """Returns all projects - should be changed to deal with a list."""
        return {'projectSet':
                [project_dict(u) for u in
                 manager.AuthManager().get_projects(user=user)]}

    @admin_only
    def register_project(self, context, name, manager_user, description=None,
                         member_users=None, **kwargs):
        """Creates a new project.

        Forwards the caller-supplied description and member list (the
        previous version discarded both by passing hard-coded None).
        """
        return project_dict(
            manager.AuthManager().create_project(
                name,
                manager_user,
                description=description,
                member_users=member_users))

    @admin_only
    def deregister_project(self, context, name):
        """Permanently deletes a project."""
        manager.AuthManager().delete_project(name)
        return True

    @admin_only
    def describe_project_members(self, context, name, **kwargs):
        """Returns the member ids of the named project."""
        project = manager.AuthManager().get_project(name)
        result = {
            'members': [{'member': m} for m in project.member_ids]}
        return result

    @admin_only
    def modify_project_member(self, context, user, project, operation,
                              **kwargs):
        """Add or remove a user from a project."""
        if operation == 'add':
            manager.AuthManager().add_to_project(user, project)
        elif operation == 'remove':
            manager.AuthManager().remove_from_project(user, project)
        else:
            raise exception.ApiError('operation must be add or remove')
        return True

    # FIXME(vish): these host commands don't work yet, perhaps some of the
    #              required data can be retrieved from service objects?

    @admin_only
    def describe_hosts(self, _context, **_kwargs):
        """Returns status info for all nodes. Includes:
            * Disk Space
            * Instance List
            * RAM used
            * CPU used
            * DHCP servers running
            * Iptables / bridges
        """
        return {'hostSet': [host_dict(h) for h in db.host_get_all()]}

    @admin_only
    def describe_host(self, _context, name, **_kwargs):
        """Returns status info for single node."""
        return host_dict(db.host_get(name))

View File

@@ -1,347 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tornado REST API Request Handlers for Nova functions
Most calls are proxied into the responsible controller.
"""
import logging
import multiprocessing
import random
import re
import urllib
# TODO(termie): replace minidom with etree
from xml.dom import minidom
import tornado.web
from twisted.internet import defer
from nova import crypto
from nova import exception
from nova import flags
from nova import utils
from nova.auth import manager
import nova.cloudpipe.api
from nova.endpoint import cloud
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')

# Module-level logger, forced to DEBUG regardless of global config.
_log = logging.getLogger("api")
_log.setLevel(logging.DEBUG)

# Matches each capital letter starting a CamelCase word; used by
# _camelcase_to_underscore below.
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def _camelcase_to_underscore(str):
return _c2u.sub(r'_\1', str).lower().strip('_')
def _underscore_to_camelcase(str):
return ''.join([x[:1].upper() + x[1:] for x in str.split('_')])
def _underscore_to_xmlcase(str):
res = _underscore_to_camelcase(str)
return res[:1].lower() + res[1:]
class APIRequestContext(object):
    """Per-request context: the issuing handler, the authenticated user
    and project, and a random request id echoed back in responses."""

    def __init__(self, handler, user, project):
        self.handler = handler
        self.user = user
        self.project = project
        # 20 random chars from A-Z, 0-9 and '-' (py2: xrange).
        self.request_id = ''.join(
            [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
             for x in xrange(20)]
        )
class APIRequest(object):
    """A single parsed EC2-style API call: resolves the controller method,
    normalizes query arguments, and renders the XML response."""

    def __init__(self, controller, action):
        self.controller = controller
        self.action = action

    def send(self, context, **kwargs):
        """Dispatch the action to the controller; returns a Twisted
        Deferred that fires with the rendered XML response string."""
        try:
            # CamelCase EC2 action maps to a snake_case controller method.
            method = getattr(self.controller,
                             _camelcase_to_underscore(self.action))
        except AttributeError:
            _error = ('Unsupported API request: controller = %s,'
                      'action = %s') % (self.controller, self.action)
            _log.warning(_error)
            # TODO: Raise custom exception, trap in apiserver,
            # and reraise as 400 error.
            raise Exception(_error)

        args = {}
        # Fold dotted query keys ('Foo.1') into nested dicts; each raw
        # value arrives as a single-element list from the web framework.
        for key, value in kwargs.items():
            parts = key.split(".")
            key = _camelcase_to_underscore(parts[0])
            if len(parts) > 1:
                d = args.get(key, {})
                d[parts[1]] = value[0]
                value = d
            else:
                value = value[0]
            args[key] = value

        # Dicts keyed entirely by digit strings are EC2 numbered lists;
        # convert them to plain lists ordered by key (py2 idioms:
        # keys()[0] indexing and in-place sort of items()).
        for key in args.keys():
            if isinstance(args[key], dict):
                if args[key] != {} and args[key].keys()[0].isdigit():
                    s = args[key].items()
                    s.sort()
                    args[key] = [v for k, v in s]

        d = defer.maybeDeferred(method, context, **args)
        d.addCallback(self._render_response, context.request_id)
        return d

    def _render_response(self, response_data, request_id):
        """Wrap response_data in an '<Action>Response' XML document."""
        xml = minidom.Document()

        response_el = xml.createElement(self.action + 'Response')
        response_el.setAttribute('xmlns',
                                 'http://ec2.amazonaws.com/doc/2009-11-30/')
        request_id_el = xml.createElement('requestId')
        request_id_el.appendChild(xml.createTextNode(request_id))
        response_el.appendChild(request_id_el)
        if(response_data == True):
            # Bare True means "success with no payload" in EC2 responses.
            self._render_dict(xml, response_el, {'return': 'true'})
        else:
            self._render_dict(xml, response_el, response_data)

        xml.appendChild(response_el)
        response = xml.toxml()
        xml.unlink()
        _log.debug(response)
        return response

    def _render_dict(self, xml, el, data):
        """Append one child element to el per key of data."""
        try:
            for key in data.keys():
                val = data[key]
                el.appendChild(self._render_data(xml, key, val))
        except:
            # Log the offending payload before re-raising for diagnosis.
            _log.debug(data)
            raise

    def _render_data(self, xml, el_name, data):
        """Recursively render lists, dicts, objects and scalars as XML."""
        el_name = _underscore_to_xmlcase(el_name)
        data_el = xml.createElement(el_name)

        if isinstance(data, list):
            for item in data:
                data_el.appendChild(self._render_data(xml, 'item', item))
        elif isinstance(data, dict):
            self._render_dict(xml, data_el, data)
        elif hasattr(data, '__dict__'):
            # Arbitrary objects are rendered via their attribute dict.
            self._render_dict(xml, data_el, data.__dict__)
        elif isinstance(data, bool):
            data_el.appendChild(xml.createTextNode(str(data).lower()))
        elif data != None:
            data_el.appendChild(xml.createTextNode(str(data)))

        return data_el
class RootRequestHandler(tornado.web.RequestHandler):
    """Handler for '/': lists the API versions this endpoint serves,
    one per line."""

    def get(self):
        # available api versions, '1.0' first then dated EC2 revisions
        supported = (
            '1.0',
            '2007-01-19',
            '2007-03-01',
            '2007-08-29',
            '2007-10-10',
            '2007-12-15',
            '2008-02-01',
            '2008-09-01',
            '2009-04-04',
        )
        self.write(''.join('%s\n' % version for version in supported))
        self.finish()
class MetadataRequestHandler(tornado.web.RequestHandler):
    """Serves EC2-style instance metadata, resolved by caller IP."""

    def print_data(self, data):
        """Write data in the flat text format of the EC2 metadata API."""
        if isinstance(data, dict):
            output = ''
            for key in data:
                # '_name' is internal bookkeeping, not a metadata key.
                if key == '_name':
                    continue
                output += key
                if isinstance(data[key], dict):
                    if '_name' in data[key]:
                        output += '=' + str(data[key]['_name'])
                    else:
                        # Trailing '/' marks a sub-tree the client can walk.
                        output += '/'
                output += '\n'
            self.write(output[:-1])  # cut off last \n
        elif isinstance(data, list):
            self.write('\n'.join(data))
        else:
            self.write(str(data))

    def lookup(self, path, data):
        """Walk the nested metadata dict along a '/'-separated path.

        Returns None when a component is missing; returns the current
        value early if the path descends into a non-dict leaf."""
        items = path.split('/')
        for item in items:
            if item:
                if not isinstance(data, dict):
                    return data
                if not item in data:
                    return None
                data = data[item]
        return data

    def get(self, path):
        """GET: resolve metadata for the remote IP, then walk the path."""
        cc = self.application.controllers['Cloud']
        meta_data = cc.get_metadata(self.request.remote_ip)
        if meta_data is None:
            _log.error('Failed to get metadata for ip: %s' %
                       self.request.remote_ip)
            raise tornado.web.HTTPError(404)
        data = self.lookup(path, meta_data)
        if data is None:
            raise tornado.web.HTTPError(404)
        self.print_data(data)
        self.finish()
class APIRequestHandler(tornado.web.RequestHandler):
    """Authenticates signed EC2 API requests and dispatches them to the
    named controller; GET and POST are handled identically."""

    def get(self, controller_name):
        self.execute(controller_name)

    @tornado.web.asynchronous
    def execute(self, controller_name):
        """Verify signature/auth args, then run the requested action."""
        # Obtain the appropriate controller for this request.
        try:
            controller = self.application.controllers[controller_name]
        except KeyError:
            self._error('unhandled', 'no controller named %s' % controller_name)
            return

        args = self.request.arguments

        # Read request signature.
        try:
            signature = args.pop('Signature')[0]
        except:
            raise tornado.web.HTTPError(400)

        # Make a copy of args for authentication and signature verification.
        auth_params = {}
        for key, value in args.items():
            # Each value is a single-element list; keep only the payload.
            auth_params[key] = value[0]

        # Get requested action and remove authentication args for final request.
        try:
            action = args.pop('Action')[0]
            access = args.pop('AWSAccessKeyId')[0]
            args.pop('SignatureMethod')
            args.pop('SignatureVersion')
            args.pop('Version')
            args.pop('Timestamp')
        except:
            raise tornado.web.HTTPError(400)

        # Authenticate the request.
        try:
            (user, project) = manager.AuthManager().authenticate(
                access,
                signature,
                auth_params,
                self.request.method,
                self.request.host,
                self.request.path
            )
        except exception.Error, ex:
            logging.debug("Authentication Failure: %s" % ex)
            raise tornado.web.HTTPError(403)

        _log.debug('action: %s' % action)

        for key, value in args.items():
            _log.debug('arg: %s\t\tval: %s' % (key, value))

        request = APIRequest(controller, action)
        context = APIRequestContext(self, user, project)
        d = request.send(context, **args)
        # d.addCallback(utils.debug)

        # TODO: Wrap response in AWS XML format
        d.addCallbacks(self._write_callback, self._error_callback)

    def _write_callback(self, data):
        """Deferred success path: emit the rendered XML body."""
        self.set_header('Content-Type', 'text/xml')
        self.write(data)
        self.finish()

    def _error_callback(self, failure):
        """Deferred error path: map ApiError to an EC2 error response;
        unknown exceptions are reported and re-raised."""
        try:
            failure.raiseException()
        except exception.ApiError as ex:
            if ex.code:
                self._error(ex.code, ex.message)
            else:
                self._error(type(ex).__name__, ex.message)
        # TODO(vish): do something more useful with unknown exceptions
        except Exception as ex:
            self._error(type(ex).__name__, str(ex))
            raise

    def post(self, controller_name):
        self.execute(controller_name)

    def _error(self, code, message):
        """Render an EC2-style error document with HTTP status 400."""
        self._status_code = 400
        self.set_header('Content-Type', 'text/xml')
        self.write('<?xml version="1.0"?>\n')
        self.write('<Response><Errors><Error><Code>%s</Code>'
                   '<Message>%s</Message></Error></Errors>'
                   '<RequestID>?</RequestID></Response>' % (code, message))
        self.finish()
class APIServerApplication(tornado.web.Application):
    """Tornado application wiring URL routes to handlers.

    Every dated prefix maps to the same metadata handler for EC2
    version compatibility. A process pool is attached via settings
    for deferring slow work (e.g. key generation)."""

    def __init__(self, controllers):
        tornado.web.Application.__init__(self, [
            (r'/', RootRequestHandler),
            (r'/cloudpipe/(.*)', nova.cloudpipe.api.CloudPipeRequestHandler),
            (r'/cloudpipe', nova.cloudpipe.api.CloudPipeRequestHandler),
            (r'/services/([A-Za-z0-9]+)/', APIRequestHandler),
            (r'/latest/([-A-Za-z0-9/]*)', MetadataRequestHandler),
            (r'/2009-04-04/([-A-Za-z0-9/]*)', MetadataRequestHandler),
            (r'/2008-09-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
            (r'/2008-02-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
            (r'/2007-12-15/([-A-Za-z0-9/]*)', MetadataRequestHandler),
            (r'/2007-10-10/([-A-Za-z0-9/]*)', MetadataRequestHandler),
            (r'/2007-08-29/([-A-Za-z0-9/]*)', MetadataRequestHandler),
            (r'/2007-03-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
            (r'/2007-01-19/([-A-Za-z0-9/]*)', MetadataRequestHandler),
            (r'/1.0/([-A-Za-z0-9/]*)', MetadataRequestHandler),
        ], pool=multiprocessing.Pool(4))
        self.controllers = controllers

View File

@@ -1,952 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is via distributed
datastore.
"""
import base64
import datetime
import logging
import os
import time
import IPy
from twisted.internet import defer
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import quota
from nova import rpc
from nova import utils
from nova.auth import rbac
from nova.compute.instance_types import INSTANCE_TYPES
from nova.endpoint import images
FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')

# Short alias used by the security-group argument validation below.
InvalidInputException = exception.InvalidInputException
class QuotaError(exception.ApiError):
    """Raised when an operation would exceed a project quota."""
    pass
def _gen_key(context, user_id, key_name):
    """Generate a key

    This is a module level method because it is slow and we need to defer
    it into a process pool.

    Returns {'private_key': ..., 'fingerprint': ...} on success, or
    {'exception': ex} on failure — exceptions are returned rather than
    raised so they survive the process-pool callback boundary."""
    try:
        # NOTE(vish): generating key pair is slow so check for legal
        #             creation before creating key_pair
        try:
            db.key_pair_get(context, user_id, key_name)
            raise exception.Duplicate("The key_pair %s already exists"
                                      % key_name)
        except exception.NotFound:
            pass
        private_key, public_key, fingerprint = crypto.generate_key_pair()
        key = {}
        key['user_id'] = user_id
        key['name'] = key_name
        key['public_key'] = public_key
        key['fingerprint'] = fingerprint
        db.key_pair_create(context, key)
        return {'private_key': private_key, 'fingerprint': fingerprint}
    except Exception as ex:
        return {'exception': ex}
class CloudController(object):
""" CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
    def __init__(self):
        """Load the configured network manager and run one-time setup."""
        self.network_manager = utils.import_object(FLAGS.network_manager)
        self.setup()
    def __str__(self):
        """Return the controller's display name."""
        return 'CloudController'
    def setup(self):
        """ Ensure the keychains and folders exist. """
        # FIXME(ja): this should be moved to a nova-manage command,
        # if not setup throw exceptions instead of running
        # Create keys folder, if it doesn't exist
        if not os.path.exists(FLAGS.keys_path):
            os.makedirs(FLAGS.keys_path)
        # Gen root CA, if we don't have one
        root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file)
        if not os.path.exists(root_ca_path):
            start = os.getcwd()
            os.chdir(FLAGS.ca_path)
            # TODO(vish): Do this with M2Crypto instead
            # NOTE(review): cwd is not restored if runthis raises —
            # consider try/finally around the chdir.
            utils.runthis("Generating root CA: %s", "sh genrootca.sh")
            os.chdir(start)
    def _get_mpi_data(self, project_id):
        """Map key_name -> list of 'ip slots=N' MPI hostfile lines for the
        project's instances that have a fixed IP assigned."""
        result = {}
        for instance in db.instance_get_by_project(None, project_id):
            if instance['fixed_ip']:
                # slots = vcpu count of the instance's flavor
                line = '%s slots=%d' % (instance['fixed_ip']['str_id'],
                    INSTANCE_TYPES[instance['instance_type']]['vcpus'])
                key = str(instance['key_name'])
                if key in result:
                    result[key].append(line)
                else:
                    result[key] = [line]
        return result
    def _trigger_refresh_security_group(self, security_group):
        """Tell each host running one of the group's instances (deduped)
        to reload the group's rules via RPC."""
        nodes = set([instance.host for instance in security_group.instances])
        for node in nodes:
            rpc.call('%s.%s' % (FLAGS.compute_topic, node),
                     { "method": "refresh_security_group",
                       "args": { "context": None,
                                 "security_group_id": security_group.id}})
    def get_metadata(self, address):
        """Build the EC2 metadata dict for the instance behind a fixed IP.

        Returns None when no instance owns the address."""
        instance_ref = db.fixed_ip_get_instance(None, address)
        if instance_ref is None:
            return None
        mpi = self._get_mpi_data(instance_ref['project_id'])
        if instance_ref['key_name']:
            # '_name' is consumed by the metadata handler's flat rendering.
            keys = {
                '0': {
                    '_name': instance_ref['key_name'],
                    'openssh-key': instance_ref['key_data']
                }
            }
        else:
            keys = ''
        hostname = instance_ref['hostname']
        floating_ip = db.instance_get_floating_address(None,
                                                       instance_ref['id'])
        data = {
            'user-data': base64.b64decode(instance_ref['user_data']),
            'meta-data': {
                'ami-id': instance_ref['image_id'],
                'ami-launch-index': instance_ref['launch_index'],
                'ami-manifest-path': 'FIXME',
                'block-device-mapping': {  # TODO(vish): replace with real data
                    'ami': 'sda1',
                    'ephemeral0': 'sda2',
                    'root': '/dev/sda1',
                    'swap': 'sda3'
                },
                'hostname': hostname,
                'instance-action': 'none',
                'instance-id': instance_ref['str_id'],
                'instance-type': instance_ref['instance_type'],
                'local-hostname': hostname,
                'local-ipv4': address,
                'kernel-id': instance_ref['kernel_id'],
                'placement': {
                    'availability-zone': 'nova'  # TODO(vish): real zone
                },
                'public-hostname': hostname,
                'public-ipv4': floating_ip or '',
                'public-keys': keys,
                'ramdisk-id': instance_ref['ramdisk_id'],
                'reservation-id': instance_ref['reservation_id'],
                'security-groups': '',
                'mpi': mpi
            }
        }
        if False:  # TODO(vish): store ancestor ids
            data['ancestor-ami-ids'] = []
        if False:  # TODO(vish): store product codes
            data['product-codes'] = []
        return data
    @rbac.allow('all')
    def describe_availability_zones(self, context, **kwargs):
        """Report the single static 'nova' availability zone."""
        return {'availabilityZoneInfo': [{'zoneName': 'nova',
                                          'zoneState': 'available'}]}
    @rbac.allow('all')
    def describe_regions(self, context, region_name=None, **kwargs):
        """List regions from FLAGS.region_list ('name=url' entries),
        falling back to a single 'nova' region at FLAGS.ec2_url."""
        if FLAGS.region_list:
            regions = []
            for region in FLAGS.region_list:
                name, _sep, url = region.partition('=')
                regions.append({'regionName': name,
                                'regionEndpoint': url})
        else:
            regions = [{'regionName': 'nova',
                        'regionEndpoint': FLAGS.ec2_url}]
        if region_name:
            # region_name is a collection of names to filter on.
            regions = [r for r in regions if r['regionName'] in region_name]
        return {'regionInfo': regions }
    @rbac.allow('all')
    def describe_snapshots(self,
                           context,
                           snapshot_id=None,
                           owner=None,
                           restorable_by=None,
                           **kwargs):
        """Stub: snapshots are not implemented; returns placeholder
        'fixme' values in the EC2 response shape."""
        return {'snapshotSet': [{'snapshotId': 'fixme',
                                 'volumeId': 'fixme',
                                 'status': 'fixme',
                                 'startTime': 'fixme',
                                 'progress': 'fixme',
                                 'ownerId': 'fixme',
                                 'volumeSize': 0,
                                 'description': 'fixme'}]}
    @rbac.allow('all')
    def describe_key_pairs(self, context, key_name=None, **kwargs):
        """List the caller's key pairs, optionally filtered by key_name.

        VPN keys (names ending in FLAGS.vpn_key_suffix) are hidden from
        non-admin users."""
        key_pairs = db.key_pair_get_all_by_user(context, context.user.id)
        if not key_name is None:
            key_pairs = [x for x in key_pairs if x['name'] in key_name]

        result = []
        for key_pair in key_pairs:
            # filter out the vpn keys
            suffix = FLAGS.vpn_key_suffix
            if context.user.is_admin() or not key_pair['name'].endswith(suffix):
                result.append({
                    'keyName': key_pair['name'],
                    'keyFingerprint': key_pair['fingerprint'],
                })

        return {'keypairsSet': result}
    @rbac.allow('all')
    def create_key_pair(self, context, key_name, **kwargs):
        """Generate a key pair in the shared worker pool.

        Returns a Deferred. _gen_key reports failures as
        {'exception': ...} (see its docstring), which is converted to an
        errback here."""
        dcall = defer.Deferred()
        pool = context.handler.application.settings.get('pool')

        def _complete(kwargs):
            # Pool callback: translate the result dict into the Deferred.
            if 'exception' in kwargs:
                dcall.errback(kwargs['exception'])
                return
            dcall.callback({'keyName': key_name,
                            'keyFingerprint': kwargs['fingerprint'],
                            'keyMaterial': kwargs['private_key']})
        # TODO(vish): when context is no longer an object, pass it here
        pool.apply_async(_gen_key, [None, context.user.id, key_name],
                         callback=_complete)
        return dcall
    @rbac.allow('all')
    def delete_key_pair(self, context, key_name, **kwargs):
        """Delete the caller's key pair; always reports success."""
        try:
            db.key_pair_destroy(context, context.user.id, key_name)
        except exception.NotFound:
            # aws returns true even if the key doesn't exist
            pass
        return True
@rbac.allow('all')
def describe_security_groups(self, context, group_name=None, **kwargs):
if context.user.is_admin():
groups = db.security_group_get_all(context)
else:
groups = db.security_group_get_by_project(context,
context.project.id)
groups = [self._format_security_group(context, g) for g in groups]
if not group_name is None:
groups = [g for g in groups if g.name in group_name]
return {'securityGroupInfo': groups }
    def _format_security_group(self, context, group):
        """Convert a security-group model into its EC2 response dict."""
        g = {}
        g['groupDescription'] = group.description
        g['groupName'] = group.name
        g['ownerId'] = context.user.id
        g['ipPermissions'] = []
        for rule in group.rules:
            r = {}
            r['ipProtocol'] = rule.protocol
            r['fromPort'] = rule.from_port
            r['toPort'] = rule.to_port
            r['groups'] = []
            r['ipRanges'] = []
            if rule.group_id:
                # Rule grants access to another security group, not a CIDR.
                source_group = db.security_group_get(context, rule.group_id)
                r['groups'] += [{'groupName': source_group.name,
                                 'userId': source_group.user_id}]
            else:
                r['ipRanges'] += [{'cidrIp': rule.cidr}]
            g['ipPermissions'] += [r]
        return g
    def _authorize_revoke_rule_args_to_dict(self, context,
                                            to_port=None, from_port=None,
                                            ip_protocol=None, cidr_ip=None,
                                            user_id=None,
                                            source_security_group_name=None,
                                            source_security_group_owner_id=None):
        """Translate EC2 authorize/revoke arguments into rule-dict values.

        Returns:
            dict of rule columns on success;
            {'return': False} when neither a source group nor a CIDR is
            given;
            None when CIDR filtering is requested without protocol/ports.
        """
        values = {}

        if source_security_group_name:
            source_project_id = self._get_source_project_id(context,
                source_security_group_owner_id)

            source_security_group = \
                    db.security_group_get_by_name(context,
                                                  source_project_id,
                                                  source_security_group_name)
            values['group_id'] = source_security_group.id
        elif cidr_ip:
            # If this fails, it throws an exception. This is what we want.
            IPy.IP(cidr_ip)
            values['cidr'] = cidr_ip
        else:
            return { 'return': False }

        # NOTE(review): from_port=0 is falsy and skips this branch —
        # confirm whether port 0 needs to be supported.
        if ip_protocol and from_port and to_port:
            from_port = int(from_port)
            to_port = int(to_port)
            ip_protocol = str(ip_protocol)

            if ip_protocol.upper() not in ['TCP','UDP','ICMP']:
                raise InvalidInputException('%s is not a valid ipProtocol' %
                                            (ip_protocol,))
            if ((min(from_port, to_port) < -1) or
                (max(from_port, to_port) > 65535)):
                raise InvalidInputException('Invalid port range')

            values['protocol'] = ip_protocol
            values['from_port'] = from_port
            values['to_port'] = to_port
        else:
            # If cidr based filtering, protocol and ports are mandatory
            if 'cidr' in values:
                return None

        return values
@rbac.allow('netadmin')
def revoke_security_group_ingress(self, context, group_name, **kwargs):
security_group = db.security_group_get_by_name(context,
context.project.id,
group_name)
criteria = self._authorize_revoke_rule_args_to_dict(context, **kwargs)
for rule in security_group.rules:
for (k,v) in criteria.iteritems():
if getattr(rule, k, False) != v:
break
# If we make it here, we have a match
db.security_group_rule_destroy(context, rule.id)
self._trigger_refresh_security_group(security_group)
return True
    # TODO(soren): Dupe detection. Adding the same rule twice actually
    #              adds the same rule twice to the rule set, which is
    #              pointless.
    # TODO(soren): This has only been tested with Boto as the client.
    #              Unfortunately, it seems Boto is using an old API
    #              for these operations, so support for newer API versions
    #              is sketchy.
    @rbac.allow('netadmin')
    def authorize_security_group_ingress(self, context, group_name, **kwargs):
        """Add an ingress rule to the named group and refresh it on the
        hosts running its instances."""
        security_group = db.security_group_get_by_name(context,
                                                       context.project.id,
                                                       group_name)
        values = self._authorize_revoke_rule_args_to_dict(context, **kwargs)
        values['parent_group_id'] = security_group.id

        security_group_rule = db.security_group_rule_create(context, values)

        self._trigger_refresh_security_group(security_group)

        return True
def _get_source_project_id(self, context, source_security_group_owner_id):
if source_security_group_owner_id:
# Parse user:project for source group.
source_parts = source_security_group_owner_id.split(':')
# If no project name specified, assume it's same as user name.
# Since we're looking up by project name, the user name is not
# used here. It's only read for EC2 API compatibility.
if len(source_parts) == 2:
source_project_id = source_parts[1]
else:
source_project_id = source_parts[0]
else:
source_project_id = context.project.id
return source_project_id
    @rbac.allow('netadmin')
    def create_security_group(self, context, group_name, group_description):
        """Create a security group in the caller's project.

        Raises ApiError when the name is already taken.
        NOTE(review): the existence check and the create are not atomic —
        confirm whether a db-level uniqueness constraint backs this up."""
        if db.securitygroup_exists(context, context.project.id, group_name):
            raise exception.ApiError('group %s already exists' % group_name)

        group = {'user_id' : context.user.id,
                 'project_id': context.project.id,
                 'name': group_name,
                 'description': group_description}
        group_ref = db.security_group_create(context, group)

        return {'securityGroupSet': [self._format_security_group(context,
                                                                 group_ref)]}
    @rbac.allow('netadmin')
    def delete_security_group(self, context, group_name, **kwargs):
        """Delete the named security group from the caller's project."""
        security_group = db.security_group_get_by_name(context,
                                                       context.project.id,
                                                       group_name)
        db.security_group_destroy(context, security_group.id)
        return True
    @rbac.allow('projectmanager', 'sysadmin')
    def get_console_output(self, context, instance_id, **kwargs):
        """Fetch console output for the first instance in *instance_id*.

        instance_id is passed in as a list of instance id strings; only
        the first entry is used.  Blocks on an rpc.call to the compute
        host that owns the instance.
        """
        # instance_id is passed in as a list of instances
        instance_ref = db.instance_get_by_str(context, instance_id[0])
        # NOTE(review): topic is built by hand here while the other methods
        # use db.queue_get_for -- confirm the two produce the same topic.
        return rpc.call('%s.%s' % (FLAGS.compute_topic,
                                   instance_ref['host']),
                        {"method": "get_console_output",
                         "args": {"context": None,
                                  "instance_id": instance_ref['id']}})
@rbac.allow('projectmanager', 'sysadmin')
def describe_volumes(self, context, **kwargs):
if context.user.is_admin():
volumes = db.volume_get_all(context)
else:
volumes = db.volume_get_by_project(context, context.project.id)
volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
v = {}
v['volumeId'] = volume['str_id']
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
v['createTime'] = volume['created_at']
if context.user.is_admin():
v['status'] = '%s (%s, %s, %s, %s)' % (
volume['status'],
volume['user_id'],
volume['host'],
volume['instance_id'],
volume['mountpoint'])
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': False,
'device': volume['mountpoint'],
'instanceId': volume['instance_id'],
'status': 'attached',
'volume_id': volume['str_id']}]
else:
v['attachmentSet'] = [{}]
return v
@rbac.allow('projectmanager', 'sysadmin')
def create_volume(self, context, size, **kwargs):
# check quota
size = int(size)
if quota.allowed_volumes(context, 1, size) < 1:
logging.warn("Quota exceeeded for %s, tried to create %sG volume",
context.project.id, size)
raise QuotaError("Volume quota exceeded. You cannot "
"create a volume of size %s" %
size)
vol = {}
vol['size'] = size
vol['user_id'] = context.user.id
vol['project_id'] = context.project.id
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
volume_ref = db.volume_create(context, vol)
rpc.cast(FLAGS.scheduler_topic,
{"method": "create_volume",
"args": {"context": None,
"topic": FLAGS.volume_topic,
"volume_id": volume_ref['id']}})
return {'volumeSet': [self._format_volume(context, volume_ref)]}
    @rbac.allow('projectmanager', 'sysadmin')
    def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
        """Attach an available volume to an instance at *device*.

        Casts to the compute host owning the instance and returns an
        EC2-style attachment description immediately; the status in the
        response is still the pre-attach value at that point.
        """
        volume_ref = db.volume_get_by_str(context, volume_id)
        # TODO(vish): abstract status checking?
        if volume_ref['status'] != "available":
            raise exception.ApiError("Volume status must be available")
        if volume_ref['attach_status'] == "attached":
            raise exception.ApiError("Volume is already attached")
        instance_ref = db.instance_get_by_str(context, instance_id)
        host = instance_ref['host']
        rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                            {"method": "attach_volume",
                             "args": {"context": None,
                                      "volume_id": volume_ref['id'],
                                      "instance_id": instance_ref['id'],
                                      "mountpoint": device}})
        return defer.succeed({'attachTime': volume_ref['attach_time'],
                              'device': volume_ref['mountpoint'],
                              'instanceId': instance_ref['id'],
                              'requestId': context.request_id,
                              'status': volume_ref['attach_status'],
                              'volumeId': volume_ref['id']})
    @rbac.allow('projectmanager', 'sysadmin')
    def detach_volume(self, context, volume_id, **kwargs):
        """Detach a volume from whatever instance it is attached to.

        Raises ApiError when the volume is not attached.  Returns the
        EC2-style attachment description for the (now detaching) volume.
        """
        volume_ref = db.volume_get_by_str(context, volume_id)
        instance_ref = db.volume_get_instance(context, volume_ref['id'])
        if not instance_ref:
            raise exception.ApiError("Volume isn't attached to anything!")
        # TODO(vish): abstract status checking?
        if volume_ref['status'] == "available":
            raise exception.ApiError("Volume is already detached")
        try:
            host = instance_ref['host']
            rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                                {"method": "detach_volume",
                                 "args": {"context": None,
                                          "instance_id": instance_ref['id'],
                                          "volume_id": volume_ref['id']}})
        except exception.NotFound:
            # If the instance doesn't exist anymore,
            # then we need to call detach blind
            # NOTE(review): no volume id is passed here, unlike every other
            # db.volume_* call -- confirm db.volume_detached's signature.
            db.volume_detached(context)
        # NOTE(review): attach_volume returns instance_ref['id'] as
        # instanceId while this returns 'str_id' -- confirm intended.
        return defer.succeed({'attachTime': volume_ref['attach_time'],
                              'device': volume_ref['mountpoint'],
                              'instanceId': instance_ref['str_id'],
                              'requestId': context.request_id,
                              'status': volume_ref['attach_status'],
                              'volumeId': volume_ref['id']})
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
return None
if not isinstance(lst, list):
lst = [lst]
return [{label: x} for x in lst]
    @rbac.allow('all')
    def describe_instances(self, context, **kwargs):
        """Return the EC2 reservationSet for instances visible to caller."""
        return defer.succeed(self._format_describe_instances(context))
def _format_describe_instances(self, context):
return { 'reservationSet': self._format_instances(context) }
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id)
assert len(i) == 1
return i[0]
    def _format_instances(self, context, reservation_id=None):
        """Build EC2-style reservation dicts for visible instances.

        When *reservation_id* is given, only that reservation's instances
        are formatted; otherwise all instances visible to the caller
        (everything for admins, only the project's instances otherwise).
        Returns a list of reservation dicts, each with its instancesSet.
        """
        reservations = {}
        if reservation_id:
            instances = db.instance_get_by_reservation(context,
                                                       reservation_id)
        else:
            if context.user.is_admin():
                instances = db.instance_get_all(context)
            else:
                instances = db.instance_get_by_project(context,
                                                       context.project.id)
        for instance in instances:
            # Hide the VPN (cloudpipe) instance from non-admin users.
            if not context.user.is_admin():
                if instance['image_id'] == FLAGS.vpn_image_id:
                    continue
            i = {}
            i['instanceId'] = instance['str_id']
            i['imageId'] = instance['image_id']
            i['instanceState'] = {
                'code': instance['state'],
                'name': instance['state_description']
            }
            fixed_addr = None
            floating_addr = None
            if instance['fixed_ip']:
                fixed_addr = instance['fixed_ip']['str_id']
                # Only the first floating ip is reported as public DNS.
                if instance['fixed_ip']['floating_ips']:
                    fixed = instance['fixed_ip']
                    floating_addr = fixed['floating_ips'][0]['str_id']
            i['privateDnsName'] = fixed_addr
            i['publicDnsName'] = floating_addr
            i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
            i['keyName'] = instance['key_name']
            # Admins see the owning project and host folded into keyName.
            if context.user.is_admin():
                i['keyName'] = '%s (%s, %s)' % (i['keyName'],
                    instance['project_id'],
                    instance['host'])
            i['productCodesSet'] = self._convert_to_set([], 'product_codes')
            i['instanceType'] = instance['instance_type']
            i['launchTime'] = instance['created_at']
            i['amiLaunchIndex'] = instance['launch_index']
            # Group instances under their reservation, creating the
            # reservation entry on first sight.
            if not reservations.has_key(instance['reservation_id']):
                r = {}
                r['reservationId'] = instance['reservation_id']
                r['ownerId'] = instance['project_id']
                r['groupSet'] = self._convert_to_set([], 'groups')
                r['instancesSet'] = []
                reservations[instance['reservation_id']] = r
            reservations[instance['reservation_id']]['instancesSet'].append(i)
        return list(reservations.values())
    @rbac.allow('all')
    def describe_addresses(self, context, **kwargs):
        """Return the EC2 addressesSet for the caller's floating ips."""
        return self.format_addresses(context)
    def format_addresses(self, context):
        """Build the EC2 addressesSet for floating ips visible to caller.

        Admins see every floating ip (with owning project appended to the
        instance id); other users see only their project's addresses.
        """
        addresses = []
        if context.user.is_admin():
            iterator = db.floating_ip_get_all(context)
        else:
            iterator = db.floating_ip_get_by_project(context,
                                                     context.project.id)
        for floating_ip_ref in iterator:
            address = floating_ip_ref['str_id']
            instance_id = None
            # A floating ip maps to an instance via its fixed ip, if any.
            if (floating_ip_ref['fixed_ip']
                and floating_ip_ref['fixed_ip']['instance']):
                instance_id = floating_ip_ref['fixed_ip']['instance']['str_id']
            address_rv = {'public_ip': address,
                          'instance_id': instance_id}
            if context.user.is_admin():
                details = "%s (%s)" % (address_rv['instance_id'],
                                       floating_ip_ref['project_id'])
                address_rv['instance_id'] = details
            addresses.append(address_rv)
        return {'addressesSet': addresses}
@rbac.allow('netadmin')
@defer.inlineCallbacks
def allocate_address(self, context, **kwargs):
# check quota
if quota.allowed_floating_ips(context, 1) < 1:
logging.warn("Quota exceeeded for %s, tried to allocate address",
context.project.id)
raise QuotaError("Address quota exceeded. You cannot "
"allocate any more addresses")
network_topic = yield self._get_network_topic(context)
public_ip = yield rpc.call(network_topic,
{"method": "allocate_floating_ip",
"args": {"context": None,
"project_id": context.project.id}})
defer.returnValue({'addressSet': [{'publicIp': public_ip}]})
    @rbac.allow('netadmin')
    @defer.inlineCallbacks
    def release_address(self, context, public_ip, **kwargs):
        """Deallocate a floating ip back to the pool.

        The deallocation is a fire-and-forget cast to the network service.
        """
        # NOTE(vish): Should we make sure this works?
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = yield self._get_network_topic(context)
        rpc.cast(network_topic,
                 {"method": "deallocate_floating_ip",
                  "args": {"context": None,
                           "floating_address": floating_ip_ref['str_id']}})
        defer.returnValue({'releaseResponse': ["Address released."]})
    @rbac.allow('netadmin')
    @defer.inlineCallbacks
    def associate_address(self, context, instance_id, public_ip, **kwargs):
        """Associate a floating ip with an instance's fixed ip.

        Fire-and-forget cast to the network service; the response does not
        confirm the association took effect.
        """
        instance_ref = db.instance_get_by_str(context, instance_id)
        fixed_ip_ref = db.fixed_ip_get_by_instance(context, instance_ref['id'])
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = yield self._get_network_topic(context)
        rpc.cast(network_topic,
                 {"method": "associate_floating_ip",
                  "args": {"context": None,
                           "floating_address": floating_ip_ref['str_id'],
                           "fixed_address": fixed_ip_ref['str_id']}})
        defer.returnValue({'associateResponse': ["Address associated."]})
    @rbac.allow('netadmin')
    @defer.inlineCallbacks
    def disassociate_address(self, context, public_ip, **kwargs):
        """Detach a floating ip from whatever fixed ip it points at.

        Fire-and-forget cast to the network service.
        """
        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
        network_topic = yield self._get_network_topic(context)
        rpc.cast(network_topic,
                 {"method": "disassociate_floating_ip",
                  "args": {"context": None,
                           "floating_address": floating_ip_ref['str_id']}})
        defer.returnValue({'disassociateResponse': ["Address disassociated."]})
    @defer.inlineCallbacks
    def _get_network_topic(self, context):
        """Retrieves the network host for a project.

        Asks the network service to assign a host when the project's
        network has none yet, then returns the queue topic for that host.
        """
        network_ref = db.project_get_network(context, context.project.id)
        host = network_ref['host']
        if not host:
            host = yield rpc.call(FLAGS.network_topic,
                                  {"method": "set_network_host",
                                   "args": {"context": None,
                                            "project_id": context.project.id}})
        defer.returnValue(db.queue_get_for(context, FLAGS.network_topic, host))
@rbac.allow('projectmanager', 'sysadmin')
@defer.inlineCallbacks
def run_instances(self, context, **kwargs):
instance_type = kwargs.get('instance_type', 'm1.small')
if instance_type not in INSTANCE_TYPES:
raise exception.ApiError("Unknown instance type: %s",
instance_type)
# check quota
max_instances = int(kwargs.get('max_count', 1))
min_instances = int(kwargs.get('min_count', max_instances))
num_instances = quota.allowed_instances(context,
max_instances,
instance_type)
if num_instances < min_instances:
logging.warn("Quota exceeeded for %s, tried to run %s instances",
context.project.id, min_instances)
raise QuotaError("Instance quota exceeded. You can only "
"run %s more instances of this type." %
num_instances, "InstanceLimitExceeded")
# make sure user can access the image
# vpn image is private so it doesn't show up on lists
vpn = kwargs['image_id'] == FLAGS.vpn_image_id
if not vpn:
image = images.get(context, kwargs['image_id'])
# FIXME(ja): if image is vpn, this breaks
# get defaults from imagestore
image_id = image['imageId']
kernel_id = image.get('kernelId', FLAGS.default_kernel)
ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
# API parameters overrides of defaults
kernel_id = kwargs.get('kernel_id', kernel_id)
ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
# make sure we have access to kernel and ramdisk
images.get(context, kernel_id)
images.get(context, ramdisk_id)
logging.debug("Going to run %s instances...", num_instances)
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
key_data = None
if kwargs.has_key('key_name'):
key_pair_ref = db.key_pair_get(context,
context.user.id,
kwargs['key_name'])
key_data = key_pair_ref['public_key']
security_group_arg = kwargs.get('security_group', ["default"])
if not type(security_group_arg) is list:
security_group_arg = [security_group_arg]
security_groups = []
for security_group_name in security_group_arg:
group = db.security_group_get_by_project(context,
context.project.id,
security_group_name)
security_groups.append(group['id'])
reservation_id = utils.generate_uid('r')
base_options = {}
base_options['state_description'] = 'scheduling'
base_options['image_id'] = image_id
base_options['kernel_id'] = kernel_id
base_options['ramdisk_id'] = ramdisk_id
base_options['reservation_id'] = reservation_id
base_options['key_data'] = key_data
base_options['key_name'] = kwargs.get('key_name', None)
base_options['user_id'] = context.user.id
base_options['project_id'] = context.project.id
base_options['user_data'] = kwargs.get('user_data', '')
type_data = INSTANCE_TYPES[instance_type]
base_options['memory_mb'] = type_data['memory_mb']
base_options['vcpus'] = type_data['vcpus']
base_options['local_gb'] = type_data['local_gb']
for num in range(num_instances):
instance_ref = db.instance_create(context, base_options)
inst_id = instance_ref['id']
for security_group_id in security_groups:
db.instance_add_security_group(context, inst_id,
security_group_id)
inst = {}
inst['mac_address'] = utils.generate_mac()
inst['launch_index'] = num
inst['hostname'] = instance_ref['str_id']
db.instance_update(context, inst_id, inst)
address = self.network_manager.allocate_fixed_ip(context,
inst_id,
vpn)
# TODO(vish): This probably should be done in the scheduler
# network is setup when host is assigned
network_topic = yield self._get_network_topic(context)
rpc.call(network_topic,
{"method": "setup_fixed_ip",
"args": {"context": None,
"address": address}})
rpc.cast(FLAGS.scheduler_topic,
{"method": "run_instance",
"args": {"context": None,
"topic": FLAGS.compute_topic,
"instance_id": inst_id}})
logging.debug("Casting to scheduler for %s/%s's instance %s" %
(context.project.name, context.user.name, inst_id))
defer.returnValue(self._format_run_instances(context,
reservation_id))
    @rbac.allow('projectmanager', 'sysadmin')
    @defer.inlineCallbacks
    def terminate_instances(self, context, instance_id, **kwargs):
        """Terminate each instance in *instance_id* (a list of id strings).

        Missing instances are skipped with a warning.  Floating and fixed
        addresses are released before casting terminate to the compute
        host, or destroying the record directly when no host is assigned.
        """
        logging.debug("Going to start terminating instances")
        for id_str in instance_id:
            logging.debug("Going to try and terminate %s" % id_str)
            try:
                instance_ref = db.instance_get_by_str(context, id_str)
            except exception.NotFound:
                logging.warning("Instance %s was not found during terminate"
                                % id_str)
                continue
            now = datetime.datetime.utcnow()
            db.instance_update(context,
                               instance_ref['id'],
                               {'terminated_at': now})
            # FIXME(ja): where should network deallocate occur?
            address = db.instance_get_floating_address(context,
                                                       instance_ref['id'])
            if address:
                logging.debug("Disassociating address %s" % address)
                # NOTE(vish): Right now we don't really care if the ip is
                #             disassociated. We may need to worry about
                #             checking this later. Perhaps in the scheduler?
                network_topic = yield self._get_network_topic(context)
                rpc.cast(network_topic,
                         {"method": "disassociate_floating_ip",
                          "args": {"context": None,
                                   "address": address}})
            address = db.instance_get_fixed_address(context,
                                                    instance_ref['id'])
            if address:
                logging.debug("Deallocating address %s" % address)
                # NOTE(vish): Currently, nothing needs to be done on the
                #             network node until release. If this changes,
                #             we will need to cast here.
                self.network_manager.deallocate_fixed_ip(context, address)
            host = instance_ref['host']
            if host:
                rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                         {"method": "terminate_instance",
                          "args": {"context": None,
                                   "instance_id": instance_ref['id']}})
            else:
                db.instance_destroy(context, instance_ref['id'])
        defer.returnValue(True)
@rbac.allow('projectmanager', 'sysadmin')
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
for id_str in instance_id:
instance_ref = db.instance_get_by_str(context, id_str)
host = instance_ref['host']
rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
"args": {"context": None,
"instance_id": instance_ref['id']}})
return defer.succeed(True)
    @rbac.allow('projectmanager', 'sysadmin')
    def delete_volume(self, context, volume_id, **kwargs):
        """Delete an available volume.

        Marks the volume terminated and casts delete to the volume host;
        raises ApiError unless the volume's status is 'available'.
        """
        # TODO: return error if not authorized
        volume_ref = db.volume_get_by_str(context, volume_id)
        if volume_ref['status'] != "available":
            raise exception.ApiError("Volume status must be available")
        now = datetime.datetime.utcnow()
        db.volume_update(context, volume_ref['id'], {'terminated_at': now})
        host = volume_ref['host']
        rpc.cast(db.queue_get_for(context, FLAGS.volume_topic, host),
                            {"method": "delete_volume",
                             "args": {"context": None,
                                      "volume_id": volume_ref['id']}})
        return defer.succeed(True)
    @rbac.allow('all')
    def describe_images(self, context, image_id=None, **kwargs):
        """List images visible to the caller, optionally filtered by id."""
        # The objectstore does its own authorization for describe
        imageSet = images.list(context, image_id)
        return defer.succeed({'imagesSet': imageSet})
    @rbac.allow('projectmanager', 'sysadmin')
    def deregister_image(self, context, image_id, **kwargs):
        """Unregister an image from the objectstore and echo its id."""
        # FIXME: should the objectstore be doing these authorization checks?
        images.deregister(context, image_id)
        return defer.succeed({'imageId': image_id})
@rbac.allow('projectmanager', 'sysadmin')
def register_image(self, context, image_location=None, **kwargs):
# FIXME: should the objectstore be doing these authorization checks?
if image_location is None and kwargs.has_key('name'):
image_location = kwargs['name']
image_id = images.register(context, image_location)
logging.debug("Registered %s as %s" % (image_location, image_id))
return defer.succeed({'imageId': image_id})
@rbac.allow('all')
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
try:
image = images.list(context, image_id)[0]
except IndexError:
raise exception.ApiError('invalid id: %s' % image_id)
result = {'image_id': image_id, 'launchPermission': []}
if image['isPublic']:
result['launchPermission'].append({'group': 'all'})
return defer.succeed(result)
@rbac.allow('projectmanager', 'sysadmin')
def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.ApiError('attribute not supported: %s' % attribute)
if not 'user_group' in kwargs:
raise exception.ApiError('user or group not specified')
if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
raise exception.ApiError('only group "all" is supported')
if not operation_type in ['add', 'remove']:
raise exception.ApiError('operation_type must be add or remove')
result = images.modify(context, image_id, operation_type)
return defer.succeed(result)

View File

@@ -1,108 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Proxy AMI-related calls from the cloud controller, to the running
objectstore service.
"""
import json
import urllib
import boto.s3.connection
from nova import exception
from nova import flags
from nova import utils
from nova.auth import manager
FLAGS = flags.FLAGS
def modify(context, image_id, operation):
    """POST an image-permission *operation* to the objectstore.

    Always returns True; errors surface as exceptions from the request.
    """
    conn(context).make_request(
        method='POST',
        bucket='_images',
        query_args=qs({'image_id': image_id, 'operation': operation}))
    return True
def register(context, image_location):
    """Register a new image from a manifest via the objectstore.

    Generates an 'ami'-prefixed uid, PUTs the registration request, and
    returns the new image id.
    """
    image_id = utils.generate_uid('ami')
    conn(context).make_request(
        method='PUT',
        bucket='_images',
        query_args=qs({'image_location': image_location,
                       'image_id': image_id}))
    return image_id
def list(context, filter_list=None):
    """Return all images the user can see.

    Optionally filtered to those whose imageId appears in *filter_list*.
    (The previous mutable default of [] made a bare list(context) call
    always return an empty result.)
    """
    # FIXME: send along the list of only_images to check for
    response = conn(context).make_request(
        method='GET',
        bucket='_images')
    result = json.loads(response.read())
    if filter_list is not None:
        return [i for i in result if i['imageId'] in filter_list]
    return result
def get(context, image_id):
    """Return a single image dict visible to the context.

    Raises NotFound when the image does not exist or is not visible.
    """
    result = list(context, [image_id])
    if not result:
        raise exception.NotFound('Image %s could not be found' % image_id)
    image = result[0]
    return image
def deregister(context, image_id):
    """Unregister an image with the objectstore (DELETE request)."""
    conn(context).make_request(
        method='DELETE',
        bucket='_images',
        query_args=qs({'image_id': image_id}))
def conn(context):
    """Build an S3 connection to the objectstore for the context's user.

    Uses the user's access/secret keys over plain HTTP against the host
    and port named by the s3_host/s3_port flags.
    """
    access = manager.AuthManager().get_access_key(context.user,
                                                  context.project)
    secret = str(context.user.secret)
    calling = boto.s3.connection.OrdinaryCallingFormat()
    return boto.s3.connection.S3Connection(aws_access_key_id=access,
                                           aws_secret_access_key=secret,
                                           is_secure=False,
                                           calling_format=calling,
                                           port=FLAGS.s3_port,
                                           host=FLAGS.s3_host)
def qs(params):
    """Encode *params* as a query string with url-quoted values."""
    pairs = [key + '=' + urllib.quote(params[key]) for key in params]
    return '&'.join(pairs)

View File

@@ -23,60 +23,12 @@ from boto.ec2 import regioninfo
import httplib
import random
import StringIO
from tornado import httpserver
from twisted.internet import defer
import webob
from nova import flags
from nova import test
from nova import api
from nova.api.ec2 import cloud
from nova.auth import manager
from nova.endpoint import api
from nova.endpoint import cloud
FLAGS = flags.FLAGS
# NOTE(termie): These are a bunch of helper methods and classes to short
# circuit boto calls and feed them into our tornado handlers,
# it's pretty damn circuitous so apologies if you have to fix
# a bug in it
# NOTE(jaypipes) The pylint disables here are for R0913 (too many args) which
# isn't controllable since boto's HTTPRequest needs that many
# args, and for the version-differentiated import of tornado's
# httputil.
# NOTE(jaypipes): The disable-msg=E1101 and E1103 below is because pylint is
# unable to introspect the deferred's return value properly
def boto_to_tornado(method, path, headers, data, # pylint: disable-msg=R0913
host, connection=None):
""" translate boto requests into tornado requests
connection should be a FakeTornadoHttpConnection instance
"""
try:
headers = httpserver.HTTPHeaders()
except AttributeError:
from tornado import httputil # pylint: disable-msg=E0611
headers = httputil.HTTPHeaders()
for k, v in headers.iteritems():
headers[k] = v
req = httpserver.HTTPRequest(method=method,
uri=path,
headers=headers,
body=data,
host=host,
remote_ip='127.0.0.1',
connection=connection)
return req
def raw_to_httpresponse(response_string):
"""translate a raw tornado http response into an httplib.HTTPResponse"""
sock = FakeHttplibSocket(response_string)
resp = httplib.HTTPResponse(sock)
resp.begin()
return resp
class FakeHttplibSocket(object):
@@ -89,85 +41,35 @@ class FakeHttplibSocket(object):
return self._buffer
class FakeTornadoStream(object):
"""a fake stream to satisfy tornado's assumptions, trivial"""
def set_close_callback(self, _func):
"""Dummy callback for stream"""
pass
class FakeTornadoConnection(object):
"""A fake connection object for tornado to pass to its handlers
web requests are expected to write to this as they get data and call
finish when they are done with the request, we buffer the writes and
kick off a callback when it is done so that we can feed the result back
into boto.
"""
def __init__(self, deferred):
self._deferred = deferred
self._buffer = StringIO.StringIO()
def write(self, chunk):
"""Writes a chunk of data to the internal buffer"""
self._buffer.write(chunk)
def finish(self):
"""Finalizes the connection and returns the buffered data via the
deferred callback.
"""
data = self._buffer.getvalue()
self._deferred.callback(data)
xheaders = None
@property
def stream(self): # pylint: disable-msg=R0201
"""Required property for interfacing with tornado"""
return FakeTornadoStream()
class FakeHttplibConnection(object):
"""A fake httplib.HTTPConnection for boto to use
requests made via this connection actually get translated and routed into
our tornado app, we then wait for the response and turn it back into
our WSGI app, we then wait for the response and turn it back into
the httplib.HTTPResponse that boto expects.
"""
def __init__(self, app, host, is_secure=False):
self.app = app
self.host = host
self.deferred = defer.Deferred()
def request(self, method, path, data, headers):
"""Creates a connection to a fake tornado and sets
up a deferred request with the supplied data and
headers"""
conn = FakeTornadoConnection(self.deferred)
request = boto_to_tornado(connection=conn,
method=method,
path=path,
headers=headers,
data=data,
host=self.host)
self.app(request)
self.deferred.addCallback(raw_to_httpresponse)
req = webob.Request.blank(path)
req.method = method
req.body = data
req.headers = headers
req.headers['Accept'] = 'text/html'
req.host = self.host
# Call the WSGI app, get the HTTP response
resp = str(req.get_response(self.app))
# For some reason, the response doesn't have "HTTP/1.0 " prepended; I
# guess that's a function the web server usually provides.
resp = "HTTP/1.0 %s" % resp
sock = FakeHttplibSocket(resp)
self.http_response = httplib.HTTPResponse(sock)
self.http_response.begin()
def getresponse(self):
"""A bit of deferred magic for catching the response
from the previously deferred request"""
@defer.inlineCallbacks
def _waiter():
"""Callback that simply yields the deferred's
return value."""
result = yield self.deferred
defer.returnValue(result)
d = _waiter()
# NOTE(termie): defer.returnValue above should ensure that
# this deferred has already been called by the time
# we get here, we are going to cheat and return
# the result of the callback
return d.result # pylint: disable-msg=E1101
return self.http_response
def close(self):
"""Required for compatibility with boto/tornado"""
@@ -180,11 +82,10 @@ class ApiEc2TestCase(test.BaseTestCase):
super(ApiEc2TestCase, self).setUp()
self.manager = manager.AuthManager()
self.cloud = cloud.CloudController()
self.host = '127.0.0.1'
self.app = api.APIServerApplication({'Cloud': self.cloud})
self.app = api.API()
def expect_http(self, host=None, is_secure=False):
"""Returns a new EC2 connection"""
@@ -193,12 +94,12 @@ class ApiEc2TestCase(test.BaseTestCase):
aws_secret_access_key='fake',
is_secure=False,
region=regioninfo.RegionInfo(None, 'test', self.host),
port=FLAGS.cc_port,
port=8773,
path='/services/Cloud')
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
http = FakeHttplibConnection(
self.app, '%s:%d' % (self.host, FLAGS.cc_port), False)
self.app, '%s:8773' % (self.host), False)
# pylint: disable-msg=E1103
self.ec2.new_http_connection(host, is_secure).AndReturn(http)
return http
@@ -255,6 +156,10 @@ class ApiEc2TestCase(test.BaseTestCase):
user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
project = self.manager.create_project('fake', 'fake', 'fake')
# At the moment, you need both of these to actually be netadmin
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))
@@ -282,9 +187,13 @@ class ApiEc2TestCase(test.BaseTestCase):
"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
# At the moment, you need both of these to actually be netadmin
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))
@@ -346,6 +255,10 @@ class ApiEc2TestCase(test.BaseTestCase):
user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
project = self.manager.create_project('fake', 'fake', 'fake')
# At the moment, you need both of these to actually be netadmin
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))
other_security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \

View File

@@ -24,7 +24,7 @@ from nova import crypto
from nova import flags
from nova import test
from nova.auth import manager
from nova.endpoint import cloud
from nova.api.ec2 import cloud
FLAGS = flags.FLAGS

View File

@@ -35,8 +35,8 @@ from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import power_state
from nova.endpoint import api
from nova.endpoint import cloud
from nova.api.ec2 import context
from nova.api.ec2 import cloud
FLAGS = flags.FLAGS
@@ -63,8 +63,7 @@ class CloudTestCase(test.BaseTestCase):
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('proj', 'admin', 'proj')
self.context = api.APIRequestContext(handler=None,
user=self.user,
self.context = context.APIRequestContext(user=self.user,
project=self.project)
def tearDown(self):

View File

@@ -28,7 +28,7 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.endpoint import api
from nova.api.ec2 import context
FLAGS = flags.FLAGS
@@ -49,7 +49,7 @@ class NetworkTestCase(test.TrialTestCase):
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
self.network = utils.import_object(FLAGS.network_manager)
self.context = api.APIRequestContext(None, project=None, user=self.user)
self.context = context.APIRequestContext(project=None, user=self.user)
for i in range(5):
name = 'project%s' % i
self.projects.append(self.manager.create_project(name,

View File

@@ -25,8 +25,8 @@ from nova import quota
from nova import test
from nova import utils
from nova.auth import manager
from nova.endpoint import cloud
from nova.endpoint import api
from nova.api.ec2 import cloud
from nova.api.ec2 import context
FLAGS = flags.FLAGS
@@ -48,8 +48,7 @@ class QuotaTestCase(test.TrialTestCase):
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('admin', 'admin', 'admin')
self.network = utils.import_object(FLAGS.network_manager)
self.context = api.APIRequestContext(handler=None,
project=self.project,
self.context = context.APIRequestContext(project=self.project,
user=self.user)
def tearDown(self): # pylint: disable-msg=C0103
@@ -95,11 +94,11 @@ class QuotaTestCase(test.TrialTestCase):
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
self.assertFailure(self.cloud.run_instances(self.context,
self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small'),
cloud.QuotaError)
instance_type='m1.small')
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -107,11 +106,11 @@ class QuotaTestCase(test.TrialTestCase):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
self.assertFailure(self.cloud.run_instances(self.context,
self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small'),
cloud.QuotaError)
instance_type='m1.small')
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -120,8 +119,7 @@ class QuotaTestCase(test.TrialTestCase):
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
self.assertRaises(cloud.QuotaError,
self.cloud.create_volume,
self.assertRaises(cloud.QuotaError, self.cloud.create_volume,
self.context,
size=10)
for volume_id in volume_ids:
@@ -151,5 +149,4 @@ class QuotaTestCase(test.TrialTestCase):
# make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks
# that is breaking.
self.assertFailure(self.cloud.allocate_address(self.context),
cloud.QuotaError)
self.assertRaises(cloud.QuotaError, self.cloud.allocate_address, self.context)

View File

@@ -19,7 +19,6 @@ from xml.dom.minidom import parseString
from nova import db
from nova import flags
from nova import test
from nova.endpoint import cloud
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS

View File

@@ -49,7 +49,8 @@ from nova import datastore
from nova import flags
from nova import twistd
from nova.tests.access_unittest import *
#TODO(gundlach): rewrite and readd this after merge
#from nova.tests.access_unittest import *
from nova.tests.auth_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *