merge trunk.

Todd Willey 2011-01-17 13:05:26 -05:00
commit 58c6475012
48 changed files with 2272 additions and 595 deletions


@ -4,8 +4,8 @@ Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Chiradeep Vittal <chiradeep@cloud.com>
Chris Behrens <cbehrens@codestud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Chris Behrens <cbehrens@codestud.com>
Cory Wright <corywright@gmail.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
@ -14,6 +14,7 @@ Ed Leafe <ed@leafe.com>
Eldar Nugaev <enugaev@griddynamics.com>
Eric Day <eday@oddments.org>
Ewan Mellor <ewan.mellor@citrix.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com>
Jay Pipes <jaypipes@gmail.com>
@ -26,11 +27,14 @@ Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Ken Pepple <ken.pepple@gmail.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
MORITA Kazutaka <morita.kazutaka@gmail.com>
Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail.com> <nova@u4>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
Rick Harris <rconradharris@gmail.com>

bin/nova-direct-api Executable file

@ -0,0 +1,61 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova Direct API."""
import gettext
import os
import sys
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import flags
from nova import utils
from nova import wsgi
from nova.api import direct
from nova.compute import api as compute_api
FLAGS = flags.FLAGS
flags.DEFINE_integer('direct_port', 8001, 'Direct API port')
flags.DEFINE_string('direct_host', '0.0.0.0', 'Direct API host')
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
direct.register_service('compute', compute_api.ComputeAPI())
direct.register_service('reflect', direct.Reflection())
router = direct.Router()
with_json = direct.JsonParamsMiddleware(router)
with_req = direct.PostParamsMiddleware(with_json)
with_auth = direct.DelegatedAuthMiddleware(with_req)
server = wsgi.Server()
server.start(with_auth, FLAGS.direct_port, host=FLAGS.direct_host)
server.wait()


@ -91,6 +91,7 @@ flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
class VpnCommands(object):
@ -439,11 +440,12 @@ class NetworkCommands(object):
"""Class for managing networks."""
def create(self, fixed_range=None, num_networks=None,
network_size=None, vlan_start=None, vpn_start=None):
network_size=None, vlan_start=None, vpn_start=None,
fixed_range_v6=None):
"""Creates fixed ips for host by range
arguments: [fixed_range=FLAG], [num_networks=FLAG],
[network_size=FLAG], [vlan_start=FLAG],
[vpn_start=FLAG]"""
[vpn_start=FLAG], [fixed_range_v6=FLAG]"""
if not fixed_range:
fixed_range = FLAGS.fixed_range
if not num_networks:
@ -454,11 +456,13 @@ class NetworkCommands(object):
vlan_start = FLAGS.vlan_start
if not vpn_start:
vpn_start = FLAGS.vpn_start
if not fixed_range_v6:
fixed_range_v6 = FLAGS.fixed_range_v6
net_manager = utils.import_object(FLAGS.network_manager)
net_manager.create_networks(context.get_admin_context(),
fixed_range, int(num_networks),
int(network_size), int(vlan_start),
int(vpn_start))
int(vpn_start), fixed_range_v6)
class ServiceCommands(object):

bin/stack Executable file

@ -0,0 +1,145 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""CLI for the Direct API."""
import eventlet
eventlet.monkey_patch()
import os
import pprint
import sys
import textwrap
import urllib
import urllib2
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
import gflags
from nova import utils
FLAGS = gflags.FLAGS
gflags.DEFINE_string('host', '127.0.0.1', 'Direct API host')
gflags.DEFINE_integer('port', 8001, 'Direct API port')
gflags.DEFINE_string('user', 'user1', 'Direct API username')
gflags.DEFINE_string('project', 'proj1', 'Direct API project')
USAGE = """usage: stack [options] <controller> <method> [arg1=value arg2=value]
`stack help` should output the list of available controllers
`stack <controller>` should output the available methods for that controller
`stack help <controller>` should do the same
`stack help <controller> <method>` should output info for a method
"""
def format_help(d):
"""Format help text, keys are labels and values are descriptions."""
indent = max([len(k) for k in d])
out = []
for k, v in d.iteritems():
t = textwrap.TextWrapper(initial_indent=' %s ' % k.ljust(indent),
subsequent_indent=' ' * (indent + 6))
out.extend(t.wrap(v))
return out
def help_all():
rv = do_request('reflect', 'get_controllers')
out = format_help(rv)
return (USAGE + str(FLAGS.MainModuleHelp()) +
'\n\nAvailable controllers:\n' +
'\n'.join(out) + '\n')
def help_controller(controller):
rv = do_request('reflect', 'get_methods')
methods = dict([(k.split('/')[2], v) for k, v in rv.iteritems()
if k.startswith('/%s' % controller)])
return ('Available methods for %s:\n' % controller +
'\n'.join(format_help(methods)))
def help_method(controller, method):
rv = do_request('reflect',
'get_method_info',
{'method': '/%s/%s' % (controller, method)})
sig = '%s(%s):' % (method, ', '.join(['='.join(x) for x in rv['args']]))
out = textwrap.wrap(sig, subsequent_indent=' ' * len('%s(' % method))
out.append('\n' + rv['doc'])
return '\n'.join(out)
def do_request(controller, method, params=None):
if params:
data = urllib.urlencode(params)
else:
data = None
url = 'http://%s:%s/%s/%s' % (FLAGS.host, FLAGS.port, controller, method)
headers = {'X-OpenStack-User': FLAGS.user,
'X-OpenStack-Project': FLAGS.project}
req = urllib2.Request(url, data, headers)
resp = urllib2.urlopen(req)
return utils.loads(resp.read())
if __name__ == '__main__':
args = FLAGS(sys.argv)
cmd = args.pop(0)
if not args:
print help_all()
sys.exit()
first = args.pop(0)
if first == 'help':
action = help_all
params = []
if args:
params.append(args.pop(0))
action = help_controller
if args:
params.append(args.pop(0))
action = help_method
print action(*params)
sys.exit(0)
controller = first
if not args:
print help_controller(controller)
sys.exit()
method = args.pop(0)
params = {}
for x in args:
key, value = x.split('=', 1)
params[key] = value
pprint.pprint(do_request(controller, method, params))


@ -0,0 +1,37 @@
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Amazon's EC2
"""
from boto_v6.ec2.connection import EC2ConnectionV6
return EC2ConnectionV6(aws_access_key_id, aws_secret_access_key, **kwargs)


@ -0,0 +1,41 @@
'''
Created on 2010/12/20
@author: Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
'''
import boto
import boto.ec2
from boto_v6.ec2.instance import ReservationV6
class EC2ConnectionV6(boto.ec2.EC2Connection):
'''
EC2Connection for OpenStack IPV6 mode
'''
def get_all_instances(self, instance_ids=None, filters=None):
"""
Retrieve all the instances associated with your account.
:type instance_ids: list
:param instance_ids: A list of strings of instance IDs
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.instance.Reservation`
"""
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeInstancesV6', params,
[('item', ReservationV6)])


@ -0,0 +1,37 @@
'''
Created on 2010/12/20
@author: Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
'''
import boto
from boto.resultset import ResultSet
from boto.ec2.instance import Reservation
from boto.ec2.instance import Group
from boto.ec2.instance import Instance
class ReservationV6(Reservation):
def startElement(self, name, attrs, connection):
if name == 'instancesSet':
self.instances = ResultSet([('item', InstanceV6)])
return self.instances
elif name == 'groupSet':
self.groups = ResultSet([('item', Group)])
return self.groups
else:
return None
class InstanceV6(Instance):
def __init__(self, connection=None):
Instance.__init__(self, connection)
self.dns_name_v6 = None
def endElement(self, name, value, connection):
Instance.endElement(self, name, value, connection)
if name == 'dnsNameV6':
self.dns_name_v6 = value
def _update(self, updated):
self.__dict__.update(updated.__dict__)
self.dns_name_v6 = updated.dns_name_v6
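Taken together, the boto_v6 pieces are meant to be driven like the stock boto EC2 bindings. A minimal usage sketch, assuming connect_ec2 is exposed at the boto_v6 package level (the filename for that earlier hunk is not shown here) and that credentials and the EC2 endpoint are already configured for boto:

import boto_v6

# EC2ConnectionV6 issues DescribeInstancesV6 instead of DescribeInstances
conn = boto_v6.connect_ec2(aws_access_key_id='...',
                           aws_secret_access_key='...')
for reservation in conn.get_all_instances():
    for instance in reservation.instances:
        # InstanceV6 carries dns_name_v6 in addition to the usual fields
        print instance.id, instance.dns_name, instance.dns_name_v6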


@ -83,9 +83,17 @@ if [ "$CMD" == "install" ]; then
sudo /etc/init.d/iscsitarget restart
sudo modprobe kvm
sudo /etc/init.d/libvirt-bin restart
sudo modprobe nbd
sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot
sudo apt-get install -y python-daemon python-eventlet python-gflags python-tornado python-ipy
sudo apt-get install -y python-libvirt python-libxml2 python-routes
sudo apt-get install -y python-daemon python-eventlet python-gflags python-ipy
sudo apt-get install -y python-libvirt python-libxml2 python-routes python-cheetah
#For IPV6
sudo apt-get install -y python-netaddr
sudo apt-get install -y radvd
#(Nati) Note that this configuration is only needed for nova-network node.
sudo bash -c "echo 1 > /proc/sys/net/ipv6/conf/all/forwarding"
sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra"
if [ "$USE_MYSQL" == 1 ]; then
cat <<MYSQL_PRESEED | debconf-set-selections
mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
@ -107,6 +115,8 @@ function screen_it {
if [ "$CMD" == "run" ]; then
killall dnsmasq
#For IPv6
killall radvd
screen -d -m -S nova -t nova
sleep 1
if [ "$USE_MYSQL" == 1 ]; then

nova/api/direct.py Normal file

@ -0,0 +1,232 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Public HTTP interface that allows services to self-register.
The general flow of a request is:
- Request is parsed into WSGI bits.
- Some middleware checks authentication.
- Routing takes place based on the URL to find a controller.
(/controller/method)
- Parameters are parsed from the request and passed to a method on the
controller as keyword arguments.
- Optionally 'json' is decoded to provide all the parameters.
- Actual work is done and a result is returned.
- That result is turned into json and returned.
"""
import inspect
import urllib
import routes
import webob
from nova import context
from nova import flags
from nova import utils
from nova import wsgi
ROUTES = {}
def register_service(path, handle):
ROUTES[path] = handle
class Router(wsgi.Router):
def __init__(self, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self._load_registered_routes(mapper)
super(Router, self).__init__(mapper=mapper)
def _load_registered_routes(self, mapper):
for route in ROUTES:
mapper.connect('/%s/{action}' % route,
controller=ServiceWrapper(ROUTES[route]))
class DelegatedAuthMiddleware(wsgi.Middleware):
def process_request(self, request):
os_user = request.headers['X-OpenStack-User']
os_project = request.headers['X-OpenStack-Project']
context_ref = context.RequestContext(user=os_user, project=os_project)
request.environ['openstack.context'] = context_ref
class JsonParamsMiddleware(wsgi.Middleware):
def process_request(self, request):
if 'json' not in request.params:
return
params_json = request.params['json']
params_parsed = utils.loads(params_json)
params = {}
for k, v in params_parsed.iteritems():
if k in ('self', 'context'):
continue
if k.startswith('_'):
continue
params[k] = v
request.environ['openstack.params'] = params
class PostParamsMiddleware(wsgi.Middleware):
def process_request(self, request):
params_parsed = request.params
params = {}
for k, v in params_parsed.iteritems():
if k in ('self', 'context'):
continue
if k.startswith('_'):
continue
params[k] = v
request.environ['openstack.params'] = params
class Reflection(object):
"""Reflection methods to list available methods."""
def __init__(self):
self._methods = {}
self._controllers = {}
def _gather_methods(self):
methods = {}
controllers = {}
for route, handler in ROUTES.iteritems():
controllers[route] = handler.__doc__.split('\n')[0]
for k in dir(handler):
if k.startswith('_'):
continue
f = getattr(handler, k)
if not callable(f):
continue
# bunch of ugly formatting stuff
argspec = inspect.getargspec(f)
args = [x for x in argspec[0]
if x != 'self' and x != 'context']
defaults = argspec[3] and argspec[3] or []
args_r = list(reversed(args))
defaults_r = list(reversed(defaults))
args_out = []
while args_r:
if defaults_r:
args_out.append((args_r.pop(0),
repr(defaults_r.pop(0))))
else:
args_out.append((str(args_r.pop(0)),))
# if the method accepts keywords
if argspec[2]:
args_out.insert(0, ('**%s' % argspec[2],))
methods['/%s/%s' % (route, k)] = {
'short_doc': f.__doc__.split('\n')[0],
'doc': f.__doc__,
'name': k,
'args': list(reversed(args_out))}
self._methods = methods
self._controllers = controllers
def get_controllers(self, context):
"""List available controllers."""
if not self._controllers:
self._gather_methods()
return self._controllers
def get_methods(self, context):
"""List available methods."""
if not self._methods:
self._gather_methods()
method_list = self._methods.keys()
method_list.sort()
methods = {}
for k in method_list:
methods[k] = self._methods[k]['short_doc']
return methods
def get_method_info(self, context, method):
"""Get detailed information about a method."""
if not self._methods:
self._gather_methods()
return self._methods[method]
class ServiceWrapper(wsgi.Controller):
def __init__(self, service_handle):
self.service_handle = service_handle
@webob.dec.wsgify
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict['action']
del arg_dict['action']
context = req.environ['openstack.context']
# allow middleware up the stack to override the params
params = {}
if 'openstack.params' in req.environ:
params = req.environ['openstack.params']
# TODO(termie): do some basic normalization on methods
method = getattr(self.service_handle, action)
result = method(context, **params)
if type(result) is dict or type(result) is list:
return self._serialize(result, req)
else:
return result
class Proxy(object):
"""Pretend a Direct API endpoint is an object."""
def __init__(self, app, prefix=None):
self.app = app
self.prefix = prefix
def __do_request(self, path, context, **kwargs):
req = webob.Request.blank(path)
req.method = 'POST'
req.body = urllib.urlencode({'json': utils.dumps(kwargs)})
req.environ['openstack.context'] = context
resp = req.get_response(self.app)
try:
return utils.loads(resp.body)
except Exception:
return resp.body
def __getattr__(self, key):
if self.prefix is None:
return self.__class__(self.app, prefix=key)
def _wrapper(context, **kwargs):
return self.__do_request('/%s/%s' % (self.prefix, key),
context,
**kwargs)
_wrapper.func_name = key
return _wrapper
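The Proxy class above makes a Direct API endpoint callable like a local object: the first attribute access picks a controller, the second picks a method, and the call becomes an in-process POST with the keyword arguments JSON-encoded under the 'json' parameter. A rough sketch of that flow, assuming it is acceptable to build the request context directly rather than going through DelegatedAuthMiddleware:

from nova import context
from nova.api import direct

# A reduced version of the stack bin/nova-direct-api builds
# (no auth or post-params middleware).
direct.register_service('reflect', direct.Reflection())
app = direct.JsonParamsMiddleware(direct.Router())

ctxt = context.RequestContext(user='user1', project='proj1')
reflect = direct.Proxy(app).reflect

# Becomes POST /reflect/get_controllers against the WSGI app above.
print reflect.get_controllers(ctxt)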


@ -26,16 +26,17 @@ import base64
import datetime
import IPy
import os
import urllib
from nova import compute
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import network
from nova import rpc
from nova import utils
from nova import volume
from nova.compute import instance_types
@ -91,8 +92,11 @@ class CloudController(object):
self.image_service = utils.import_object(FLAGS.image_service)
self.network_api = network.API()
self.volume_api = volume.API()
self.compute_api = compute.API(self.image_service, self.network_api,
self.volume_api)
self.compute_api = compute.API(
network_api=self.network_api,
image_service=self.image_service,
volume_api=self.volume_api,
hostname_factory=id_to_ec2_id)
self.setup()
def __str__(self):
@ -128,15 +132,6 @@ class CloudController(object):
result[key] = [line]
return result
def _trigger_refresh_security_group(self, context, security_group):
nodes = set([instance['host'] for instance in security_group.instances
if instance['host'] is not None])
for node in nodes:
rpc.cast(context,
'%s.%s' % (FLAGS.compute_topic, node),
{"method": "refresh_security_group",
"args": {"security_group_id": security_group.id}})
def _get_availability_zone_by_host(self, context, host):
services = db.service_get_all_by_host(context, host)
if len(services) > 0:
@ -374,6 +369,7 @@ class CloudController(object):
values['group_id'] = source_security_group['id']
elif cidr_ip:
# If this fails, it throws an exception. This is what we want.
cidr_ip = urllib.unquote(cidr_ip).decode()
IPy.IP(cidr_ip)
values['cidr'] = cidr_ip
else:
@ -519,13 +515,8 @@ class CloudController(object):
# instance_id is passed in as a list of instances
ec2_id = instance_id[0]
instance_id = ec2_id_to_id(ec2_id)
instance_ref = self.compute_api.get(context, instance_id)
output = rpc.call(context,
'%s.%s' % (FLAGS.compute_topic,
instance_ref['host']),
{"method": "get_console_output",
"args": {"instance_id": instance_ref['id']}})
output = self.compute_api.get_console_output(
context, instance_id=instance_id)
now = datetime.datetime.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
@ -593,7 +584,7 @@ class CloudController(object):
def delete_volume(self, context, volume_id, **kwargs):
volume_id = ec2_id_to_id(volume_id)
self.volume_api.delete(context, volume_id)
self.volume_api.delete(context, volume_id=volume_id)
return True
def update_volume(self, context, volume_id, **kwargs):
@ -610,9 +601,12 @@ class CloudController(object):
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_id = ec2_id_to_id(volume_id)
instance_id = ec2_id_to_id(instance_id)
LOG.audit(_("Attach volume %s to instacne %s at %s"), volume_id,
LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id,
instance_id, device, context=context)
self.compute_api.attach_volume(context, instance_id, volume_id, device)
self.compute_api.attach_volume(context,
instance_id=instance_id,
volume_id=volume_id,
device=device)
volume = self.volume_api.get(context, volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
@ -625,7 +619,7 @@ class CloudController(object):
volume_id = ec2_id_to_id(volume_id)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
volume = self.volume_api.get(context, volume_id)
instance = self.compute_api.detach_volume(context, volume_id)
instance = self.compute_api.detach_volume(context, volume_id=volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': id_to_ec2_id(instance['id']),
@ -643,6 +637,10 @@ class CloudController(object):
def describe_instances(self, context, **kwargs):
return self._format_describe_instances(context, **kwargs)
def describe_instances_v6(self, context, **kwargs):
kwargs['use_v6'] = True
return self._format_describe_instances(context, **kwargs)
def _format_describe_instances(self, context, **kwargs):
return {'reservationSet': self._format_instances(context, **kwargs)}
@ -652,6 +650,10 @@ class CloudController(object):
return i[0]
def _format_instances(self, context, instance_id=None, **kwargs):
# TODO(termie): this method is poorly named as its name does not imply
# that it will be making a variety of database calls
# rather than simply formatting a bunch of instances that
# were handed to it
reservations = {}
# NOTE(vish): instance_id is an optional list of ids to filter by
if instance_id:
@ -678,10 +680,16 @@ class CloudController(object):
if instance['fixed_ip']['floating_ips']:
fixed = instance['fixed_ip']
floating_addr = fixed['floating_ips'][0]['address']
if instance['fixed_ip']['network'] and 'use_v6' in kwargs:
i['dnsNameV6'] = utils.to_global_ipv6(
instance['fixed_ip']['network']['cidr_v6'],
instance['mac_address'])
i['privateDnsName'] = fixed_addr
i['publicDnsName'] = floating_addr
i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
i['keyName'] = instance['key_name']
if context.user.is_admin():
i['keyName'] = '%s (%s, %s)' % (i['keyName'],
instance['project_id'],
@ -746,7 +754,9 @@ class CloudController(object):
LOG.audit(_("Associate address %s to instance %s"), public_ip,
instance_id, context=context)
instance_id = ec2_id_to_id(instance_id)
self.compute_api.associate_floating_ip(context, instance_id, public_ip)
self.compute_api.associate_floating_ip(context,
instance_id=instance_id,
address=public_ip)
return {'associateResponse': ["Address associated."]}
def disassociate_address(self, context, public_ip, **kwargs):
@ -757,8 +767,9 @@ class CloudController(object):
def run_instances(self, context, **kwargs):
max_count = int(kwargs.get('max_count', 1))
instances = self.compute_api.create(context,
instance_types.get_by_type(kwargs.get('instance_type', None)),
kwargs['image_id'],
instance_type=instance_types.get_by_type(
kwargs.get('instance_type', None)),
image_id=kwargs['image_id'],
min_count=int(kwargs.get('min_count', max_count)),
max_count=max_count,
kernel_id=kwargs.get('kernel_id', None),
@ -769,8 +780,7 @@ class CloudController(object):
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'AvailabilityZone'),
generate_hostname=id_to_ec2_id)
'AvailabilityZone'))
return self._format_run_instances(context,
instances[0]['reservation_id'])
@ -780,7 +790,7 @@ class CloudController(object):
LOG.debug(_("Going to start terminating instances"))
for ec2_id in instance_id:
instance_id = ec2_id_to_id(ec2_id)
self.compute_api.delete(context, instance_id)
self.compute_api.delete(context, instance_id=instance_id)
return True
def reboot_instances(self, context, instance_id, **kwargs):
@ -788,19 +798,19 @@ class CloudController(object):
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for ec2_id in instance_id:
instance_id = ec2_id_to_id(ec2_id)
self.compute_api.reboot(context, instance_id)
self.compute_api.reboot(context, instance_id=instance_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
instance_id = ec2_id_to_id(instance_id)
self.compute_api.rescue(context, instance_id)
self.compute_api.rescue(context, instance_id=instance_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
instance_id = ec2_id_to_id(instance_id)
self.compute_api.unrescue(context, instance_id)
self.compute_api.unrescue(context, instance_id=instance_id)
return True
def update_instance(self, context, ec2_id, **kwargs):
@ -811,7 +821,7 @@ class CloudController(object):
changes[field] = kwargs[field]
if changes:
instance_id = ec2_id_to_id(ec2_id)
self.compute_api.update(context, instance_id, **kwargs)
self.compute_api.update(context, instance_id=instance_id, **kwargs)
return True
def describe_images(self, context, image_id=None, **kwargs):


@ -165,15 +165,18 @@ class Controller(wsgi.Controller):
if not inst_dict:
return faults.Fault(exc.HTTPUnprocessableEntity())
ctxt = req.environ['nova.context']
update_dict = {}
if 'adminPass' in inst_dict['server']:
update_dict['admin_pass'] = inst_dict['server']['adminPass']
try:
self.compute_api.set_admin_password(ctxt, id)
except exception.TimeoutException, e:
return exc.HTTPRequestTimeout()
if 'name' in inst_dict['server']:
update_dict['display_name'] = inst_dict['server']['name']
try:
self.compute_api.update(req.environ['nova.context'], id,
**update_dict)
self.compute_api.update(ctxt, id, **update_dict)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPNoContent()


@ -21,6 +21,7 @@ Handles all requests relating to instances (guest vms).
"""
import datetime
import re
import time
from nova import db
@ -47,7 +48,8 @@ def generate_default_hostname(instance_id):
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None, volume_api=None,
def __init__(self, image_service=None, network_api=None,
volume_api=None, hostname_factory=generate_default_hostname,
**kwargs):
if not image_service:
image_service = utils.import_object(FLAGS.image_service)
@ -58,9 +60,11 @@ class API(base.Base):
if not volume_api:
volume_api = volume.API()
self.volume_api = volume_api
self.hostname_factory = hostname_factory
super(API, self).__init__(**kwargs)
def get_network_topic(self, context, instance_id):
"""Get the network topic for an instance."""
try:
instance = self.get(context, instance_id)
except exception.NotFound as e:
@ -81,8 +85,7 @@ class API(base.Base):
min_count=1, max_count=1,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None,
generate_hostname=generate_default_hostname):
availability_zone=None, user_data=None):
"""Create the number of instances requested if quota and
other arguments check out ok."""
@ -172,9 +175,9 @@ class API(base.Base):
security_group_id)
# Set sane defaults if not specified
updates = dict(hostname=generate_hostname(instance_id))
if (not hasattr(instance, 'display_name')) or \
instance.display_name == None:
updates = dict(hostname=self.hostname_factory(instance_id))
if (not hasattr(instance, 'display_name') or
instance.display_name == None):
updates['display_name'] = "Server %s" % instance_id
instance = self.update(context, instance_id, **updates)
@ -192,7 +195,7 @@ class API(base.Base):
for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
return instances
return [dict(x.iteritems()) for x in instances]
def ensure_default_security_group(self, context):
""" Create security group for the security context if it
@ -277,10 +280,11 @@ class API(base.Base):
:retval None
"""
return self.db.instance_update(context, instance_id, kwargs)
rv = self.db.instance_update(context, instance_id, kwargs)
return dict(rv.iteritems())
def delete(self, context, instance_id):
LOG.debug(_("Going to try and terminate %s"), instance_id)
LOG.debug(_("Going to try to terminate %s"), instance_id)
try:
instance = self.get(context, instance_id)
except exception.NotFound as e:
@ -301,16 +305,15 @@ class API(base.Base):
host = instance['host']
if host:
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "terminate_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('terminate_instance', context,
instance_id, host)
else:
self.db.instance_destroy(context, instance_id)
def get(self, context, instance_id):
"""Get a single instance with the given ID."""
return self.db.instance_get_by_id(context, instance_id)
rv = self.db.instance_get_by_id(context, instance_id)
return dict(rv.iteritems())
def get_all(self, context, project_id=None, reservation_id=None,
fixed_ip=None):
@ -319,7 +322,7 @@ class API(base.Base):
an admin, it will retrieve all instances in the system."""
if reservation_id is not None:
return self.db.instance_get_all_by_reservation(context,
reservation_id)
reservation_id)
if fixed_ip is not None:
return self.db.fixed_ip_get_instance(context, fixed_ip)
if project_id or not context.is_admin:
@ -332,50 +335,46 @@ class API(base.Base):
project_id)
return self.db.instance_get_all(context)
def _cast_compute_message(self, method, context, instance_id, host=None):
"""Generic handler for RPC casts to compute."""
if not host:
instance = self.get(context, instance_id)
host = instance['host']
queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
kwargs = {'method': method, 'args': {'instance_id': instance_id}}
rpc.cast(context, queue, kwargs)
def _call_compute_message(self, method, context, instance_id, host=None):
"""Generic handler for RPC calls to compute."""
if not host:
instance = self.get(context, instance_id)
host = instance["host"]
queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
kwargs = {"method": method, "args": {"instance_id": instance_id}}
return rpc.call(context, queue, kwargs)
def snapshot(self, context, instance_id, name):
"""Snapshot the given instance."""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "snapshot_instance",
"args": {"instance_id": instance_id, "name": name}})
self._cast_compute_message('snapshot_instance', context, instance_id)
def reboot(self, context, instance_id):
"""Reboot the given instance."""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('reboot_instance', context, instance_id)
def pause(self, context, instance_id):
"""Pause the given instance."""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "pause_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('pause_instance', context, instance_id)
def unpause(self, context, instance_id):
"""Unpause the given instance."""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unpause_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('unpause_instance', context, instance_id)
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
instance = self.get(context, instance_id)
host = instance["host"]
return rpc.call(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "get_diagnostics",
"args": {"instance_id": instance_id}})
return self._call_compute_message(
"get_diagnostics",
context,
instance_id)
def get_actions(self, context, instance_id):
"""Retrieve actions for the given instance."""
@ -383,89 +382,54 @@ class API(base.Base):
def suspend(self, context, instance_id):
"""suspend the instance with instance_id"""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "suspend_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('suspend_instance', context, instance_id)
def resume(self, context, instance_id):
"""resume the instance with instance_id"""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "resume_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('resume_instance', context, instance_id)
def rescue(self, context, instance_id):
"""Rescue the given instance."""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "rescue_instance",
"args": {"instance_id": instance_id}})
self._cast_compute_message('rescue_instance', context, instance_id)
def unrescue(self, context, instance_id):
"""Unrescue the given instance."""
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unrescue_instance",
"args": {"instance_id": instance['id']}})
self._cast_compute_message('unrescue_instance', context, instance_id)
def set_admin_password(self, context, instance_id):
"""Set the root/admin password for the given instance."""
self._cast_compute_message('set_admin_password', context, instance_id)
def get_ajax_console(self, context, instance_id):
"""Get a url to an AJAX Console"""
instance = self.get(context, instance_id)
output = rpc.call(context,
'%s.%s' % (FLAGS.compute_topic,
instance['host']),
{'method': 'get_ajax_console',
'args': {'instance_id': instance['id']}})
output = self._call_compute_message('get_ajax_console',
context,
instance_id)
rpc.cast(context, '%s' % FLAGS.ajax_console_proxy_topic,
{'method': 'authorize_ajax_console',
'args': {'token': output['token'], 'host': output['host'],
'port': output['port']}})
return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url,
output['token'])}
def lock(self, context, instance_id):
"""
lock the instance with instance_id
def get_console_output(self, context, instance_id):
"""Get console output for an an instance"""
return self._call_compute_message('get_console_output',
context,
instance_id)
"""
instance = self.get_instance(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "lock_instance",
"args": {"instance_id": instance['id']}})
def lock(self, context, instance_id):
"""lock the instance with instance_id"""
self._cast_compute_message('lock_instance', context, instance_id)
def unlock(self, context, instance_id):
"""
unlock the instance with instance_id
"""
instance = self.get_instance(context, instance_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unlock_instance",
"args": {"instance_id": instance['id']}})
"""unlock the instance with instance_id"""
self._cast_compute_message('unlock_instance', context, instance_id)
def get_lock(self, context, instance_id):
"""
return the boolean state of (instance with instance_id)'s lock
"""
instance = self.get_instance(context, instance_id)
"""return the boolean state of (instance with instance_id)'s lock"""
instance = self.get(context, instance_id)
return instance['locked']
def attach_volume(self, context, instance_id, volume_id, device):


@ -1,205 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to resize, repartition, and modify disk images.
Includes injection of SSH PGP keys into authorized_keys file.
"""
import os
import tempfile
from nova import exception
from nova import flags
from nova import log as logging
LOG = logging.getLogger('nova.compute.disk')
FLAGS = flags.FLAGS
flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
'minimum size in bytes of root partition')
flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
'block_size to use for dd')
def partition(infile, outfile, local_bytes=0, resize=True,
local_type='ext2', execute=None):
"""
Turns a partition (infile) into a bootable drive image (outfile).
The first 63 sectors (0-62) of the resulting image is a master boot record.
Infile becomes the first primary partition.
If local bytes is specified, a second primary partition is created and
formatted as ext2.
::
In the diagram below, dashes represent drive sectors.
+-----+------. . .-------+------. . .------+
| 0 a| b c|d e|
+-----+------. . .-------+------. . .------+
| mbr | primary partition | local partition |
+-----+------. . .-------+------. . .------+
"""
sector_size = 512
file_size = os.path.getsize(infile)
if resize and file_size < FLAGS.minimum_root_size:
last_sector = FLAGS.minimum_root_size / sector_size - 1
execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
% (infile, last_sector, sector_size))
execute('e2fsck -fp %s' % infile, check_exit_code=False)
execute('resize2fs %s' % infile)
file_size = FLAGS.minimum_root_size
elif file_size % sector_size != 0:
LOG.warn(_("Input partition size not evenly divisible by"
" sector size: %d / %d"), file_size, sector_size)
primary_sectors = file_size / sector_size
if local_bytes % sector_size != 0:
LOG.warn(_("Bytes for local storage not evenly divisible"
" by sector size: %d / %d"), local_bytes, sector_size)
local_sectors = local_bytes / sector_size
mbr_last = 62 # a
primary_first = mbr_last + 1 # b
primary_last = primary_first + primary_sectors - 1 # c
local_first = primary_last + 1 # d
local_last = local_first + local_sectors - 1 # e
last_sector = local_last # e
# create an empty file
execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
% (outfile, mbr_last, sector_size))
# make mbr partition
execute('parted --script %s mklabel msdos' % outfile)
# append primary file
execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append'
% (infile, outfile, FLAGS.block_size))
# make primary partition
execute('parted --script %s mkpart primary %ds %ds'
% (outfile, primary_first, primary_last))
if local_bytes > 0:
# make the file bigger
execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
% (outfile, last_sector, sector_size))
# make and format local partition
execute('parted --script %s mkpartfs primary %s %ds %ds'
% (outfile, local_type, local_first, local_last))
def extend(image, size, execute):
file_size = os.path.getsize(image)
if file_size >= size:
return
return execute('truncate -s size %s' % (image,))
def inject_data(image, key=None, net=None, partition=None, execute=None):
"""Injects a ssh key and optionally net data into a disk image.
it will mount the image as a fully partitioned disk and attempt to inject
into the specified partition number.
If partition is not specified it mounts the image as a single partition.
"""
out, err = execute('sudo losetup --find --show %s' % image)
if err:
raise exception.Error(_('Could not attach image to loopback: %s')
% err)
device = out.strip()
try:
if not partition is None:
# create partition
out, err = execute('sudo kpartx -a %s' % device)
if err:
raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
partition)
else:
mapped_device = device
# We can only loopback mount raw images. If the device isn't there,
# it's normally because it's a .vmdk or a .vdi etc
if not os.path.exists(mapped_device):
raise exception.Error('Mapped device was not found (we can'
' only inject raw disk images): %s' %
mapped_device)
# Configure ext2fs so that it doesn't auto-check every N boots
out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
out, err = execute(
'sudo mount %s %s' % (mapped_device, tmpdir))
if err:
raise exception.Error(_('Failed to mount filesystem: %s')
% err)
try:
if key:
# inject key file
_inject_key_into_fs(key, tmpdir, execute=execute)
if net:
_inject_net_into_fs(net, tmpdir, execute=execute)
finally:
# unmount device
execute('sudo umount %s' % mapped_device)
finally:
# remove temporary directory
execute('rmdir %s' % tmpdir)
if not partition is None:
# remove partitions
execute('sudo kpartx -d %s' % device)
finally:
# remove loopback
execute('sudo losetup --detach %s' % device)
def _inject_key_into_fs(key, fs, execute=None):
"""Add the given public ssh key to root's authorized_keys.
key is an ssh key string.
fs is the path to the base of the filesystem into which to inject the key.
"""
sshdir = os.path.join(fs, 'root', '.ssh')
execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
execute('sudo chown root %s' % sshdir)
execute('sudo chmod 700 %s' % sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
def _inject_net_into_fs(net, fs, execute=None):
"""Inject /etc/network/interfaces into the filesystem rooted at fs.
net is the contents of /etc/network/interfaces.
"""
netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
execute('sudo chown root:root %s' % netdir)
execute('sudo chmod 755 %s' % netdir)
netfile = os.path.join(netdir, 'interfaces')
execute('sudo tee %s' % netfile, net)


@ -35,6 +35,8 @@ terminating it.
"""
import datetime
import random
import string
import logging
import socket
import functools
@ -54,6 +56,8 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
'Driver to use for controlling virtualization')
flags.DEFINE_string('stub_network', False,
'Stub network related code')
flags.DEFINE_integer('password_length', 12,
'Length of generated admin passwords')
flags.DEFINE_string('console_host', socket.gethostname(),
'Console proxy host to use to connect to instances on'
'this host.')
@ -309,6 +313,35 @@ class ComputeManager(manager.Manager):
self.driver.snapshot(instance_ref, name)
@exception.wrap_exception
@checks_instance_lock
def set_admin_password(self, context, instance_id, new_pass=None):
"""Set the root/admin password for an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['state'] != power_state.RUNNING:
logging.warn('trying to reset the password on a non-running '
'instance: %s (state: %s expected: %s)',
instance_ref['id'],
instance_ref['state'],
power_state.RUNNING)
logging.debug('instance %s: setting admin password',
instance_ref['name'])
if new_pass is None:
# Generate a random password
new_pass = self._generate_password(FLAGS.password_length)
self.driver.set_admin_password(instance_ref, new_pass)
self._update_state(context, instance_id)
def _generate_password(self, length=20):
"""Generate a random sequence of letters and digits
to be used as a password.
"""
chrs = string.letters + string.digits
return "".join([random.choice(chrs) for i in xrange(length)])
@exception.wrap_exception
@checks_instance_lock
def rescue_instance(self, context, instance_id):


@ -299,6 +299,10 @@ def fixed_ip_get_instance(context, address):
return IMPL.fixed_ip_get_instance(context, address)
def fixed_ip_get_instance_v6(context, address):
return IMPL.fixed_ip_get_instance_v6(context, address)
def fixed_ip_get_network(context, address):
"""Get a network for a fixed ip by address."""
return IMPL.fixed_ip_get_network(context, address)
@ -357,6 +361,10 @@ def instance_get_fixed_address(context, instance_id):
return IMPL.instance_get_fixed_address(context, instance_id)
def instance_get_fixed_address_v6(context, instance_id):
return IMPL.instance_get_fixed_address_v6(context, instance_id)
def instance_get_floating_address(context, instance_id):
"""Get the first floating ip address of an instance."""
return IMPL.instance_get_floating_address(context, instance_id)
@ -552,6 +560,10 @@ def project_get_network(context, project_id, associate=True):
return IMPL.project_get_network(context, project_id)
def project_get_network_v6(context, project_id):
return IMPL.project_get_network_v6(context, project_id)
###################


@ -606,6 +606,17 @@ def fixed_ip_get_instance(context, address):
return fixed_ip_ref.instance
@require_context
def fixed_ip_get_instance_v6(context, address):
session = get_session()
mac = utils.to_mac(address)
result = session.query(models.Instance).\
filter_by(mac_address=mac).\
first()
return result
@require_admin_context
def fixed_ip_get_network(context, address):
fixed_ip_ref = fixed_ip_get_by_address(context, address)
@ -764,6 +775,7 @@ def instance_get_by_id(context, instance_id):
if is_admin_context(context):
result = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.floating_ips')).\
filter_by(id=instance_id).\
@ -793,6 +805,17 @@ def instance_get_fixed_address(context, instance_id):
return instance_ref.fixed_ip['address']
@require_context
def instance_get_fixed_address_v6(context, instance_id):
session = get_session()
with session.begin():
instance_ref = instance_get(context, instance_id, session=session)
network_ref = network_get_by_instance(context, instance_id)
prefix = network_ref.cidr_v6
mac = instance_ref.mac_address
return utils.to_global_ipv6(prefix, mac)
@require_context
def instance_get_floating_address(context, instance_id):
session = get_session()
@ -1130,6 +1153,11 @@ def project_get_network(context, project_id, associate=True):
return result
@require_context
def project_get_network_v6(context, project_id):
return project_get_network(context, project_id)
###################
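instance_get_fixed_address_v6 above derives the address from the network's cidr_v6 prefix and the instance's MAC via utils.to_global_ipv6, which is not part of this diff. As an illustration only, the usual modified EUI-64 construction such a helper would perform looks like this:

def mac_to_eui64_words(mac):
    # Modified EUI-64: flip the universal/local bit of the first octet
    # and insert ff:fe between the third and fourth octets.
    octets = [int(x, 16) for x in mac.split(':')]
    octets[0] ^= 0x02
    eui64 = octets[0:3] + [0xff, 0xfe] + octets[3:6]
    return ['%02x%02x' % (eui64[i], eui64[i + 1]) for i in range(0, 8, 2)]

# With prefix fd00::/64 and mac 02:16:3e:12:34:56 the interface id is
# 0016:3eff:fe12:3456, i.e. an address of fd00::16:3eff:fe12:3456.
print ':'.join(mac_to_eui64_words('02:16:3e:12:34:56'))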


@ -90,8 +90,14 @@ class NovaBase(object):
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict"""
return iter(self)
"""Make the model object behave like a dict.
Includes attributes from joins."""
local = dict(self)
joined = dict([(k, v) for k, v in self.__dict__.iteritems()
if not k[0] == '_'])
local.update(joined)
return local.iteritems()
# TODO(vish): Store images in the database instead of file system
@ -411,6 +417,10 @@ class Network(BASE, NovaBase):
injected = Column(Boolean, default=False)
cidr = Column(String(255), unique=True)
cidr_v6 = Column(String(255), unique=True)
ra_server = Column(String(255))
netmask = Column(String(255))
bridge = Column(String(255))
gateway = Column(String(255))
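The reworked iteritems() above merges the mapped column values (dict(self)) with whatever joined objects are already loaded into __dict__, which is what lets compute.api hand back plain dicts across RPC. A toy illustration of that merge, independent of SQLAlchemy; the fake column iterator and attribute names are invented for the example:

class FakeModel(object):
    """Stand-in for a mapped model: columns come from __iter__,
    joined objects live in __dict__."""

    def __init__(self):
        self.fixed_ip = {'address': '10.0.0.3'}   # pretend joined row
        self._sa_state = object()                  # private, filtered out

    def __iter__(self):
        return iter([('id', 1), ('hostname', 'server-1')])

    def iteritems(self):
        local = dict(self)
        joined = dict([(k, v) for k, v in self.__dict__.iteritems()
                       if not k[0] == '_'])
        local.update(joined)
        return local.iteritems()

print dict(FakeModel().iteritems())
# {'id': 1, 'hostname': 'server-1', 'fixed_ip': {'address': '10.0.0.3'}}
# (key order may vary)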


@ -76,6 +76,10 @@ class InvalidInputException(Error):
pass
class TimeoutException(Error):
pass
def wrap_exception(f):
def _wrap(*args, **kw):
try:


@ -50,6 +50,7 @@ flags.DEFINE_string('routing_source_ip', '$my_ip',
'Public IP of network host')
flags.DEFINE_bool('use_nova_chains', False,
'use the nova_ routing chains instead of default')
flags.DEFINE_string('dns_server', None,
'if set, uses specific dns server for dnsmasq')
flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
@ -196,6 +197,10 @@ def ensure_bridge(bridge, interface, net_attrs=None):
net_attrs['gateway'],
net_attrs['broadcast'],
net_attrs['netmask']))
if(FLAGS.use_ipv6):
_execute("sudo ifconfig %s add %s up" % \
(bridge,
net_attrs['cidr_v6']))
else:
_execute("sudo ifconfig %s up" % bridge)
if FLAGS.use_nova_chains:
@ -262,6 +267,50 @@ def update_dhcp(context, network_id):
_execute(command, addl_env=env)
def update_ra(context, network_id):
network_ref = db.network_get(context, network_id)
conffile = _ra_file(network_ref['bridge'], 'conf')
with open(conffile, 'w') as f:
conf_str = """
interface %s
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
prefix %s
{
AdvOnLink on;
AdvAutonomous on;
};
};
""" % (network_ref['bridge'], network_ref['cidr_v6'])
f.write(conf_str)
# Make sure radvd can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
pid = _ra_pid_for(network_ref['bridge'])
# if radvd is already running, then tell it to reload
if pid:
out, _err = _execute('cat /proc/%d/cmdline'
% pid, check_exit_code=False)
if conffile in out:
try:
_execute('sudo kill -HUP %d' % pid)
return
except Exception as exc: # pylint: disable-msg=W0703
LOG.debug(_("Hupping radvd threw %s"), exc)
else:
LOG.debug(_("Pid %d is stale, relaunching radvd"), pid)
command = _ra_cmd(network_ref)
_execute(command)
db.network_update(context, network_id,
{"ra_server":
utils.get_my_linklocal(network_ref['bridge'])})
def _host_dhcp(fixed_ip_ref):
"""Return a host string for an address"""
instance_ref = fixed_ip_ref['instance']
@ -323,6 +372,15 @@ def _dnsmasq_cmd(net):
return ''.join(cmd)
def _ra_cmd(net):
"""Builds radvd command"""
cmd = ['sudo -E radvd',
# ' -u nobody',
' -C %s' % _ra_file(net['bridge'], 'conf'),
' -p %s' % _ra_file(net['bridge'], 'pid')]
return ''.join(cmd)
def _stop_dnsmasq(network):
"""Stops the dnsmasq instance for a given network"""
pid = _dnsmasq_pid_for(network)
@ -344,6 +402,16 @@ def _dhcp_file(bridge, kind):
kind))
def _ra_file(bridge, kind):
"""Return path to a pid or conf file for a bridge"""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
return os.path.abspath("%s/nova-ra-%s.%s" % (FLAGS.networks_path,
bridge,
kind))
def _dnsmasq_pid_for(bridge):
"""Returns the pid for prior dnsmasq instance for a bridge
@ -357,3 +425,18 @@ def _dnsmasq_pid_for(bridge):
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
def _ra_pid_for(bridge):
"""Returns the pid for prior radvd instance for a bridge
Returns None if no pid file exists
If machine has rebooted pid might be incorrect (caller should check)
"""
pid_file = _ra_file(bridge, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())


@ -82,6 +82,7 @@ flags.DEFINE_integer('network_size', 256,
flags.DEFINE_string('floating_range', '4.4.4.0/24',
'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block')
flags.DEFINE_integer('cnt_vpn_clients', 5,
'Number of addresses reserved for vpn clients')
flags.DEFINE_string('network_driver', 'nova.network.linux_net',
@ -90,6 +91,9 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False,
'Whether to update dhcp when fixed_ip is disassociated')
flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600,
'Seconds after which a deallocated ip is disassociated')
flags.DEFINE_bool('use_ipv6', True,
'use ipv6')
flags.DEFINE_string('network_host', socket.gethostname(),
'Network host to use for ip allocation in flat modes')
flags.DEFINE_bool('fake_call', False,
@ -235,8 +239,8 @@ class NetworkManager(manager.Manager):
"""Get the network host for the current context."""
raise NotImplementedError()
def create_networks(self, context, num_networks, network_size,
*args, **kwargs):
def create_networks(self, context, cidr, num_networks, network_size,
cidr_v6, *args, **kwargs):
"""Create networks based on parameters."""
raise NotImplementedError()
@ -321,9 +325,11 @@ class FlatManager(NetworkManager):
pass
def create_networks(self, context, cidr, num_networks, network_size,
*args, **kwargs):
cidr_v6, *args, **kwargs):
"""Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
fixed_net_v6 = IPy.IP(cidr_v6)
significant_bits_v6 = 64
for index in range(num_networks):
start = index * network_size
significant_bits = 32 - int(math.log(network_size, 2))
@ -336,7 +342,13 @@ class FlatManager(NetworkManager):
net['gateway'] = str(project_net[1])
net['broadcast'] = str(project_net.broadcast())
net['dhcp_start'] = str(project_net[2])
if(FLAGS.use_ipv6):
cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6)
net['cidr_v6'] = cidr_v6
network_ref = self.db.network_create_safe(context, net)
if network_ref:
self._create_fixed_ips(context, network_ref['id'])
@ -482,12 +494,16 @@ class VlanManager(NetworkManager):
network_ref['bridge'])
def create_networks(self, context, cidr, num_networks, network_size,
vlan_start, vpn_start):
vlan_start, vpn_start, cidr_v6):
"""Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
fixed_net_v6 = IPy.IP(cidr_v6)
network_size_v6 = 1 << 64
significant_bits_v6 = 64
for index in range(num_networks):
vlan = vlan_start + index
start = index * network_size
start_v6 = index * network_size_v6
significant_bits = 32 - int(math.log(network_size, 2))
cidr = "%s/%s" % (fixed_net[start], significant_bits)
project_net = IPy.IP(cidr)
@ -500,6 +516,11 @@ class VlanManager(NetworkManager):
net['dhcp_start'] = str(project_net[3])
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
if(FLAGS.use_ipv6):
cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6],
significant_bits_v6)
net['cidr_v6'] = cidr_v6
# NOTE(vish): This makes ports unique across the cloud, a more
# robust solution would be to make them unique per ip
net['vpn_public_port'] = vpn_start + index
@ -538,6 +559,7 @@ class VlanManager(NetworkManager):
self.driver.ensure_vlan_bridge(network_ref['vlan'],
network_ref['bridge'],
network_ref)
# NOTE(vish): only ensure this forward if the address hasn't been set
# manually.
if address == FLAGS.vpn_ip:
@ -546,6 +568,8 @@ class VlanManager(NetworkManager):
network_ref['vpn_private_address'])
if not FLAGS.fake_network:
self.driver.update_dhcp(context, network_id)
if(FLAGS.use_ipv6):
self.driver.update_ra(context, network_id)
@property
def _bottom_reserved_ips(self):
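To make the v6 arithmetic in create_networks above concrete: with the default fixed_range_v6 of fd00::/48 and significant_bits_v6 = 64, each network gets its own /64, offset by index * 2**64 addresses. A small sketch mirroring the loop (output shown approximately; IPy's exact string form may differ):

import IPy

fixed_net_v6 = IPy.IP('fd00::/48')
network_size_v6 = 1 << 64
for index in range(3):
    start_v6 = index * network_size_v6
    # roughly fd00::/64, fd00:0:0:1::/64, fd00:0:0:2::/64
    print "%s/%s" % (fixed_net_v6[start_v6], 64)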


@ -23,14 +23,10 @@ and some black magic for inline callbacks.
"""
import datetime
import sys
import time
import unittest
import mox
import stubout
from twisted.internet import defer
from twisted.trial import unittest as trial_unittest
from nova import context
from nova import db
@ -74,7 +70,8 @@ class TestCase(unittest.TestCase):
FLAGS.fixed_range,
5, 16,
FLAGS.vlan_start,
FLAGS.vpn_start)
FLAGS.vpn_start,
FLAGS.fixed_range_v6)
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
@ -139,95 +136,3 @@ class TestCase(unittest.TestCase):
_wrapped.func_name = self.originalAttach.func_name
rpc.Consumer.attach_to_eventlet = _wrapped
class TrialTestCase(trial_unittest.TestCase):
"""Test case base class for all unit tests"""
def setUp(self):
"""Run before each test method to initialize test environment"""
super(TrialTestCase, self).setUp()
# NOTE(vish): We need a better method for creating fixtures for tests
# now that we have some required db setup for the system
# to work properly.
self.start = datetime.datetime.utcnow()
ctxt = context.get_admin_context()
if db.network_count(ctxt) != 5:
network_manager.VlanManager().create_networks(ctxt,
FLAGS.fixed_range,
5, 16,
FLAGS.vlan_start,
FLAGS.vpn_start)
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
self.flag_overrides = {}
self.injected = []
self._original_flags = FLAGS.FlagValuesDict()
def tearDown(self):
"""Runs after each test method to finalize/tear down test
environment."""
try:
self.mox.UnsetStubs()
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
self.mox.VerifyAll()
# NOTE(vish): Clean up any ips associated during the test.
ctxt = context.get_admin_context()
db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host,
self.start)
db.network_disassociate_all(ctxt)
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
if FLAGS.fake_rabbit:
fakerabbit.reset_all()
db.security_group_destroy_all(ctxt)
super(TrialTestCase, self).tearDown()
finally:
self.reset_flags()
def flags(self, **kw):
"""Override flag variables for a test"""
for k, v in kw.iteritems():
if k in self.flag_overrides:
self.reset_flags()
raise Exception(
'trying to override already overriden flag: %s' % k)
self.flag_overrides[k] = getattr(FLAGS, k)
setattr(FLAGS, k, v)
def reset_flags(self):
"""Resets all flag variables for the test. Runs after each test"""
FLAGS.Reset()
for k, v in self._original_flags.iteritems():
setattr(FLAGS, k, v)
def run(self, result=None):
test_method = getattr(self, self._testMethodName)
setattr(self,
self._testMethodName,
self._maybeInlineCallbacks(test_method, result))
rv = super(TrialTestCase, self).run(result)
setattr(self, self._testMethodName, test_method)
return rv
def _maybeInlineCallbacks(self, func, result):
def _wrapped():
g = func()
if isinstance(g, defer.Deferred):
return g
if not hasattr(g, 'send'):
return defer.succeed(g)
inlined = defer.inlineCallbacks(func)
d = inlined()
return d
_wrapped.func_name = func.func_name
return _wrapped

View File

@ -78,7 +78,7 @@ class FakeHttplibConnection(object):
pass
class XmlConversionTestCase(test.TrialTestCase):
class XmlConversionTestCase(test.TestCase):
"""Unit test api xml conversion"""
def test_number_conversion(self):
conv = apirequest._try_convert
@ -95,7 +95,7 @@ class XmlConversionTestCase(test.TrialTestCase):
self.assertEqual(conv('-0'), 0)
class ApiEc2TestCase(test.TrialTestCase):
class ApiEc2TestCase(test.TestCase):
"""Unit test for the cloud controller on an EC2 API"""
def setUp(self):
super(ApiEc2TestCase, self).setUp()
@ -262,6 +262,72 @@ class ApiEc2TestCase(test.TrialTestCase):
return
def test_authorize_revoke_security_group_cidr_v6(self):
"""
Test that we can add and remove CIDR based rules
to a security group for IPv6
"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
# At the moment, you need both of these to actually be netadmin
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize('tcp', 80, 81, '::/0')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
# I don't bother checking that we actually find it here,
# because the create/delete unit test further up should
# be good enough for that.
for group in rv:
if group.name == security_group_name:
self.assertEquals(len(group.rules), 1)
self.assertEquals(int(group.rules[0].from_port), 80)
self.assertEquals(int(group.rules[0].to_port), 81)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '::/0')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke('tcp', 80, 81, '::/0')
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
self.manager.delete_project(project)
self.manager.delete_user(user)
return
def test_authorize_revoke_security_group_foreign_group(self):
"""
Test that we can grant and revoke another security group access

View File

@ -21,6 +21,7 @@ import json
from M2Crypto import BIO
from M2Crypto import RSA
import os
import shutil
import tempfile
import time
@ -50,6 +51,8 @@ IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images')
os.makedirs(IMAGES_PATH)
# TODO(termie): these tests are rather fragile, they should at the least be
# wiping database state after each run
class CloudTestCase(test.TestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
@ -287,6 +290,7 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp1['id'])
def test_instance_update_state(self):
# TODO(termie): what is this code even testing?
def instance(num):
return {
'reservation_id': 'r-1',
@ -305,7 +309,8 @@ class CloudTestCase(test.TestCase):
'state': 0x01,
'user_data': ''}
rv = self.cloud._format_describe_instances(self.context)
self.assert_(len(rv['reservationSet']) == 0)
logging.error(str(rv))
self.assertEqual(len(rv['reservationSet']), 0)
# simulate launch of 5 instances
# self.cloud.instances['pending'] = {}
@ -368,6 +373,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual('Foo Img', img.metadata['description'])
self._fake_set_image_description(self.context, 'ami-testing', '')
self.assertEqual('', img.metadata['description'])
shutil.rmtree(pathdir)
def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {})

View File

@ -76,7 +76,7 @@ class ComputeTestCase(test.TestCase):
ref = self.compute_api.create(self.context,
FLAGS.default_instance_type, None, **instance)
try:
self.assertNotEqual(ref[0].display_name, None)
self.assertNotEqual(ref[0]['display_name'], None)
finally:
db.instance_destroy(self.context, ref[0]['id'])
@ -87,10 +87,14 @@ class ComputeTestCase(test.TestCase):
'user_id': self.user.id,
'project_id': self.project.id}
group = db.security_group_create(self.context, values)
ref = self.compute_api.create(self.context,
FLAGS.default_instance_type, None, security_group=['default'])
ref = self.compute_api.create(
self.context,
instance_type=FLAGS.default_instance_type,
image_id=None,
security_group=['default'])
try:
self.assertEqual(len(ref[0]['security_groups']), 1)
self.assertEqual(len(db.security_group_get_by_instance(
self.context, ref[0]['id'])), 1)
finally:
db.security_group_destroy(self.context, group['id'])
db.instance_destroy(self.context, ref[0]['id'])
@ -152,6 +156,13 @@ class ComputeTestCase(test.TestCase):
self.compute.reboot_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_set_admin_password(self):
"""Ensure instance can have its admin password set"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.set_admin_password(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
instance_id = self._create_instance()

View File

@ -111,12 +111,14 @@ class ConsoleTestCase(test.TestCase):
console_instances = [con['instance_id'] for con in pool.consoles]
self.assert_(instance_id in console_instances)
db.instance_destroy(self.context, instance_id)
def test_add_console_does_not_duplicate(self):
instance_id = self._create_instance()
cons1 = self.console.add_console(self.context, instance_id)
cons2 = self.console.add_console(self.context, instance_id)
self.assertEqual(cons1, cons2)
db.instance_destroy(self.context, instance_id)
def test_remove_console(self):
instance_id = self._create_instance()
@ -127,3 +129,4 @@ class ConsoleTestCase(test.TestCase):
db.console_get,
self.context,
console_id)
db.instance_destroy(self.context, instance_id)

103
nova/tests/test_direct.py Normal file
View File

@ -0,0 +1,103 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Direct API."""
import json
import logging
import webob
from nova import compute
from nova import context
from nova import exception
from nova import test
from nova import utils
from nova.api import direct
from nova.tests import test_cloud
class FakeService(object):
def echo(self, context, data):
return {'data': data}
def context(self, context):
return {'user': context.user_id,
'project': context.project_id}
class DirectTestCase(test.TestCase):
def setUp(self):
super(DirectTestCase, self).setUp()
direct.register_service('fake', FakeService())
self.router = direct.PostParamsMiddleware(
direct.JsonParamsMiddleware(
direct.Router()))
self.auth_router = direct.DelegatedAuthMiddleware(self.router)
self.context = context.RequestContext('user1', 'proj1')
def tearDown(self):
direct.ROUTES = {}
def test_delegated_auth(self):
req = webob.Request.blank('/fake/context')
req.headers['X-OpenStack-User'] = 'user1'
req.headers['X-OpenStack-Project'] = 'proj1'
resp = req.get_response(self.auth_router)
data = json.loads(resp.body)
self.assertEqual(data['user'], 'user1')
self.assertEqual(data['project'], 'proj1')
def test_json_params(self):
req = webob.Request.blank('/fake/echo')
req.environ['openstack.context'] = self.context
req.method = 'POST'
req.body = 'json=%s' % json.dumps({'data': 'foo'})
resp = req.get_response(self.router)
resp_parsed = json.loads(resp.body)
self.assertEqual(resp_parsed['data'], 'foo')
def test_post_params(self):
req = webob.Request.blank('/fake/echo')
req.environ['openstack.context'] = self.context
req.method = 'POST'
req.body = 'data=foo'
resp = req.get_response(self.router)
resp_parsed = json.loads(resp.body)
self.assertEqual(resp_parsed['data'], 'foo')
def test_proxy(self):
proxy = direct.Proxy(self.router)
rv = proxy.fake.echo(self.context, data='baz')
self.assertEqual(rv['data'], 'baz')
class DirectCloudTestCase(test_cloud.CloudTestCase):
def setUp(self):
super(DirectCloudTestCase, self).setUp()
compute_handle = compute.API(image_service=self.cloud.image_service,
network_api=self.cloud.network_api,
volume_api=self.cloud.volume_api)
direct.register_service('compute', compute_handle)
self.router = direct.JsonParamsMiddleware(direct.Router())
proxy = direct.Proxy(self.router)
self.cloud.compute_api = proxy.compute
def tearDown(self):
super(DirectCloudTestCase, self).tearDown()
direct.ROUTES = {}

View File

@ -9,7 +9,7 @@ def _fake_context():
return context.RequestContext(1, 1)
class RootLoggerTestCase(test.TrialTestCase):
class RootLoggerTestCase(test.TestCase):
def setUp(self):
super(RootLoggerTestCase, self).setUp()
self.log = log.logging.root
@ -46,7 +46,7 @@ class RootLoggerTestCase(test.TrialTestCase):
self.assert_(True) # didn't raise exception
class NovaFormatterTestCase(test.TrialTestCase):
class NovaFormatterTestCase(test.TestCase):
def setUp(self):
super(NovaFormatterTestCase, self).setUp()
self.flags(logging_context_format_string="HAS CONTEXT "\
@ -78,7 +78,7 @@ class NovaFormatterTestCase(test.TrialTestCase):
self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue())
class NovaLoggerTestCase(test.TrialTestCase):
class NovaLoggerTestCase(test.TestCase):
def setUp(self):
super(NovaLoggerTestCase, self).setUp()
self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False)
@ -96,7 +96,7 @@ class NovaLoggerTestCase(test.TrialTestCase):
self.assertEqual(log.AUDIT, l.level)
class VerboseLoggerTestCase(test.TrialTestCase):
class VerboseLoggerTestCase(test.TestCase):
def setUp(self):
super(VerboseLoggerTestCase, self).setUp()
self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True)

View File

@ -38,7 +38,7 @@ def conditional_forbid(req):
return 'OK'
class LockoutTestCase(test.TrialTestCase):
class LockoutTestCase(test.TestCase):
"""Test case for the Lockout middleware."""
def setUp(self): # pylint: disable-msg=C0103
super(LockoutTestCase, self).setUp()

View File

@ -96,6 +96,28 @@ class NetworkTestCase(test.TestCase):
self.context.project_id = self.projects[project_num].id
self.network.deallocate_fixed_ip(self.context, address)
def test_private_ipv6(self):
"""Make sure ipv6 is OK"""
if FLAGS.use_ipv6:
instance_ref = self._create_instance(0)
address = self._create_address(0, instance_ref['id'])
network_ref = db.project_get_network(
context.get_admin_context(),
self.context.project_id)
address_v6 = db.instance_get_fixed_address_v6(
context.get_admin_context(),
instance_ref['id'])
self.assertEqual(instance_ref['mac_address'],
utils.to_mac(address_v6))
instance_ref2 = db.fixed_ip_get_instance_v6(
context.get_admin_context(),
address_v6)
self.assertEqual(instance_ref['id'], instance_ref2['id'])
self.assertEqual(address_v6,
utils.to_global_ipv6(
network_ref['cidr_v6'],
instance_ref['mac_address']))
def test_public_network_association(self):
"""Makes sure that we can allocaate a public ip"""
# TODO(vish): better way of adding floating ips

View File

@ -28,7 +28,7 @@ from nova import test
FLAGS = flags.FLAGS
class TwistdTestCase(test.TrialTestCase):
class TwistdTestCase(test.TestCase):
def setUp(self):
super(TwistdTestCase, self).setUp()
self.Options = twistd.WrapTwistedOptions(twistd.TwistdServerOptions)

View File

@ -31,6 +31,7 @@ from nova.compute import power_state
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi.vmops import SimpleDH
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
@ -262,3 +263,29 @@ class XenAPIVMTestCase(test.TestCase):
instance = db.instance_create(values)
self.conn.spawn(instance)
return instance
class XenAPIDiffieHellmanTestCase(test.TestCase):
"""
Unit tests for Diffie-Hellman code
"""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = SimpleDH()
self.bob = SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
bob_pub = self.bob.get_public()
alice_shared = self.alice.compute_shared(bob_pub)
bob_shared = self.bob.compute_shared(alice_pub)
self.assertEquals(alice_shared, bob_shared)
def test_encryption(self):
msg = "This is a top-secret message"
enc = self.alice.encrypt(msg)
dec = self.bob.decrypt(enc)
self.assertEquals(dec, msg)
def tearDown(self):
super(XenAPIDiffieHellmanTestCase, self).tearDown()

View File

@ -22,6 +22,7 @@ System-level utilities and helper functions.
import datetime
import inspect
import json
import os
import random
import subprocess
@ -30,6 +31,8 @@ import struct
import sys
import time
from xml.sax import saxutils
import re
import netaddr
from eventlet import event
from eventlet import greenthread
@ -200,6 +203,40 @@ def last_octet(address):
return int(address.split(".")[-1])
def get_my_linklocal(interface):
    """Return the link-local IPv6 address of an interface (or a fallback)."""
    try:
        if_str = execute("ip -f inet6 -o addr show %s" % interface)
        condition = r"\s+inet6\s+([0-9a-f:]+/\d+)\s+scope\s+link"
        links = [re.search(condition, x) for x in if_str[0].split('\n')]
        address = [w.group(1) for w in links if w is not None]
        return address[0]
    except (IndexError, ProcessExecutionError) as ex:
        LOG.warn(_("Couldn't get link-local IP of %s: %s"), interface, ex)
        return 'fe00::'
    except:
        return 'fe00::'
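# Editor's illustration (not part of this commit): what the regex above
# matches. The sample mimics one line of 'ip -f inet6 -o addr show' output;
# the interface name and address are invented.
import re
sample = "2: eth0    inet6 fe80::f816:3eff:fe33:4455/64 scope link "
match = re.search(r"\s+inet6\s+([0-9a-f:]+/\d+)\s+scope\s+link", sample)
assert match.group(1) == "fe80::f816:3eff:fe33:4455/64"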
def to_global_ipv6(prefix, mac):
mac64 = netaddr.EUI(mac).eui64().words
int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
mac64_addr = netaddr.IPAddress(int_addr)
maskIP = netaddr.IPNetwork(prefix).ip
return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).format()
def to_mac(ipv6_address):
address = netaddr.IPAddress(ipv6_address)
mask1 = netaddr.IPAddress("::ffff:ffff:ffff:ffff")
mask2 = netaddr.IPAddress("::0200:0:0:0")
mac64 = netaddr.EUI(int(address & mask1 ^ mask2)).words
return ":".join(["%02x" % i for i in mac64[0:3] + mac64[5:8]])
def utcnow():
"""Overridable version of datetime.datetime.utcnow."""
if utcnow.override_time:
@ -355,3 +392,36 @@ def utf8(value):
return value.encode("utf-8")
assert isinstance(value, str)
return value
def to_primitive(value):
if type(value) is type([]) or type(value) is type((None,)):
o = []
for v in value:
o.append(to_primitive(v))
return o
elif type(value) is type({}):
o = {}
for k, v in value.iteritems():
o[k] = to_primitive(v)
return o
elif isinstance(value, datetime.datetime):
return str(value)
elif hasattr(value, 'iteritems'):
return to_primitive(dict(value.iteritems()))
elif hasattr(value, '__iter__'):
return to_primitive(list(value))
else:
return value
def dumps(value):
try:
return json.dumps(value)
except TypeError:
pass
return json.dumps(to_primitive(value))
def loads(s):
return json.loads(s)
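# Editor's illustration (not part of this commit): why dumps() above falls back
# to to_primitive() -- json cannot serialize datetimes or arbitrary iterables,
# so they are first reduced to strings and lists.
import datetime
import json

def simplified_to_primitive(value):
    # trimmed-down version of to_primitive() above, for the example only
    if isinstance(value, (list, tuple)):
        return [simplified_to_primitive(v) for v in value]
    if isinstance(value, dict):
        return dict((k, simplified_to_primitive(v)) for k, v in value.items())
    if isinstance(value, datetime.datetime):
        return str(value)
    return value

print(json.dumps(simplified_to_primitive(
    {'launched_at': datetime.datetime(2011, 1, 17, 13, 5)})))
# -> {"launched_at": "2011-01-17 13:05:00"}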

186
nova/virt/disk.py Normal file
View File

@ -0,0 +1,186 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to resize, repartition, and modify disk images.
Includes injection of an SSH public key into the image's authorized_keys file.
"""
import os
import tempfile
import time
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
LOG = logging.getLogger('nova.compute.disk')
FLAGS = flags.FLAGS
flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
'minimum size in bytes of root partition')
flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
'block_size to use for dd')
def extend(image, size):
"""Increase image to size"""
file_size = os.path.getsize(image)
if file_size >= size:
return
utils.execute('truncate -s %s %s' % (size, image))
# NOTE(vish): attempts to resize filesystem
utils.execute('e2fsck -fp %s' % image, check_exit_code=False)
utils.execute('resize2fs %s' % image, check_exit_code=False)
def inject_data(image, key=None, net=None, partition=None, nbd=False):
"""Injects a ssh key and optionally net data into a disk image.
it will mount the image as a fully partitioned disk and attempt to inject
into the specified partition number.
If partition is not specified it mounts the image as a single partition.
"""
device = _link_device(image, nbd)
try:
if not partition is None:
# create partition
out, err = utils.execute('sudo kpartx -a %s' % device)
if err:
raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
partition)
else:
mapped_device = device
# We can only loopback mount raw images. If the device isn't there,
# it's normally because it's a .vmdk or a .vdi etc
if not os.path.exists(mapped_device):
raise exception.Error('Mapped device was not found (we can'
' only inject raw disk images): %s' %
mapped_device)
# Configure ext2fs so that it doesn't auto-check every N boots
out, err = utils.execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
out, err = utils.execute(
'sudo mount %s %s' % (mapped_device, tmpdir))
if err:
raise exception.Error(_('Failed to mount filesystem: %s')
% err)
try:
if key:
# inject key file
_inject_key_into_fs(key, tmpdir)
if net:
_inject_net_into_fs(net, tmpdir)
finally:
# unmount device
utils.execute('sudo umount %s' % mapped_device)
finally:
# remove temporary directory
utils.execute('rmdir %s' % tmpdir)
if not partition is None:
# remove partitions
utils.execute('sudo kpartx -d %s' % device)
finally:
_unlink_device(device, nbd)
def _link_device(image, nbd):
"""Link image to device using loopback or nbd"""
if nbd:
device = _allocate_device()
utils.execute('sudo qemu-nbd -c %s %s' % (device, image))
# NOTE(vish): this forks into another process, so give it a chance
#             to set up before continuing
for i in xrange(10):
if os.path.exists("/sys/block/%s/pid" % os.path.basename(device)):
return device
time.sleep(1)
raise exception.Error(_('nbd device %s did not show up') % device)
else:
out, err = utils.execute('sudo losetup --find --show %s' % image)
if err:
raise exception.Error(_('Could not attach image to loopback: %s')
% err)
return out.strip()
def _unlink_device(device, nbd):
"""Unlink image from device using loopback or nbd"""
if nbd:
utils.execute('sudo qemu-nbd -d %s' % device)
_free_device(device)
else:
utils.execute('sudo losetup --detach %s' % device)
_DEVICES = ['/dev/nbd%s' % i for i in xrange(16)]
def _allocate_device():
# NOTE(vish): This assumes no other processes are allocating nbd devices.
#             It may cause a race condition if multiple
# workers are running on a given machine.
while True:
if not _DEVICES:
raise exception.Error(_('No free nbd devices'))
device = _DEVICES.pop()
if not os.path.exists("/sys/block/%s/pid" % os.path.basename(device)):
break
return device
def _free_device(device):
_DEVICES.append(device)
def _inject_key_into_fs(key, fs):
"""Add the given public ssh key to root's authorized_keys.
key is an ssh key string.
fs is the path to the base of the filesystem into which to inject the key.
"""
sshdir = os.path.join(fs, 'root', '.ssh')
utils.execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
utils.execute('sudo chown root %s' % sshdir)
utils.execute('sudo chmod 700 %s' % sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
utils.execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
def _inject_net_into_fs(net, fs):
"""Inject /etc/network/interfaces into the filesystem rooted at fs.
net is the contents of /etc/network/interfaces.
"""
netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
utils.execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
utils.execute('sudo chown root:root %s' % netdir)
utils.execute('sudo chmod 755 %s' % netdir)
netfile = os.path.join(netdir, 'interfaces')
utils.execute('sudo tee %s' % netfile, net)

View File

@ -98,7 +98,7 @@ class FakeConnection(object):
the new instance.
The work will be done asynchronously. This function returns a
Deferred that allows the caller to detect when it is complete.
task that allows the caller to detect when it is complete.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
@ -122,7 +122,7 @@ class FakeConnection(object):
The second parameter is the name of the snapshot.
The work will be done asynchronously. This function returns a
Deferred that allows the caller to detect when it is complete.
task that allows the caller to detect when it is complete.
"""
pass
@ -134,7 +134,20 @@ class FakeConnection(object):
and so the instance is being specified as instance.name.
The work will be done asynchronously. This function returns a
Deferred that allows the caller to detect when it is complete.
task that allows the caller to detect when it is complete.
"""
pass
def set_admin_password(self, instance, new_pass):
"""
Set the root password on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the value of the new password.
The work will be done asynchronously. This function returns a
task that allows the caller to detect when it is complete.
"""
pass
@ -182,7 +195,7 @@ class FakeConnection(object):
and so the instance is being specified as instance.name.
The work will be done asynchronously. This function returns a
Deferred that allows the caller to detect when it is complete.
task that allows the caller to detect when it is complete.
"""
del self.instances[instance.name]

View File

@ -7,13 +7,13 @@
#set $disk_bus = 'uml'
<type>uml</type>
<kernel>/usr/bin/linux</kernel>
<root>/dev/ubda1</root>
<root>/dev/ubda</root>
#else
#if $type == 'xen'
#set $disk_prefix = 'sd'
#set $disk_bus = 'scsi'
<type>linux</type>
<root>/dev/xvda1</root>
<root>/dev/xvda</root>
#else
#set $disk_prefix = 'vd'
#set $disk_bus = 'virtio'
@ -28,7 +28,7 @@
#if $type == 'xen'
<cmdline>ro</cmdline>
#else
<cmdline>root=/dev/vda1 console=ttyS0</cmdline>
<cmdline>root=/dev/vda console=ttyS0</cmdline>
#end if
#if $getVar('ramdisk', None)
<initrd>${ramdisk}</initrd>
@ -46,18 +46,28 @@
<devices>
#if $getVar('rescue', False)
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/rescue-disk'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#else
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
#if $getVar('local', False)
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/local'/>
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#end if
#end if
<interface type='bridge'>
<source bridge='${bridge_name}'/>
@ -66,6 +76,7 @@
<filterref filter="nova-instance-${name}">
<parameter name="IP" value="${ip_address}" />
<parameter name="DHCPSERVER" value="${dhcp_server}" />
<parameter name="RASERVER" value="${ra_server}" />
#if $getVar('extra_params', False)
${extra_params}
#end if

View File

@ -58,9 +58,9 @@ from nova import log as logging
from nova import utils
#from nova.api import context
from nova.auth import manager
from nova.compute import disk
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import disk
from nova.virt import images
libvirt = None
@ -91,6 +91,9 @@ flags.DEFINE_string('libvirt_uri',
flags.DEFINE_bool('allow_project_net_traffic',
True,
'Whether to allow in project network traffic')
flags.DEFINE_bool('use_cow_images',
True,
'Whether to use cow images')
flags.DEFINE_string('ajaxterm_portrange',
'10000-12000',
'Range of ports that ajaxterm should randomly try to bind')
@ -127,6 +130,16 @@ def _get_net_and_mask(cidr):
return str(net.net()), str(net.netmask())
def _get_net_and_prefixlen(cidr):
net = IPy.IP(cidr)
return str(net.net()), str(net.prefixlen())
def _get_ip_version(cidr):
net = IPy.IP(cidr)
return int(net.version())
class LibvirtConnection(object):
def __init__(self, read_only):
@ -372,7 +385,6 @@ class LibvirtConnection(object):
instance['id'],
power_state.NOSTATE,
'launching')
self.nwfilter.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
self._create_image(instance, xml)
@ -480,19 +492,57 @@ class LibvirtConnection(object):
subprocess.Popen(cmd, shell=True)
return {'token': token, 'host': host, 'port': port}
def _cache_image(self, fn, target, fname, cow=False, *args, **kwargs):
"""Wrapper for a method that creates an image that caches the image.
This wrapper will save the image into a common store and create a
copy for use by the hypervisor.
The underlying method should specify a kwarg of target representing
where the image will be saved.
fname is used as the filename of the base image. The filename needs
to be unique to a given image.
If cow is True, it will make a CoW image instead of a copy.
"""
if not os.path.exists(target):
base_dir = os.path.join(FLAGS.instances_path, '_base')
if not os.path.exists(base_dir):
os.mkdir(base_dir)
os.chmod(base_dir, 0777)
base = os.path.join(base_dir, fname)
if not os.path.exists(base):
fn(target=base, *args, **kwargs)
if cow:
utils.execute('qemu-img create -f qcow2 -o '
'cluster_size=2M,backing_file=%s %s'
% (base, target))
else:
utils.execute('cp %s %s' % (base, target))
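# Editor's illustration (not part of this commit): the cache-then-instantiate
# pattern of _cache_image() as a standalone sketch. The base directory and the
# fetch callable are invented for the example.
import os
import subprocess

def cache_image(fetch_fn, target, fname, cow=False, base_dir='/tmp/_base'):
    if os.path.exists(target):
        return
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    base = os.path.join(base_dir, fname)
    if not os.path.exists(base):
        fetch_fn(target=base)                  # fetch once into the shared store
    if cow:
        # thin copy-on-write overlay backed by the cached base image
        subprocess.check_call(['qemu-img', 'create', '-f', 'qcow2',
                               '-o', 'backing_file=%s' % base, target])
    else:
        subprocess.check_call(['cp', base, target])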
def _fetch_image(self, target, image_id, user, project, size=None):
"""Grab image and optionally attempt to resize it"""
images.fetch(image_id, target, user, project)
if size:
disk.extend(target, size)
def _create_local(self, target, local_gb):
"""Create a blank image of specified size"""
utils.execute('truncate %s -s %dG' % (target, local_gb))
# TODO(vish): should we format disk by default?
def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
# syntactic nicety
basepath = lambda fname = '', prefix = prefix: os.path.join(
FLAGS.instances_path,
inst['name'],
prefix + fname)
def basepath(fname='', prefix=prefix):
return os.path.join(FLAGS.instances_path,
inst['name'],
prefix + fname)
# ensure directories exist and are writable
utils.execute('mkdir -p %s' % basepath(prefix=''))
utils.execute('chmod 0777 %s' % basepath(prefix=''))
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
@ -509,23 +559,44 @@ class LibvirtConnection(object):
disk_images = {'image_id': inst['image_id'],
'kernel_id': inst['kernel_id'],
'ramdisk_id': inst['ramdisk_id']}
if not os.path.exists(basepath('disk')):
images.fetch(inst.image_id, basepath('disk-raw'), user,
project)
if inst['kernel_id']:
if not os.path.exists(basepath('kernel')):
images.fetch(inst['kernel_id'], basepath('kernel'),
user, project)
if inst['ramdisk_id']:
if not os.path.exists(basepath('ramdisk')):
images.fetch(inst['ramdisk_id'], basepath('ramdisk'),
user, project)
if disk_images['kernel_id']:
self._cache_image(fn=self._fetch_image,
target=basepath('kernel'),
fname=disk_images['kernel_id'],
image_id=disk_images['kernel_id'],
user=user,
project=project)
if disk_images['ramdisk_id']:
self._cache_image(fn=self._fetch_image,
target=basepath('ramdisk'),
fname=disk_images['ramdisk_id'],
image_id=disk_images['ramdisk_id'],
user=user,
project=project)
def execute(cmd, process_input=None, check_exit_code=True):
return utils.execute(cmd=cmd,
process_input=process_input,
check_exit_code=check_exit_code)
root_fname = disk_images['image_id']
size = FLAGS.minimum_root_size
if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
size = None
root_fname += "_sm"
self._cache_image(fn=self._fetch_image,
target=basepath('disk'),
fname=root_fname,
cow=FLAGS.use_cow_images,
image_id=disk_images['image_id'],
user=user,
project=project,
size=size)
type_data = instance_types.INSTANCE_TYPES[inst['instance_type']]
if type_data['local_gb']:
self._cache_image(fn=self._create_local,
target=basepath('local'),
fname="local_%s" % type_data['local_gb'],
cow=FLAGS.use_cow_images,
local_gb=type_data['local_gb'])
# For now, we assume that if we're not using a kernel, we're using a
# partitioned disk image where the target partition is the first
@ -541,12 +612,16 @@ class LibvirtConnection(object):
if network_ref['injected']:
admin_context = context.get_admin_context()
address = db.instance_get_fixed_address(admin_context, inst['id'])
ra_server = network_ref['ra_server']
if not ra_server:
ra_server = "fd00::"
with open(FLAGS.injected_network_template) as f:
net = f.read() % {'address': address,
'netmask': network_ref['netmask'],
'gateway': network_ref['gateway'],
'broadcast': network_ref['broadcast'],
'dns': network_ref['dns']}
'dns': network_ref['dns'],
'ra_server': ra_server}
if key or net:
if key:
LOG.info(_('instance %s: injecting key into image %s'),
@ -555,34 +630,15 @@ class LibvirtConnection(object):
LOG.info(_('instance %s: injecting net into image %s'),
inst['name'], inst.image_id)
try:
disk.inject_data(basepath('disk-raw'), key, net,
disk.inject_data(basepath('disk'), key, net,
partition=target_partition,
execute=execute)
nbd=FLAGS.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
LOG.warn(_('instance %s: ignoring error injecting data'
' into image %s (%s)'),
inst['name'], inst.image_id, e)
if inst['kernel_id']:
if os.path.exists(basepath('disk')):
utils.execute('rm -f %s' % basepath('disk'))
local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type]
['local_gb']
* 1024 * 1024 * 1024)
resize = True
if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
resize = False
if inst['kernel_id']:
disk.partition(basepath('disk-raw'), basepath('disk'),
local_bytes, resize, execute=execute)
else:
os.rename(basepath('disk-raw'), basepath('disk'))
disk.extend(basepath('disk'), local_bytes, execute=execute)
if FLAGS.libvirt_type == 'uml':
utils.execute('sudo chown root %s' % basepath('disk'))
@ -601,15 +657,36 @@ class LibvirtConnection(object):
instance['id'])
# Assume that the gateway also acts as the dhcp server.
dhcp_server = network['gateway']
ra_server = network['ra_server']
if not ra_server:
ra_server = 'fd00::'
if FLAGS.allow_project_net_traffic:
net, mask = _get_net_and_mask(network['cidr'])
extra_params = ("<parameter name=\"PROJNET\" "
if FLAGS.use_ipv6:
net, mask = _get_net_and_mask(network['cidr'])
net_v6, prefixlen_v6 = _get_net_and_prefixlen(
network['cidr_v6'])
extra_params = ("<parameter name=\"PROJNET\" "
"value=\"%s\" />\n"
"<parameter name=\"PROJMASK\" "
"value=\"%s\" />\n") % (net, mask)
"value=\"%s\" />\n"
"<parameter name=\"PROJNETV6\" "
"value=\"%s\" />\n"
"<parameter name=\"PROJMASKV6\" "
"value=\"%s\" />\n") % \
(net, mask, net_v6, prefixlen_v6)
else:
net, mask = _get_net_and_mask(network['cidr'])
extra_params = ("<parameter name=\"PROJNET\" "
"value=\"%s\" />\n"
"<parameter name=\"PROJMASK\" "
"value=\"%s\" />\n") % \
(net, mask)
else:
extra_params = "\n"
if FLAGS.use_cow_images:
driver_type = 'qcow2'
else:
driver_type = 'raw'
xml_info = {'type': FLAGS.libvirt_type,
'name': instance['name'],
@ -621,8 +698,11 @@ class LibvirtConnection(object):
'mac_address': instance['mac_address'],
'ip_address': ip_address,
'dhcp_server': dhcp_server,
'ra_server': ra_server,
'extra_params': extra_params,
'rescue': rescue}
'rescue': rescue,
'local': instance_type['local_gb'],
'driver_type': driver_type}
if not rescue:
if instance['kernel_id']:
xml_info['kernel'] = xml_info['basepath'] + "/kernel"
@ -882,6 +962,15 @@ class NWFilterFirewall(FirewallDriver):
</rule>
</filter>'''
def nova_ra_filter(self):
return '''<filter name='nova-allow-ra-server' chain='root'>
<uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid>
<rule action='accept' direction='inout'
priority='100'>
<icmpv6 srcipaddr='$RASERVER'/>
</rule>
</filter>'''
def setup_basic_filtering(self, instance):
"""Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
logging.info('called setup_basic_filtering in nwfilter')
@ -910,9 +999,12 @@ class NWFilterFirewall(FirewallDriver):
self._define_filter(self.nova_base_ipv4_filter)
self._define_filter(self.nova_base_ipv6_filter)
self._define_filter(self.nova_dhcp_filter)
self._define_filter(self.nova_ra_filter)
self._define_filter(self.nova_vpn_filter)
if FLAGS.allow_project_net_traffic:
self._define_filter(self.nova_project_filter)
if FLAGS.use_ipv6:
self._define_filter(self.nova_project_filter_v6)
self.static_filters_configured = True
@ -944,13 +1036,13 @@ class NWFilterFirewall(FirewallDriver):
def nova_base_ipv6_filter(self):
retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
for protocol in ['tcp', 'udp', 'icmp']:
for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
for direction, action, priority in [('out', 'accept', 399),
('in', 'drop', 400)]:
retval += """<rule action='%s' direction='%s' priority='%d'>
<%s-ipv6 />
<%s />
</rule>""" % (action, direction,
priority, protocol)
priority, protocol)
retval += '</filter>'
return retval
@ -963,10 +1055,20 @@ class NWFilterFirewall(FirewallDriver):
retval += '</filter>'
return retval
def nova_project_filter_v6(self):
retval = "<filter name='nova-project-v6' chain='ipv6'>"
for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
retval += """<rule action='accept' direction='inout'
priority='200'>
<%s srcipaddr='$PROJNETV6'
srcipmask='$PROJMASKV6' />
</rule>""" % (protocol)
retval += '</filter>'
return retval
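# Editor's illustration (not part of this commit): the XML the method above
# produces for one of the three protocols (whitespace added for readability):
#
#   <filter name='nova-project-v6' chain='ipv6'>
#     <rule action='accept' direction='inout' priority='200'>
#       <tcp-ipv6 srcipaddr='$PROJNETV6' srcipmask='$PROJMASKV6' />
#     </rule>
#     ...
#   </filter>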
def _define_filter(self, xml):
if callable(xml):
xml = xml()
# execute in a native thread and block current greenthread until done
tpool.execute(self._conn.nwfilterDefineXML, xml)
@ -980,7 +1082,6 @@ class NWFilterFirewall(FirewallDriver):
it makes sure the filters for the security groups as well as
the base filter are all in place.
"""
if instance['image_id'] == FLAGS.vpn_image_id:
base_filter = 'nova-vpn'
else:
@ -992,11 +1093,15 @@ class NWFilterFirewall(FirewallDriver):
instance_secgroup_filter_children = ['nova-base-ipv4',
'nova-base-ipv6',
'nova-allow-dhcp-server']
if FLAGS.use_ipv6:
instance_secgroup_filter_children += ['nova-allow-ra-server']
ctxt = context.get_admin_context()
if FLAGS.allow_project_net_traffic:
instance_filter_children += ['nova-project']
if FLAGS.use_ipv6:
instance_filter_children += ['nova-project-v6']
for security_group in db.security_group_get_by_instance(ctxt,
instance['id']):
@ -1024,12 +1129,19 @@ class NWFilterFirewall(FirewallDriver):
security_group = db.security_group_get(context.get_admin_context(),
security_group_id)
rule_xml = ""
v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
for rule in security_group.rules:
rule_xml += "<rule action='accept' direction='in' priority='300'>"
if rule.cidr:
net, mask = _get_net_and_mask(rule.cidr)
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
(rule.protocol, net, mask)
version = _get_ip_version(rule.cidr)
if(FLAGS.use_ipv6 and version == 6):
net, prefixlen = _get_net_and_prefixlen(rule.cidr)
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
(v6protocol[rule.protocol], net, prefixlen)
else:
net, mask = _get_net_and_mask(rule.cidr)
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
(rule.protocol, net, mask)
if rule.protocol in ['tcp', 'udp']:
rule_xml += "dstportstart='%s' dstportend='%s' " % \
(rule.from_port, rule.to_port)
@ -1044,8 +1156,11 @@ class NWFilterFirewall(FirewallDriver):
rule_xml += '/>\n'
rule_xml += "</rule>\n"
xml = "<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>" % \
(security_group_id, rule_xml,)
xml = "<filter name='nova-secgroup-%s' " % security_group_id
if(FLAGS.use_ipv6):
xml += "chain='root'>%s</filter>" % rule_xml
else:
xml += "chain='ipv4'>%s</filter>" % rule_xml
return xml
def _instance_filter_name(self, instance):
@ -1082,11 +1197,17 @@ class IptablesFirewallDriver(FirewallDriver):
def apply_ruleset(self):
current_filter, _ = self.execute('sudo iptables-save -t filter')
current_lines = current_filter.split('\n')
new_filter = self.modify_rules(current_lines)
new_filter = self.modify_rules(current_lines, 4)
self.execute('sudo iptables-restore',
process_input='\n'.join(new_filter))
if(FLAGS.use_ipv6):
current_filter, _ = self.execute('sudo ip6tables-save -t filter')
current_lines = current_filter.split('\n')
new_filter = self.modify_rules(current_lines, 6)
self.execute('sudo ip6tables-restore',
process_input='\n'.join(new_filter))
def modify_rules(self, current_lines):
def modify_rules(self, current_lines, ip_version=4):
ctxt = context.get_admin_context()
# Remove any trace of nova rules.
new_filter = filter(lambda l: 'nova-' not in l, current_lines)
@ -1100,8 +1221,8 @@ class IptablesFirewallDriver(FirewallDriver):
if not new_filter[rules_index].startswith(':'):
break
our_chains = [':nova-ipv4-fallback - [0:0]']
our_rules = ['-A nova-ipv4-fallback -j DROP']
our_chains = [':nova-fallback - [0:0]']
our_rules = ['-A nova-fallback -j DROP']
our_chains += [':nova-local - [0:0]']
our_rules += ['-A FORWARD -j nova-local']
@ -1112,7 +1233,10 @@ class IptablesFirewallDriver(FirewallDriver):
for instance_id in self.instances:
instance = self.instances[instance_id]
chain_name = self._instance_chain_name(instance)
ip_address = self._ip_for_instance(instance)
if(ip_version == 4):
ip_address = self._ip_for_instance(instance)
elif(ip_version == 6):
ip_address = self._ip_for_instance_v6(instance)
our_chains += [':%s - [0:0]' % chain_name]
@ -1139,13 +1263,19 @@ class IptablesFirewallDriver(FirewallDriver):
our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]
# Allow DHCP responses
dhcp_server = self._dhcp_server_for_instance(instance)
our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' %
(chain_name, dhcp_server)]
if(ip_version == 4):
# Allow DHCP responses
dhcp_server = self._dhcp_server_for_instance(instance)
our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' %
(chain_name, dhcp_server)]
elif(ip_version == 6):
# Allow RA responses
ra_server = self._ra_server_for_instance(instance)
our_rules += ['-A %s -s %s -p icmpv6' %
(chain_name, ra_server)]
# If nothing matches, jump to the fallback chain
our_rules += ['-A %s -j nova-ipv4-fallback' % (chain_name,)]
our_rules += ['-A %s -j nova-fallback' % (chain_name,)]
# then, security group chains and rules
for security_group_id in security_groups:
@ -1158,15 +1288,22 @@ class IptablesFirewallDriver(FirewallDriver):
for rule in rules:
logging.info('%r', rule)
args = ['-A', chain_name, '-p', rule.protocol]
if rule.cidr:
args += ['-s', rule.cidr]
else:
if not rule.cidr:
# Eventually, a mechanism to grant access for security
# groups will turn up here. It'll use ipsets.
continue
version = _get_ip_version(rule.cidr)
if version != ip_version:
continue
protocol = rule.protocol
if version == 6 and rule.protocol == 'icmp':
protocol = 'icmpv6'
args = ['-A', chain_name, '-p', protocol, '-s', rule.cidr]
if rule.protocol in ['udp', 'tcp']:
if rule.from_port == rule.to_port:
args += ['--dport', '%s' % (rule.from_port,)]
@ -1186,7 +1323,12 @@ class IptablesFirewallDriver(FirewallDriver):
icmp_type_arg += '/%s' % icmp_code
if icmp_type_arg:
args += ['-m', 'icmp', '--icmp-type', icmp_type_arg]
if(ip_version == 4):
args += ['-m', 'icmp', '--icmp-type',
icmp_type_arg]
elif(ip_version == 6):
args += ['-m', 'icmp6', '--icmpv6-type',
icmp_type_arg]
args += ['-j ACCEPT']
our_rules += [' '.join(args)]
@ -1212,7 +1354,16 @@ class IptablesFirewallDriver(FirewallDriver):
return db.instance_get_fixed_address(context.get_admin_context(),
instance['id'])
def _ip_for_instance_v6(self, instance):
return db.instance_get_fixed_address_v6(context.get_admin_context(),
instance['id'])
def _dhcp_server_for_instance(self, instance):
network = db.project_get_network(context.get_admin_context(),
instance['project_id'])
return network['gateway']
def _ra_server_for_instance(self, instance):
network = db.project_get_network(context.get_admin_context(),
instance['project_id'])
return network['ra_server']

View File

@ -20,6 +20,11 @@ Management class for VM-related functions (spawn, reboot, etc).
"""
import json
import M2Crypto
import os
import subprocess
import tempfile
import uuid
from nova import db
from nova import context
@ -127,12 +132,31 @@ class VMOps(object):
"""Refactored out the common code of many methods that receive either
a vm name or a vm instance, and want a vm instance in return.
"""
vm = None
try:
instance_name = instance_or_vm.name
vm = VMHelper.lookup(self._session, instance_name)
except AttributeError:
# A vm opaque ref was passed
vm = instance_or_vm
if instance_or_vm.startswith("OpaqueRef:"):
# Got passed an opaque ref; return it
return instance_or_vm
else:
# Must be the instance name
instance_name = instance_or_vm
except (AttributeError, KeyError):
# Note the the KeyError will only happen with fakes.py
# Not a string; must be an ID or a vm instance
if isinstance(instance_or_vm, (int, long)):
ctx = context.get_admin_context()
try:
instance_obj = db.instance_get_by_id(ctx, instance_or_vm)
instance_name = instance_obj.name
except exception.NotFound:
# The unit tests screw this up, as they use an integer for
# the vm name. I'd fix that up, but that's a matter for
# another bug report. So for now, just try with the passed
# value
instance_name = instance_or_vm
else:
instance_name = instance_or_vm.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is None:
raise Exception(_('Instance not present %s') % instance_name)
return vm
@ -189,6 +213,44 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
self._session.wait_for_task(instance.id, task)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance. This is done via
an agent running on the VM. Communication between nova and the agent
is done via writing xenstore records. Since communication is done over
the XenAPI RPC calls, we need to encrypt the password. We're using a
simple Diffie-Hellman class instead of the more advanced one in
M2Crypto for compatibility with the agent code.
"""
# Need to uniquely identify this request.
transaction_id = str(uuid.uuid4())
# The simple Diffie-Hellman class is used to manage key exchange.
dh = SimpleDH()
args = {'id': transaction_id, 'pub': str(dh.get_public())}
resp = self._make_agent_call('key_init', instance, '', args)
if resp is None:
# No response from the agent
return
resp_dict = json.loads(resp)
# Successful return code from key_init is 'D0'
if resp_dict['returncode'] != 'D0':
# There was some sort of error; the message will contain
# a description of the error.
raise RuntimeError(resp_dict['message'])
agent_pub = int(resp_dict['message'])
dh.compute_shared(agent_pub)
enc_pass = dh.encrypt(new_pass)
# Send the encrypted password
args['enc_pass'] = enc_pass
resp = self._make_agent_call('password', instance, '', args)
if resp is None:
# No response from the agent
return
resp_dict = json.loads(resp)
# Successful return code from password is '0'
if resp_dict['returncode'] != '0':
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
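# Editor's illustration (not part of this commit): the shape of the two-step
# exchange driven above, with a stub standing in for _make_agent_call(). The
# 'D0'/'0' return codes mirror the checks in set_admin_password(); the payload
# values are invented.
import json

def stub_agent_call(method, args):
    if method == 'key_init':
        return json.dumps({'returncode': 'D0', 'message': '4371'})  # agent's DH public value
    if method == 'password':
        return json.dumps({'returncode': '0', 'message': 'password set'})

resp = json.loads(stub_agent_call('key_init', {'id': 'req-1', 'pub': '1829'}))
assert resp['returncode'] == 'D0'
agent_pub = int(resp['message'])          # would be fed into dh.compute_shared()
resp = json.loads(stub_agent_call('password', {'id': 'req-1', 'enc_pass': '<enc>'}))
assert resp['returncode'] == '0'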
def destroy(self, instance):
"""Destroy VM instance"""
vm = VMHelper.lookup(self._session, instance.name)
@ -246,30 +308,19 @@ class VMOps(object):
def suspend(self, instance, callback):
"""suspend the specified instance"""
instance_name = instance.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is None:
raise Exception(_("suspend: instance not present %s") %
instance_name)
vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.suspend', vm)
self._wait_with_callback(instance.id, task, callback)
def resume(self, instance, callback):
"""resume the specified instance"""
instance_name = instance.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is None:
raise Exception(_("resume: instance not present %s") %
instance_name)
vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
self._wait_with_callback(instance.id, task, callback)
def get_info(self, instance_id):
def get_info(self, instance):
"""Return data about VM instance"""
vm = VMHelper.lookup(self._session, instance_id)
if vm is None:
raise exception.NotFound(_('Instance not'
' found %s') % instance_id)
vm = self._get_vm_opaque_ref(instance)
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_info(rec)
@ -333,22 +384,34 @@ class VMOps(object):
return self._make_plugin_call('xenstore.py', method=method, vm=vm,
path=path, addl_args=addl_args)
def _make_agent_call(self, method, vm, path, addl_args={}):
"""Abstracts out the interaction with the agent xenapi plugin."""
return self._make_plugin_call('agent', method=method, vm=vm,
path=path, addl_args=addl_args)
def _make_plugin_call(self, plugin, method, vm, path, addl_args={}):
"""Abstracts out the process of calling a method of a xenapi plugin.
Any errors raised by the plugin will in turn raise a RuntimeError here.
"""
instance_id = vm.id
vm = self._get_vm_opaque_ref(vm)
rec = self._session.get_xenapi().VM.get_record(vm)
args = {'dom_id': rec['domid'], 'path': path}
args.update(addl_args)
# If the 'testing_mode' attribute is set, add that to the args.
if getattr(self, 'testing_mode', False):
args['testing_mode'] = 'true'
try:
task = self._session.async_call_plugin(plugin, method, args)
ret = self._session.wait_for_task(0, task)
ret = self._session.wait_for_task(instance_id, task)
except self.XenAPI.Failure, e:
raise RuntimeError("%s" % e.details[-1])
ret = None
err_trace = e.details[-1]
err_msg = err_trace.splitlines()[-1]
strargs = str(args)
if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'VM id=%(instance_id)s; args=%(strargs)s') % locals())
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'VM id=%(instance_id)s; args=%(strargs)s') % locals())
return ret
def add_to_xenstore(self, vm, path, key, value):
@ -460,3 +523,89 @@ class VMOps(object):
"""Removes all data from the xenstore parameter record for this VM."""
self.write_to_param_xenstore(instance_or_vm, {})
########################################################################
def _runproc(cmd):
pipe = subprocess.PIPE
return subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
stderr=pipe, close_fds=True)
class SimpleDH(object):
"""This class wraps all the functionality needed to implement
basic Diffie-Hellman-Merkle key exchange in Python. It features
intelligent defaults for the prime and base numbers needed for the
calculation, while allowing you to supply your own. It requires that
the openssl binary be installed on the system on which this is run,
as it uses that to handle the encryption and decryption. If openssl
is not available, a RuntimeError will be raised.
"""
def __init__(self, prime=None, base=None, secret=None):
"""You can specify the values for prime and base if you wish;
otherwise, reasonable default values will be used.
"""
if prime is None:
self._prime = 162259276829213363391578010288127
else:
self._prime = prime
if base is None:
self._base = 5
else:
self._base = base
self._shared = self._public = None
self._dh = M2Crypto.DH.set_params(
self.dec_to_mpi(self._prime),
self.dec_to_mpi(self._base))
self._dh.gen_key()
self._public = self.mpi_to_dec(self._dh.pub)
def get_public(self):
return self._public
def compute_shared(self, other):
self._shared = self.bin_to_dec(
self._dh.compute_key(self.dec_to_mpi(other)))
return self._shared
def mpi_to_dec(self, mpi):
bn = M2Crypto.m2.mpi_to_bn(mpi)
hexval = M2Crypto.m2.bn_to_hex(bn)
dec = int(hexval, 16)
return dec
def bin_to_dec(self, binval):
bn = M2Crypto.m2.bin_to_bn(binval)
hexval = M2Crypto.m2.bn_to_hex(bn)
dec = int(hexval, 16)
return dec
def dec_to_mpi(self, dec):
bn = M2Crypto.m2.dec_to_bn('%s' % dec)
mpi = M2Crypto.m2.bn_to_mpi(bn)
return mpi
def _run_ssl(self, text, which):
base_cmd = ('cat %(tmpfile)s | openssl enc -aes-128-cbc '
'-a -pass pass:%(shared)s -nosalt %(dec_flag)s')
if which.lower()[0] == 'd':
dec_flag = ' -d'
else:
dec_flag = ''
fd, tmpfile = tempfile.mkstemp()
os.close(fd)
file(tmpfile, 'w').write(text)
shared = self._shared
cmd = base_cmd % locals()
proc = _runproc(cmd)
proc.wait()
err = proc.stderr.read()
if err:
raise RuntimeError(_('OpenSSL error: %s') % err)
return proc.stdout.read()
def encrypt(self, text):
return self._run_ssl(text, 'enc')
def decrypt(self, text):
return self._run_ssl(text, 'dec')
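# Editor's illustration (not part of this commit): the Diffie-Hellman exchange
# SimpleDH performs, shown with toy numbers instead of M2Crypto/openssl.
prime, base = 23, 5                            # deliberately tiny demo parameters
alice_secret, bob_secret = 6, 15               # private values, never exchanged
alice_pub = pow(base, alice_secret, prime)     # what get_public() hands out
bob_pub = pow(base, bob_secret, prime)
alice_shared = pow(bob_pub, alice_secret, prime)   # compute_shared() on each side
bob_shared = pow(alice_pub, bob_secret, prime)
assert alice_shared == bob_shared              # both ends derive the same secret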

View File

@ -149,6 +149,10 @@ class XenAPIConnection(object):
"""Reboot VM instance"""
self._vmops.reboot(instance)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance"""
self._vmops.set_admin_password(instance, new_pass)
def destroy(self, instance):
"""Destroy VM instance"""
self._vmops.destroy(instance)
@ -266,7 +270,8 @@ class XenAPISession(object):
def _poll_task(self, id, task, done):
"""Poll the given XenAPI task, and fire the given action if we
get a result."""
get a result.
"""
try:
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)

View File

@ -371,3 +371,52 @@ class RBDDriver(VolumeDriver):
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host"""
pass
class SheepdogDriver(VolumeDriver):
"""Executes commands relating to Sheepdog Volumes"""
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
try:
(out, err) = self._execute("collie cluster info")
if not out.startswith('running'):
raise exception.Error(_("Sheepdog is not working: %s") % out)
except exception.ProcessExecutionError:
raise exception.Error(_("Sheepdog is not working"))
def create_volume(self, volume):
"""Creates a sheepdog volume"""
if int(volume['size']) == 0:
sizestr = '100M'
else:
sizestr = '%sG' % volume['size']
self._try_execute("qemu-img create sheepdog:%s %s" %
(volume['name'], sizestr))
def delete_volume(self, volume):
"""Deletes a logical volume"""
self._try_execute("collie vdi delete %s" % volume['name'])
def local_path(self, volume):
return "sheepdog:%s" % volume['name']
def ensure_export(self, context, volume):
"""Safely and synchronously recreates an export for a logical volume"""
pass
def create_export(self, context, volume):
"""Exports the volume"""
pass
def remove_export(self, context, volume):
"""Removes an export for a logical volume"""
pass
def discover_volume(self, volume):
"""Discover volume on a remote host"""
return "sheepdog:%s" % volume['name']
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host"""
pass
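# Editor's illustration (not part of this commit): the exact command strings
# the driver above hands to _try_execute() for a hypothetical 1 GB volume row.
volume = {'name': 'volume-00000001', 'size': 1}
sizestr = '100M' if int(volume['size']) == 0 else '%sG' % volume['size']
print("qemu-img create sheepdog:%s %s" % (volume['name'], sizestr))
# -> qemu-img create sheepdog:volume-00000001 1G
print("collie vdi delete %s" % volume['name'])
# -> collie vdi delete volume-00000001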

View File

@ -21,7 +21,6 @@
Utility methods for working with WSGI servers
"""
import json
import os
import sys
from xml.dom import minidom
@ -39,6 +38,7 @@ from paste import deploy
from nova import flags
from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
@ -124,20 +124,38 @@ class Application(object):
class Middleware(Application):
"""
Base WSGI middleware wrapper. These classes require an application to be
"""Base WSGI middleware.
These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
def __init__(self, application): # pylint: disable-msg=W0231
def __init__(self, application):
self.application = application
def process_request(self, req):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify
def __call__(self, req): # pylint: disable-msg=W0221
"""Override to implement middleware behavior."""
return self.application
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
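# Editor's illustration (not part of this commit): a middleware built on the
# process_request/process_response hooks above. The header names and responses
# are invented; it assumes webob and the Middleware class from this module.
class ExampleHeaderFilter(Middleware):
    def process_request(self, req):
        # Returning a response here short-circuits the wrapped application.
        if req.headers.get('X-Block-Me') == 'true':
            return webob.Response(status='403 Forbidden', body='blocked')
        return None

    def process_response(self, response):
        # Otherwise the wrapped app's response passes through here.
        response.headers['X-Filtered-By'] = 'ExampleHeaderFilter'
        return response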
class Debug(Middleware):
@ -323,7 +341,7 @@ class Serializer(object):
try:
is_xml = (datastring[0] == '<')
if not is_xml:
return json.loads(datastring)
return utils.loads(datastring)
return self._from_xml(datastring)
except:
return None
@ -356,7 +374,7 @@ class Serializer(object):
return result
def _to_json(self, data):
return json.dumps(data)
return utils.dumps(data)
def _to_xml(self, data):
metadata = self.metadata.get('application/xml', {})

View File

@ -0,0 +1,126 @@
#!/usr/bin/env python
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# XenAPI plugin for communicating with the guest agent via xenstore
#
try:
import json
except ImportError:
import simplejson as json
import os
import random
import subprocess
import tempfile
import time
import XenAPIPlugin
from pluginlib_nova import *
configure_logging("xenstore")
import xenstore
AGENT_TIMEOUT = 30
def jsonify(fnc):
def wrapper(*args, **kwargs):
return json.dumps(fnc(*args, **kwargs))
return wrapper
class TimeoutError(StandardError):
pass
@jsonify
def key_init(self, arg_dict):
"""Handles the Diffie-Hellman key exchange with the agent to
establish the shared secret key used to encrypt/decrypt sensitive
info to be passed, such as passwords. Returns the shared
secret key value.
"""
pub = int(arg_dict["pub"])
arg_dict["value"] = json.dumps({"name": "keyinit", "value": pub})
request_id = arg_dict["id"]
arg_dict["path"] = "data/host/%s" % request_id
xenstore.write_record(self, arg_dict)
try:
resp = _wait_for_agent(self, request_id, arg_dict)
except TimeoutError, e:
raise PluginError("%s" % e)
return resp
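For background on the exchange key_init describes (an illustrative sketch of the arithmetic only; the actual parameters and the agent's half of the protocol live in the guest agent, not in this plugin), Diffie-Hellman lets both ends derive the same secret from the public values they swap:

    import random

    # Textbook-sized numbers purely for illustration; a real exchange uses
    # large, well-known prime/base parameters.
    prime, base = 23, 5
    host_priv = random.randint(2, prime - 2)
    agent_priv = random.randint(2, prime - 2)
    host_pub = pow(base, host_priv, prime)     # a public value like arg_dict["pub"]
    agent_pub = pow(base, agent_priv, prime)
    # Both sides now compute the identical shared secret.
    assert pow(agent_pub, host_priv, prime) == pow(host_pub, agent_priv, prime)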
@jsonify
def password(self, arg_dict):
"""Writes a request to xenstore that tells the agent to set
the root password for the given VM. The password should be
encrypted using the shared secret key that was returned by a
previous call to key_init. The encrypted password value should
be passed as the value for the 'enc_pass' key in arg_dict.
"""
pub = int(arg_dict["pub"])
enc_pass = arg_dict["enc_pass"]
arg_dict["value"] = json.dumps({"name": "password", "value": enc_pass})
request_id = arg_dict["id"]
arg_dict["path"] = "data/host/%s" % request_id
xenstore.write_record(self, arg_dict)
try:
resp = _wait_for_agent(self, request_id, arg_dict)
except TimeoutError, e:
raise PluginError("%s" % e)
return resp
def _wait_for_agent(self, request_id, arg_dict):
"""Periodically checks xenstore for a response from the agent.
The request is always written to 'data/host/{id}', and
the agent's response for that request will be in 'data/guest/{id}'.
If no value appears from the agent within the time specified by
AGENT_TIMEOUT, the original request is deleted and a TimeoutError
is raised.
"""
arg_dict["path"] = "data/guest/%s" % request_id
arg_dict["ignore_missing_path"] = True
start = time.time()
while True:
if time.time() - start > AGENT_TIMEOUT:
# No response within the timeout period; bail out
# First, delete the request record
arg_dict["path"] = "data/host/%s" % request_id
xenstore.delete_record(self, arg_dict)
raise TimeoutError("TIMEOUT: No response from agent within %s seconds." %
AGENT_TIMEOUT)
ret = xenstore.read_record(self, arg_dict)
# Note: the response for None will be a string that includes
# double quotes.
if ret != '"None"':
# The agent responded
return ret
else:
time.sleep(3)
if __name__ == "__main__":
XenAPIPlugin.dispatch(
{"key_init": key_init,
"password": password})

View File

@@ -43,7 +43,7 @@ flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
# TODO(devamcar): Use random tempfile
ZIP_FILENAME = '/tmp/nova-me-x509.zip'
TEST_PREFIX = 'test%s' % int(random.random()*1000000)
TEST_PREFIX = 'test%s' % int(random.random() * 1000000)
TEST_USERNAME = '%suser' % TEST_PREFIX
TEST_PROJECTNAME = '%sproject' % TEST_PREFIX
@@ -96,4 +96,3 @@ class UserTests(AdminSmokeTestCase):
if __name__ == "__main__":
suites = {'user': unittest.makeSuite(UserTests)}
sys.exit(base.run_tests(suites))

View File

@@ -17,6 +17,7 @@
# under the License.
import boto
import boto_v6
import commands
import httplib
import os
@@ -69,6 +70,17 @@ class SmokeTestCase(unittest.TestCase):
'test.')
parts = self.split_clc_url(clc_url)
if FLAGS.use_ipv6:
return boto_v6.connect_ec2(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
is_secure=parts['is_secure'],
region=RegionInfo(None,
'nova',
parts['ip']),
port=parts['port'],
path='/services/Cloud',
**kwargs)
return boto.connect_ec2(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
is_secure=parts['is_secure'],
@@ -115,7 +127,8 @@ class SmokeTestCase(unittest.TestCase):
return True
def upload_image(self, bucket_name, image):
cmd = 'euca-upload-bundle -b %s -m /tmp/%s.manifest.xml' % (bucket_name, image)
cmd = 'euca-upload-bundle -b '
cmd += '%s -m /tmp/%s.manifest.xml' % (bucket_name, image)
status, output = commands.getstatusoutput(cmd)
if status != 0:
print '%s -> \n %s' % (cmd, output)
@@ -130,6 +143,7 @@ class SmokeTestCase(unittest.TestCase):
raise Exception(output)
return True
def run_tests(suites):
argv = FLAGS(sys.argv)
@@ -151,4 +165,3 @@ def run_tests(suites):
else:
for suite in suites.itervalues():
unittest.TextTestRunner(verbosity=2).run(suite)

View File

@@ -33,6 +33,7 @@ DEFINE_bool = DEFINE_bool
# __GLOBAL FLAGS ONLY__
# Define any app-specific flags in their own files, docs at:
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
DEFINE_string('region', 'nova', 'Region to use')
DEFINE_string('test_image', 'ami-tiny', 'Image to use for launch tests')
DEFINE_bool('use_ipv6', True, 'use IPv6 or not')

View File

@@ -0,0 +1,180 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import commands
import os
import random
import socket
import sys
import time
import unittest
from smoketests import flags
from smoketests import base
from smoketests import user_smoketests
# Note that this test should be run from a public network
# (outside of the private network segments).
# Please set EC2_URL correctly.
# You should use an admin account for this test.
FLAGS = flags.FLAGS
TEST_PREFIX = 'test%s' % int(random.random() * 1000000)
TEST_BUCKET = '%s_bucket' % TEST_PREFIX
TEST_KEY = '%s_key' % TEST_PREFIX
TEST_KEY2 = '%s_key2' % TEST_PREFIX
TEST_DATA = {}
class InstanceTestsFromPublic(user_smoketests.UserSmokeTestCase):
def test_001_can_create_keypair(self):
key = self.create_key_pair(self.conn, TEST_KEY)
self.assertEqual(key.name, TEST_KEY)
def test_002_security_group(self):
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
group = self.conn.create_security_group(security_group_name,
'test group')
group.connection = self.conn
group.authorize('tcp', 22, 22, '0.0.0.0/0')
if FLAGS.use_ipv6:
group.authorize('tcp', 22, 22, '::/0')
reservation = self.conn.run_instances(FLAGS.test_image,
key_name=TEST_KEY,
security_groups=[security_group_name],
instance_type='m1.tiny')
self.data['security_group_name'] = security_group_name
self.data['group'] = group
self.data['instance_id'] = reservation.instances[0].id
def test_003_instance_with_group_runs_within_60_seconds(self):
reservations = self.conn.get_all_instances([self.data['instance_id']])
instance = reservations[0].instances[0]
# allow 60 seconds to exit pending with IP
for x in xrange(60):
instance.update()
if instance.state == u'running':
break
time.sleep(1)
else:
self.fail('instance failed to start')
ip = reservations[0].instances[0].private_dns_name
self.failIf(ip == '0.0.0.0')
self.data['private_ip'] = ip
if FLAGS.use_ipv6:
ipv6 = reservations[0].instances[0].dns_name_v6
self.failIf(ipv6 is None)
self.data['ip_v6'] = ipv6
def test_004_can_ssh_to_ipv6(self):
if FLAGS.use_ipv6:
for x in xrange(20):
try:
conn = self.connect_ssh(
self.data['ip_v6'], TEST_KEY)
conn.close()
except Exception as ex:
print ex
time.sleep(1)
else:
break
else:
self.fail('could not ssh to instance')
def test_012_can_create_instance_with_keypair(self):
if 'instance_id' in self.data:
self.conn.terminate_instances([self.data['instance_id']])
reservation = self.conn.run_instances(FLAGS.test_image,
key_name=TEST_KEY,
instance_type='m1.tiny')
self.assertEqual(len(reservation.instances), 1)
self.data['instance_id'] = reservation.instances[0].id
def test_013_instance_runs_within_60_seconds(self):
reservations = self.conn.get_all_instances([self.data['instance_id']])
instance = reservations[0].instances[0]
# allow 60 seconds to exit pending with IP
for x in xrange(60):
instance.update()
if instance.state == u'running':
break
time.sleep(1)
else:
self.fail('instance failed to start')
ip = reservations[0].instances[0].private_dns_name
self.failIf(ip == '0.0.0.0')
self.data['private_ip'] = ip
if FLAGS.use_ipv6:
ipv6 = reservations[0].instances[0].dns_name_v6
self.failIf(ipv6 is None)
self.data['ip_v6'] = ipv6
def test_014_can_not_ping_private_ip(self):
for x in xrange(4):
# ping waits for 1 second
status, output = commands.getstatusoutput(
'ping -c1 %s' % self.data['private_ip'])
if status == 0:
self.fail('can ping private ip from public network')
if FLAGS.use_ipv6:
status, output = commands.getstatusoutput(
'ping6 -c1 %s' % self.data['ip_v6'])
if status == 0:
self.fail('can ping ipv6 from public network')
else:
pass
def test_015_can_not_ssh_to_private_ip(self):
for x in xrange(1):
try:
conn = self.connect_ssh(self.data['private_ip'], TEST_KEY)
conn.close()
except Exception:
time.sleep(1)
else:
self.fail('can ssh to ipv4 address from public network')
if FLAGS.use_ipv6:
for x in xrange(1):
try:
conn = self.connect_ssh(
self.data['ip_v6'], TEST_KEY)
conn.close()
except Exception:
time.sleep(1)
else:
self.fail('can ssh to ipv6 address from public network')
def test_999_tearDown(self):
self.delete_key_pair(self.conn, TEST_KEY)
security_group_name = self.data['security_group_name']
group = self.data['group']
if group:
group.revoke('tcp', 22, 22, '0.0.0.0/0')
if FLAGS.use_ipv6:
group.revoke('tcp', 22, 22, '::/0')
self.conn.delete_security_group(security_group_name)
if 'instance_id' in self.data:
self.conn.terminate_instances([self.data['instance_id']])
if __name__ == "__main__":
suites = {'instance': unittest.makeSuite(InstanceTestsFromPublic)}
sys.exit(base.run_tests(suites))

View File

@@ -45,7 +45,7 @@ flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz',
flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
'Local image file to use for bundling tests')
TEST_PREFIX = 'test%s' % int (random.random()*1000000)
TEST_PREFIX = 'test%s' % int(random.random() * 1000000)
TEST_BUCKET = '%s_bucket' % TEST_PREFIX
TEST_KEY = '%s_key' % TEST_PREFIX
TEST_GROUP = '%s_group' % TEST_PREFIX
@@ -80,7 +80,7 @@ class ImageTests(UserSmokeTestCase):
def test_006_can_register_kernel(self):
kernel_id = self.conn.register_image('%s/%s.manifest.xml' %
(TEST_BUCKET, FLAGS.bundle_kernel))
(TEST_BUCKET, FLAGS.bundle_kernel))
self.assert_(kernel_id is not None)
self.data['kernel_id'] = kernel_id
@@ -92,7 +92,7 @@ class ImageTests(UserSmokeTestCase):
time.sleep(1)
else:
print image.state
self.assert_(False) # wasn't available within 10 seconds
self.assert_(False) # wasn't available within 10 seconds
self.assert_(image.type == 'machine')
for i in xrange(10):
@@ -101,7 +101,7 @@ class ImageTests(UserSmokeTestCase):
break
time.sleep(1)
else:
self.assert_(False) # wasn't available within 10 seconds
self.assert_(False) # wasn't available within 10 seconds
self.assert_(kernel.type == 'kernel')
def test_008_can_describe_image_attribute(self):
@@ -152,14 +152,17 @@ class InstanceTests(UserSmokeTestCase):
for x in xrange(60):
instance.update()
if instance.state == u'running':
break
break
time.sleep(1)
else:
self.fail('instance failed to start')
ip = reservations[0].instances[0].private_dns_name
self.failIf(ip == '0.0.0.0')
self.data['private_ip'] = ip
print self.data['private_ip']
if FLAGS.use_ipv6:
ipv6 = reservations[0].instances[0].dns_name_v6
self.failIf(ipv6 is None)
self.data['ip_v6'] = ipv6
def test_004_can_ping_private_ip(self):
for x in xrange(120):
@@ -171,6 +174,16 @@ class InstanceTests(UserSmokeTestCase):
else:
self.fail('could not ping instance')
if FLAGS.use_ipv6:
for x in xrange(120):
# ping waits for 1 second
status, output = commands.getstatusoutput(
'ping6 -c1 %s' % self.data['ip_v6'])
if status == 0:
break
else:
self.fail('could not ping instance')
def test_005_can_ssh_to_private_ip(self):
for x in xrange(30):
try:
@@ -183,6 +196,19 @@ class InstanceTests(UserSmokeTestCase):
else:
self.fail('could not ssh to instance')
if FLAGS.use_ipv6:
for x in xrange(30):
try:
conn = self.connect_ssh(
self.data['ip_v6'], TEST_KEY)
conn.close()
except Exception:
time.sleep(1)
else:
break
else:
self.fail('could not ssh to instance v6')
def test_006_can_allocate_elastic_ip(self):
result = self.conn.allocate_address()
self.assertTrue(hasattr(result, 'public_ip'))
@@ -388,7 +414,6 @@ class SecurityGroupTests(UserSmokeTestCase):
raise Exception("Timeout")
time.sleep(1)
def test_999_tearDown(self):
self.conn.delete_key_pair(TEST_KEY)
self.conn.delete_security_group(TEST_GROUP)

View File

@@ -25,3 +25,4 @@ bzr
Twisted>=10.1.0
PasteDeploy
paste
netaddr