Merged with trunk

This commit is contained in:
Yuriy Taraday
2011-06-24 16:01:51 +04:00
13 changed files with 767 additions and 91 deletions

View File

@@ -97,7 +97,6 @@ flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager') flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager') flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DECLARE('gateway_v6', 'nova.network.manager') flags.DECLARE('gateway_v6', 'nova.network.manager')
flags.DECLARE('images_path', 'nova.image.local')
flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection') flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
flags.DEFINE_flag(flags.HelpFlag()) flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag()) flags.DEFINE_flag(flags.HelpshortFlag())
@@ -258,6 +257,11 @@ class RoleCommands(object):
"""adds role to user """adds role to user
if project is specified, adds project specific role if project is specified, adds project specific role
arguments: user, role [project]""" arguments: user, role [project]"""
if project:
projobj = self.manager.get_project(project)
if not projobj.has_member(user):
print "%s not a member of %s" % (user, project)
return
self.manager.add_role(user, role, project) self.manager.add_role(user, role, project)
def has(self, user, role, project=None): def has(self, user, role, project=None):
@@ -874,10 +878,10 @@ class InstanceTypeCommands(object):
try: try:
instance_types.create(name, memory, vcpus, local_gb, instance_types.create(name, memory, vcpus, local_gb,
flavorid, swap, rxtx_quota, rxtx_cap) flavorid, swap, rxtx_quota, rxtx_cap)
#except exception.InvalidInputException: except exception.InvalidInput:
# print "Must supply valid parameters to create instance_type" print "Must supply valid parameters to create instance_type"
# print e print e
# sys.exit(1) sys.exit(1)
except exception.ApiError, e: except exception.ApiError, e:
print "\n\n" print "\n\n"
print "\n%s" % e print "\n%s" % e
@@ -1056,16 +1060,6 @@ class ImageCommands(object):
machine_images = {} machine_images = {}
other_images = {} other_images = {}
directory = os.path.abspath(directory) directory = os.path.abspath(directory)
# NOTE(vish): If we're importing from the images path dir, attempt
# to move the files out of the way before importing
# so we aren't writing to the same directory. This
# may fail if the dir was a mountpoint.
if (FLAGS.image_service == 'nova.image.local.LocalImageService'
and directory == os.path.abspath(FLAGS.images_path)):
new_dir = "%s_bak" % directory
os.rename(directory, new_dir)
os.mkdir(directory)
directory = new_dir
for fn in glob.glob("%s/*/info.json" % directory): for fn in glob.glob("%s/*/info.json" % directory):
try: try:
image_path = os.path.join(fn.rpartition('/')[0], 'image') image_path = os.path.join(fn.rpartition('/')[0], 'image')
@@ -1082,6 +1076,70 @@ class ImageCommands(object):
self._convert_images(machine_images) self._convert_images(machine_images)
class AgentBuildCommands(object):
"""Class for managing agent builds."""
def create(self, os, architecture, version, url, md5hash,
hypervisor='xen'):
"""Creates a new agent build.
arguments: os architecture version url md5hash [hypervisor='xen']"""
ctxt = context.get_admin_context()
agent_build = db.agent_build_create(ctxt,
{'hypervisor': hypervisor,
'os': os,
'architecture': architecture,
'version': version,
'url': url,
'md5hash': md5hash})
def delete(self, os, architecture, hypervisor='xen'):
"""Deletes an existing agent build.
arguments: os architecture [hypervisor='xen']"""
ctxt = context.get_admin_context()
agent_build_ref = db.agent_build_get_by_triple(ctxt,
hypervisor, os, architecture)
db.agent_build_destroy(ctxt, agent_build_ref['id'])
def list(self, hypervisor=None):
"""Lists all agent builds.
arguments: <none>"""
fmt = "%-10s %-8s %12s %s"
ctxt = context.get_admin_context()
by_hypervisor = {}
for agent_build in db.agent_build_get_all(ctxt):
buildlist = by_hypervisor.get(agent_build.hypervisor)
if not buildlist:
buildlist = by_hypervisor[agent_build.hypervisor] = []
buildlist.append(agent_build)
for key, buildlist in by_hypervisor.iteritems():
if hypervisor and key != hypervisor:
continue
print "Hypervisor: %s" % key
print fmt % ('-' * 10, '-' * 8, '-' * 12, '-' * 32)
for agent_build in buildlist:
print fmt % (agent_build.os, agent_build.architecture,
agent_build.version, agent_build.md5hash)
print ' %s' % agent_build.url
print
def modify(self, os, architecture, version, url, md5hash,
hypervisor='xen'):
"""Update an existing agent build.
arguments: os architecture version url md5hash [hypervisor='xen']
"""
ctxt = context.get_admin_context()
agent_build_ref = db.agent_build_get_by_triple(ctxt,
hypervisor, os, architecture)
db.agent_build_update(ctxt, agent_build_ref['id'],
{'version': version,
'url': url,
'md5hash': md5hash})
class ConfigCommands(object): class ConfigCommands(object):
"""Class for exposing the flags defined by flag_file(s).""" """Class for exposing the flags defined by flag_file(s)."""
@@ -1094,6 +1152,7 @@ class ConfigCommands(object):
CATEGORIES = [ CATEGORIES = [
('account', AccountCommands), ('account', AccountCommands),
('agent', AgentBuildCommands),
('config', ConfigCommands), ('config', ConfigCommands),
('db', DbCommands), ('db', DbCommands),
('fixed', FixedIpCommands), ('fixed', FixedIpCommands),

View File

@@ -364,7 +364,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler') 'Manager for scheduler')
# The service to use for image search and retrieval # The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.local.LocalImageService', DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
'The service to use for retrieving and searching for images.') 'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(), DEFINE_string('host', socket.gethostname(),

View File

@@ -24,6 +24,7 @@ from nova import exception
from nova import flags from nova import flags
from nova import log as logging from nova import log as logging
from nova import rpc from nova import rpc
from nova import utils
from eventlet import greenpool from eventlet import greenpool
@@ -201,38 +202,78 @@ class RedirectResult(exception.Error):
class reroute_compute(object): class reroute_compute(object):
"""Decorator used to indicate that the method should """
delegate the call the child zones if the db query reroute_compute is responsible for trying to lookup a resource in the
can't find anything.""" current zone and if it's not found there, delegating the call to the
child zones.
Since reroute_compute will be making 'cross-zone' calls, the ID for the
object must come in as a UUID-- if we receive an integer ID, we bail.
The steps involved are:
1. Validate that item_id is UUID like
2. Lookup item by UUID in the zone local database
3. If the item was found, then extract integer ID, and pass that to
the wrapped method. (This ensures that zone-local code can
continue to use integer IDs).
4. If the item was not found, we delegate the call to a child zone
using the UUID.
"""
def __init__(self, method_name): def __init__(self, method_name):
self.method_name = method_name self.method_name = method_name
def __call__(self, f): def _route_to_child_zones(self, context, collection, item_uuid):
def wrapped_f(*args, **kwargs):
collection, context, item_id = \
self.get_collection_context_and_id(args, kwargs)
try:
# Call the original function ...
return f(*args, **kwargs)
except exception.InstanceNotFound, e:
LOG.debug(_("Instance %(item_id)s not found "
"locally: '%(e)s'" % locals()))
if not FLAGS.enable_zone_routing: if not FLAGS.enable_zone_routing:
raise raise exception.InstanceNotFound(instance_id=item_uuid)
zones = db.zone_get_all(context) zones = db.zone_get_all(context)
if not zones: if not zones:
raise raise exception.InstanceNotFound(instance_id=item_uuid)
# Ask the children to provide an answer ... # Ask the children to provide an answer ...
LOG.debug(_("Asking child zones ...")) LOG.debug(_("Asking child zones ..."))
result = self._call_child_zones(zones, result = self._call_child_zones(zones,
wrap_novaclient_function(_issue_novaclient_command, wrap_novaclient_function(_issue_novaclient_command,
collection, self.method_name, item_id)) collection, self.method_name, item_uuid))
# Scrub the results and raise another exception # Scrub the results and raise another exception
# so the API layers can bail out gracefully ... # so the API layers can bail out gracefully ...
raise RedirectResult(self.unmarshall_result(result)) raise RedirectResult(self.unmarshall_result(result))
def __call__(self, f):
def wrapped_f(*args, **kwargs):
collection, context, item_id_or_uuid = \
self.get_collection_context_and_id(args, kwargs)
attempt_reroute = False
if utils.is_uuid_like(item_id_or_uuid):
item_uuid = item_id_or_uuid
try:
instance = db.instance_get_by_uuid(context, item_uuid)
except exception.InstanceNotFound, e:
# NOTE(sirp): since a UUID was passed in, we can attempt
# to reroute to a child zone
attempt_reroute = True
LOG.debug(_("Instance %(item_uuid)s not found "
"locally: '%(e)s'" % locals()))
else:
# NOTE(sirp): since we're not re-routing in this case, and
# we were passed a UUID, we need to replace that UUID
# with an integer ID in the argument list so that the
# zone-local code can continue to use integer IDs.
item_id = instance['id']
args = list(args) # needs to be mutable to replace
self.replace_uuid_with_id(args, kwargs, item_id)
if attempt_reroute:
return self._route_to_child_zones(context, collection,
item_uuid)
else:
return f(*args, **kwargs)
return wrapped_f return wrapped_f
def _call_child_zones(self, zones, function): def _call_child_zones(self, zones, function):
@@ -251,6 +292,18 @@ class reroute_compute(object):
instance_id = args[2] instance_id = args[2]
return ("servers", context, instance_id) return ("servers", context, instance_id)
@staticmethod
def replace_uuid_with_id(args, kwargs, replacement_id):
"""
Extracts the UUID parameter from the arg or kwarg list and replaces
it with an integer ID.
"""
if 'instance_id' in kwargs:
kwargs['instance_id'] = replacement_id
elif len(args) > 1:
args.pop(2)
args.insert(2, replacement_id)
def unmarshall_result(self, zone_responses): def unmarshall_result(self, zone_responses):
"""Result is a list of responses from each child zone. """Result is a list of responses from each child zone.
Each decorator derivation is responsible to turning this Each decorator derivation is responsible to turning this

View File

@@ -39,7 +39,7 @@ flags.DEFINE_integer("max_networks", 1000,
class SimpleScheduler(chance.ChanceScheduler): class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host.""" """Implements Naive Scheduler that tries to find least loaded host."""
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs): def _schedule_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances.""" """Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id) instance_ref = db.instance_get(context, instance_id)
if (instance_ref['availability_zone'] if (instance_ref['availability_zone']
@@ -75,6 +75,12 @@ class SimpleScheduler(chance.ChanceScheduler):
" for this request. Is the appropriate" " for this request. Is the appropriate"
" service running?")) " service running?"))
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes.""" """Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id) volume_ref = db.volume_get(context, volume_id)

View File

@@ -32,7 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS['network_size'].SetDefault(8) FLAGS['network_size'].SetDefault(8)
FLAGS['num_networks'].SetDefault(2) FLAGS['num_networks'].SetDefault(2)
FLAGS['fake_network'].SetDefault(True) FLAGS['fake_network'].SetDefault(True)
FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService') FLAGS['image_service'].SetDefault('nova.image.fake.FakeImageService')
flags.DECLARE('num_shelves', 'nova.volume.driver') flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver') flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')

View File

@@ -0,0 +1,89 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
from nova import context
from nova import db
from nova import flags
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
from nova.api.ec2 import admin
from nova.image import fake
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.adminapi')
class AdminApiTestCase(test.TestCase):
def setUp(self):
super(AdminApiTestCase, self).setUp()
self.flags(connection_type='fake')
self.conn = rpc.Connection.instance()
# set up our cloud
self.api = admin.AdminController()
# set up services
self.compute = self.start_service('compute')
self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network')
self.volume = self.start_service('volume')
self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('proj', 'admin', 'proj')
self.context = context.RequestContext(user=self.user,
project=self.project)
host = self.network.get_network_host(self.context.elevated())
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine', 'image_state': 'available'}}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
# NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
rpc_cast = rpc.cast
def finish_cast(*args, **kwargs):
rpc_cast(*args, **kwargs)
greenthread.sleep(0.2)
self.stubs.Set(rpc, 'cast', finish_cast)
def tearDown(self):
network_ref = db.project_get_network(self.context,
self.project.id)
db.network_disassociate(self.context, network_ref['id'])
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(AdminApiTestCase, self).tearDown()
def test_block_external_ips(self):
"""Make sure provider firewall rules are created."""
result = self.api.block_external_addresses(self.context, '1.1.1.1/32')
self.assertEqual('OK', result['status'])
self.assertEqual('Added 3 rules', result['message'])

View File

@@ -89,7 +89,7 @@ class FakeHttplibConnection(object):
class XmlConversionTestCase(test.TestCase): class XmlConversionTestCase(test.TestCase):
"""Unit test api xml conversion""" """Unit test api xml conversion"""
def test_number_conversion(self): def test_number_conversion(self):
conv = apirequest._try_convert conv = ec2utils._try_convert
self.assertEqual(conv('None'), None) self.assertEqual(conv('None'), None)
self.assertEqual(conv('True'), True) self.assertEqual(conv('True'), True)
self.assertEqual(conv('False'), False) self.assertEqual(conv('False'), False)

View File

@@ -35,7 +35,7 @@ from nova import utils
from nova.auth import manager from nova.auth import manager
from nova.api.ec2 import cloud from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils from nova.api.ec2 import ec2utils
from nova.image import local from nova.image import fake
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
@@ -56,6 +56,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute') self.compute = self.start_service('compute')
self.scheduter = self.start_service('scheduler') self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network') self.network = self.start_service('network')
self.volume = self.start_service('volume')
self.image_service = utils.import_object(FLAGS.image_service) self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager() self.manager = manager.AuthManager()
@@ -69,8 +70,8 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine', 'image_state': 'available'}} 'type': 'machine', 'image_state': 'available'}}
self.stubs.Set(local.LocalImageService, 'show', fake_show) self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
# NOTE(vish): set up a manual wait so rpc.cast has a chance to finish # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
rpc_cast = rpc.cast rpc_cast = rpc.cast
@@ -303,7 +304,7 @@ class CloudTestCase(test.TestCase):
def fake_show_none(meh, context, id): def fake_show_none(meh, context, id):
raise exception.ImageNotFound(image_id='bad_image_id') raise exception.ImageNotFound(image_id='bad_image_id')
self.stubs.Set(local.LocalImageService, 'detail', fake_detail) self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
# list all # list all
result1 = describe_images(self.context) result1 = describe_images(self.context)
result1 = result1['imagesSet'][0] result1 = result1['imagesSet'][0]
@@ -317,8 +318,8 @@ class CloudTestCase(test.TestCase):
self.assertEqual(2, len(result3['imagesSet'])) self.assertEqual(2, len(result3['imagesSet']))
# provide an non-existing image_id # provide an non-existing image_id
self.stubs.UnsetAll() self.stubs.UnsetAll()
self.stubs.Set(local.LocalImageService, 'show', fake_show_none) self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none) self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none)
self.assertRaises(exception.ImageNotFound, describe_images, self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake']) self.context, ['ami-fake'])
@@ -329,8 +330,8 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'is_public': True} 'type': 'machine'}, 'is_public': True}
self.stubs.Set(local.LocalImageService, 'show', fake_show) self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
result = describe_image_attribute(self.context, 'ami-00000001', result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission') 'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission']) self.assertEqual([{'group': 'all'}], result['launchPermission'])
@@ -345,9 +346,9 @@ class CloudTestCase(test.TestCase):
def fake_update(meh, context, image_id, metadata, data=None): def fake_update(meh, context, image_id, metadata, data=None):
return metadata return metadata
self.stubs.Set(local.LocalImageService, 'show', fake_show) self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
self.stubs.Set(local.LocalImageService, 'update', fake_update) self.stubs.Set(fake._FakeImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001', result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add', 'launchPermission', 'add',
user_group=['all']) user_group=['all'])
@@ -359,7 +360,7 @@ class CloudTestCase(test.TestCase):
def fake_delete(self, context, id): def fake_delete(self, context, id):
return None return None
self.stubs.Set(local.LocalImageService, 'delete', fake_delete) self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
# valid image # valid image
result = deregister_image(self.context, 'ami-00000001') result = deregister_image(self.context, 'ami-00000001')
self.assertEqual(result['imageId'], 'ami-00000001') self.assertEqual(result['imageId'], 'ami-00000001')
@@ -369,18 +370,25 @@ class CloudTestCase(test.TestCase):
def fake_detail_empty(self, context): def fake_detail_empty(self, context):
return [] return []
self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty) self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
self.assertRaises(exception.ImageNotFound, deregister_image, self.assertRaises(exception.ImageNotFound, deregister_image,
self.context, 'ami-bad001') self.context, 'ami-bad001')
def test_console_output(self): def _run_instance(self, **kwargs):
instance_type = FLAGS.default_instance_type
max_count = 1
kwargs = {'image_id': 'ami-1',
'instance_type': instance_type,
'max_count': max_count}
rv = self.cloud.run_instances(self.context, **kwargs) rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId'] instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
def _run_instance_wait(self, **kwargs):
ec2_instance_id = self._run_instance(**kwargs)
self._wait_for_running(ec2_instance_id)
return ec2_instance_id
def test_console_output(self):
instance_id = self._run_instance(
image_id='ami-1',
instance_type=FLAGS.default_instance_type,
max_count=1)
output = self.cloud.get_console_output(context=self.context, output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id]) instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT')
@@ -389,9 +397,7 @@ class CloudTestCase(test.TestCase):
rv = self.cloud.terminate_instances(self.context, [instance_id]) rv = self.cloud.terminate_instances(self.context, [instance_id])
def test_ajax_console(self): def test_ajax_console(self):
kwargs = {'image_id': 'ami-1'} instance_id = self._run_instance(image_id='ami-1')
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
output = self.cloud.get_ajax_console(context=self.context, output = self.cloud.get_ajax_console(context=self.context,
instance_id=[instance_id]) instance_id=[instance_id])
self.assertEquals(output['url'], self.assertEquals(output['url'],
@@ -486,7 +492,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine'}} 'type': 'machine'}}
self.stubs.UnsetAll() self.stubs.UnsetAll()
self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state) self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
self.assertRaises(exception.ApiError, run_instances, self.assertRaises(exception.ApiError, run_instances,
self.context, **kwargs) self.context, **kwargs)
@@ -501,7 +507,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine', 'image_state': 'decrypting'}} 'type': 'machine', 'image_state': 'decrypting'}}
self.stubs.UnsetAll() self.stubs.UnsetAll()
self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt) self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
self.assertRaises(exception.ApiError, run_instances, self.assertRaises(exception.ApiError, run_instances,
self.context, **kwargs) self.context, **kwargs)
@@ -515,7 +521,7 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'status': 'active'} 'type': 'machine'}, 'status': 'active'}
self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active) self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
result = run_instances(self.context, **kwargs) result = run_instances(self.context, **kwargs)
self.assertEqual(len(result['instancesSet']), 1) self.assertEqual(len(result['instancesSet']), 1)
@@ -544,7 +550,9 @@ class CloudTestCase(test.TestCase):
def test_update_of_instance_wont_update_private_fields(self): def test_update_of_instance_wont_update_private_fields(self):
inst = db.instance_create(self.context, {}) inst = db.instance_create(self.context, {})
self.cloud.update_instance(self.context, inst['id'], ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3',
mac_address='DE:AD:BE:EF') mac_address='DE:AD:BE:EF')
inst = db.instance_get(self.context, inst['id']) inst = db.instance_get(self.context, inst['id'])
self.assertEqual(None, inst['mac_address']) self.assertEqual(None, inst['mac_address'])
@@ -567,3 +575,299 @@ class CloudTestCase(test.TestCase):
vol = db.volume_get(self.context, vol['id']) vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint']) self.assertEqual(None, vol['mountpoint'])
db.volume_destroy(self.context, vol['id']) db.volume_destroy(self.context, vol['id'])
def _restart_compute_service(self, periodic_interval=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
if periodic_interval:
self.compute = self.start_service(
'compute', periodic_interval=periodic_interval)
else:
self.compute = self.start_service('compute')
def _wait_for_state(self, ctxt, instance_id, predicate):
"""Wait for an stopping instance to be a given state"""
id = ec2utils.ec2_id_to_id(instance_id)
while True:
info = self.cloud.compute_api.get(context=ctxt, instance_id=id)
LOG.debug(info)
if predicate(info):
break
greenthread.sleep(1)
def _wait_for_running(self, instance_id):
def is_running(info):
return info['state_description'] == 'running'
self._wait_for_state(self.context, instance_id, is_running)
def _wait_for_stopped(self, instance_id):
def is_stopped(info):
return info['state_description'] == 'stopped'
self._wait_for_state(self.context, instance_id, is_stopped)
def _wait_for_terminate(self, instance_id):
def is_deleted(info):
return info['deleted']
elevated = self.context.elevated(read_deleted=True)
self._wait_for_state(elevated, instance_id, is_deleted)
def test_stop_start_instance(self):
"""Makes sure stop/start instance works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance_wait(**kwargs)
# a running instance can't be started. It is just ignored.
result = self.cloud.start_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
result = self.cloud.stop_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_stopped(instance_id)
result = self.cloud.start_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_running(instance_id)
result = self.cloud.stop_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_stopped(instance_id)
result = self.cloud.terminate_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._restart_compute_service()
def _volume_create(self):
kwargs = {'status': 'available',
'host': self.volume.host,
'size': 1,
'attach_status': 'detached', }
return db.volume_create(self.context, kwargs)
def _assert_volume_attached(self, vol, instance_id, mountpoint):
self.assertEqual(vol['instance_id'], instance_id)
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
def _assert_volume_detached(self, vol):
self.assertEqual(vol['instance_id'], None)
self.assertEqual(vol['mountpoint'], None)
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
def test_stop_start_with_volume(self):
"""Make sure run instance with block device mapping works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
vol1 = self._volume_create()
vol2 = self._volume_create()
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'volume_id': vol1['id'],
'delete_on_termination': False, },
{'device_name': '/dev/vdc',
'volume_id': vol2['id'],
'delete_on_termination': True, },
]}
ec2_instance_id = self._run_instance_wait(**kwargs)
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdb')
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
self.assertTrue(result)
self._wait_for_stopped(ec2_instance_id)
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_detached(vol)
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_detached(vol)
self.cloud.start_instances(self.context, [ec2_instance_id])
self._wait_for_running(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
vol['mountpoint'] == '/dev/vdc')
self.assertEqual(vol['instance_id'], instance_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.cloud.terminate_instances(self.context, [ec2_instance_id])
greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=False)
vol = db.volume_get(admin_ctxt, vol1['id'])
self.assertFalse(vol['deleted'])
db.volume_destroy(self.context, vol1['id'])
greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=True)
vol = db.volume_get(admin_ctxt, vol2['id'])
self.assertTrue(vol['deleted'])
self._restart_compute_service()
def test_stop_with_attached_volume(self):
"""Make sure attach info is reflected to block device mapping"""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
vol1 = self._volume_create()
vol2 = self._volume_create()
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'volume_id': vol1['id'],
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance_wait(**kwargs)
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 1)
for vol in vols:
self.assertEqual(vol['id'], vol1['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdb')
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_detached(vol)
self.cloud.compute_api.attach_volume(self.context,
instance_id=instance_id,
volume_id=vol2['id'],
device='/dev/vdc')
greenthread.sleep(0.3)
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
self.cloud.compute_api.detach_volume(self.context,
volume_id=vol1['id'])
greenthread.sleep(0.3)
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_detached(vol)
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
self.assertTrue(result)
self._wait_for_stopped(ec2_instance_id)
for vol_id in (vol1['id'], vol2['id']):
vol = db.volume_get(self.context, vol_id)
self._assert_volume_detached(vol)
self.cloud.start_instances(self.context, [ec2_instance_id])
self._wait_for_running(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 1)
for vol in vols:
self.assertEqual(vol['id'], vol2['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_detached(vol)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
greenthread.sleep(0.3)
for vol_id in (vol1['id'], vol2['id']):
vol = db.volume_get(self.context, vol_id)
self.assertEqual(vol['id'], vol_id)
self._assert_volume_detached(vol)
db.volume_destroy(self.context, vol_id)
self._restart_compute_service()
def _create_snapshot(self, ec2_volume_id):
result = self.cloud.create_snapshot(self.context,
volume_id=ec2_volume_id)
greenthread.sleep(0.3)
return result['snapshotId']
    def test_run_with_snapshot(self):
        """Makes sure run/stop/start instance with snapshot works."""
        # Create a source volume and take two snapshots of it.
        vol = self._volume_create()
        ec2_volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
        ec2_snapshot1_id = self._create_snapshot(ec2_volume_id)
        snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id)
        ec2_snapshot2_id = self._create_snapshot(ec2_volume_id)
        snapshot2_id = ec2utils.ec2_id_to_id(ec2_snapshot2_id)
        # Boot an instance with both snapshots in its block device mapping:
        # /dev/vdb keeps its volume after termination, /dev/vdc's volume is
        # deleted with the instance (delete_on_termination=True).
        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1,
                  'block_device_mapping': [{'device_name': '/dev/vdb',
                                            'snapshot_id': snapshot1_id,
                                            'delete_on_termination': False, },
                                           {'device_name': '/dev/vdc',
                                            'snapshot_id': snapshot2_id,
                                            'delete_on_termination': True}]}
        ec2_instance_id = self._run_instance_wait(**kwargs)
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        # Two volumes should have been created from the snapshots and
        # attached at the mapped device names; match each volume back to
        # its originating snapshot.
        vols = db.volume_get_all_by_instance(self.context, instance_id)
        self.assertEqual(len(vols), 2)
        vol1_id = None
        vol2_id = None
        for vol in vols:
            snapshot_id = vol['snapshot_id']
            if snapshot_id == snapshot1_id:
                vol1_id = vol['id']
                mountpoint = '/dev/vdb'
            elif snapshot_id == snapshot2_id:
                vol2_id = vol['id']
                mountpoint = '/dev/vdc'
            else:
                self.fail()
            self._assert_volume_attached(vol, instance_id, mountpoint)
        # Both snapshots must be accounted for.
        self.assertTrue(vol1_id)
        self.assertTrue(vol2_id)
        self.cloud.terminate_instances(self.context, [ec2_instance_id])
        greenthread.sleep(0.3)
        self._wait_for_terminate(ec2_instance_id)
        # The delete_on_termination=False volume survives termination and
        # is visible without read_deleted; destroy it manually.
        greenthread.sleep(0.3)
        admin_ctxt = context.get_admin_context(read_deleted=False)
        vol = db.volume_get(admin_ctxt, vol1_id)
        self._assert_volume_detached(vol)
        self.assertFalse(vol['deleted'])
        db.volume_destroy(self.context, vol1_id)
        greenthread.sleep(0.3)
        # The delete_on_termination=True volume was deleted with the
        # instance, so it is only visible via a read_deleted admin context.
        admin_ctxt = context.get_admin_context(read_deleted=True)
        vol = db.volume_get(admin_ctxt, vol2_id)
        self.assertTrue(vol['deleted'])
        # Clean up the snapshots, then hard-destroy the second volume's
        # record ('vol' still refers to vol2 from the lookup above).
        for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
            self.cloud.delete_snapshot(self.context, snapshot_id)
            greenthread.sleep(0.3)
        db.volume_destroy(self.context, vol['id'])

View File

@@ -22,21 +22,21 @@ Tests For Compute
import mox import mox
import stubout import stubout
from nova.auth import manager
from nova import compute from nova import compute
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova import context from nova import context
from nova import db from nova import db
from nova.db.sqlalchemy import models
from nova import exception from nova import exception
from nova import flags from nova import flags
import nova.image.fake
from nova import log as logging from nova import log as logging
from nova import rpc from nova import rpc
from nova import test from nova import test
from nova import utils from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.db.sqlalchemy import models
from nova.image import local
LOG = logging.getLogger('nova.tests.compute') LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
@@ -73,7 +73,7 @@ class ComputeTestCase(test.TestCase):
def fake_show(meh, context, id): def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(local.LocalImageService, 'show', fake_show) self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)
def tearDown(self): def tearDown(self):
self.manager.delete_user(self.user) self.manager.delete_user(self.user)
@@ -228,6 +228,21 @@ class ComputeTestCase(test.TestCase):
self.assert_(instance_ref['launched_at'] < terminate) self.assert_(instance_ref['launched_at'] < terminate)
self.assert_(instance_ref['deleted_at'] > terminate) self.assert_(instance_ref['deleted_at'] > terminate)
def test_stop(self):
"""Ensure instance can be stopped"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.stop_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_start(self):
"""Ensure instance can be started"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.stop_instance(self.context, instance_id)
self.compute.start_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_pause(self): def test_pause(self):
"""Ensure instance can be paused""" """Ensure instance can be paused"""
instance_id = self._create_instance() instance_id = self._create_instance()
@@ -266,6 +281,14 @@ class ComputeTestCase(test.TestCase):
"File Contents") "File Contents")
self.compute.terminate_instance(self.context, instance_id) self.compute.terminate_instance(self.context, instance_id)
def test_agent_update(self):
"""Ensure instance can have its agent updated"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.agent_update(self.context, instance_id,
'http://127.0.0.1/agent', '00112233445566778899aabbccddeeff')
self.compute.terminate_instance(self.context, instance_id)
def test_snapshot(self): def test_snapshot(self):
"""Ensure instance can be snapshotted""" """Ensure instance can be snapshotted"""
instance_id = self._create_instance() instance_id = self._create_instance()

View File

@@ -799,6 +799,8 @@ class IptablesFirewallTestCase(test.TestCase):
self.network = utils.import_object(FLAGS.network_manager) self.network = utils.import_object(FLAGS.network_manager)
class FakeLibvirtConnection(object): class FakeLibvirtConnection(object):
def nwfilterDefineXML(*args, **kwargs):
"""setup_basic_rules in nwfilter calls this."""
pass pass
self.fake_libvirt_connection = FakeLibvirtConnection() self.fake_libvirt_connection = FakeLibvirtConnection()
self.fw = firewall.IptablesFirewallDriver( self.fw = firewall.IptablesFirewallDriver(
@@ -1035,7 +1037,6 @@ class IptablesFirewallTestCase(test.TestCase):
fakefilter.filterDefineXMLMock fakefilter.filterDefineXMLMock
self.fw.nwfilter._conn.nwfilterLookupByName =\ self.fw.nwfilter._conn.nwfilterLookupByName =\
fakefilter.nwfilterLookupByName fakefilter.nwfilterLookupByName
instance_ref = self._create_instance_ref() instance_ref = self._create_instance_ref()
inst_id = instance_ref['id'] inst_id = instance_ref['id']
instance = db.instance_get(self.context, inst_id) instance = db.instance_get(self.context, inst_id)
@@ -1057,6 +1058,63 @@ class IptablesFirewallTestCase(test.TestCase):
db.instance_destroy(admin_ctxt, instance_ref['id']) db.instance_destroy(admin_ctxt, instance_ref['id'])
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
nw_info = _create_network_info(1)
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context, 'fake')
admin_ctxt = context.get_admin_context()
fixed_ip = {'address': ip, 'network_id': network_ref['id']}
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
'instance_id': instance_ref['id']})
# FRAGILE: peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
# create a firewall via setup_basic_filtering like libvirt_conn.spawn
# should have a chain with 0 rules
self.fw.setup_basic_filtering(instance_ref, network_info=nw_info)
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info=nw_info)
self.fw.apply_instance_filter(instance_ref)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
class NWFilterTestCase(test.TestCase): class NWFilterTestCase(test.TestCase):
def setUp(self): def setUp(self):

View File

@@ -164,3 +164,33 @@ class IptablesManagerTestCase(test.TestCase):
self.assertTrue('-A %s -j run_tests.py-%s' \ self.assertTrue('-A %s -j run_tests.py-%s' \
% (chain, chain) in new_lines, % (chain, chain) in new_lines,
"Built-in chain %s not wrapped" % (chain,)) "Built-in chain %s not wrapped" % (chain,))
def test_will_empty_chain(self):
self.manager.ipv4['filter'].add_chain('test-chain')
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
old_count = len(self.manager.ipv4['filter'].rules)
self.manager.ipv4['filter'].empty_chain('test-chain')
self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
def test_will_empty_unwrapped_chain(self):
self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
wrap=False)
old_count = len(self.manager.ipv4['filter'].rules)
self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
def test_will_not_empty_wrapped_when_unwrapped(self):
self.manager.ipv4['filter'].add_chain('test-chain')
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
old_count = len(self.manager.ipv4['filter'].rules)
self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))
def test_will_not_empty_unwrapped_when_wrapped(self):
self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
wrap=False)
old_count = len(self.manager.ipv4['filter'].rules)
self.manager.ipv4['filter'].empty_chain('test-chain')
self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))

View File

@@ -33,12 +33,12 @@ from nova import utils
from nova.auth import manager from nova.auth import manager
from nova.compute import instance_types from nova.compute import instance_types
from nova.compute import power_state from nova.compute import power_state
from nova import exception
from nova.virt import xenapi_conn from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import vm_utils from nova.virt.xenapi import vm_utils
from nova.virt.xenapi.vmops import SimpleDH
from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs from nova.tests.glance import stubs as glance_stubs
@@ -84,7 +84,8 @@ class XenAPIVolumeTestCase(test.TestCase):
'ramdisk_id': 3, 'ramdisk_id': 3,
'instance_type_id': '3', # m1.large 'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff', 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'} 'os_type': 'linux',
'architecture': 'x86-64'}
def _create_volume(self, size='0'): def _create_volume(self, size='0'):
"""Create a volume object.""" """Create a volume object."""
@@ -191,7 +192,7 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_get_this_vm_uuid(self.stubs) stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs) stubs.stubout_stream_disk(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs) stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(VMOps, 'reset_network', reset_network) self.stubs.Set(vmops.VMOps, 'reset_network', reset_network)
stubs.stub_out_vm_methods(self.stubs) stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs) glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs) fake_utils.stub_out_utils_execute(self.stubs)
@@ -211,7 +212,8 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': 3, 'ramdisk_id': 3,
'instance_type_id': '3', # m1.large 'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff', 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'} 'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, values) instance = db.instance_create(self.context, values)
self.conn.spawn(instance) self.conn.spawn(instance)
@@ -228,6 +230,23 @@ class XenAPIVMTestCase(test.TestCase):
instance = self._create_instance() instance = self._create_instance()
self.conn.get_diagnostics(instance) self.conn.get_diagnostics(instance)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(vm_ref, vdi_ref):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
name = "MySnapshot"
self.assertRaises(exception.Error, self.conn.snapshot, instance, name)
def test_instance_snapshot(self): def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs) stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance() instance = self._create_instance()
@@ -352,7 +371,8 @@ class XenAPIVMTestCase(test.TestCase):
def _test_spawn(self, image_ref, kernel_id, ramdisk_id, def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux", instance_type_id="3", os_type="linux",
instance_id=1, check_injection=False): architecture="x86-64", instance_id=1,
check_injection=False):
stubs.stubout_loopingcall_start(self.stubs) stubs.stubout_loopingcall_start(self.stubs)
values = {'id': instance_id, values = {'id': instance_id,
'project_id': self.project.id, 'project_id': self.project.id,
@@ -362,11 +382,14 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': ramdisk_id, 'ramdisk_id': ramdisk_id,
'instance_type_id': instance_type_id, 'instance_type_id': instance_type_id,
'mac_address': 'aa:bb:cc:dd:ee:ff', 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': os_type} 'os_type': os_type,
'architecture': architecture}
instance = db.instance_create(self.context, values) instance = db.instance_create(self.context, values)
self.conn.spawn(instance) self.conn.spawn(instance)
self.create_vm_record(self.conn, os_type, instance_id) self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection) self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance.os_type)
self.assertTrue(instance.architecture)
def test_spawn_not_enough_memory(self): def test_spawn_not_enough_memory(self):
FLAGS.xenapi_image_service = 'glance' FLAGS.xenapi_image_service = 'glance'
@@ -391,7 +414,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_vhd_glance_linux(self): def test_spawn_vhd_glance_linux(self):
FLAGS.xenapi_image_service = 'glance' FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux") os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux() self.check_vm_params_for_linux()
def test_spawn_vhd_glance_swapdisk(self): def test_spawn_vhd_glance_swapdisk(self):
@@ -420,7 +443,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_vhd_glance_windows(self): def test_spawn_vhd_glance_windows(self):
FLAGS.xenapi_image_service = 'glance' FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="windows") os_type="windows", architecture="i386")
self.check_vm_params_for_windows() self.check_vm_params_for_windows()
def test_spawn_glance(self): def test_spawn_glance(self):
@@ -571,7 +594,8 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': 3, 'ramdisk_id': 3,
'instance_type_id': '3', # m1.large 'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff', 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'} 'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, values) instance = db.instance_create(self.context, values)
self.conn.spawn(instance) self.conn.spawn(instance)
return instance return instance
@@ -581,8 +605,8 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
"""Unit tests for Diffie-Hellman code.""" """Unit tests for Diffie-Hellman code."""
def setUp(self): def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp() super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = SimpleDH() self.alice = vmops.SimpleDH()
self.bob = SimpleDH() self.bob = vmops.SimpleDH()
def test_shared(self): def test_shared(self):
alice_pub = self.alice.get_public() alice_pub = self.alice.get_public()
@@ -646,7 +670,8 @@ class XenAPIMigrateInstance(test.TestCase):
'local_gb': 5, 'local_gb': 5,
'instance_type_id': '3', # m1.large 'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff', 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'} 'os_type': 'linux',
'architecture': 'x86-64'}
fake_utils.stub_out_utils_execute(self.stubs) fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs) stubs.stub_out_migration_methods(self.stubs)
@@ -685,6 +710,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance = FakeInstance() self.fake_instance = FakeInstance()
self.fake_instance.id = 42 self.fake_instance.id = 42
self.fake_instance.os_type = 'linux' self.fake_instance.os_type = 'linux'
self.fake_instance.architecture = 'x86-64'
def assert_disk_type(self, disk_type): def assert_disk_type(self, disk_type):
dt = vm_utils.VMHelper.determine_disk_image_type( dt = vm_utils.VMHelper.determine_disk_image_type(
@@ -729,6 +755,28 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.assert_disk_type(vm_utils.ImageType.DISK_VHD) self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
"""Test that cmp_version compares a as less than b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
def test_greater_than(self):
"""Test that cmp_version compares a as greater than b"""
self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
def test_equal(self):
"""Test that cmp_version compares a as equal to b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
def test_non_lexical(self):
"""Test that cmp_version compares non-lexically"""
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
def test_length(self):
"""Test that cmp_version compares by length as last resort"""
self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
class FakeXenApi(object): class FakeXenApi(object):
"""Fake XenApi for testing HostState.""" """Fake XenApi for testing HostState."""

View File

@@ -211,6 +211,12 @@ class NovaTestResult(result.TextTestResult):
break break
sys.stdout = stdout sys.stdout = stdout
# NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
# error results in it failing to be initialized later. Otherwise,
# _handleElapsedTime will fail, causing the wrong error message to
# be outputted.
self.start_time = time.time()
def getDescription(self, test): def getDescription(self, test):
return str(test) return str(test)