Merge trunk
bin/nova-manage | 157
@@ -55,6 +55,8 @@
 
 import datetime
 import gettext
+import glob
+import json
 import os
 import re
 import sys
@@ -81,7 +83,7 @@ from nova import log as logging
 from nova import quota
 from nova import rpc
 from nova import utils
-from nova.api.ec2.cloud import ec2_id_to_id
+from nova.api.ec2 import ec2utils
 from nova.auth import manager
 from nova.cloudpipe import pipelib
 from nova.compute import instance_types
@@ -94,6 +96,7 @@ flags.DECLARE('network_size', 'nova.network.manager')
 flags.DECLARE('vlan_start', 'nova.network.manager')
 flags.DECLARE('vpn_start', 'nova.network.manager')
 flags.DECLARE('fixed_range_v6', 'nova.network.manager')
+flags.DECLARE('images_path', 'nova.image.local')
 flags.DEFINE_flag(flags.HelpFlag())
 flags.DEFINE_flag(flags.HelpshortFlag())
 flags.DEFINE_flag(flags.HelpXMLFlag())
@@ -104,7 +107,7 @@ def param2id(object_id):
     args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
     """
    if '-' in object_id:
-        return ec2_id_to_id(object_id)
+        return ec2utils.ec2_id_to_id(object_id)
     else:
         return int(object_id)
 
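The helper behaves the same after the import move; only where ec2_id_to_id lives changes. A quick sketch of what param2id resolves, assuming ec2utils.ec2_id_to_id parses the hex suffix of EC2-style ids (the values below are illustrative, not from this commit):

    param2id('vol-0000000a')   # EC2-style id -> 10, via ec2utils.ec2_id_to_id
    param2id('10')             # plain id -> 10, via int()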
@@ -744,6 +747,155 @@ class InstanceTypeCommands(object):
         self._print_instance_types(name, inst_types)
 
 
+class ImageCommands(object):
+    """Methods for dealing with a cloud in an odd state"""
+
+    def __init__(self, *args, **kwargs):
+        self.image_service = utils.import_object(FLAGS.image_service)
+
+    def _register(self, image_type, disk_format, container_format,
+                  path, owner, name=None, is_public='T',
+                  architecture='x86_64', kernel_id=None, ramdisk_id=None):
+        meta = {'is_public': True,
+                'name': name,
+                'disk_format': disk_format,
+                'container_format': container_format,
+                'properties': {'image_state': 'available',
+                               'owner': owner,
+                               'type': image_type,
+                               'architecture': architecture,
+                               'image_location': 'local',
+                               'is_public': (is_public == 'T')}}
+        print image_type, meta
+        if kernel_id:
+            meta['properties']['kernel_id'] = int(kernel_id)
+        if ramdisk_id:
+            meta['properties']['ramdisk_id'] = int(ramdisk_id)
+        elevated = context.get_admin_context()
+        try:
+            with open(path) as ifile:
+                image = self.image_service.create(elevated, meta, ifile)
+            new = image['id']
+            print _("Image registered to %(new)s (%(new)08x).") % locals()
+            return new
+        except Exception as exc:
+            print _("Failed to register %(path)s: %(exc)s") % locals()
+
+    def all_register(self, image, kernel, ramdisk, owner, name=None,
+                     is_public='T', architecture='x86_64'):
+        """Uploads an image, kernel, and ramdisk into the image_service
+        arguments: image kernel ramdisk owner [name] [is_public='T']
+                   [architecture='x86_64']"""
+        kernel_id = self.kernel_register(kernel, owner, None,
+                                         is_public, architecture)
+        ramdisk_id = self.ramdisk_register(ramdisk, owner, None,
+                                           is_public, architecture)
+        self.image_register(image, owner, name, is_public,
+                            architecture, kernel_id, ramdisk_id)
+
+    def image_register(self, path, owner, name=None, is_public='T',
+                       architecture='x86_64', kernel_id=None, ramdisk_id=None,
+                       disk_format='ami', container_format='ami'):
+        """Uploads an image into the image_service
+        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+                   [kernel_id=None] [ramdisk_id=None]
+                   [disk_format='ami'] [container_format='ami']"""
+        return self._register('machine', disk_format, container_format, path,
+                              owner, name, is_public, architecture,
+                              kernel_id, ramdisk_id)
+
+    def kernel_register(self, path, owner, name=None, is_public='T',
+                        architecture='x86_64'):
+        """Uploads a kernel into the image_service
+        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+        """
+        return self._register('kernel', 'aki', 'aki', path, owner, name,
+                              is_public, architecture)
+
+    def ramdisk_register(self, path, owner, name=None, is_public='T',
+                         architecture='x86_64'):
+        """Uploads a ramdisk into the image_service
+        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+        """
+        return self._register('ramdisk', 'ari', 'ari', path, owner, name,
+                              is_public, architecture)
+
+    def _lookup(self, old_image_id):
+        try:
+            internal_id = ec2utils.ec2_id_to_id(old_image_id)
+            image = self.image_service.show(context, internal_id)
+        except exception.NotFound:
+            image = self.image_service.show_by_name(context, old_image_id)
+        return image['id']
+
+    def _old_to_new(self, old):
+        mapping = {'machine': 'ami',
+                   'kernel': 'aki',
+                   'ramdisk': 'ari'}
+        container_format = mapping[old['type']]
+        disk_format = container_format
+        new = {'disk_format': disk_format,
+               'container_format': container_format,
+               'is_public': True,
+               'name': old['imageId'],
+               'properties': {'image_state': old['imageState'],
+                              'owner': old['imageOwnerId'],
+                              'architecture': old['architecture'],
+                              'type': old['type'],
+                              'image_location': old['imageLocation'],
+                              'is_public': old['isPublic']}}
+        if old.get('kernelId'):
+            new['properties']['kernel_id'] = self._lookup(old['kernelId'])
+        if old.get('ramdiskId'):
+            new['properties']['ramdisk_id'] = self._lookup(old['ramdiskId'])
+        return new
+
+    def _convert_images(self, images):
+        elevated = context.get_admin_context()
+        for image_path, image_metadata in images.iteritems():
+            meta = self._old_to_new(image_metadata)
+            old = meta['name']
+            try:
+                with open(image_path) as ifile:
+                    image = self.image_service.create(elevated, meta, ifile)
+                new = image['id']
+                print _("Image %(old)s converted to " \
+                        "%(new)s (%(new)08x).") % locals()
+            except Exception as exc:
+                print _("Failed to convert %(old)s: %(exc)s") % locals()
+
+    def convert(self, directory):
+        """Uploads old objectstore images in directory to new service
+        arguments: directory"""
+        machine_images = {}
+        other_images = {}
+        directory = os.path.abspath(directory)
+        # NOTE(vish): If we're importing from the images path dir, attempt
+        #             to move the files out of the way before importing
+        #             so we aren't writing to the same directory. This
+        #             may fail if the dir was a mountpoint.
+        if (FLAGS.image_service == 'nova.image.local.LocalImageService'
+            and directory == os.path.abspath(FLAGS.images_path)):
+            new_dir = "%s_bak" % directory
+            os.rename(directory, new_dir)
+            os.mkdir(directory)
+            directory = new_dir
+        for fn in glob.glob("%s/*/info.json" % directory):
+            try:
+                image_path = os.path.join(fn.rpartition('/')[0], 'image')
+                with open(fn) as metadata_file:
+                    image_metadata = json.load(metadata_file)
+                if image_metadata['type'] == 'machine':
+                    machine_images[image_path] = image_metadata
+                else:
+                    other_images[image_path] = image_metadata
+            except Exception as exc:
+                print _("Failed to load %(fn)s.") % locals()
+        # NOTE(vish): do kernels and ramdisks first so images can find them
+        self._convert_images(other_images)
+        self._convert_images(machine_images)
+
+
 CATEGORIES = [
     ('user', UserCommands),
     ('project', ProjectCommands),
@@ -758,6 +910,7 @@ CATEGORIES = [
     ('db', DbCommands),
     ('volume', VolumeCommands),
     ('instance_type', InstanceTypeCommands),
+    ('image', ImageCommands),
     ('flavor', InstanceTypeCommands)]
 
 
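With the 'image' category registered above, nova-manage dispatches 'nova-manage image <command> <args>' onto the ImageCommands methods. A rough sketch of the equivalent direct calls (the paths and owner below are placeholders, not values from this commit):

    # roughly what `nova-manage image convert /old/objectstore/images` runs
    commands = ImageCommands()
    commands.convert('/old/objectstore/images')

    # and `nova-manage image image_register /tmp/machine.img myproject`
    commands.image_register('/tmp/machine.img', 'myproject')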
@@ -348,7 +348,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
                     'Manager for scheduler')
 
 # The service to use for image search and retrieval
-DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
+DEFINE_string('image_service', 'nova.image.local.LocalImageService',
               'The service to use for retrieving and searching for images.')
 
 DEFINE_string('host', socket.gethostname(),
@@ -32,6 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager')
 FLAGS.network_size = 8
 FLAGS.num_networks = 2
 FLAGS.fake_network = True
+FLAGS.image_service = 'nova.image.local.LocalImageService'
 flags.DECLARE('num_shelves', 'nova.volume.driver')
 flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
 flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
@@ -38,6 +38,8 @@ from nova import test
 from nova.auth import manager
 from nova.compute import power_state
 from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.image import local
 from nova.objectstore import image
 
 
@@ -76,6 +78,12 @@ class CloudTestCase(test.TestCase):
                                               project=self.project)
         host = self.network.get_network_host(self.context.elevated())
+
+        def fake_show(meh, context, id):
+            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+        self.stubs.Set(local.LocalImageService, 'show', fake_show)
+        self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
 
     def tearDown(self):
         network_ref = db.project_get_network(self.context,
                                              self.project.id)
@@ -122,7 +130,7 @@ class CloudTestCase(test.TestCase):
         self.cloud.allocate_address(self.context)
         inst = db.instance_create(self.context, {'host': self.compute.host})
         fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
-        ec2_id = cloud.id_to_ec2_id(inst['id'])
+        ec2_id = ec2utils.id_to_ec2_id(inst['id'])
         self.cloud.associate_address(self.context,
                                      instance_id=ec2_id,
                                      public_ip=address)
@@ -158,12 +166,12 @@ class CloudTestCase(test.TestCase):
         vol2 = db.volume_create(self.context, {})
         result = self.cloud.describe_volumes(self.context)
         self.assertEqual(len(result['volumeSet']), 2)
-        volume_id = cloud.id_to_ec2_id(vol2['id'], 'vol-%08x')
+        volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x')
         result = self.cloud.describe_volumes(self.context,
                                              volume_id=[volume_id])
         self.assertEqual(len(result['volumeSet']), 1)
         self.assertEqual(
-            cloud.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
+            ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
             vol2['id'])
         db.volume_destroy(self.context, vol1['id'])
         db.volume_destroy(self.context, vol2['id'])
@@ -188,8 +196,10 @@ class CloudTestCase(test.TestCase):
     def test_describe_instances(self):
         """Makes sure describe_instances works and filters results."""
         inst1 = db.instance_create(self.context, {'reservation_id': 'a',
+                                                  'image_id': 1,
                                                   'host': 'host1'})
         inst2 = db.instance_create(self.context, {'reservation_id': 'a',
+                                                  'image_id': 1,
                                                   'host': 'host2'})
         comp1 = db.service_create(self.context, {'host': 'host1',
                                                  'availability_zone': 'zone1',
@@ -200,7 +210,7 @@ class CloudTestCase(test.TestCase):
         result = self.cloud.describe_instances(self.context)
         result = result['reservationSet'][0]
         self.assertEqual(len(result['instancesSet']), 2)
-        instance_id = cloud.id_to_ec2_id(inst2['id'])
+        instance_id = ec2utils.id_to_ec2_id(inst2['id'])
         result = self.cloud.describe_instances(self.context,
                                                instance_id=[instance_id])
         result = result['reservationSet'][0]
@@ -215,10 +225,9 @@ class CloudTestCase(test.TestCase):
         db.service_destroy(self.context, comp2['id'])
 
     def test_console_output(self):
-        image_id = FLAGS.default_image
         instance_type = FLAGS.default_instance_type
         max_count = 1
-        kwargs = {'image_id': image_id,
+        kwargs = {'image_id': 'ami-1',
                   'instance_type': instance_type,
                   'max_count': max_count}
         rv = self.cloud.run_instances(self.context, **kwargs)
@@ -234,8 +243,7 @@ class CloudTestCase(test.TestCase):
         greenthread.sleep(0.3)
 
     def test_ajax_console(self):
-        image_id = FLAGS.default_image
-        kwargs = {'image_id': image_id}
+        kwargs = {'image_id': 'ami-1'}
         rv = self.cloud.run_instances(self.context, **kwargs)
         instance_id = rv['instancesSet'][0]['instanceId']
         greenthread.sleep(0.3)
@@ -347,7 +355,7 @@ class CloudTestCase(test.TestCase):
 
     def test_update_of_instance_display_fields(self):
         inst = db.instance_create(self.context, {})
-        ec2_id = cloud.id_to_ec2_id(inst['id'])
+        ec2_id = ec2utils.id_to_ec2_id(inst['id'])
         self.cloud.update_instance(self.context, ec2_id,
                                    display_name='c00l 1m4g3')
         inst = db.instance_get(self.context, inst['id'])
@@ -365,7 +373,7 @@ class CloudTestCase(test.TestCase):
     def test_update_of_volume_display_fields(self):
         vol = db.volume_create(self.context, {})
         self.cloud.update_volume(self.context,
-                                 cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
+                                 ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
                                  display_name='c00l v0lum3')
         vol = db.volume_get(self.context, vol['id'])
         self.assertEqual('c00l v0lum3', vol['display_name'])
@@ -374,7 +382,7 @@ class CloudTestCase(test.TestCase):
     def test_update_of_volume_wont_update_private_fields(self):
         vol = db.volume_create(self.context, {})
         self.cloud.update_volume(self.context,
-                                 cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
+                                 ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
                                  mountpoint='/not/here')
         vol = db.volume_get(self.context, vol['id'])
         self.assertEqual(None, vol['mountpoint'])
@@ -31,7 +31,7 @@ from nova import test
 from nova import utils
 from nova.auth import manager
 from nova.compute import instance_types
+from nova.image import local
 
 LOG = logging.getLogger('nova.tests.compute')
 FLAGS = flags.FLAGS
@@ -52,6 +52,11 @@ class ComputeTestCase(test.TestCase):
         self.project = self.manager.create_project('fake', 'fake', 'fake')
         self.context = context.RequestContext('fake', 'fake', False)
+
+        def fake_show(meh, context, id):
+            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+        self.stubs.Set(local.LocalImageService, 'show', fake_show)
 
     def tearDown(self):
         self.manager.delete_user(self.user)
         self.manager.delete_project(self.project)
@@ -60,7 +65,7 @@ class ComputeTestCase(test.TestCase):
     def _create_instance(self, params={}):
         """Create a test instance"""
         inst = {}
-        inst['image_id'] = 'ami-test'
+        inst['image_id'] = 1
         inst['reservation_id'] = 'r-fakeres'
         inst['launch_time'] = '10'
         inst['user_id'] = self.user.id
@@ -57,7 +57,7 @@ class ConsoleTestCase(test.TestCase):
         inst = {}
         #inst['host'] = self.host
         #inst['name'] = 'instance-1234'
-        inst['image_id'] = 'ami-test'
+        inst['image_id'] = 1
         inst['reservation_id'] = 'r-fakeres'
         inst['launch_time'] = '10'
         inst['user_id'] = self.user.id
@@ -93,8 +93,7 @@ class DirectTestCase(test.TestCase):
 class DirectCloudTestCase(test_cloud.CloudTestCase):
     def setUp(self):
         super(DirectCloudTestCase, self).setUp()
-        compute_handle = compute.API(image_service=self.cloud.image_service,
-                                     network_api=self.cloud.network_api,
+        compute_handle = compute.API(network_api=self.cloud.network_api,
                                      volume_api=self.cloud.volume_api)
         direct.register_service('compute', compute_handle)
         self.router = direct.JsonParamsMiddleware(direct.Router())
@@ -155,7 +155,7 @@ class SimpleDriverTestCase(test.TestCase):
     def _create_instance(self, **kwargs):
         """Create a test instance"""
         inst = {}
-        inst['image_id'] = 'ami-test'
+        inst['image_id'] = 1
         inst['reservation_id'] = 'r-fakeres'
         inst['user_id'] = self.user.id
         inst['project_id'] = self.project.id
@@ -169,8 +169,6 @@ class SimpleDriverTestCase(test.TestCase):
     def _create_volume(self):
         """Create a test volume"""
         vol = {}
-        vol['image_id'] = 'ami-test'
-        vol['reservation_id'] = 'r-fakeres'
         vol['size'] = 1
         vol['availability_zone'] = 'test'
         return db.volume_create(self.context, vol)['id']
@@ -15,6 +15,9 @@
 # under the License.
 
 import re
+import os
+
+import eventlet
 from xml.etree.ElementTree import fromstring as xml_to_tree
 from xml.dom.minidom import parseString as xml_to_dom
 
@@ -31,6 +34,70 @@ FLAGS = flags.FLAGS
 flags.DECLARE('instances_path', 'nova.compute.manager')
 
 
+def _concurrency(wait, done, target):
+    wait.wait()
+    done.send()
+
+
+class CacheConcurrencyTestCase(test.TestCase):
+    def setUp(self):
+        super(CacheConcurrencyTestCase, self).setUp()
+
+        def fake_exists(fname):
+            basedir = os.path.join(FLAGS.instances_path, '_base')
+            if fname == basedir:
+                return True
+            return False
+
+        def fake_execute(*args, **kwargs):
+            pass
+
+        self.stubs.Set(os.path, 'exists', fake_exists)
+        self.stubs.Set(utils, 'execute', fake_execute)
+
+    def test_same_fname_concurrency(self):
+        """Ensures that the same fname cache runs sequentially"""
+        conn = libvirt_conn.LibvirtConnection
+        wait1 = eventlet.event.Event()
+        done1 = eventlet.event.Event()
+        eventlet.spawn(conn._cache_image, _concurrency,
+                       'target', 'fname', False, wait1, done1)
+        wait2 = eventlet.event.Event()
+        done2 = eventlet.event.Event()
+        eventlet.spawn(conn._cache_image, _concurrency,
+                       'target', 'fname', False, wait2, done2)
+        wait2.send()
+        eventlet.sleep(0)
+        try:
+            self.assertFalse(done2.ready())
+            self.assertTrue('fname' in conn._image_sems)
+        finally:
+            wait1.send()
+            done1.wait()
+            eventlet.sleep(0)
+        self.assertTrue(done2.ready())
+        self.assertFalse('fname' in conn._image_sems)
+
+    def test_different_fname_concurrency(self):
+        """Ensures that two different fname caches are concurrent"""
+        conn = libvirt_conn.LibvirtConnection
+        wait1 = eventlet.event.Event()
+        done1 = eventlet.event.Event()
+        eventlet.spawn(conn._cache_image, _concurrency,
+                       'target', 'fname2', False, wait1, done1)
+        wait2 = eventlet.event.Event()
+        done2 = eventlet.event.Event()
+        eventlet.spawn(conn._cache_image, _concurrency,
+                       'target', 'fname1', False, wait2, done2)
+        wait2.send()
+        eventlet.sleep(0)
+        try:
+            self.assertTrue(done2.ready())
+        finally:
+            wait1.send()
+            eventlet.sleep(0)
+
+
 class LibvirtConnTestCase(test.TestCase):
     def setUp(self):
         super(LibvirtConnTestCase, self).setUp()
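The new tests pin down the locking contract for the image cache: two greenthreads caching the same fname are serialized through a per-filename entry in _image_sems, while different fnames proceed concurrently. A rough standalone sketch of that pattern, assuming one eventlet semaphore per filename (illustrative only, not the actual libvirt_conn implementation):

    from eventlet import semaphore

    _image_sems = {}

    def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
        # callers caching the same base image (same fname) are serialized;
        # callers with different fnames take independent semaphores
        if fname not in _image_sems:
            _image_sems[fname] = semaphore.Semaphore()
        with _image_sems[fname]:
            fn(target=target, *args, **kwargs)
        # drop the entry once no one holds it, matching the final asserts
        if not _image_sems[fname].locked():
            del _image_sems[fname]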
@@ -99,7 +99,7 @@ class VolumeTestCase(test.TestCase):
     def test_run_attach_detach_volume(self):
         """Make sure volume can be attached and detached from instance."""
         inst = {}
-        inst['image_id'] = 'ami-test'
+        inst['image_id'] = 1
         inst['reservation_id'] = 'r-fakeres'
         inst['launch_time'] = '10'
         inst['user_id'] = 'fake'