Merge "Add Cinder Volume API to Nova"

commit ff5981b59b

nova/tests/api/ec2/test_cinder_cloud.py (new file, 939 lines)
@@ -0,0 +1,939 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.compute import api as compute_api
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import test
from nova.tests.image import fake
from nova import volume


LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS


def get_fake_cache():
    def _ip(ip, fixed=True, floats=None):
        ip_dict = {'address': ip, 'type': 'fixed'}
        if not fixed:
            ip_dict['type'] = 'floating'
        if fixed and floats:
            ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
        return ip_dict

    info = [{'address': 'aa:bb:cc:dd:ee:ff',
             'id': 1,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'private',
                         'subnets': [{'cidr': '192.168.0.0/24',
                                      'ips': [_ip('192.168.0.3',
                                                  floats=['1.2.3.4',
                                                          '5.6.7.8']),
                                              _ip('192.168.0.4')]}]}}]
    if FLAGS.use_ipv6:
        ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
        info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
                                              'ips': [_ip(ipv6_addr)]})
    return info


def get_instances_with_cached_ips(orig_func, *args, **kwargs):
    """Kludge the cache into instance(s) without having to create DB
    entries
    """
    instances = orig_func(*args, **kwargs)
    if isinstance(instances, list):
        for instance in instances:
            instance['info_cache'] = {'network_info': get_fake_cache()}
    else:
        instances['info_cache'] = {'network_info': get_fake_cache()}
    return instances
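# The wrapper above is installed by _stub_instance_get_with_fixed_ips()
# in the test case below, which swaps it in for a compute_api getter so
# that every instance returned carries the fake network cache.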


class CinderCloudTestCase(test.TestCase):
    def setUp(self):
        super(CinderCloudTestCase, self).setUp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   volume_api_class='nova.tests.fake_volume.API',
                   stub_network=True)
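        # volume_api_class points nova.volume.API() (see the
        # nova/volume/__init__.py hunk below) at the in-memory fake in
        # nova/tests/fake_volume.py, so no real volume service is needed.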

        def fake_show(meh, context, id):
            return {'id': id,
                    'container_format': 'ami',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}

        def fake_detail(_self, context, **kwargs):
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        fake.stub_out_image_service(self.stubs)

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(compute_scheduler_driver='nova.scheduler.'
                                            'chance.ChanceScheduler')

        # set up services
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.volume = self.start_service('volume')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.volume_api = volume.API()

        # NOTE(comstud): Make 'cast' behave like a 'call' which will
        # ensure that operations complete
        self.stubs.Set(rpc, 'cast', rpc.call)

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.api.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.api.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')

    def tearDown(self):
        self.volume_api.reset_fake_api(self.context)
        super(CinderCloudTestCase, self).tearDown()
        fake.FakeImageService_reset()

    def _stub_instance_get_with_fixed_ips(self, func_name):
        orig_func = getattr(self.cloud.compute_api, func_name)

        def fake_get(*args, **kwargs):
            return get_instances_with_cached_ips(orig_func, *args, **kwargs)
        self.stubs.Set(self.cloud.compute_api, func_name, fake_get)

    def _create_key(self, name):
        # NOTE(vish): create depends on pool, so just call helper directly
        keypair_api = compute_api.KeypairAPI()
        return keypair_api.create_key_pair(self.context, self.context.user_id,
                                           name)

    def test_describe_volumes(self):
        """Makes sure describe_volumes works and filters results."""

        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        name='test-1',
                                        description='test volume 1')
        vol2 = self.cloud.create_volume(self.context,
                                        size=1,
                                        name='test-2',
                                        description='test volume 2')
        result = self.cloud.describe_volumes(self.context)
        self.assertEqual(len(result['volumeSet']), 2)
        result = self.cloud.describe_volumes(self.context,
                                             [vol1['volumeId']])
        self.assertEqual(len(result['volumeSet']), 1)
        self.assertEqual(vol1['volumeId'], result['volumeSet'][0]['volumeId'])

        self.cloud.delete_volume(self.context, vol1['volumeId'])
        self.cloud.delete_volume(self.context, vol2['volumeId'])

    def test_create_volume_in_availability_zone(self):
        """Makes sure create_volume works when we specify an availability
        zone
        """
        availability_zone = 'zone1:host1'

        result = self.cloud.create_volume(self.context,
                                          size=1,
                                          availability_zone=availability_zone)
        volume_id = result['volumeId']
        availabilityZone = result['availabilityZone']
        self.assertEqual(availabilityZone, availability_zone)
        result = self.cloud.describe_volumes(self.context)
        self.assertEqual(len(result['volumeSet']), 1)
        self.assertEqual(result['volumeSet'][0]['volumeId'], volume_id)
        self.assertEqual(result['volumeSet'][0]['availabilityZone'],
                         availabilityZone)

        self.cloud.delete_volume(self.context, volume_id)

    def test_create_volume_from_snapshot(self):
        """Makes sure create_volume works when we specify a snapshot."""
        availability_zone = 'zone1:host1'
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        snap = self.cloud.create_snapshot(self.context,
                                          vol1['volumeId'],
                                          name='snap-1',
                                          description='test snap of vol %s'
                                          % vol1['volumeId'])

        vol2 = self.cloud.create_volume(self.context,
                                        snapshot_id=snap['snapshotId'])
        volume1_id = vol1['volumeId']
        volume2_id = vol2['volumeId']

        result = self.cloud.describe_volumes(self.context)
        self.assertEqual(len(result['volumeSet']), 2)
        self.assertEqual(result['volumeSet'][1]['volumeId'], volume2_id)

        self.cloud.delete_volume(self.context, volume2_id)
        self.cloud.delete_snapshot(self.context, snap['snapshotId'])
        self.cloud.delete_volume(self.context, volume1_id)

    def test_describe_snapshots(self):
        """Makes sure describe_snapshots works and filters results."""
        availability_zone = 'zone1:host1'
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        snap1 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-1',
                                           description='test snap1 of vol %s' %
                                           vol1['volumeId'])
        snap2 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-1',
                                           description='test snap2 of vol %s' %
                                           vol1['volumeId'])

        result = self.cloud.describe_snapshots(self.context)
        self.assertEqual(len(result['snapshotSet']), 2)
        result = self.cloud.describe_snapshots(
            self.context,
            snapshot_id=[snap2['snapshotId']])
        self.assertEqual(len(result['snapshotSet']), 1)

        self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
        self.cloud.delete_snapshot(self.context, snap2['snapshotId'])
        self.cloud.delete_volume(self.context, vol1['volumeId'])

    def test_create_snapshot(self):
        """Makes sure create_snapshot works."""
        availability_zone = 'zone1:host1'
        result = self.cloud.describe_snapshots(self.context)
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        snap1 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-1',
                                           description='test snap1 of vol %s' %
                                           vol1['volumeId'])

        snapshot_id = snap1['snapshotId']
        result = self.cloud.describe_snapshots(self.context)
        self.assertEqual(len(result['snapshotSet']), 1)
        self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)

        self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
        self.cloud.delete_volume(self.context, vol1['volumeId'])

    def test_delete_snapshot(self):
        """Makes sure delete_snapshot works."""
        availability_zone = 'zone1:host1'
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        snap1 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-1',
                                           description='test snap1 of vol %s' %
                                           vol1['volumeId'])

        snapshot_id = snap1['snapshotId']
        result = self.cloud.delete_snapshot(self.context,
                                            snapshot_id=snapshot_id)
        self.assertTrue(result)
        self.cloud.delete_volume(self.context, vol1['volumeId'])

    def _block_device_mapping_create(self, instance_uuid, mappings):
        volumes = []
        for bdm in mappings:
            db.block_device_mapping_create(self.context, bdm)
            if 'volume_id' in bdm:
                values = {'id': bdm['volume_id']}
                for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'),
                                         ('snapshot_size', 'volume_size'),
                                         ('delete_on_termination',
                                          'delete_on_termination')]:
                    if bdm_key in bdm:
                        values[vol_key] = bdm[bdm_key]
                kwargs = {'name': 'bdmtest-volume',
                          'description': 'bdm test volume description',
                          'status': 'available',
                          'host': self.volume.host,
                          'size': 1,
                          'attach_status': 'detached',
                          'volume_id': values['id']}
                vol = self.volume_api.create_with_kwargs(self.context,
                                                         **kwargs)
                if 'snapshot_id' in values:
                    self.volume_api.create_snapshot(self.context,
                                                    vol,
                                                    'snapshot-bdm',
                                                    'fake snap for bdm tests',
                                                    values['snapshot_id'])

                self.volume_api.attach(self.context, vol,
                                       instance_uuid, bdm['device_name'])
                volumes.append(vol)
        return volumes

    def _setUpBlockDeviceMapping(self):
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        inst1 = db.instance_create(self.context,
                                   {'image_ref': image_uuid,
                                    'instance_type_id': 1,
                                    'root_device_name': '/dev/sdb1'})
        inst2 = db.instance_create(self.context,
                                   {'image_ref': image_uuid,
                                    'instance_type_id': 1,
                                    'root_device_name': '/dev/sdc1'})

        instance_uuid = inst1['uuid']
        mappings0 = [
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb1',
             'snapshot_id': '1',
             'volume_id': '2'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb2',
             'volume_id': '3',
             'volume_size': 1},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb3',
             'delete_on_termination': True,
             'snapshot_id': '4',
             'volume_id': '5'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb4',
             'delete_on_termination': False,
             'snapshot_id': '6',
             'volume_id': '7'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb5',
             'snapshot_id': '8',
             'volume_id': '9',
             'volume_size': 0},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb6',
             'snapshot_id': '10',
             'volume_id': '11',
             'volume_size': 1},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb7',
             'no_device': True},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb8',
             'virtual_name': 'swap'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb9',
             'virtual_name': 'ephemeral3'}]

        volumes = self._block_device_mapping_create(instance_uuid, mappings0)
        return (inst1, inst2, volumes)

    def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
        for vol in volumes:
            self.volume_api.delete(self.context, vol)
        for uuid in (inst1['uuid'], inst2['uuid']):
            for bdm in db.block_device_mapping_get_all_by_instance(
                    self.context, uuid):
                db.block_device_mapping_destroy(self.context, bdm['id'])
        db.instance_destroy(self.context, inst2['uuid'])
        db.instance_destroy(self.context, inst1['uuid'])

    _expected_instance_bdm1 = {
        'instanceId': 'i-00000001',
        'rootDeviceName': '/dev/sdb1',
        'rootDeviceType': 'ebs'}

    _expected_block_device_mapping0 = [
        {'deviceName': '/dev/sdb1',
         'ebs': {'status': 'in-use',
                 'deleteOnTermination': False,
                 'volumeId': '2',
                 }},
        {'deviceName': '/dev/sdb2',
         'ebs': {'status': 'in-use',
                 'deleteOnTermination': False,
                 'volumeId': '3',
                 }},
        {'deviceName': '/dev/sdb3',
         'ebs': {'status': 'in-use',
                 'deleteOnTermination': True,
                 'volumeId': '5',
                 }},
        {'deviceName': '/dev/sdb4',
         'ebs': {'status': 'in-use',
                 'deleteOnTermination': False,
                 'volumeId': '7',
                 }},
        {'deviceName': '/dev/sdb5',
         'ebs': {'status': 'in-use',
                 'deleteOnTermination': False,
                 'volumeId': '9',
                 }},
        {'deviceName': '/dev/sdb6',
         'ebs': {'status': 'in-use',
                 'deleteOnTermination': False,
                 'volumeId': '11', }}]
    # NOTE(yamahata): swap/ephemeral device case isn't supported yet.

    _expected_instance_bdm2 = {
        'instanceId': 'i-00000002',
        'rootDeviceName': '/dev/sdc1',
        'rootDeviceType': 'instance-store'}

    def test_format_instance_bdm(self):
        (inst1, inst2, volumes) = self._setUpBlockDeviceMapping()

        result = {}
        self.cloud._format_instance_bdm(self.context, inst1['uuid'],
                                        '/dev/sdb1', result)
        self.assertSubDictMatch(
            {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
            result)
        self._assertEqualBlockDeviceMapping(
            self._expected_block_device_mapping0, result['blockDeviceMapping'])

        result = {}
        self.cloud._format_instance_bdm(self.context, inst2['uuid'],
                                        '/dev/sdc1', result)
        self.assertSubDictMatch(
            {'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']},
            result)

        self._tearDownBlockDeviceMapping(inst1, inst2, volumes)

    def _assertInstance(self, instance_id):
        ec2_instance_id = ec2utils.id_to_ec2_id(instance_id)
        result = self.cloud.describe_instances(self.context,
                                               instance_id=[ec2_instance_id])
        result = result['reservationSet'][0]
        self.assertEqual(len(result['instancesSet']), 1)
        result = result['instancesSet'][0]
        self.assertEqual(result['instanceId'], ec2_instance_id)
        return result

    def _assertEqualBlockDeviceMapping(self, expected, result):
        self.assertEqual(len(expected), len(result))
        for x in expected:
            found = False
            for y in result:
                if x['deviceName'] == y['deviceName']:
                    self.assertSubDictMatch(x, y)
                    found = True
                    break
            self.assertTrue(found)

    def test_describe_instances_bdm(self):
        """Make sure describe_instances works with root_device_name and
        block device mappings
        """
        (inst1, inst2, volumes) = self._setUpBlockDeviceMapping()

        result = self._assertInstance(inst1['id'])
        self.assertSubDictMatch(self._expected_instance_bdm1, result)
        self._assertEqualBlockDeviceMapping(
            self._expected_block_device_mapping0, result['blockDeviceMapping'])

        result = self._assertInstance(inst2['id'])
        self.assertSubDictMatch(self._expected_instance_bdm2, result)

        self._tearDownBlockDeviceMapping(inst1, inst2, volumes)

    def assertDictListUnorderedMatch(self, L1, L2, key):
        self.assertEqual(len(L1), len(L2))
        for d1 in L1:
            self.assertTrue(key in d1)
            for d2 in L2:
                self.assertTrue(key in d2)
                if d1[key] == d2[key]:
                    self.assertDictMatch(d1, d2)

    def _setUpImageSet(self, create_volumes_and_snapshots=False):
        mappings1 = [
            {'device': '/dev/sda1', 'virtual': 'root'},

            {'device': 'sdb0', 'virtual': 'ephemeral0'},
            {'device': 'sdb1', 'virtual': 'ephemeral1'},
            {'device': 'sdb2', 'virtual': 'ephemeral2'},
            {'device': 'sdb3', 'virtual': 'ephemeral3'},
            {'device': 'sdb4', 'virtual': 'ephemeral4'},

            {'device': 'sdc0', 'virtual': 'swap'},
            {'device': 'sdc1', 'virtual': 'swap'},
            {'device': 'sdc2', 'virtual': 'swap'},
            {'device': 'sdc3', 'virtual': 'swap'},
            {'device': 'sdc4', 'virtual': 'swap'}]
        block_device_mapping1 = [
            {'device_name': '/dev/sdb1', 'snapshot_id': 01234567},
            {'device_name': '/dev/sdb2', 'volume_id': 01234567},
            {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
            {'device_name': '/dev/sdb4', 'no_device': True},

            {'device_name': '/dev/sdc1', 'snapshot_id': 12345678},
            {'device_name': '/dev/sdc2', 'volume_id': 12345678},
            {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
            {'device_name': '/dev/sdc4', 'no_device': True}]
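        # NOTE: 01234567 is a Python 2 octal literal (== 342391 == 0x53977)
        # and 12345678 is decimal (== 0xbc614e), which is why the expected
        # EC2 ids further down read 'snap-00053977'/'vol-00053977' and
        # 'snap-00bc614e'/'vol-00bc614e'.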
        image1 = {
            'id': 'cedef40a-ed67-4d10-800e-17455edce175',
            'properties': {
                'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'type': 'machine',
                'image_state': 'available',
                'mappings': mappings1,
                'block_device_mapping': block_device_mapping1,
            }
        }

        mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
        block_device_mapping2 = [{'device_name': '/dev/sdb1',
                                  'snapshot_id': 01234567}]
        image2 = {
            'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
            'properties': {
                'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                'type': 'machine',
                'root_device_name': '/dev/sdb1',
                'mappings': mappings2,
                'block_device_mapping': block_device_mapping2}}

        def fake_show(meh, context, image_id):
            _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
            for i in _images:
                if str(i['id']) == str(image_id):
                    return i
            raise exception.ImageNotFound(image_id=image_id)

        def fake_detail(meh, context):
            return [copy.deepcopy(image1), copy.deepcopy(image2)]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)

        volumes = []
        snapshots = []
        if create_volumes_and_snapshots:
            for bdm in block_device_mapping1:
                if 'volume_id' in bdm:
                    vol = self._volume_create(bdm['volume_id'])
                    volumes.append(vol['id'])
                if 'snapshot_id' in bdm:
                    kwargs = {'volume_id': 76543210,
                              'volume_size': 1,
                              'name': 'test-snap',
                              'description': 'test snap desc',
                              'snap_id': bdm['snapshot_id'],
                              'status': 'available'}
                    snap = self.volume_api.create_snapshot_with_kwargs(
                        self.context, **kwargs)
                    snapshots.append(snap['id'])
        return (volumes, snapshots)

    def _assertImageSet(self, result, root_device_type, root_device_name):
        self.assertEqual(1, len(result['imagesSet']))
        result = result['imagesSet'][0]
        self.assertTrue('rootDeviceType' in result)
        self.assertEqual(result['rootDeviceType'], root_device_type)
        self.assertTrue('rootDeviceName' in result)
        self.assertEqual(result['rootDeviceName'], root_device_name)
        self.assertTrue('blockDeviceMapping' in result)

        return result

    _expected_root_device_name1 = '/dev/sda1'
    # NOTE(yamahata): noDevice doesn't make sense when returning mapping
    #                 It makes sense only when user overriding existing
    #                 mapping.
    _expected_bdms1 = [
        {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
        {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
                                            'snap-00053977'}},
        {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
                                            'vol-00053977'}},
        {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
        # {'deviceName': '/dev/sdb4', 'noDevice': True},

        {'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
        {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
                                            'snap-00bc614e'}},
        {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
                                            'vol-00bc614e'}},
        {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
        # {'deviceName': '/dev/sdc4', 'noDevice': True}
        ]

    _expected_root_device_name2 = '/dev/sdb1'
    _expected_bdms2 = [{'deviceName': '/dev/sdb1',
                        'ebs': {'snapshotId': 'snap-00053977'}}]

    def _run_instance(self, **kwargs):
        rv = self.cloud.run_instances(self.context, **kwargs)
        instance_id = rv['instancesSet'][0]['instanceId']
        return instance_id

    def _restart_compute_service(self, periodic_interval=None):
        """restart compute service. NOTE: fake driver forgets all instances."""
        self.compute.kill()
        if periodic_interval:
            self.compute = self.start_service(
                'compute', periodic_interval=periodic_interval)
        else:
            self.compute = self.start_service('compute')

    def _volume_create(self, volume_id=None):
        kwargs = {'name': 'test-volume',
                  'description': 'test volume description',
                  'status': 'available',
                  'host': self.volume.host,
                  'size': 1,
                  'attach_status': 'detached'}
        if volume_id:
            kwargs['volume_id'] = volume_id
        return self.volume_api.create_with_kwargs(self.context, **kwargs)
        #return db.volume_create(self.context, kwargs)

    def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
        self.assertEqual(vol['instance_uuid'], instance_uuid)
        self.assertEqual(vol['mountpoint'], mountpoint)
        self.assertEqual(vol['status'], "in-use")
        self.assertEqual(vol['attach_status'], "attached")

    def _assert_volume_detached(self, vol):
        self.assertEqual(vol['instance_uuid'], None)
        self.assertEqual(vol['mountpoint'], None)
        self.assertEqual(vol['status'], "available")
        self.assertEqual(vol['attach_status'], "detached")

    def test_stop_start_with_volume(self):
        """Make sure run instance with block device mapping works"""
        availability_zone = 'zone1:host1'
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        vol2 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
        vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
        # enforce periodic tasks run in short time to avoid wait for 60s.
        self._restart_compute_service(periodic_interval=0.3)

        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1,
                  'block_device_mapping': [{'device_name': '/dev/vdb',
                                            'volume_id': vol1_uuid,
                                            'delete_on_termination': False},
                                           {'device_name': '/dev/vdc',
                                            'volume_id': vol2_uuid,
                                            'delete_on_termination': True},
                                           ]}
        ec2_instance_id = self._run_instance(**kwargs)
        instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                         ec2_instance_id)
        vols = self.volume_api.get_all(self.context)
        vols = [v for v in vols if v['instance_uuid'] == instance_uuid]

        self.assertEqual(len(vols), 2)
        for vol in vols:
            self.assertTrue(str(vol['id']) == str(vol1_uuid) or
                            str(vol['id']) == str(vol2_uuid))
            if str(vol['id']) == str(vol1_uuid):
                self.volume_api.attach(self.context, vol,
                                       instance_uuid, '/dev/vdb')
            elif str(vol['id']) == str(vol2_uuid):
                self.volume_api.attach(self.context, vol,
                                       instance_uuid, '/dev/vdc')

        vol = self.volume_api.get(self.context, vol1_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')

        vol = self.volume_api.get(self.context, vol2_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')

        result = self.cloud.stop_instances(self.context, [ec2_instance_id])
        self.assertTrue(result)

        vol = self.volume_api.get(self.context, vol1_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')

        vol = self.volume_api.get(self.context, vol1_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')

        vol = self.volume_api.get(self.context, vol2_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')

        self.cloud.start_instances(self.context, [ec2_instance_id])
        vols = self.volume_api.get_all(self.context)
        vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
        self.assertEqual(len(vols), 2)
        for vol in vols:
            self.assertTrue(str(vol['id']) == str(vol1_uuid) or
                            str(vol['id']) == str(vol2_uuid))
            self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
                            vol['mountpoint'] == '/dev/vdc')
            self.assertEqual(vol['instance_uuid'], instance_uuid)
            self.assertEqual(vol['status'], "in-use")
            self.assertEqual(vol['attach_status'], "attached")

        #Here we puke...
        self.cloud.terminate_instances(self.context, [ec2_instance_id])

        admin_ctxt = context.get_admin_context(read_deleted="no")
        vol = self.volume_api.get(admin_ctxt, vol2_uuid)
        self.assertFalse(vol['deleted'])
        self.cloud.delete_volume(self.context, vol1['volumeId'])
        self._restart_compute_service()

    def test_stop_with_attached_volume(self):
        """Make sure attach info is reflected to block device mapping"""

        availability_zone = 'zone1:host1'
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        vol2 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
        vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])

        # enforce periodic tasks run in short time to avoid wait for 60s.
        self._restart_compute_service(periodic_interval=0.3)
        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1,
                  'block_device_mapping': [{'device_name': '/dev/vdb',
                                            'volume_id': vol1_uuid,
                                            'delete_on_termination': True}]}
        ec2_instance_id = self._run_instance(**kwargs)
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                         ec2_instance_id)

        vols = self.volume_api.get_all(self.context)
        vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
        self.assertEqual(len(vols), 1)
        for vol in vols:
            self.assertEqual(vol['id'], vol1_uuid)
            self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
        vol = self.volume_api.get(self.context, vol2_uuid)
        self._assert_volume_detached(vol)

        instance = db.instance_get(self.context, instance_id)
        self.cloud.compute_api.attach_volume(self.context,
                                             instance,
                                             volume_id=vol2_uuid,
                                             device='/dev/vdc')

        vol1 = self.volume_api.get(self.context, vol1_uuid)
        self._assert_volume_attached(vol1, instance_uuid, '/dev/vdb')

        vol2 = self.volume_api.get(self.context, vol2_uuid)
        self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')

        self.cloud.compute_api.detach_volume(self.context,
                                             volume_id=vol1_uuid)

        vol1 = self.volume_api.get(self.context, vol1_uuid)
        self._assert_volume_detached(vol1)

        result = self.cloud.stop_instances(self.context, [ec2_instance_id])
        self.assertTrue(result)

        vol2 = self.volume_api.get(self.context, vol2_uuid)
        self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')

        self.cloud.start_instances(self.context, [ec2_instance_id])
        vols = self.volume_api.get_all(self.context)
        vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
        self.assertEqual(len(vols), 1)

        self._assert_volume_detached(vol1)

        vol1 = self.volume_api.get(self.context, vol1_uuid)
        self._assert_volume_detached(vol1)

        self.cloud.terminate_instances(self.context, [ec2_instance_id])

    def _create_snapshot(self, ec2_volume_id):
        result = self.cloud.create_snapshot(self.context,
                                            volume_id=ec2_volume_id)
        return result['snapshotId']

    def test_run_with_snapshot(self):
        """Makes sure run/stop/start instance with snapshot works."""
        availability_zone = 'zone1:host1'
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)

        snap1 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-1',
                                           description='test snap of vol %s' %
                                           vol1['volumeId'])
        snap1_uuid = ec2utils.ec2_snap_id_to_uuid(snap1['snapshotId'])

        snap2 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-2',
                                           description='test snap of vol %s' %
                                           vol1['volumeId'])
        snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])

        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1,
                  'block_device_mapping': [{'device_name': '/dev/vdb',
                                            'snapshot_id': snap1_uuid,
                                            'delete_on_termination': False, },
                                           {'device_name': '/dev/vdc',
                                            'snapshot_id': snap2_uuid,
                                            'delete_on_termination': True}]}
        ec2_instance_id = self._run_instance(**kwargs)
        instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                         ec2_instance_id)

        vols = self.volume_api.get_all(self.context)
        vols = [v for v in vols if v['instance_uuid'] == instance_uuid]

        self.assertEqual(len(vols), 2)

        vol1_id = None
        vol2_id = None
        for vol in vols:
            snapshot_uuid = vol['snapshot_id']
            if snapshot_uuid == snap1_uuid:
                vol1_id = vol['id']
                mountpoint = '/dev/vdb'
            elif snapshot_uuid == snap2_uuid:
                vol2_id = vol['id']
                mountpoint = '/dev/vdc'
            else:
                self.fail()

            self._assert_volume_attached(vol, instance_uuid, mountpoint)

        #Just make sure we found them
        self.assertTrue(vol1_id)
        self.assertTrue(vol2_id)

        self.cloud.terminate_instances(self.context, [ec2_instance_id])

        admin_ctxt = context.get_admin_context(read_deleted="no")
        vol = self.volume_api.get(admin_ctxt, vol1_id)
        self._assert_volume_detached(vol)
        self.assertFalse(vol['deleted'])
        #db.volume_destroy(self.context, vol1_id)

        ##admin_ctxt = context.get_admin_context(read_deleted="only")
        ##vol = db.volume_get(admin_ctxt, vol2_id)
        ##self.assertTrue(vol['deleted'])

        #for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
        #    self.cloud.delete_snapshot(self.context, snapshot_id)

    def test_create_image(self):
        """Make sure that CreateImage works"""
        # enforce periodic tasks run in short time to avoid wait for 60s.
        self._restart_compute_service(periodic_interval=0.3)

        (volumes, snapshots) = self._setUpImageSet(
            create_volumes_and_snapshots=True)

        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1}
        ec2_instance_id = self._run_instance(**kwargs)

        self.cloud.terminate_instances(self.context, [ec2_instance_id])
        self._restart_compute_service()

    @staticmethod
    def _fake_bdm_get(ctxt, id):
        return [{'volume_id': 87654321,
                 'snapshot_id': None,
                 'no_device': None,
                 'virtual_name': None,
                 'delete_on_termination': True,
                 'device_name': '/dev/sdh'},
                {'volume_id': None,
                 'snapshot_id': 98765432,
                 'no_device': None,
                 'virtual_name': None,
                 'delete_on_termination': True,
                 'device_name': '/dev/sdi'},
                {'volume_id': None,
                 'snapshot_id': None,
                 'no_device': True,
                 'virtual_name': None,
                 'delete_on_termination': None,
                 'device_name': None},
                {'volume_id': None,
                 'snapshot_id': None,
                 'no_device': None,
                 'virtual_name': 'ephemeral0',
                 'delete_on_termination': None,
                 'device_name': '/dev/sdb'},
                {'volume_id': None,
                 'snapshot_id': None,
                 'no_device': None,
                 'virtual_name': 'swap',
                 'delete_on_termination': None,
                 'device_name': '/dev/sdc'},
                {'volume_id': None,
                 'snapshot_id': None,
                 'no_device': None,
                 'virtual_name': 'ephemeral1',
                 'delete_on_termination': None,
                 'device_name': '/dev/sdd'},
                {'volume_id': None,
                 'snapshot_id': None,
                 'no_device': None,
                 'virtual_name': 'ephemeral2',
                 'delete_on_termination': None,
                 'device_name': '/dev/sd3'},
                ]
nova/tests/fake_volume.py (new file, 260 lines)
@@ -0,0 +1,260 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implementation of a fake volume API"""

from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils

LOG = logging.getLogger(__name__)


class fake_volume():
    user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
    instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66'

    def __init__(self, size, name,
                 description, id, snapshot,
                 volume_type, metadata,
                 availability_zone):
        snapshot_id = None
        if snapshot is not None:
            snapshot_id = snapshot['id']
        if id is None:
            id = str(utils.gen_uuid())
        self.vol = {
            'created_at': timeutils.utcnow(),
            'deleted_at': None,
            'updated_at': timeutils.utcnow(),
            'uuid': 'WTF',
            'deleted': False,
            'id': id,
            'user_id': self.user_uuid,
            'project_id': 'fake-project-id',
            'snapshot_id': snapshot_id,
            'host': None,
            'size': size,
            'availability_zone': availability_zone,
            'instance_uuid': None,
            'mountpoint': None,
            'attach_time': timeutils.utcnow(),
            'status': 'available',
            'attach_status': 'detached',
            'scheduled_at': None,
            'launched_at': None,
            'terminated_at': None,
            'display_name': name,
            'display_description': description,
            'provider_location': 'fake-location',
            'provider_auth': 'fake-auth',
            'volume_type_id': 99
        }

    def get(self, key, default=None):
        return self.vol[key]

    def __setitem__(self, key, value):
        self.vol[key] = value

    def __getitem__(self, key):
        return self.vol[key]


class fake_snapshot():
    user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
    instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66'

    def __init__(self, volume_id, size, name, desc, id=None):
        if id is None:
            id = str(utils.gen_uuid())
        self.snap = {
            'created_at': timeutils.utcnow(),
            'deleted_at': None,
            'updated_at': timeutils.utcnow(),
            'uuid': 'WTF',
            'deleted': False,
            'id': str(id),
            'volume_id': volume_id,
            'status': 'creating',
            'progress': '0%',
            'volume_size': 1,
            'display_name': name,
            'display_description': desc,
            'user_id': self.user_uuid,
            'project_id': 'fake-project-id'
        }

    def get(self, key, default=None):
        return self.snap[key]

    def __setitem__(self, key, value):
        self.snap[key] = value

    def __getitem__(self, key):
        return self.snap[key]


class API(object):
    volume_list = []
    snapshot_list = []
    _instance = None

    class Singleton:
        def __init__(self):
            self.API = None

    def __init__(self):
        if API._instance is None:
            API._instance = API.Singleton()

        self._EventHandler_instance = API._instance
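        # NOTE: volume_list and snapshot_list are class attributes, so
        # every API() instance shares the same in-memory state; the tests
        # clear it between runs via reset_fake_api() in tearDown().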

    def create(self, context, size, name, description, snapshot=None,
               volume_type=None, metadata=None, availability_zone=None):
        v = fake_volume(size, name,
                        description, None,
                        snapshot, volume_type,
                        metadata, availability_zone)
        self.volume_list.append(v.vol)
        LOG.info('creating volume %s', v.vol['id'])
        return v.vol

    def create_with_kwargs(self, context, **kwargs):
        v = fake_volume(kwargs['size'],
                        kwargs['name'],
                        kwargs['description'],
                        str(kwargs.get('volume_id', None)),
                        None,
                        None,
                        None,
                        None)
        if kwargs.get('status', None) is not None:
            v.vol['status'] = kwargs['status']
        if kwargs['host'] is not None:
            v.vol['host'] = kwargs['host']
        if kwargs['attach_status'] is not None:
            v.vol['attach_status'] = kwargs['attach_status']
        if kwargs.get('snapshot_id', None) is not None:
            v.vol['snapshot_id'] = kwargs['snapshot_id']

        self.volume_list.append(v.vol)
        return v.vol

    def get(self, context, volume_id):
        if volume_id == 87654321:
            return {'id': volume_id,
                    'attach_time': '13:56:24',
                    'status': 'in-use'}

        for v in self.volume_list:
            if v['id'] == str(volume_id):
                return v

    def get_all(self, context):
        return self.volume_list

    def delete(self, context, volume):
        LOG.info('deleting volume %s', volume['id'])
        self.volume_list = [v for v in self.volume_list if v != volume]

    def check_attach(self, context, volume):
        if volume['status'] != 'available':
            msg = _("status must be available")
            raise exception.InvalidVolume(reason=msg)
        if volume['attach_status'] == 'attached':
            msg = _("already attached")
            raise exception.InvalidVolume(reason=msg)

    def check_detach(self, context, volume):
        if volume['status'] == "available":
            msg = _("already detached")
            raise exception.InvalidVolume(reason=msg)

    def attach(self, context, volume, instance_uuid, mountpoint):
        LOG.info('attaching volume %s', volume['id'])
        volume = self.get(context, volume['id'])
        volume['status'] = 'in-use'
        volume['mountpoint'] = mountpoint
        volume['attach_status'] = 'attached'
        volume['instance_uuid'] = instance_uuid
        volume['attach_time'] = timeutils.utcnow()

    def fake_set_snapshot_id(self, context, volume, snapshot_id):
        volume['snapshot_id'] = snapshot_id

    def reset_fake_api(self, context):
        del self.volume_list[:]
        del self.snapshot_list[:]

    def detach(self, context, volume):
        LOG.info('detaching volume %s', volume['id'])
        volume = self.get(context, volume['id'])
        volume['status'] = 'available'
        volume['mountpoint'] = None
        volume['attach_status'] = 'detached'
        volume['instance_uuid'] = None

    def initialize_connection(self, context, volume_id, connector):
        return {'driver_volume_type': 'iscsi', 'data': {}}
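    # NOTE: the dict returned by initialize_connection() above is a
    # minimal sketch of connection_info; a real backend would populate
    # 'data' with iSCSI target details.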

    def terminate_connection(self, context, volume_id, connector):
        return None

    def get_snapshot(self, context, snapshot_id):
        for snap in self.snapshot_list:
            if snap['id'] == str(snapshot_id):
                return snap

    def get_all_snapshots(self, context):
        return self.snapshot_list

    def create_snapshot(self, context, volume, name, description, id=None):
        snapshot = fake_snapshot(volume['id'], volume['size'],
                                 name, description, id)
        self.snapshot_list.append(snapshot.snap)
        return snapshot.snap

    def create_snapshot_with_kwargs(self, context, **kwargs):
        snapshot = fake_snapshot(kwargs.get('volume_id'),
                                 kwargs.get('volume_size'),
                                 kwargs.get('name'),
                                 kwargs.get('description'),
                                 kwargs.get('snap_id'))

        status = kwargs.get('status', None)
        snapshot.snap['status'] = status
        self.snapshot_list.append(snapshot.snap)
        return snapshot.snap

    def create_snapshot_force(self, context, volume,
                              name, description, id=None):
        snapshot = fake_snapshot(volume['id'], volume['size'],
                                 name, description, id)
        self.snapshot_list.append(snapshot.snap)
        return snapshot.snap

    def delete_snapshot(self, context, snapshot):
        self.snapshot_list = [s for s in self.snapshot_list if s != snapshot]

    def reserve_volume(self, context, volume):
        LOG.info('reserving volume %s', volume['id'])
        volume = self.get(context, volume['id'])
        volume['status'] = 'attaching'

    def unreserve_volume(self, context, volume):
        LOG.info('unreserving volume %s', volume['id'])
        volume = self.get(context, volume['id'])
        volume['status'] = 'available'
nova/volume/__init__.py
@@ -21,5 +21,8 @@
 import nova.flags
 import nova.openstack.common.importutils
 
-API = nova.openstack.common.importutils.import_class(
-    nova.flags.FLAGS.volume_api_class)
+
+def API():
+    importutils = nova.openstack.common.importutils
+    cls = importutils.import_class(nova.flags.FLAGS.volume_api_class)
+    return cls()
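With this hunk, nova.volume.API() becomes a factory: each call imports and
instantiates the class named by FLAGS.volume_api_class, e.g. the fake
nova.tests.fake_volume.API in the tests above or the new
nova.volume.cinder.API below.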
nova/volume/cinder.py (new file, 229 lines)
@@ -0,0 +1,229 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Handles all requests relating to volumes + cinder.
"""


from cinderclient import service_catalog
from cinderclient.v1 import client as cinder_client

from nova.db import base
from nova import exception
from nova import flags
from nova.openstack.common import log as logging

FLAGS = flags.FLAGS

LOG = logging.getLogger(__name__)


def cinderclient(context):

    # FIXME: the cinderclient ServiceCatalog object is mis-named.
    #        It actually contains the entire access blob.
    compat_catalog = {
        'access': {'serviceCatalog': context.service_catalog}
    }
    sc = service_catalog.ServiceCatalog(compat_catalog)
    url = sc.url_for(service_type='volume', service_name='cinder')

    LOG.debug('cinderclient connection created using token "%s" and url "%s"'
              % (context.auth_token, url))

    c = cinder_client.Client(context.user_id,
                             context.auth_token,
                             project_id=context.project_id,
                             auth_url=url)
    c.client.auth_token = context.auth_token
    c.client.management_url = url
    return c
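
# Illustrative only: url_for() above assumes context.service_catalog holds
# keystone-style entries along the lines of
#   [{'type': 'volume', 'name': 'cinder',
#     'endpoints': [{'publicURL': 'http://cinder-host:8776/v1/<tenant>'}]}]
# so the volume endpoint can be resolved from the request context.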


def _untranslate_volume_summary_view(context, vol):
    """Maps keys for volumes summary view."""
    d = {}

    d['id'] = vol.id
    d['status'] = vol.status
    d['size'] = vol.size
    d['availability_zone'] = vol.availability_zone
    d['created_at'] = vol.created_at

    # TODO(jdg): The calling code expects attach_time and
    #            mountpoint to be set. When the calling
    #            code is more defensive this can be
    #            removed.
    d['attach_time'] = ""
    d['mountpoint'] = ""

    if vol.attachments:
        att = vol.attachments[0]
        d['attach_status'] = 'attached'
        d['instance_uuid'] = att['server_id']
        d['mountpoint'] = att['device']
    else:
        d['attach_status'] = 'detached'

    d['display_name'] = vol.display_name
    d['display_description'] = vol.display_description

    # TODO(jdg): Information may be lost in this translation
    d['volume_type_id'] = vol.volume_type
    d['snapshot_id'] = vol.snapshot_id

    d['vol_metadata'] = []
    # vol.metadata is a dict, so iterate its items, not the dict itself
    for k, v in vol.metadata.items():
        item = {}
        item['key'] = k
        item['value'] = v
        d['vol_metadata'].append(item)

    return d
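
# Example of the resulting nova-side dict (illustrative values):
#   {'id': '...', 'status': 'available', 'size': 1,
#    'attach_status': 'detached', 'display_name': 'vol-1', ...,
#    'vol_metadata': [{'key': 'k', 'value': 'v'}]}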


def _untranslate_snapshot_summary_view(context, snapshot):
    """Maps keys for snapshots summary view."""
    d = {}

    d['id'] = snapshot.id
    d['status'] = snapshot.status
    d['progress'] = snapshot.progress
    d['size'] = snapshot.size
    d['created_at'] = snapshot.created_at
    d['display_name'] = snapshot.display_name
    d['display_description'] = snapshot.display_description
    d['volume_id'] = snapshot.volume_id
    d['project_id'] = snapshot.project_id
    d['volume_size'] = snapshot.size

    return d


class API(base.Base):
    """API for interacting with the volume manager."""

    def get(self, context, volume_id):
        item = cinderclient(context).volumes.get(volume_id)
        return _untranslate_volume_summary_view(context, item)

    def get_all(self, context, search_opts={}):
        items = cinderclient(context).volumes.list(detailed=True)
        rval = []

        for item in items:
            rval.append(_untranslate_volume_summary_view(context, item))

        return rval

    def check_attach(self, context, volume):
        # TODO(vish): abstract status checking?
        if volume['status'] != "available":
            msg = _("status must be available")
            raise exception.InvalidVolume(reason=msg)
        if volume['attach_status'] == "attached":
            msg = _("already attached")
            raise exception.InvalidVolume(reason=msg)

    def check_detach(self, context, volume):
        # TODO(vish): abstract status checking?
        if volume['status'] == "available":
            msg = _("already detached")
            raise exception.InvalidVolume(reason=msg)

    def reserve_volume(self, context, volume):
        cinderclient(context).volumes.reserve(volume['id'])

    def unreserve_volume(self, context, volume):
        cinderclient(context).volumes.unreserve(volume['id'])

    def attach(self, context, volume, instance_uuid, mountpoint):
        cinderclient(context).volumes.attach(volume['id'],
                                             instance_uuid,
                                             mountpoint)

    def detach(self, context, volume):
        cinderclient(context).volumes.detach(volume['id'])

    def initialize_connection(self, context, volume, connector):
        return cinderclient(context).\
            volumes.initialize_connection(volume['id'], connector)

    def terminate_connection(self, context, volume, connector):
        return cinderclient(context).\
            volumes.terminate_connection(volume['id'], connector)

    def create(self, context, size, name, description, snapshot=None,
               volume_type=None, metadata=None, availability_zone=None):

        item = cinderclient(context).volumes.create(size, snapshot,
                                                    name, description,
                                                    volume_type)

        return _untranslate_volume_summary_view(context, item)

    def delete(self, context, volume):
        cinderclient(context).volumes.delete(volume['id'])

    def update(self, context, volume, fields):
        raise NotImplementedError()

    def get_snapshot(self, context, snapshot_id):
        item = cinderclient(context).volume_snapshots.get(snapshot_id)
        return _untranslate_snapshot_summary_view(context, item)

    def get_all_snapshots(self, context):
        items = cinderclient(context).volume_snapshots.list(detailed=True)
        rvals = []

        for item in items:
            rvals.append(_untranslate_snapshot_summary_view(context, item))

        return rvals

    def create_snapshot(self, context, volume, name, description):
        item = cinderclient(context).volume_snapshots.create(volume['id'],
                                                             False,
                                                             name,
                                                             description)
        return _untranslate_snapshot_summary_view(context, item)

    def create_snapshot_force(self, context, volume, name, description):
        item = cinderclient(context).volume_snapshots.create(volume['id'],
                                                             True,
                                                             name,
                                                             description)

        return _untranslate_snapshot_summary_view(context, item)

    def delete_snapshot(self, context, snapshot):
        cinderclient(context).volume_snapshots.delete(snapshot['id'])

    def get_volume_metadata(self, context, volume):
        raise NotImplementedError()

    def delete_volume_metadata(self, context, volume, key):
        raise NotImplementedError()

    def update_volume_metadata(self, context, volume, metadata, delete=False):
        raise NotImplementedError()

    def get_volume_metadata_value(self, volume, key):
        raise NotImplementedError()