Update create_volume API to use versionedobjects

The following patch updates the create_volume API to use
volume versionedobjects.  Changes were made to be
backwards compatible with older RPC clients.  It
only includes changes to the core Cinder code;
changes in the drivers are left to each driver
maintainer.
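
As an aside, the compatibility scheme is easiest to see in isolation.
Below is a minimal, self-contained sketch (not Cinder code; the real
logic is in the scheduler and volume rpcapi hunks further down): the
casting side sends the versioned 'volume' object only when the client
reports that the receiver understands the new RPC version, and falls
back to the old id-only signature otherwise.

# Minimal sketch of the version-negotiation pattern; FakeRpcClient is a
# stand-in for oslo.messaging's RPCClient.
class FakeRpcClient(object):
    def __init__(self, max_version):
        self.max_version = max_version

    def can_send_version(self, version):
        # The real RPCClient compares against a negotiated version cap.
        return version <= self.max_version


def build_create_volume_cast(client, volume_id, volume):
    msg_args = {'volume_id': volume_id}  # always sent, for old servers
    if client.can_send_version('1.9'):
        version = '1.9'
        msg_args['volume'] = volume      # new servers also get the object
    else:
        version = '1.2'                  # old servers look the volume up
    return version, msg_args


# A current server receives the object; an old one only the id.
assert build_create_volume_cast(FakeRpcClient('1.9'), 'vol-1', 'obj')[0] == '1.9'
assert build_create_volume_cast(FakeRpcClient('1.2'), 'vol-1', 'obj')[0] == '1.2'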

Note that this patch DOES NOT try to use
object dot notation everywhere, since that would
increase the size of the patch.  Instead, dot
notation will be adopted in subsequent patches.
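
Deferring the dot-notation cleanup is safe because Cinder's objects
keep dict-style access during the transition; the hunks below still
mix volume['status'] with volume.status. A toy illustration (not
Cinder's actual object implementation):

# Toy dict-compatible object: both access styles resolve to the same
# field, so dict-style call sites keep working until they are converted.
class DictCompatVolume(object):
    def __init__(self, **fields):
        self.__dict__.update(fields)

    def __getitem__(self, key):
        # Old dict-style access, e.g. volume['status'].
        return getattr(self, key)


vol = DictCompatVolume(status='creating', size=20)
assert vol.status == vol['status'] == 'creating'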

Co-Authored-By: Michal Dulko <michal.dulko@intel.com>
Change-Id: Ic1b0f09132f8fc500b29650abbd57f18ea8bd9dd
Partial-Implements: blueprint cinder-objects

Change-Id: Ief9c63e8bddb2b40bdef4465b9099cff33d7c3bc

Author: Thang Pham
Date:   2015-10-27 15:38:54 -07:00
Commit: 032032f0c3 (parent 6a942ba647)
22 changed files with 389 additions and 316 deletions

@@ -398,11 +398,6 @@ class VolumeController(wsgi.Controller):
                                            volume.get('display_description'),
                                            **kwargs)
-        # TODO(vish): Instance should be None at db layer instead of
-        #             trying to lazy load, but for now we turn it into
-        #             a dict to avoid an error.
-        new_volume = dict(new_volume)
         retval = _translate_volume_detail_view(context, new_volume, image_uuid)
         return {'volume': retval}

@@ -418,10 +418,6 @@ class VolumeController(wsgi.Controller):
                                            volume.get('display_description'),
                                            **kwargs)
-        # TODO(vish): Instance should be None at db layer instead of
-        #             trying to lazy load, but for now we turn it into
-        #             a dict to avoid an error.
-        new_volume = dict(new_volume)
         retval = self._view_builder.detail(req, new_volume)
         return retval

@@ -23,8 +23,8 @@ from oslo_config import cfg
 from oslo_utils import importutils
 from oslo_utils import timeutils

-from cinder import db
 from cinder.i18n import _
+from cinder import objects
 from cinder.volume import rpcapi as volume_rpcapi

@@ -46,8 +46,14 @@ def volume_update_db(context, volume_id, host):
     :returns: A Volume with the updated fields set properly.
     """
-    values = {'host': host, 'scheduled_at': timeutils.utcnow()}
-    return db.volume_update(context, volume_id, values)
+    volume = objects.Volume.get_by_id(context, volume_id)
+    volume.host = host
+    volume.scheduled_at = timeutils.utcnow()
+    volume.save()
+
+    # A volume object is expected to be returned, as it is used by
+    # filter_scheduler.
+    return volume


 def group_update_db(context, group, host):

@@ -17,7 +17,7 @@ from taskflow.patterns import linear_flow

 from cinder import exception
 from cinder import flow_utils
-from cinder.i18n import _, _LE
+from cinder.i18n import _LE
 from cinder import rpc
 from cinder import utils
 from cinder.volume.flows import common

@@ -40,39 +40,33 @@ class ExtractSchedulerSpecTask(flow_utils.CinderTask):
                                                         **kwargs)
         self.db_api = db_api

-    def _populate_request_spec(self, context, volume_id, snapshot_id,
+    def _populate_request_spec(self, context, volume, snapshot_id,
                                image_id):
-        # Create the full request spec using the volume_id.
+        # Create the full request spec using the volume object.
         #
-        # NOTE(harlowja): this will fetch the volume from the database, if
-        # the volume has been deleted before we got here then this should fail.
-        #
-        # In the future we might want to have a lock on the volume_id so that
-        # the volume can not be deleted while its still being created?
-        if not volume_id:
-            raise exception.InvalidInput(
-                reason=_("No volume_id provided to populate a "
-                         "request_spec from"))
-        volume_ref = self.db_api.volume_get(context, volume_id)
-        volume_type_id = volume_ref.get('volume_type_id')
-        vol_type = self.db_api.volume_type_get(context, volume_type_id)
+        # NOTE(dulek): At this point, a volume can be deleted before it gets
+        # scheduled. If a delete API call is made, the volume gets instantly
+        # deleted and scheduling will fail when it tries to update the DB
+        # entry (with the host) in ScheduleCreateVolumeTask below.
+        volume_type_id = volume.volume_type_id
+        vol_type = volume.volume_type
         return {
-            'volume_id': volume_id,
+            'volume_id': volume.id,
             'snapshot_id': snapshot_id,
             'image_id': image_id,
             'volume_properties': {
-                'size': utils.as_int(volume_ref.get('size'), quiet=False),
-                'availability_zone': volume_ref.get('availability_zone'),
+                'size': utils.as_int(volume.size, quiet=False),
+                'availability_zone': volume.availability_zone,
                 'volume_type_id': volume_type_id,
             },
             'volume_type': list(dict(vol_type).items()),
         }

-    def execute(self, context, request_spec, volume_id, snapshot_id,
+    def execute(self, context, request_spec, volume, snapshot_id,
                 image_id):
         # For RPC version < 1.2 backward compatibility
         if request_spec is None:
-            request_spec = self._populate_request_spec(context, volume_id,
+            request_spec = self._populate_request_spec(context, volume.id,
                                                        snapshot_id, image_id)
         return {
             'request_spec': request_spec,

@@ -143,7 +137,7 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask):

 def get_flow(context, db_api, driver_api, request_spec=None,
              filter_properties=None,
-             volume_id=None, snapshot_id=None, image_id=None):
+             volume=None, snapshot_id=None, image_id=None):
     """Constructs and returns the scheduler entrypoint flow.

@@ -158,7 +152,7 @@ def get_flow(context, db_api, driver_api, request_spec=None,
         'context': context,
         'raw_request_spec': request_spec,
         'filter_properties': filter_properties,
-        'volume_id': volume_id,
+        'volume': volume,
         'snapshot_id': snapshot_id,
         'image_id': image_id,
     }

@@ -33,6 +33,7 @@ from cinder import exception
 from cinder import flow_utils
 from cinder.i18n import _, _LE
 from cinder import manager
+from cinder import objects
 from cinder import quota
 from cinder import rpc
 from cinder.scheduler.flows import create_volume

@@ -55,7 +56,7 @@ LOG = logging.getLogger(__name__)
 class SchedulerManager(manager.Manager):
     """Chooses a host to create volumes."""

-    RPC_API_VERSION = '1.8'
+    RPC_API_VERSION = '1.9'

     target = messaging.Target(version=RPC_API_VERSION)

@@ -116,15 +117,22 @@ class SchedulerManager(manager.Manager):
     def create_volume(self, context, topic, volume_id, snapshot_id=None,
                       image_id=None, request_spec=None,
-                      filter_properties=None):
+                      filter_properties=None, volume=None):
         self._wait_for_scheduler()

+        # FIXME(thangp): Remove this in v2.0 of RPC API.
+        if volume is None:
+            # For older clients, mimic the old behavior and look up the
+            # volume by its volume_id.
+            volume = objects.Volume.get_by_id(context, volume_id)
+
         try:
             flow_engine = create_volume.get_flow(context,
                                                  db, self.driver,
                                                  request_spec,
                                                  filter_properties,
-                                                 volume_id,
+                                                 volume,
                                                  snapshot_id,
                                                  image_id)
         except Exception:

@@ -42,6 +42,7 @@ class SchedulerAPI(object):
         1.6 - Add create_consistencygroup method
         1.7 - Add get_active_pools method
         1.8 - Add sending object over RPC in create_consistencygroup method
+        1.9 - Adds support for sending objects over RPC in create_volume()
     """

     RPC_API_VERSION = '1.0'

@@ -51,7 +52,10 @@ class SchedulerAPI(object):
         target = messaging.Target(topic=CONF.scheduler_topic,
                                   version=self.RPC_API_VERSION)
         serializer = objects_base.CinderObjectSerializer()
-        self.client = rpc.get_client(target, version_cap='1.8',
+
+        # NOTE(thangp): Until version pinning is implemented, set the client
+        # version_cap to None
+        self.client = rpc.get_client(target, version_cap=None,
                                      serializer=serializer)

     def create_consistencygroup(self, ctxt, topic, group,

@@ -72,17 +76,21 @@ class SchedulerAPI(object):
     def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
                       image_id=None, request_spec=None,
-                      filter_properties=None):
-        cctxt = self.client.prepare(version='1.2')
+                      filter_properties=None, volume=None):
         request_spec_p = jsonutils.to_primitive(request_spec)
-        return cctxt.cast(ctxt, 'create_volume',
-                          topic=topic,
-                          volume_id=volume_id,
-                          snapshot_id=snapshot_id,
-                          image_id=image_id,
-                          request_spec=request_spec_p,
-                          filter_properties=filter_properties)
+        msg_args = {'topic': topic, 'volume_id': volume_id,
+                    'snapshot_id': snapshot_id, 'image_id': image_id,
+                    'request_spec': request_spec_p,
+                    'filter_properties': filter_properties}
+        if self.client.can_send_version('1.9'):
+            version = '1.9'
+            msg_args['volume'] = volume
+        else:
+            version = '1.2'
+
+        cctxt = self.client.prepare(version=version)
+        return cctxt.cast(ctxt, 'create_volume', **msg_args)

     def migrate_volume_to_host(self, ctxt, topic, volume_id, host,
                                force_host_copy=False, request_spec=None,

@@ -87,7 +87,25 @@ class SchedulerRpcAPITestCase(test.TestCase):
                                  capabilities='fake_capabilities',
                                  fanout=True)

-    def test_create_volume(self):
+    @mock.patch('oslo_messaging.RPCClient.can_send_version',
+                return_value=True)
+    def test_create_volume(self, can_send_version):
+        self._test_scheduler_api('create_volume',
+                                 rpc_method='cast',
+                                 topic='topic',
+                                 volume_id='volume_id',
+                                 snapshot_id='snapshot_id',
+                                 image_id='image_id',
+                                 request_spec='fake_request_spec',
+                                 filter_properties='filter_properties',
+                                 volume='volume',
+                                 version='1.9')
+        can_send_version.assert_called_once_with('1.9')
+
+    @mock.patch('oslo_messaging.RPCClient.can_send_version',
+                return_value=False)
+    def test_create_volume_old(self, can_send_version):
+        # Tests backwards compatibility with older clients
         self._test_scheduler_api('create_volume',
                                  rpc_method='cast',
                                  topic='topic',
@@ -97,6 +115,7 @@ class SchedulerRpcAPITestCase(test.TestCase):
                                  request_spec='fake_request_spec',
                                  filter_properties='filter_properties',
                                  version='1.2')
+        can_send_version.assert_called_once_with('1.9')

     def test_migrate_volume_to_host(self):
         self._test_scheduler_api('migrate_volume_to_host',

@@ -28,6 +28,7 @@ from cinder.scheduler import filter_scheduler
 from cinder.scheduler import manager
 from cinder import test
 from cinder.tests.unit import fake_consistencygroup
+from cinder.tests.unit import fake_volume
 from cinder.tests.unit import utils as tests_utils


 CONF = cfg.CONF

@@ -100,15 +101,16 @@ class SchedulerManagerTestCase(test.TestCase):
         # Test NoValidHost exception behavior for create_volume.
         # Puts the volume in 'error' state and eats the exception.
         _mock_sched_create.side_effect = exception.NoValidHost(reason="")
-        fake_volume_id = 1
+        volume = fake_volume.fake_volume_obj(self.context)
         topic = 'fake_topic'
-        request_spec = {'volume_id': fake_volume_id}
+        request_spec = {'volume_id': volume.id}

-        self.manager.create_volume(self.context, topic, fake_volume_id,
+        self.manager.create_volume(self.context, topic, volume.id,
                                    request_spec=request_spec,
-                                   filter_properties={})
+                                   filter_properties={},
+                                   volume=volume)
         _mock_volume_update.assert_called_once_with(self.context,
-                                                    fake_volume_id,
+                                                    volume.id,
                                                     {'status': 'error'})
         _mock_sched_create.assert_called_once_with(self.context, request_spec,
                                                    {})

@@ -116,14 +118,15 @@ class SchedulerManagerTestCase(test.TestCase):
     @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
     @mock.patch('eventlet.sleep')
     def test_create_volume_no_delay(self, _mock_sleep, _mock_sched_create):
-        fake_volume_id = 1
+        volume = fake_volume.fake_volume_obj(self.context)
         topic = 'fake_topic'
-        request_spec = {'volume_id': fake_volume_id}
+        request_spec = {'volume_id': volume.id}

-        self.manager.create_volume(self.context, topic, fake_volume_id,
+        self.manager.create_volume(self.context, topic, volume.id,
                                    request_spec=request_spec,
-                                   filter_properties={})
+                                   filter_properties={},
+                                   volume=volume)
         _mock_sched_create.assert_called_once_with(self.context, request_spec,
                                                    {})
         self.assertFalse(_mock_sleep.called)

@@ -135,16 +138,17 @@ class SchedulerManagerTestCase(test.TestCase):
                                            _mock_is_ready,
                                            _mock_sched_create):
         self.manager._startup_delay = True
-        fake_volume_id = 1
+        volume = fake_volume.fake_volume_obj(self.context)
         topic = 'fake_topic'
-        request_spec = {'volume_id': fake_volume_id}
+        request_spec = {'volume_id': volume.id}

         _mock_is_ready.side_effect = [False, False, True]

-        self.manager.create_volume(self.context, topic, fake_volume_id,
+        self.manager.create_volume(self.context, topic, volume.id,
                                    request_spec=request_spec,
-                                   filter_properties={})
+                                   filter_properties={},
+                                   volume=volume)
         _mock_sched_create.assert_called_once_with(self.context, request_spec,
                                                    {})
         calls = [mock.call(1)] * 2

@@ -158,16 +162,17 @@ class SchedulerManagerTestCase(test.TestCase):
                                            _mock_is_ready,
                                            _mock_sched_create):
         self.manager._startup_delay = True
-        fake_volume_id = 1
+        volume = fake_volume.fake_volume_obj(self.context)
         topic = 'fake_topic'
-        request_spec = {'volume_id': fake_volume_id}
+        request_spec = {'volume_id': volume.id}

         _mock_is_ready.return_value = True

-        self.manager.create_volume(self.context, topic, fake_volume_id,
+        self.manager.create_volume(self.context, topic, volume.id,
                                    request_spec=request_spec,
-                                   filter_properties={})
+                                   filter_properties={},
+                                   volume=volume)
         _mock_sched_create.assert_called_once_with(self.context, request_spec,
                                                    {})
         self.assertFalse(_mock_sleep.called)

@@ -346,10 +351,13 @@ class SchedulerDriverModuleTestCase(test.TestCase):
         self.context = context.RequestContext('fake_user', 'fake_project')

     @mock.patch('cinder.db.volume_update')
-    @mock.patch('oslo_utils.timeutils.utcnow')
-    def test_volume_host_update_db(self, _mock_utcnow, _mock_vol_update):
-        _mock_utcnow.return_value = 'fake-now'
-        driver.volume_update_db(self.context, 31337, 'fake_host')
-        _mock_vol_update.assert_called_once_with(self.context, 31337,
-                                                 {'host': 'fake_host',
-                                                  'scheduled_at': 'fake-now'})
+    @mock.patch('cinder.objects.volume.Volume.get_by_id')
+    def test_volume_host_update_db(self, _mock_volume_get, _mock_vol_update):
+        volume = fake_volume.fake_volume_obj(self.context)
+        _mock_volume_get.return_value = volume
+
+        driver.volume_update_db(self.context, volume.id, 'fake_host')
+        scheduled_at = volume.scheduled_at.replace(tzinfo=None)
+        _mock_vol_update.assert_called_once_with(
+            self.context, volume.id, {'host': 'fake_host',
+                                      'scheduled_at': scheduled_at})

@@ -21,13 +21,12 @@ import os
 import tempfile

 import mock
-from oslo_utils import timeutils
 from oslo_utils import units

-from cinder import db
 from cinder import exception
 from cinder.i18n import _
 from cinder.image import image_utils
+from cinder import objects
 from cinder import test
 from cinder.tests.unit.image import fake as fake_image
 from cinder.tests.unit import test_volume

@@ -1090,7 +1089,6 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
         NOTE: if clone_error is True we force the image type to raw otherwise
               clone_image is not called
         """
-        volume_id = 1

         # See tests.image.fake for image types.
         if raw:

@@ -1099,32 +1097,34 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
             image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'

         # creating volume testdata
-        db.volume_create(self.context,
-                         {'id': volume_id,
-                          'updated_at': timeutils.utcnow(),
-                          'display_description': 'Test Desc',
-                          'size': 20,
-                          'status': 'creating',
-                          'instance_uuid': None,
-                          'host': 'dummy'})
+        db_volume = {'display_description': 'Test Desc',
+                     'size': 20,
+                     'status': 'creating',
+                     'availability_zone': 'fake_zone',
+                     'attach_status': 'detached',
+                     'host': 'dummy'}
+        volume = objects.Volume(context=self.context, **db_volume)
+        volume.create()

         try:
             if not clone_error:
                 self.volume.create_volume(self.context,
-                                          volume_id,
-                                          request_spec={'image_id': image_id})
+                                          volume.id,
+                                          request_spec={'image_id': image_id},
+                                          volume=volume)
             else:
                 self.assertRaises(exception.CinderException,
                                   self.volume.create_volume,
                                   self.context,
-                                  volume_id,
-                                  request_spec={'image_id': image_id})
+                                  volume.id,
+                                  request_spec={'image_id': image_id},
+                                  volume=volume)

-            volume = db.volume_get(self.context, volume_id)
-            self.assertEqual(expected_status, volume['status'])
+            volume = objects.Volume.get_by_id(self.context, volume.id)
+            self.assertEqual(expected_status, volume.status)
         finally:
             # cleanup
-            db.volume_destroy(self.context, volume_id)
+            volume.destroy()

     def test_create_vol_from_image_status_available(self):
         """Clone raw image then verify volume is in available state."""

@@ -3024,16 +3024,12 @@ class StorwizeSVCDriverTestCase(test.TestCase):
     @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'rename_vdisk')
     def test_storwize_update_migrated_volume(self, rename_vdisk):
         ctxt = testutils.get_test_admin_context()
-        current_volume_id = 'fake_volume_id'
-        original_volume_id = 'fake_original_volume_id'
-        current_name = 'volume-' + current_volume_id
-        original_name = 'volume-' + original_volume_id
-        backend_volume = self._create_volume(id=current_volume_id)
-        volume = self._create_volume(id=original_volume_id)
+        backend_volume = self._create_volume()
+        volume = self._create_volume()
         model_update = self.driver.update_migrated_volume(ctxt, volume,
                                                           backend_volume,
                                                           'available')
-        rename_vdisk.assert_called_once_with(current_name, original_name)
+        rename_vdisk.assert_called_once_with(backend_volume.name, volume.name)
         self.assertEqual({'_name_id': None}, model_update)

         rename_vdisk.reset_mock()

@@ -3041,14 +3037,14 @@ class StorwizeSVCDriverTestCase(test.TestCase):
         model_update = self.driver.update_migrated_volume(ctxt, volume,
                                                           backend_volume,
                                                           'available')
-        self.assertEqual({'_name_id': current_volume_id}, model_update)
+        self.assertEqual({'_name_id': backend_volume.id}, model_update)

         rename_vdisk.reset_mock()
         rename_vdisk.side_effect = exception.VolumeBackendAPIException
         model_update = self.driver.update_migrated_volume(ctxt, volume,
                                                           backend_volume,
                                                           'attached')
-        self.assertEqual({'_name_id': current_volume_id}, model_update)
+        self.assertEqual({'_name_id': backend_volume.id}, model_update)

     def test_storwize_vdisk_copy_ops(self):
         ctxt = testutils.get_test_admin_context()

@@ -54,6 +54,7 @@ from cinder.tests.unit.brick import fake_lvm
 from cinder.tests.unit import conf_fixture
 from cinder.tests.unit import fake_driver
 from cinder.tests.unit import fake_snapshot
+from cinder.tests.unit import fake_volume
 from cinder.tests.unit.image import fake as fake_image
 from cinder.tests.unit.keymgr import fake as fake_keymgr
 from cinder.tests.unit import utils as tests_utils
@@ -516,17 +517,16 @@ class VolumeTestCase(BaseVolumeTestCase):
             availability_zone=CONF.storage_availability_zone,
             **self.volume_params)
-        volume_id = volume['id']

         self.assertIsNone(volume['encryption_key_id'])
         self.assertEqual(0, len(self.notifier.notifications),
                          self.notifier.notifications)
         self.assertRaises(exception.DriverNotInitialized,
                           self.volume.delete_volume,
-                          self.context, volume_id)
+                          self.context, volume.id)

-        volume = db.volume_get(context.get_admin_context(), volume_id)
+        volume = objects.Volume.get_by_id(self.context, volume.id)
         self.assertEqual("error_deleting", volume.status)
-        db.volume_destroy(context.get_admin_context(), volume_id)
+        volume.destroy()

     @mock.patch('cinder.quota.QUOTAS.rollback', new=mock.Mock())
     @mock.patch('cinder.quota.QUOTAS.commit', new=mock.Mock())
@@ -562,7 +562,7 @@ class VolumeTestCase(BaseVolumeTestCase):
             'replication_status': 'disabled',
             'replication_extended_status': None,
             'replication_driver_data': None,
-            'metadata': [],
+            'metadata': None,
             'volume_attachment': [],
         }
         self.assertDictMatch(expected, msg['payload'])

@@ -580,6 +580,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertEqual(4, len(self.notifier.notifications),
                          self.notifier.notifications)
         msg = self.notifier.notifications[2]
+        expected['metadata'] = []
         self.assertEqual('volume.delete.start', msg['event_type'])
         self.assertDictMatch(expected, msg['payload'])
         msg = self.notifier.notifications[3]
@@ -597,9 +598,7 @@ class VolumeTestCase(BaseVolumeTestCase):
                                            **self.volume_params)
         volume_id = volume['id']
         self.volume.create_volume(self.context, volume_id)
-        result_meta = {
-            volume.volume_metadata[0].key: volume.volume_metadata[0].value}
-        self.assertEqual(test_meta, result_meta)
+        self.assertEqual(test_meta, volume.metadata)

         self.volume.delete_volume(self.context, volume_id)
         self.assertRaises(exception.NotFound,
@@ -629,8 +628,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
         volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                            **self.volume_params)
-        volume_id = volume['id']
-        self.volume.create_volume(self.context, volume_id)
+        self.volume.create_volume(self.context, volume.id, volume=volume)

         volume_api = cinder.volume.api.API()
@@ -1558,7 +1556,6 @@ class VolumeTestCase(BaseVolumeTestCase):
         dst_vol = tests_utils.create_volume(self.context,
                                             source_volid=src_vol_id,
                                             **self.volume_params)
-        dst_vol_id = dst_vol['id']

         orig_elevated = self.context.elevated

@@ -1571,7 +1568,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         # we expect this to block and then fail
         t = eventlet.spawn(self.volume.create_volume,
                            self.context,
-                           volume_id=dst_vol_id,
+                           volume_id=dst_vol.id,
                            request_spec={'source_volid': src_vol_id})
         gthreads.append(t)
@@ -1747,8 +1744,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         dst_vol = tests_utils.create_volume(self.context,
                                             snapshot_id=snapshot_id,
                                             **self.volume_params)
-        self.volume.create_volume(self.context,
-                                  dst_vol['id'])
+        self.volume.create_volume(self.context, dst_vol.id, volume=dst_vol)

         self.assertRaises(exception.GlanceMetadataNotFound,
                           db.volume_glance_metadata_copy_to_volume,
@@ -3548,8 +3544,7 @@ class VolumeTestCase(BaseVolumeTestCase):
             spec=tests_utils.get_file_spec())
         image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
-        volume_id = tests_utils.create_volume(self.context,
-                                              **self.volume_params)['id']
+        volume = tests_utils.create_volume(self.context, **self.volume_params)
         # creating volume testdata
         try:
             request_spec = {

@@ -3557,12 +3552,13 @@ class VolumeTestCase(BaseVolumeTestCase):
                 'image_id': image_id,
             }
             self.volume.create_volume(self.context,
-                                      volume_id,
-                                      request_spec)
+                                      volume.id,
+                                      request_spec,
+                                      volume=volume)
         finally:
             # cleanup
             os.unlink(dst_path)
-            volume = db.volume_get(self.context, volume_id)
+            volume = objects.Volume.get_by_id(self.context, volume.id)

         return volume
@@ -3600,25 +3596,25 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path)

         # creating volume testdata
-        volume_id = 1
-        db.volume_create(self.context,
-                         {'id': volume_id,
-                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
-                          'display_description': 'Test Desc',
-                          'size': 20,
-                          'status': 'creating',
-                          'host': 'dummy'})
+        kwargs = {'display_description': 'Test Desc',
+                  'size': 20,
+                  'availability_zone': 'fake_availability_zone',
+                  'status': 'creating',
+                  'attach_status': 'detached',
+                  'host': 'dummy'}
+        volume = objects.Volume(context=self.context, **kwargs)
+        volume.create()

         self.assertRaises(exception.ImageNotFound,
                           self.volume.create_volume,
                           self.context,
-                          volume_id,
+                          volume.id,
                           {'image_id': self.FAKE_UUID})
-        volume = db.volume_get(self.context, volume_id)
+        volume = objects.Volume.get_by_id(self.context, volume.id)
         self.assertEqual("error", volume['status'])
         self.assertFalse(volume['bootable'])

         # cleanup
-        db.volume_destroy(self.context, volume_id)
+        volume.destroy()
         os.unlink(dst_path)

     def test_create_volume_from_image_copy_exception_rescheduling(self):
@@ -4389,20 +4385,20 @@ class VolumeMigrationTestCase(VolumeTestCase):
                                              nova_api):
         attached_host = 'some-host'
         fake_volume_id = 'fake_volume_id'
-        fake_new_volume = {'status': 'available', 'id': fake_volume_id}
+        fake_db_new_volume = {'status': 'available', 'id': fake_volume_id}
+        fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume)
         host_obj = {'host': 'newhost', 'capabilities': {}}
         fake_uuid = fakes.get_fake_uuid()
         update_server_volume = nova_api.return_value.update_server_volume
         volume_get.return_value = fake_new_volume
         volume = tests_utils.create_volume(self.context, size=1,
                                            host=CONF.host)
-        volume = tests_utils.attach_volume(self.context, volume['id'],
-                                           fake_uuid, attached_host,
-                                           '/dev/vda')
-        self.assertIsNotNone(volume['volume_attachment'][0]['id'])
-        self.assertEqual(fake_uuid,
-                         volume['volume_attachment'][0]['instance_uuid'])
-        self.assertEqual('in-use', volume['status'])
+        volume_attach = tests_utils.attach_volume(
+            self.context, volume['id'], fake_uuid, attached_host, '/dev/vda')
+        self.assertIsNotNone(volume_attach['volume_attachment'][0]['id'])
+        self.assertEqual(
+            fake_uuid, volume_attach['volume_attachment'][0]['instance_uuid'])
+        self.assertEqual('in-use', volume_attach['status'])
         self.volume._migrate_volume_generic(self.context, volume,
                                             host_obj, None)
         self.assertFalse(migrate_volume_completion.called)
@@ -5118,8 +5114,7 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase):
             consistencygroup_id=group2.id,
             snapshot_id=snapshot_id,
             **self.volume_params)
-        volume2_id = volume2['id']
-        self.volume.create_volume(self.context, volume2_id)
+        self.volume.create_volume(self.context, volume2.id, volume=volume2)
         self.volume.create_consistencygroup_from_src(
             self.context, group2, cgsnapshot=cgsnapshot)
         cg2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)

@@ -5186,8 +5181,7 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase):
             consistencygroup_id=group3.id,
             source_volid=volume_id,
             **self.volume_params)
-        volume3_id = volume3['id']
-        self.volume.create_volume(self.context, volume3_id)
+        self.volume.create_volume(self.context, volume3.id, volume=volume3)

         self.volume.create_consistencygroup_from_src(
             self.context, group3, source_cg=group)
@@ -5444,8 +5438,7 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase):
                                            status='creating',
                                            size=1)
         self.volume.host = 'host1@backend1'
-        volume_id = volume['id']
-        self.volume.create_volume(self.context, volume_id)
+        self.volume.create_volume(self.context, volume.id, volume=volume)

         self.volume.delete_consistencygroup(self.context, group)
         cg = objects.ConsistencyGroup.get_by_id(

@@ -5480,8 +5473,7 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase):
                                            status='creating',
                                            size=1)
         self.volume.host = 'host1@backend2'
-        volume_id = volume['id']
-        self.volume.create_volume(self.context, volume_id)
+        self.volume.create_volume(self.context, volume.id, volume=volume)

         self.assertRaises(exception.InvalidVolume,
                           self.volume.delete_consistencygroup,

@@ -17,6 +17,7 @@ Unit Tests for cinder.volume.rpcapi
 """

 import copy

+import mock
 from oslo_config import cfg
 from oslo_serialization import jsonutils

@@ -84,6 +85,7 @@ class VolumeRpcAPITestCase(test.TestCase):
         group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
         cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id)
         self.fake_volume = jsonutils.to_primitive(volume)
+        self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol)
         self.fake_volume_metadata = volume["volume_metadata"]
         self.fake_snapshot = snapshot
         self.fake_reservations = ["RESERVATION"]
@@ -117,8 +119,13 @@ class VolumeRpcAPITestCase(test.TestCase):
         expected_msg = copy.deepcopy(kwargs)
         if 'volume' in expected_msg:
             volume = expected_msg['volume']
+            # NOTE(thangp): copy.deepcopy() is making oslo_versionedobjects
+            # think that 'metadata' was changed.
+            if isinstance(volume, objects.Volume):
+                volume.obj_reset_changes()
             del expected_msg['volume']
             expected_msg['volume_id'] = volume['id']
+            expected_msg['volume'] = volume
         if 'snapshot' in expected_msg:
             snapshot = expected_msg['snapshot']
             del expected_msg['snapshot']
@@ -194,6 +201,10 @@ class VolumeRpcAPITestCase(test.TestCase):
                 expected_cgsnapshot = expected_msg[kwarg].obj_to_primitive()
                 cgsnapshot = value.obj_to_primitive()
                 self.assertEqual(expected_cgsnapshot, cgsnapshot)
+            elif isinstance(value, objects.Volume):
+                expected_volume = expected_msg[kwarg].obj_to_primitive()
+                volume = value.obj_to_primitive()
+                self.assertEqual(expected_volume, volume)
             else:
                 self.assertEqual(expected_msg[kwarg], value)
@@ -219,26 +230,46 @@ class VolumeRpcAPITestCase(test.TestCase):
         self._test_volume_api('delete_cgsnapshot', rpc_method='cast',
                               cgsnapshot=self.fake_cgsnap, version='1.31')

-    def test_create_volume(self):
+    @mock.patch('oslo_messaging.RPCClient.can_send_version',
+                return_value=True)
+    def test_create_volume(self, can_send_version):
+        self._test_volume_api('create_volume',
+                              rpc_method='cast',
+                              volume=self.fake_volume_obj,
+                              host='fake_host1',
+                              request_spec='fake_request_spec',
+                              filter_properties='fake_properties',
+                              allow_reschedule=True,
+                              version='1.32')
+        can_send_version.assert_called_once_with('1.32')
+
+    @mock.patch('oslo_messaging.RPCClient.can_send_version',
+                return_value=False)
+    def test_create_volume_old(self, can_send_version):
+        # Tests backwards compatibility with older clients
         self._test_volume_api('create_volume',
                               rpc_method='cast',
-                              volume=self.fake_volume,
+                              volume=self.fake_volume_obj,
                               host='fake_host1',
                               request_spec='fake_request_spec',
                               filter_properties='fake_properties',
                               allow_reschedule=True,
                               version='1.24')
+        can_send_version.assert_called_once_with('1.32')

-    def test_create_volume_serialization(self):
+    @mock.patch('oslo_messaging.RPCClient.can_send_version',
+                return_value=True)
+    def test_create_volume_serialization(self, can_send_version):
         request_spec = {"metadata": self.fake_volume_metadata}
         self._test_volume_api('create_volume',
                               rpc_method='cast',
-                              volume=self.fake_volume,
+                              volume=self.fake_volume_obj,
                               host='fake_host1',
                               request_spec=request_spec,
                               filter_properties='fake_properties',
                               allow_reschedule=True,
-                              version='1.24')
+                              version='1.32')
+        can_send_version.assert_called_once_with('1.32')

     def test_delete_volume(self):
         self._test_volume_api('delete_volume',

@@ -17,8 +17,8 @@ import datetime
 import mock

 from cinder import context
-from cinder import db
 from cinder import exception
+from cinder import objects
 from cinder import test
 from cinder.tests.unit import utils
 from cinder.transfer import api as transfer_api
@@ -35,10 +35,9 @@ class VolumeTransferTestCase(test.TestCase):
     @mock.patch('cinder.volume.utils.notify_about_volume_usage')
     def test_transfer_volume_create_delete(self, mock_notify):
         tx_api = transfer_api.API()
-        utils.create_volume(self.ctxt, id='1',
-                            updated_at=self.updated_at)
-        response = tx_api.create(self.ctxt, '1', 'Description')
-        volume = db.volume_get(self.ctxt, '1')
+        volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
+        response = tx_api.create(self.ctxt, volume.id, 'Description')
+        volume = objects.Volume.get_by_id(self.ctxt, volume.id)
         self.assertEqual('awaiting-transfer', volume['status'],
                          'Unexpected state')
         calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"),

@@ -47,7 +46,7 @@ class VolumeTransferTestCase(test.TestCase):
         self.assertEqual(2, mock_notify.call_count)

         tx_api.delete(self.ctxt, response['id'])
-        volume = db.volume_get(self.ctxt, '1')
+        volume = objects.Volume.get_by_id(self.ctxt, volume.id)
         self.assertEqual('available', volume['status'], 'Unexpected state')
         calls = [mock.call(self.ctxt, mock.ANY, "transfer.delete.start"),
                  mock.call(self.ctxt, mock.ANY, "transfer.delete.end")]
@@ -56,22 +55,21 @@ class VolumeTransferTestCase(test.TestCase):
     def test_transfer_invalid_volume(self):
         tx_api = transfer_api.API()
-        utils.create_volume(self.ctxt, id='1', status='in-use',
-                            updated_at=self.updated_at)
+        volume = utils.create_volume(self.ctxt, status='in-use',
+                                     updated_at=self.updated_at)
         self.assertRaises(exception.InvalidVolume,
                           tx_api.create,
-                          self.ctxt, '1', 'Description')
-        volume = db.volume_get(self.ctxt, '1')
+                          self.ctxt, volume.id, 'Description')
+        volume = objects.Volume.get_by_id(self.ctxt, volume.id)
         self.assertEqual('in-use', volume['status'], 'Unexpected state')

     @mock.patch('cinder.volume.utils.notify_about_volume_usage')
     def test_transfer_accept(self, mock_notify):
         svc = self.start_service('volume', host='test_host')
         tx_api = transfer_api.API()
-        utils.create_volume(self.ctxt, id='1',
-                            updated_at=self.updated_at)
-        transfer = tx_api.create(self.ctxt, '1', 'Description')
-        volume = db.volume_get(self.ctxt, '1')
+        volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
+        transfer = tx_api.create(self.ctxt, volume.id, 'Description')
+        volume = objects.Volume.get_by_id(self.ctxt, volume.id)
         self.assertEqual('awaiting-transfer', volume['status'],
                          'Unexpected state')
@@ -88,11 +86,13 @@ class VolumeTransferTestCase(test.TestCase):
         mock_notify.assert_has_calls(calls)
         self.assertEqual(2, mock_notify.call_count)

-        db.volume_update(self.ctxt, '1', {'status': 'wrong'})
+        volume.status = 'wrong'
+        volume.save()
         self.assertRaises(exception.InvalidVolume,
                           tx_api.accept,
                           self.ctxt, transfer['id'], transfer['auth_key'])
-        db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'})
+        volume.status = 'awaiting-transfer'
+        volume.save()

         # Because the InvalidVolume exception is raised in tx_api, so there is
         # only transfer.accept.start called and missing transfer.accept.end.
@@ -105,15 +105,13 @@ class VolumeTransferTestCase(test.TestCase):
         response = tx_api.accept(self.ctxt,
                                  transfer['id'],
                                  transfer['auth_key'])
-        volume = db.volume_get(self.ctxt, '1')
-        self.assertEqual('new_project_id', volume['project_id'],
-                         'Unexpected project id')
-        self.assertEqual('new_user_id', volume['user_id'],
-                         'Unexpected user id')
+        volume = objects.Volume.get_by_id(self.ctxt, volume.id)
+        self.assertEqual('new_project_id', volume.project_id)
+        self.assertEqual('new_user_id', volume.user_id)

-        self.assertEqual(volume['id'], response['volume_id'],
+        self.assertEqual(response['volume_id'], volume.id,
                          'Unexpected volume id in response.')
-        self.assertEqual(transfer['id'], response['id'],
+        self.assertEqual(response['id'], transfer['id'],
                          'Unexpected transfer id in response.')

         calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"),
@@ -125,8 +123,7 @@ class VolumeTransferTestCase(test.TestCase):
     def test_transfer_get(self):
         tx_api = transfer_api.API()
-        volume = utils.create_volume(self.ctxt, id='1',
-                                     updated_at=self.updated_at)
+        volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
         transfer = tx_api.create(self.ctxt, volume['id'], 'Description')
         t = tx_api.get(self.ctxt, transfer['id'])
         self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id')
@@ -136,7 +133,7 @@ class VolumeTransferTestCase(test.TestCase):
         nctxt = context.RequestContext(user_id='new_user_id',
                                        project_id='new_project_id')
-        utils.create_volume(nctxt, id='2', updated_at=self.updated_at)
+        utils.create_volume(nctxt, updated_at=self.updated_at)
         self.assertRaises(exception.TransferNotFound,
                           tx_api.get,
                           nctxt,
@@ -148,8 +145,7 @@ class VolumeTransferTestCase(test.TestCase):
     @mock.patch('cinder.volume.utils.notify_about_volume_usage')
     def test_delete_transfer_with_deleted_volume(self, mock_notify):
         # create a volume
-        volume = utils.create_volume(self.ctxt, id='1',
-                                     updated_at=self.updated_at)
+        volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
         # create a transfer
         tx_api = transfer_api.API()
         transfer = tx_api.create(self.ctxt, volume['id'], 'Description')

@@ -161,7 +157,7 @@ class VolumeTransferTestCase(test.TestCase):
         mock_notify.assert_has_calls(calls)
         self.assertEqual(2, mock_notify.call_count)
         # force delete volume
-        db.volume_destroy(context.get_admin_context(), volume['id'])
+        volume.destroy()
         # Make sure transfer has been deleted.
         self.assertRaises(exception.TransferNotFound,
                           tx_api.get,

@@ -52,7 +52,8 @@ def create_volume(ctxt,
     vol['user_id'] = ctxt.user_id
     vol['project_id'] = ctxt.project_id
     vol['status'] = status
-    vol['migration_status'] = migration_status
+    if migration_status:
+        vol['migration_status'] = migration_status
     vol['display_name'] = display_name
     vol['display_description'] = display_description
     vol['attach_status'] = 'detached'

@@ -64,11 +65,16 @@ def create_volume(ctxt,
     for key in kwargs:
         vol[key] = kwargs[key]
     vol['replication_status'] = replication_status
-    vol['replication_extended_status'] = replication_extended_status
-    vol['replication_driver_data'] = replication_driver_data
-    vol['previous_status'] = previous_status
+    if replication_extended_status:
+        vol['replication_extended_status'] = replication_extended_status
+    if replication_driver_data:
+        vol['replication_driver_data'] = replication_driver_data
+    if previous_status:
+        vol['previous_status'] = previous_status

-    return db.volume_create(ctxt, vol)
+    volume = objects.Volume(ctxt, **vol)
+    volume.create()
+    return volume


 def attach_volume(ctxt, volume_id, instance_uuid, attached_host,

@@ -36,9 +36,9 @@ class FakeSchedulerRpcAPI(object):
         self.expected_spec = expected_spec
         self.test_inst = test_inst

-    def create_volume(self, ctxt, volume, volume_ref, snapshot_id=None,
+    def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
                       image_id=None, request_spec=None,
-                      filter_properties=None):
+                      filter_properties=None, volume=None):

         self.test_inst.assertEqual(self.expected_spec, request_spec)

@@ -50,17 +50,21 @@ class CreateVolumeFlowTestCase(test.TestCase):
         # called to avoid div by zero errors.
         self.counter = float(0)

+    @mock.patch('cinder.objects.Volume.get_by_id')
     @mock.patch('cinder.volume.utils.extract_host')
     @mock.patch('time.time', side_effect=time_inc)
     @mock.patch('cinder.objects.ConsistencyGroup.get_by_id')
     def test_cast_create_volume(self, consistencygroup_get_by_id, mock_time,
-                                mock_extract_host):
+                                mock_extract_host, volume_get_by_id):
+        volume = fake_volume.fake_volume_obj(self.ctxt)
+        volume_get_by_id.return_value = volume
         props = {}
         cg_obj = (fake_consistencygroup.
                   fake_consistencyobject_obj(self.ctxt, consistencygroup_id=1,
                                              host='host@backend#pool'))
         consistencygroup_get_by_id.return_value = cg_obj
         spec = {'volume_id': None,
+                'volume': None,
                 'source_volid': None,
                 'snapshot_id': None,
                 'image_id': None,

@@ -76,7 +80,8 @@ class CreateVolumeFlowTestCase(test.TestCase):

         task._cast_create_volume(self.ctxt, spec, props)

-        spec = {'volume_id': 1,
+        spec = {'volume_id': volume.id,
+                'volume': volume,
                 'source_volid': 2,
                 'snapshot_id': 3,
                 'image_id': 4,
@@ -346,26 +351,26 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
     @mock.patch('cinder.volume.flows.manager.create_volume.'
                 'CreateVolumeFromSpecTask.'
                 '_handle_bootable_volume_glance_meta')
+    @mock.patch('cinder.objects.Volume.get_by_id')
     @mock.patch('cinder.objects.Snapshot.get_by_id')
-    def test_create_from_snapshot(self, snapshot_get_by_id, handle_bootable):
+    def test_create_from_snapshot(self, snapshot_get_by_id, volume_get_by_id,
+                                  handle_bootable):
         fake_db = mock.MagicMock()
         fake_driver = mock.MagicMock()
         fake_volume_manager = mock.MagicMock()
         fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
             fake_volume_manager, fake_db, fake_driver)
-        volume = fake_volume.fake_db_volume()
-        orig_volume_db = mock.MagicMock(id=10, bootable=True)
+        volume_db = {'bootable': True}
+        volume_obj = fake_volume.fake_volume_obj(self.ctxt, **volume_db)
         snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt)
         snapshot_get_by_id.return_value = snapshot_obj
-        fake_db.volume_get.return_value = orig_volume_db
+        volume_get_by_id.return_value = volume_obj

-        fake_manager._create_from_snapshot(self.ctxt, volume,
+        fake_manager._create_from_snapshot(self.ctxt, volume_obj,
                                            snapshot_obj.id)
         fake_driver.create_volume_from_snapshot.assert_called_once_with(
-            volume, snapshot_obj)
-        fake_db.volume_get.assert_called_once_with(self.ctxt,
-                                                   snapshot_obj.volume_id)
-        handle_bootable.assert_called_once_with(self.ctxt, volume['id'],
+            volume_obj, snapshot_obj)
+        handle_bootable.assert_called_once_with(self.ctxt, volume_obj.id,
                                                 snapshot_id=snapshot_obj.id)

     @mock.patch('cinder.objects.Snapshot.get_by_id')
@@ -620,11 +625,13 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
             image_meta=image_meta
         )

+    @mock.patch('cinder.db.volume_update')
+    @mock.patch('cinder.objects.Volume.get_by_id')
     @mock.patch('cinder.image.image_utils.qemu_img_info')
     def test_create_from_image_cache_miss(
-            self, mock_qemu_info, mock_get_internal_context,
-            mock_create_from_img_dl, mock_create_from_src,
-            mock_handle_bootable, mock_fetch_img):
+            self, mock_qemu_info, mock_volume_get, mock_volume_update,
+            mock_get_internal_context, mock_create_from_img_dl,
+            mock_create_from_src, mock_handle_bootable, mock_fetch_img):
         mock_get_internal_context.return_value = self.ctxt
         mock_fetch_img.return_value = mock.MagicMock(
             spec=utils.get_file_spec())
@@ -636,13 +643,7 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
         volume = fake_volume.fake_volume_obj(self.ctxt, size=10,
                                              host='foo@bar#pool')
-        image_volume = fake_volume.fake_db_volume(size=2)
-        self.mock_db.volume_create.return_value = image_volume
-
-        def update_volume(ctxt, id, updates):
-            volume.update(updates)
-            return volume
-        self.mock_db.volume_update.side_effect = update_volume
+        mock_volume_get.return_value = volume

         image_location = 'someImageLocationStr'
         image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2'
@@ -676,12 +677,8 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
         )

         # The volume size should be reduced to virtual_size and then put back
-        self.mock_db.volume_update.assert_any_call(self.ctxt,
-                                                   volume['id'],
-                                                   {'size': 2})
-        self.mock_db.volume_update.assert_any_call(self.ctxt,
-                                                   volume['id'],
-                                                   {'size': 10})
+        mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2})
+        mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10})

         # Make sure created a new cache entry
         (self.mock_volume_manager.
@@ -695,9 +692,12 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
             image_meta=image_meta
         )

+    @mock.patch('cinder.db.volume_update')
+    @mock.patch('cinder.objects.Volume.get_by_id')
     @mock.patch('cinder.image.image_utils.qemu_img_info')
     def test_create_from_image_cache_miss_error_downloading(
-            self, mock_qemu_info, mock_get_internal_context,
+            self, mock_qemu_info, mock_volume_get, mock_volume_update,
+            mock_get_internal_context,
             mock_create_from_img_dl, mock_create_from_src,
             mock_handle_bootable, mock_fetch_img):
         mock_fetch_img.return_value = mock.MagicMock()
@ -709,16 +709,10 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
volume = fake_volume.fake_volume_obj(self.ctxt, size=10, volume = fake_volume.fake_volume_obj(self.ctxt, size=10,
host='foo@bar#pool') host='foo@bar#pool')
image_volume = fake_volume.fake_db_volume(size=2) mock_volume_get.return_value = volume
self.mock_db.volume_create.return_value = image_volume
mock_create_from_img_dl.side_effect = exception.CinderException() mock_create_from_img_dl.side_effect = exception.CinderException()
def update_volume(ctxt, id, updates):
volume.update(updates)
return volume
self.mock_db.volume_update.side_effect = update_volume
image_location = 'someImageLocationStr' image_location = 'someImageLocationStr'
image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2'
image_meta = mock.MagicMock() image_meta = mock.MagicMock()
@ -756,13 +750,9 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
# The volume size should be reduced to virtual_size and then put back, # The volume size should be reduced to virtual_size and then put back,
# especially if there is an exception while creating the volume. # especially if there is an exception while creating the volume.
self.assertEqual(2, self.mock_db.volume_update.call_count) self.assertEqual(2, mock_volume_update.call_count)
self.mock_db.volume_update.assert_any_call(self.ctxt, mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2})
volume['id'], mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10})
{'size': 2})
self.mock_db.volume_update.assert_any_call(self.ctxt,
volume['id'],
{'size': 10})
# Make sure we didn't try and create a cache entry # Make sure we didn't try and create a cache entry
self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.ensure_space.called)
@ -773,7 +763,7 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
mock_create_from_src, mock_handle_bootable, mock_fetch_img): mock_create_from_src, mock_handle_bootable, mock_fetch_img):
self.mock_driver.clone_image.return_value = (None, False) self.mock_driver.clone_image.return_value = (None, False)
mock_get_internal_context.return_value = None mock_get_internal_context.return_value = None
volume = fake_volume.fake_db_volume() volume = fake_volume.fake_volume_obj(self.ctxt)
image_location = 'someImageLocationStr' image_location = 'someImageLocationStr'
image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2'


@@ -107,7 +107,8 @@ def check_policy(context, action, target_obj=None):
     if isinstance(target_obj, objects_base.CinderObject):
         # Turn object into dict so target.update can work
-        target.update(target_obj.obj_to_primitive() or {})
+        target.update(
+            target_obj.obj_to_primitive()['versioned_object.data'] or {})
     else:
         target.update(target_obj or {})
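The unwrapping added above is needed because obj_to_primitive() does not
return a flat field dict; it returns oslo.versionedobjects' serialization
envelope, and only the inner data dict is usable as a policy target. A
minimal standalone sketch of that envelope shape (DemoVolume is a toy class
for illustration, not Cinder's real object; exact envelope keys follow the
oslo.versionedobjects defaults):

    from oslo_versionedobjects import base as ovo_base
    from oslo_versionedobjects import fields


    @ovo_base.VersionedObjectRegistry.register
    class DemoVolume(ovo_base.VersionedObject):
        # Toy object used only to show the primitive envelope shape.
        VERSION = '1.0'
        fields = {'id': fields.StringField(), 'size': fields.IntegerField()}


    vol = DemoVolume()
    vol.id = 'vol-1'
    vol.size = 10

    primitive = vol.obj_to_primitive()
    # primitive is an envelope along these lines:
    #   {'versioned_object.name': 'DemoVolume',
    #    'versioned_object.namespace': 'versionedobjects',
    #    'versioned_object.version': '1.0',
    #    'versioned_object.data': {'id': 'vol-1', 'size': 10}, ...}
    # Only the inner data dict makes sense as a policy target:
    target = primitive['versioned_object.data']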
@@ -310,7 +311,7 @@ class API(base.Base):
             'snapshot': snapshot,
             'image_id': image_id,
             'raw_volume_type': volume_type,
-            'metadata': metadata,
+            'metadata': metadata or {},
             'raw_availability_zone': availability_zone,
             'source_volume': source_volume,
             'scheduler_hints': scheduler_hints,


@@ -479,7 +479,8 @@ class EntryCreateTask(flow_utils.CinderTask):
         # Merge in the other required arguments which should provide the rest
         # of the volume property fields (if applicable).
         volume_properties.update(kwargs)
-        volume = self.db.volume_create(context, volume_properties)
+        volume = objects.Volume(context=context, **volume_properties)
+        volume.create()

         return {
             'volume_id': volume['id'],
@@ -505,16 +506,16 @@ class EntryCreateTask(flow_utils.CinderTask):
             # already been created and the quota has already been absorbed.
             return

-        vol_id = result['volume_id']
+        volume = result['volume']
         try:
-            self.db.volume_destroy(context.elevated(), vol_id)
+            volume.destroy()
         except exception.CinderException:
             # We are already reverting, therefore we should silence this
             # exception since a second exception being active will be bad.
             #
             # NOTE(harlowja): Being unable to destroy a volume is pretty
             # bad though!!
-            LOG.exception(_LE("Failed destroying volume entry %s"), vol_id)
+            LOG.exception(_LE("Failed destroying volume entry %s"), volume.id)


 class QuotaReserveTask(flow_utils.CinderTask):
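The execute/revert pair above leans on the object lifecycle that replaces
the raw db.volume_create/db.volume_destroy calls. A rough sketch of that
lifecycle with a hypothetical stand-in class (Cinder's real objects.Volume
wires these methods to the DB API; FakeVolume below exists only to
illustrate the shape):

    class FakeVolume(object):
        # Hypothetical stand-in illustrating the create()/save()/destroy()
        # lifecycle the flow tasks rely on; not Cinder's implementation.
        def __init__(self, context=None, **fields):
            self._context = context
            self._values = dict(fields)   # local, not yet persisted

        def create(self):
            # Persist the row and fill in generated fields (id, timestamps).
            self._values.setdefault('id', 'generated-uuid')

        def save(self):
            # Push only the fields changed since the last create()/save().
            pass

        def destroy(self):
            # Remove (or soft-delete) the backing row.
            pass

        @property
        def id(self):
            return self._values['id']


    # Mirrors EntryCreateTask: execute() creates the row, and revert()
    # can destroy it because the object carries its own identity.
    volume = FakeVolume(context=None, size=10, status='creating')
    volume.create()
    volume.destroy()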
@@ -678,7 +679,7 @@ class VolumeCastTask(flow_utils.CinderTask):
     def __init__(self, scheduler_rpcapi, volume_rpcapi, db):
         requires = ['image_id', 'scheduler_hints', 'snapshot_id',
-                    'source_volid', 'volume_id', 'volume_type',
+                    'source_volid', 'volume_id', 'volume', 'volume_type',
                     'volume_properties', 'source_replicaid',
                     'consistencygroup_id', 'cgsnapshot_id', ]
         super(VolumeCastTask, self).__init__(addons=[ACTION],
@@ -691,6 +692,7 @@ class VolumeCastTask(flow_utils.CinderTask):
         source_volid = request_spec['source_volid']
         source_replicaid = request_spec['source_replicaid']
         volume_id = request_spec['volume_id']
+        volume = request_spec['volume']
         snapshot_id = request_spec['snapshot_id']
         image_id = request_spec['image_id']
         cgroup_id = request_spec['consistencygroup_id']
@@ -714,14 +716,17 @@ class VolumeCastTask(flow_utils.CinderTask):
             # snapshot resides instead of passing it through the scheduler, so
             # snapshot can be copied to the new volume.
             snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
-            source_volume_ref = self.db.volume_get(context, snapshot.volume_id)
-            host = source_volume_ref['host']
+            source_volume_ref = objects.Volume.get_by_id(context,
+                                                         snapshot.volume_id)
+            host = source_volume_ref.host
         elif source_volid:
-            source_volume_ref = self.db.volume_get(context, source_volid)
-            host = source_volume_ref['host']
+            source_volume_ref = objects.Volume.get_by_id(context,
+                                                         source_volid)
+            host = source_volume_ref.host
         elif source_replicaid:
-            source_volume_ref = self.db.volume_get(context, source_replicaid)
-            host = source_volume_ref['host']
+            source_volume_ref = objects.Volume.get_by_id(context,
+                                                         source_replicaid)
+            host = source_volume_ref.host

         if not host:
             # Cast to the scheduler and let it handle whatever is needed
@@ -733,18 +738,19 @@ class VolumeCastTask(flow_utils.CinderTask):
                 snapshot_id=snapshot_id,
                 image_id=image_id,
                 request_spec=request_spec,
-                filter_properties=filter_properties)
+                filter_properties=filter_properties,
+                volume=volume)
         else:
             # Bypass the scheduler and send the request directly to the volume
             # manager.
-            now = timeutils.utcnow()
-            values = {'host': host, 'scheduled_at': now}
-            volume_ref = self.db.volume_update(context, volume_id, values)
+            volume.host = host
+            volume.scheduled_at = timeutils.utcnow()
+            volume.save()
             if not cgsnapshot_id:
                 self.volume_rpcapi.create_volume(
                     context,
-                    volume_ref,
-                    volume_ref['host'],
+                    volume,
+                    volume.host,
                     request_spec,
                     filter_properties,
                     allow_reschedule=False)
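The branch above encodes a placement shortcut: when the new volume must be
co-located with an existing source (snapshot, source volume, or replica),
the source's host is used directly and the scheduler is skipped. A
condensed sketch of that decision; the source_host parameter and the
trimmed RPC signatures here are hypothetical simplifications of the
three-way lookup shown in the diff, not Cinder's actual interfaces:

    from oslo_utils import timeutils


    def cast_create_volume(context, volume, request_spec, filter_properties,
                           scheduler_rpcapi, volume_rpcapi, source_host=None):
        # source_host stands in for the snapshot/source/replica lookup above.
        if not source_host:
            # No co-location constraint: let the scheduler pick a backend.
            scheduler_rpcapi.create_volume(
                context, volume=volume, request_spec=request_spec,
                filter_properties=filter_properties)
        else:
            # Constrained: record placement on the object itself, then cast
            # straight to that backend's volume manager.
            volume.host = source_host
            volume.scheduled_at = timeutils.utcnow()
            volume.save()
            volume_rpcapi.create_volume(context, volume, volume.host,
                                        request_spec, filter_properties,
                                        allow_reschedule=False)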


@@ -62,7 +62,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
     def __init__(self, reschedule_context, db, scheduler_rpcapi,
                  do_reschedule):
-        requires = ['filter_properties', 'request_spec', 'volume_id',
+        requires = ['filter_properties', 'request_spec', 'volume_ref',
                     'context']
         super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
                                                       requires=requires)
@@ -94,7 +94,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
     def execute(self, **kwargs):
         pass

-    def _pre_reschedule(self, context, volume_id):
+    def _pre_reschedule(self, context, volume):
         """Actions that happen before the rescheduling attempt occur here."""

         try:
@@ -112,15 +112,16 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
                 'host': None,
             }
             LOG.debug("Updating volume %(volume_id)s with %(update)s.",
-                      {'update': update, 'volume_id': volume_id})
-            self.db.volume_update(context, volume_id, update)
+                      {'update': update, 'volume_id': volume.id})
+            volume.update(update)
+            volume.save()
         except exception.CinderException:
             # Don't let updating the state cause the rescheduling to fail.
             LOG.exception(_LE("Volume %s: update volume state failed."),
-                          volume_id)
+                          volume.id)

     def _reschedule(self, context, cause, request_spec, filter_properties,
-                    volume_id):
+                    volume):
         """Actions that happen during the rescheduling attempt occur here."""
         create_volume = self.scheduler_rpcapi.create_volume
@@ -131,11 +132,11 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
             retry_info = filter_properties['retry']
         num_attempts = retry_info.get('num_attempts', 0)
-        request_spec['volume_id'] = volume_id
+        request_spec['volume_id'] = volume.id

         LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s "
                   "attempt %(num)d due to %(reason)s",
-                  {'volume_id': volume_id,
+                  {'volume_id': volume.id,
                    'method': common.make_pretty_name(create_volume),
                    'num': num_attempts,
                    'reason': cause.exception_str})
@@ -144,16 +145,17 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
             # Stringify to avoid circular ref problem in json serialization
             retry_info['exc'] = traceback.format_exception(*cause.exc_info)

-        return create_volume(context, CONF.volume_topic, volume_id,
+        return create_volume(context, CONF.volume_topic, volume.id,
                              request_spec=request_spec,
-                             filter_properties=filter_properties)
+                             filter_properties=filter_properties,
+                             volume=volume)

-    def _post_reschedule(self, volume_id):
+    def _post_reschedule(self, volume):
         """Actions that happen after the rescheduling attempt occur here."""
-        LOG.debug("Volume %s: re-scheduled", volume_id)
+        LOG.debug("Volume %s: re-scheduled", volume.id)

-    def revert(self, context, result, flow_failures, volume_id, **kwargs):
+    def revert(self, context, result, flow_failures, volume_ref, **kwargs):
         # NOTE(dulek): Revert is occurring and the manager needs to know if
         # rescheduling happened. We're returning a boolean flag that will
         # indicate that. It will be available in the flow engine store
@@ -162,16 +164,16 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
         # If we do not want to be rescheduled, just set the volume's status to
         # error and return.
         if not self.do_reschedule:
-            common.error_out_volume(context, self.db, volume_id)
-            LOG.error(_LE("Volume %s: create failed"), volume_id)
+            common.error_out_volume(context, self.db, volume_ref.id)
+            LOG.error(_LE("Volume %s: create failed"), volume_ref.id)
             return False

         # Check if we have a cause which can tell us not to reschedule and
         # set the volume's status to error.
         for failure in flow_failures.values():
             if failure.check(*self.no_reschedule_types):
-                common.error_out_volume(context, self.db, volume_id)
-                LOG.error(_LE("Volume %s: create failed"), volume_id)
+                common.error_out_volume(context, self.db, volume_ref.id)
+                LOG.error(_LE("Volume %s: create failed"), volume_ref.id)
                 return False

         # Use a different context when rescheduling.
@@ -179,12 +181,13 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
         cause = list(flow_failures.values())[0]
         context = self.reschedule_context
         try:
-            self._pre_reschedule(context, volume_id)
-            self._reschedule(context, cause, volume_id=volume_id, **kwargs)
-            self._post_reschedule(volume_id)
+            self._pre_reschedule(context, volume_ref)
+            self._reschedule(context, cause, volume=volume_ref, **kwargs)
+            self._post_reschedule(volume_ref)
             return True
         except exception.CinderException:
-            LOG.exception(_LE("Volume %s: rescheduling failed"), volume_id)
+            LOG.exception(_LE("Volume %s: rescheduling failed"),
+                          volume_ref.id)
             return False
@@ -206,8 +209,7 @@ class ExtractVolumeRefTask(flow_utils.CinderTask):
         #
         # In the future we might want to have a lock on the volume_id so that
         # the volume can not be deleted while it's still being created?
-        volume_ref = self.db.volume_get(context, volume_id)
-        return volume_ref
+        return objects.Volume.get_by_id(context, volume_id)

     def revert(self, context, volume_id, result, **kwargs):
         if isinstance(result, ft.Failure) or not self.set_error:
@@ -269,7 +271,8 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask):
             # NOTE(harlowja): This will likely fail if the source volume
             # disappeared by the time this call occurred.
             source_volid = volume_ref.get('source_volid')
-            source_volume_ref = self.db.volume_get(context, source_volid)
+            source_volume_ref = objects.Volume.get_by_id(context,
+                                                         source_volid)
             specs.update({
                 'source_volid': source_volid,
                 # This is captured in case we have to revert and we want to set
@@ -284,7 +287,8 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask):
             # NOTE(harlowja): This will likely fail if the replica
             # disappeared by the time this call occurred.
             source_volid = request_spec['source_replicaid']
-            source_volume_ref = self.db.volume_get(context, source_volid)
+            source_volume_ref = objects.Volume.get_by_id(context,
+                                                         source_volid)
             specs.update({
                 'source_replicaid': source_volid,
                 'source_replicastatus': source_volume_ref['status'],
@@ -443,8 +447,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
         # will not destroy the volume (although they could in the future).
         make_bootable = False
         try:
-            originating_vref = self.db.volume_get(context,
-                                                  snapshot.volume_id)
+            originating_vref = objects.Volume.get_by_id(context,
+                                                        snapshot.volume_id)
             make_bootable = originating_vref.bootable
         except exception.CinderException as ex:
             LOG.exception(_LE("Failed fetching snapshot %(snapshot_id)s "
@@ -476,14 +480,14 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
         # NOTE(harlowja): likely this is not the best place for this to happen
         # and we should have proper locks on the source volume while actions
         # that use the source volume are underway.
-        srcvol_ref = self.db.volume_get(context, source_volid)
+        srcvol_ref = objects.Volume.get_by_id(context, source_volid)
         model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref)
         # NOTE(harlowja): Subtasks would be useful here since after this
         # point the volume has already been created and further failures
         # will not destroy the volume (although they could in the future).
         if srcvol_ref.bootable:
-            self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
-                                                     source_volid=source_volid)
+            self._handle_bootable_volume_glance_meta(
+                context, volume_ref.id, source_volid=source_volid)
         return model_update

     def _create_from_source_replica(self, context, volume_ref,
@@ -494,7 +498,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
         # NOTE(harlowja): likely this is not the best place for this to happen
         # and we should have proper locks on the source volume while actions
         # that use the source volume are underway.
-        srcvol_ref = self.db.volume_get(context, source_replicaid)
+        srcvol_ref = objects.Volume.get_by_id(context, source_replicaid)
         model_update = self.driver.create_replica_test_volume(volume_ref,
                                                               srcvol_ref)
         # NOTE(harlowja): Subtasks would be useful here since after this
@@ -754,12 +758,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                     image_id=image_id, reason=reason)

             if virtual_size and virtual_size != original_size:
-                updates = {'size': virtual_size}
-                volume_ref = self.db.volume_update(
-                    context,
-                    volume_ref['id'],
-                    updates
-                )
+                volume_ref.size = virtual_size
+                volume_ref.save()

             model_update = self._create_from_image_download(
                 context,
@@ -773,9 +773,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                 # Update the newly created volume db entry before we clone it
                 # for the image-volume creation.
                 if model_update:
-                    volume_ref = self.db.volume_update(context,
-                                                       volume_ref['id'],
-                                                       model_update)
+                    volume_ref.update(model_update)
+                    volume_ref.save()
                 self.manager._create_image_cache_volume_entry(internal_context,
                                                               volume_ref,
                                                               image_id,
@@ -785,12 +784,12 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
             # what was originally requested. If an exception has occurred we
             # still need to put this back before letting it be raised further
             # up the stack.
-            if volume_ref['size'] != original_size:
+            if volume_ref.size != original_size:
                 self.driver.extend_volume(volume_ref, original_size)
-                updates = {'size': original_size}
-                self.db.volume_update(context, volume_ref['id'], updates)
+                volume_ref.size = original_size
+                volume_ref.save()

-        self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
+        self._handle_bootable_volume_glance_meta(context, volume_ref.id,
                                                  image_id=image_id,
                                                  image_meta=image_meta)
         return model_update
@@ -839,8 +838,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
         # Persist any model information provided on creation.
         try:
             if model_update:
-                volume_ref = self.db.volume_update(context, volume_ref['id'],
-                                                   model_update)
+                volume_ref.update(model_update)
+                volume_ref.save()
         except exception.CinderException:
             # If somehow the update failed we want to ensure that the
             # failure is logged (but not try rescheduling since the volume at
@@ -872,7 +871,6 @@ class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
         }

     def execute(self, context, volume, volume_spec):
-        volume_id = volume['id']
         new_status = self.status_translation.get(volume_spec.get('status'),
                                                  'available')
         update = {
@@ -884,18 +882,19 @@ class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
             # or are there other side-effects that this will cause if the
             # status isn't updated correctly (aka it will likely be stuck in
             # 'creating' if this fails)??
-            volume_ref = self.db.volume_update(context, volume_id, update)
+            volume.update(update)
+            volume.save()
             # Now use the parent to notify.
-            super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
+            super(CreateVolumeOnFinishTask, self).execute(context, volume)
         except exception.CinderException:
             LOG.exception(_LE("Failed updating volume %(volume_id)s with "
-                              "%(update)s"), {'volume_id': volume_id,
+                              "%(update)s"), {'volume_id': volume.id,
                                               'update': update})
         # Even if the update fails, the volume is ready.
         LOG.info(_LI("Volume %(volume_name)s (%(volume_id)s): "
                      "created successfully"),
                  {'volume_name': volume_spec['volume_name'],
-                  'volume_id': volume_id})
+                  'volume_id': volume.id})


 def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume_id,


@@ -190,7 +190,7 @@ def locked_snapshot_operation(f):
 class VolumeManager(manager.SchedulerDependentManager):
     """Manages attachable block storage devices."""

-    RPC_API_VERSION = '1.31'
+    RPC_API_VERSION = '1.32'

     target = messaging.Target(version=RPC_API_VERSION)
@@ -476,9 +476,16 @@ class VolumeManager(manager.SchedulerDependentManager):
         return self.driver.initialized

     def create_volume(self, context, volume_id, request_spec=None,
-                      filter_properties=None, allow_reschedule=True):
+                      filter_properties=None, allow_reschedule=True,
+                      volume=None):
         """Creates the volume."""
+        # FIXME(thangp): Remove this in v2.0 of RPC API.
+        if volume is None:
+            # For older clients, mimic the old behavior and look up the
+            # volume by its volume_id.
+            volume = objects.Volume.get_by_id(context, volume_id)
+
         context_elevated = context.elevated()
         if filter_properties is None:
             filter_properties = {}
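The volume=None default above is the standard rolling-upgrade shim: the
method keeps its old id-based signature, new callers additionally pass the
object, and old-style casts are rehydrated on arrival so both generations
converge on one code path. A self-contained sketch of the pattern (the
Widget names are hypothetical, not Cinder code):

    class Widget(object):
        # Hypothetical model used only to illustrate the shim.
        _store = {}

        def __init__(self, widget_id):
            self.id = widget_id

        @classmethod
        def get_by_id(cls, context, widget_id):
            return cls._store.setdefault(widget_id, cls(widget_id))


    def create_widget(context, widget_id, widget=None):
        # Compatibility shim: old RPC clients (capped below the new
        # version) send only widget_id; new clients also send the object.
        if widget is None:
            widget = Widget.get_by_id(context, widget_id)
        # Both caller generations converge on the object from here on.
        return widget


    # Old-style call (id only) and new-style call (object) behave the same:
    w = create_widget(None, 'w-1')
    assert create_widget(None, 'w-1', widget=w) is w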
@@ -496,7 +503,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                 self.driver,
                 self.scheduler_rpcapi,
                 self.host,
-                volume_id,
+                volume.id,
                 allow_reschedule,
                 context,
                 request_spec,
@@ -505,7 +512,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             )
         except Exception:
             msg = _("Create manager volume flow failed.")
-            LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
+            LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
             raise exception.CinderException(msg)

         snapshot_id = request_spec.get('snapshot_id')
@@ -563,13 +570,13 @@ class VolumeManager(manager.SchedulerDependentManager):
             if not vol_ref:
                 # Flow was reverted and not rescheduled, fetching
                 # volume_ref from the DB, because it will be needed.
-                vol_ref = self.db.volume_get(context, volume_id)
+                vol_ref = objects.Volume.get_by_id(context, volume.id)

             # NOTE(dulek): Volume wasn't rescheduled so we need to update
             # volume stats as these are decremented on delete.
             self._update_allocated_capacity(vol_ref)

         LOG.info(_LI("Created volume successfully."), resource=vol_ref)
-        return vol_ref['id']
+        return vol_ref.id

     @locked_volume_operation
     def delete_volume(self, context, volume_id, unmanage_only=False):
@@ -1586,9 +1593,10 @@ class VolumeManager(manager.SchedulerDependentManager):
         new_vol_values = dict(volume)
         del new_vol_values['id']
         del new_vol_values['_name_id']
+        new_vol_values.pop('name', None)
         # We don't copy volume_type because the db sets that according to
         # volume_type_id, which we do copy
-        del new_vol_values['volume_type']
+        new_vol_values.pop('volume_type', None)
         if new_type_id:
             new_vol_values['volume_type_id'] = new_type_id
         new_vol_values['host'] = host['host']
@@ -1600,8 +1608,9 @@ class VolumeManager(manager.SchedulerDependentManager):
         # I think
         new_vol_values['migration_status'] = 'target:%s' % volume['id']
         new_vol_values['attach_status'] = 'detached'
-        new_vol_values['volume_attachment'] = []
-        new_volume = self.db.volume_create(ctxt, new_vol_values)
+        new_vol_values.pop('volume_attachment', None)
+        new_volume = objects.Volume(context=ctxt, **new_vol_values)
+        new_volume.create()
         rpcapi.create_volume(ctxt, new_volume, host['host'],
                              None, None, allow_reschedule=False)


@@ -79,6 +79,7 @@ class VolumeAPI(object):
         1.31 - Updated: create_consistencygroup_from_src(), create_cgsnapshot()
                and delete_cgsnapshot() to cast method only with necessary
                args. Forwarding CGSnapshot object instead of CGSnapshot_id.
+        1.32 - Adds support for sending objects over RPC in create_volume().
     """

     BASE_RPC_API_VERSION = '1.0'
@@ -88,7 +89,11 @@ class VolumeAPI(object):
         target = messaging.Target(topic=CONF.volume_topic,
                                   version=self.BASE_RPC_API_VERSION)
         serializer = objects_base.CinderObjectSerializer()
-        self.client = rpc.get_client(target, '1.31', serializer=serializer)
+
+        # NOTE(thangp): Until version pinning is implemented, set the client
+        # version_cap to None
+        self.client = rpc.get_client(target, version_cap=None,
+                                     serializer=serializer)

     def create_consistencygroup(self, ctxt, group, host):
         new_host = utils.extract_host(host)
@@ -132,14 +137,20 @@ class VolumeAPI(object):
     def create_volume(self, ctxt, volume, host, request_spec,
                       filter_properties, allow_reschedule=True):
-        new_host = utils.extract_host(host)
-        cctxt = self.client.prepare(server=new_host, version='1.24')
         request_spec_p = jsonutils.to_primitive(request_spec)
-        cctxt.cast(ctxt, 'create_volume',
-                   volume_id=volume['id'],
-                   request_spec=request_spec_p,
-                   filter_properties=filter_properties,
-                   allow_reschedule=allow_reschedule)
+        msg_args = {'volume_id': volume.id, 'request_spec': request_spec_p,
+                    'filter_properties': filter_properties,
+                    'allow_reschedule': allow_reschedule}
+        if self.client.can_send_version('1.32'):
+            version = '1.32'
+            msg_args['volume'] = volume
+        else:
+            version = '1.24'
+
+        new_host = utils.extract_host(host)
+        cctxt = self.client.prepare(server=new_host, version=version)
+        cctxt.cast(ctxt, 'create_volume', **msg_args)

     def delete_volume(self, ctxt, volume, unmanage_only=False):
         new_host = utils.extract_host(volume['host'])
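The can_send_version() check above is what keeps mixed-version deployments
working during a rolling upgrade: if the client is capped below 1.32, the
cast silently falls back to the id-only 1.24 form and the manager-side shim
rehydrates the object. A self-contained sketch of that negotiation with a
hypothetical client stub (not oslo.messaging itself):

    # Hypothetical stand-in for an RPC client with a version cap, to show
    # how can_send_version() gates new arguments during rolling upgrades.
    class FakeRPCClient(object):
        def __init__(self, version_cap=None):
            self.version_cap = version_cap  # None means "no pin"

        def can_send_version(self, version):
            if self.version_cap is None:
                return True
            # Compare dotted versions numerically, e.g. '1.32' vs '1.24'.
            cap = tuple(int(p) for p in self.version_cap.split('.'))
            want = tuple(int(p) for p in version.split('.'))
            return want <= cap


    client = FakeRPCClient(version_cap='1.24')   # old manager still running
    msg_args = {'volume_id': 'vol-1'}
    if client.can_send_version('1.32'):
        msg_args['volume'] = object()            # new-style: full object
        version = '1.32'
    else:
        version = '1.24'                         # old-style: id only
    print(version, sorted(msg_args))             # -> 1.24 ['volume_id']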


@@ -76,6 +76,8 @@ objects_ignore_messages = [
     "Module 'cinder.objects' has no 'ServiceList' member",
     "Module 'cinder.objects' has no 'Snapshot' member",
     "Module 'cinder.objects' has no 'SnapshotList' member",
+    "Module 'cinder.objects' has no 'Volume' member",
+    "Module 'cinder.objects' has no 'VolumeList' member",
 ]
 objects_ignore_modules = ["cinder/objects/"]