Merge "Update attach_volume() with versionedobjects"
@@ -1401,22 +1401,23 @@ def volume_attached(context, attachment_id, instance_uuid, host_name,
         volume_attachment_ref = volume_attachment_get(context, attachment_id,
                                                       session=session)
 
-        volume_attachment_ref['mountpoint'] = mountpoint
-        volume_attachment_ref['attach_status'] = (fields.VolumeAttachStatus.
-                                                  ATTACHED)
-        volume_attachment_ref['instance_uuid'] = instance_uuid
-        volume_attachment_ref['attached_host'] = host_name
-        volume_attachment_ref['attach_time'] = timeutils.utcnow()
-        volume_attachment_ref['attach_mode'] = attach_mode
+        updated_values = {'mountpoint': mountpoint,
+                          'attach_status': fields.VolumeAttachStatus.ATTACHED,
+                          'instance_uuid': instance_uuid,
+                          'attached_host': host_name,
+                          'attach_time': timeutils.utcnow(),
+                          'attach_mode': attach_mode,
+                          'updated_at': literal_column('updated_at')}
+        volume_attachment_ref.update(updated_values)
+        volume_attachment_ref.save(session=session)
+        del updated_values['updated_at']
 
         volume_ref = _volume_get(context, volume_attachment_ref['volume_id'],
                                  session=session)
-        volume_attachment_ref.save(session=session)
 
         volume_ref['status'] = 'in-use'
         volume_ref['attach_status'] = fields.VolumeAttachStatus.ATTACHED
         volume_ref.save(session=session)
-        return volume_ref
+        return (volume_ref, updated_values)
 
 
 @handle_db_data_error
@@ -374,7 +374,9 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
             if updates.get('status') == 'downloading':
                 self.set_worker()
 
-            db.volume_update(self._context, self.id, updates)
+            # updates are changed after popping out metadata.
+            if updates:
+                db.volume_update(self._context, self.id, updates)
             self.obj_reset_changes()
 
     def destroy(self):
@@ -510,6 +512,17 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
             return False
         return status in ('creating', 'deleting', 'uploading', 'downloading')
 
+    def begin_attach(self, attach_mode):
+        attachment = objects.VolumeAttachment(
+            context=self._context,
+            attach_status=c_fields.VolumeAttachStatus.ATTACHING,
+            volume_id=self.id)
+        attachment.create()
+        with self.obj_as_admin():
+            self.admin_metadata['attached_mode'] = attach_mode
+            self.save()
+        return attachment
+
 
 @base.CinderObjectRegistry.register
 class VolumeList(base.ObjectListBase, base.CinderObject):
@@ -15,6 +15,8 @@
 from oslo_versionedobjects import fields
 
 from cinder import db
+from cinder import exception
+from cinder.i18n import _
 from cinder import objects
 from cinder.objects import base
 from cinder.objects import fields as c_fields
@@ -59,6 +61,28 @@ class VolumeAttachment(base.CinderPersistentObject, base.CinderObject,
             db.volume_attachment_update(self._context, self.id, updates)
             self.obj_reset_changes()
 
+    def finish_attach(self, instance_uuid, host_name,
+                      mount_point, attach_mode='rw'):
+        with self.obj_as_admin():
+            db_volume, updated_values = db.volume_attached(
+                self._context, self.id,
+                instance_uuid, host_name,
+                mount_point, attach_mode)
+        self.update(updated_values)
+        self.obj_reset_changes(updated_values.keys())
+        return objects.Volume._from_db_object(self._context,
+                                              objects.Volume(),
+                                              db_volume)
+
+    def create(self):
+        if self.obj_attr_is_set('id'):
+            raise exception.ObjectActionError(action='create',
+                                              reason=_('already created'))
+        updates = self.cinder_obj_get_changes()
+        with self.obj_as_admin():
+            db_attachment = db.volume_attach(self._context, updates)
+        self._from_db_object(self._context, self, db_attachment)
+
 
 @base.CinderObjectRegistry.register
 class VolumeAttachmentList(base.ObjectListBase, base.CinderObject):
@@ -1072,8 +1072,13 @@ class AdminActionsAttachDetachTest(BaseAdminTest):
     def test_attach_attaching_volume_with_different_mode(self):
         """Test that attaching volume reserved for another mode fails."""
         # current status is available
-        volume = self._create_volume(self.ctx, {'provider_location': '',
-                                                'size': 1})
+        volume = self._create_volume(
+            self.ctx,
+            {'provider_location': '',
+             'size': 1,
+             'status': 'attaching',
+             'instance_uuid': fake.INSTANCE_ID,
+             'admin_metadata': {"attached_mode": 'rw'}})
 
         values = {'status': 'attaching'}
         db.volume_update(self.ctx, volume['id'], values)
@@ -21,6 +21,7 @@ import six
+from cinder import context
 from cinder import exception
 from cinder import objects
 from cinder.objects import fields
 from cinder.tests.unit.consistencygroup import fake_consistencygroup
 from cinder.tests.unit import fake_constants as fake
 from cinder.tests.unit import fake_snapshot
@@ -422,6 +423,29 @@ class TestVolume(test_objects.BaseObjectsTestCase):
         volume = serializer.deserialize_entity(self.context, serialized_volume)
         self.assertDictEqual({}, volume.obj_get_changes())
 
+    @mock.patch('cinder.db.volume_admin_metadata_update')
+    @mock.patch('cinder.db.sqlalchemy.api.volume_attach')
+    def test_begin_attach(self, volume_attach, metadata_update):
+        volume = fake_volume.fake_volume_obj(self.context)
+        db_attachment = fake_volume.fake_db_volume_attachment(
+            volume_id=volume.id,
+            attach_status=fields.VolumeAttachStatus.ATTACHING)
+        volume_attach.return_value = db_attachment
+        metadata_update.return_value = {'attached_mode': 'rw'}
+
+        with mock.patch.object(self.context, 'elevated') as mock_elevated:
+            mock_elevated.return_value = context.get_admin_context()
+            attachment = volume.begin_attach("rw")
+            self.assertIsInstance(attachment, objects.VolumeAttachment)
+            self.assertEqual(volume.id, attachment.volume_id)
+            self.assertEqual(fields.VolumeAttachStatus.ATTACHING,
+                             attachment.attach_status)
+            metadata_update.assert_called_once_with(self.context.elevated(),
+                                                    volume.id,
+                                                    {'attached_mode': u'rw'},
+                                                    True)
+            self.assertEqual('rw', volume.admin_metadata['attached_mode'])
+
 
 class TestVolumeList(test_objects.BaseObjectsTestCase):
     @mock.patch('cinder.db.volume_get_all')
@@ -68,6 +68,30 @@ class TestVolumeAttachment(test_objects.BaseObjectsTestCase):
                                mock.call(self.context,
                                          fake.ATTACHMENT_ID)])
 
+    @mock.patch('cinder.db.sqlalchemy.api.volume_attached')
+    def test_volume_attached(self, volume_attached):
+        attachment = fake_volume.fake_volume_attachment_obj(self.context)
+        updated_values = {'mountpoint': '/dev/sda',
+                          'attach_status': fields.VolumeAttachStatus.ATTACHED,
+                          'instance_uuid': fake.INSTANCE_ID}
+        volume_attached.return_value = (fake_volume.fake_db_volume(),
+                                        updated_values)
+        volume = attachment.finish_attach(fake.INSTANCE_ID,
+                                          'fake_host',
+                                          '/dev/sda',
+                                          'rw')
+        self.assertIsInstance(volume, objects.Volume)
+        volume_attached.assert_called_once_with(mock.ANY,
+                                                attachment.id,
+                                                fake.INSTANCE_ID,
+                                                'fake_host',
+                                                '/dev/sda',
+                                                'rw')
+        self.assertEqual('/dev/sda', attachment.mountpoint)
+        self.assertEqual(fake.INSTANCE_ID, attachment.instance_uuid)
+        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
+                         attachment.attach_status)
+
 
 class TestVolumeAttachmentList(test_objects.BaseObjectsTestCase):
     @mock.patch('cinder.db.volume_attachment_get_all_by_volume_id')
@@ -285,15 +285,30 @@ class DBAPIVolumeTestCase(BaseTest):
 
     def test_volume_attached_to_instance(self):
         volume = db.volume_create(self.ctxt, {'host': 'host1'})
-        instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+        instance_uuid = fake.INSTANCE_ID
         values = {'volume_id': volume['id'],
                   'instance_uuid': instance_uuid,
                   'attach_status': fields.VolumeAttachStatus.ATTACHING, }
         attachment = db.volume_attach(self.ctxt, values)
-        db.volume_attached(self.ctxt, attachment['id'],
-                           instance_uuid, None, '/tmp')
+        volume_db, updated_values = db.volume_attached(
+            self.ctxt,
+            attachment['id'],
+            instance_uuid, None, '/tmp')
+        expected_updated_values = {
+            'mountpoint': '/tmp',
+            'attach_status': fields.VolumeAttachStatus.ATTACHED,
+            'instance_uuid': instance_uuid,
+            'attached_host': None,
+            'attach_time': mock.ANY,
+            'attach_mode': 'rw'}
+        self.assertDictEqual(expected_updated_values, updated_values)
+
         volume = db.volume_get(self.ctxt, volume['id'])
         attachment = db.volume_attachment_get(self.ctxt, attachment['id'])
+        self._assertEqualObjects(volume, volume_db,
+                                 ignored_keys='volume_attachment')
+        self._assertEqualListsOfObjects(volume.volume_attachment,
+                                        volume_db.volume_attachment)
         self.assertEqual('in-use', volume['status'])
         self.assertEqual('/tmp', attachment['mountpoint'])
         self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
@@ -308,9 +323,22 @@ class DBAPIVolumeTestCase(BaseTest):
                   'attached_host': host_name,
                   'attach_status': fields.VolumeAttachStatus.ATTACHING, }
         attachment = db.volume_attach(self.ctxt, values)
-        db.volume_attached(self.ctxt, attachment['id'],
-                           None, host_name, '/tmp')
+        volume_db, updated_values = db.volume_attached(
+            self.ctxt, attachment['id'],
+            None, host_name, '/tmp')
+        expected_updated_values = {
+            'mountpoint': '/tmp',
+            'attach_status': fields.VolumeAttachStatus.ATTACHED,
+            'instance_uuid': None,
+            'attached_host': host_name,
+            'attach_time': mock.ANY,
+            'attach_mode': 'rw'}
+        self.assertDictEqual(expected_updated_values, updated_values)
         volume = db.volume_get(self.ctxt, volume['id'])
+        self._assertEqualObjects(volume, volume_db,
+                                 ignored_keys='volume_attachment')
+        self._assertEqualListsOfObjects(volume.volume_attachment,
+                                        volume_db.volume_attachment)
         attachment = db.volume_attachment_get(self.ctxt, attachment['id'])
         self.assertEqual('in-use', volume['status'])
         self.assertEqual('/tmp', attachment['mountpoint'])
@@ -213,6 +213,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
         self.patch('cinder.volume.utils.clear_volume', autospec=True)
         self.expected_status = 'available'
         self.service_id = 1
+        self.user_context = context.RequestContext(user_id=fake.USER_ID,
+                                                   project_id=fake.PROJECT_ID)
 
     @mock.patch('cinder.manager.CleanableManager.init_host')
     def test_init_host_count_allocated_capacity(self, init_host_mock):
@@ -2074,27 +2076,34 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                           self.volume.initialize_connection,
                           self.context, volume, connector)
 
-    def test_run_attach_detach_volume_for_instance(self):
+    @ddt.data(False, True)
+    def test_run_attach_detach_volume_for_instance(self, volume_object):
         """Make sure volume can be attached and detached from instance."""
         mountpoint = "/dev/sdf"
         # attach volume to the instance then to detach
         instance_uuid = '12345678-1234-5678-1234-567812345678'
-        volume = tests_utils.create_volume(self.context,
-                                           admin_metadata={'readonly': 'True'},
+        volume = tests_utils.create_volume(self.user_context,
                                            **self.volume_params)
+        with volume.obj_as_admin():
+            volume.admin_metadata['readonly'] = True
+            volume.save()
         volume_id = volume['id']
-        self.volume.create_volume(self.context, volume)
-        attachment = self.volume.attach_volume(self.context, volume_id,
+        self.volume.create_volume(self.user_context,
+                                  volume=volume)
+        volume_passed = volume if volume_object else None
+        attachment = self.volume.attach_volume(self.user_context,
+                                               volume_id,
                                                instance_uuid, None,
-                                               mountpoint, 'ro')
-        vol = db.volume_get(context.get_admin_context(), volume_id)
-        self.assertEqual("in-use", vol['status'])
+                                               mountpoint, 'ro',
+                                               volume=volume_passed)
+        vol = objects.Volume.get_by_id(self.context, volume_id)
+        self.assertEqual("in-use", vol.status)
         self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
-                         attachment['attach_status'])
-        self.assertEqual(mountpoint, attachment['mountpoint'])
-        self.assertEqual(instance_uuid, attachment['instance_uuid'])
-        self.assertIsNone(attachment['attached_host'])
-        admin_metadata = vol['volume_admin_metadata']
+                         attachment.attach_status)
+        self.assertEqual(mountpoint, attachment.mountpoint)
+        self.assertEqual(instance_uuid, attachment.instance_uuid)
+        self.assertIsNone(attachment.attached_host)
+        admin_metadata = vol.volume_admin_metadata
         self.assertEqual(2, len(admin_metadata))
         expected = dict(readonly='True', attached_mode='ro')
         ret = {}
@@ -2103,6 +2112,7 @@ class VolumeTestCase(base.BaseVolumeTestCase):
         self.assertDictMatch(expected, ret)
 
         connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+        volume = volume if volume_object else vol
         conn_info = self.volume.initialize_connection(self.context,
                                                       volume, connector)
         self.assertEqual('ro', conn_info['data']['access_mode'])
@@ -2827,8 +2837,11 @@ class VolumeTestCase(base.BaseVolumeTestCase):
             self.context.project_id, resource_type=resource_types.VOLUME,
             resource_uuid=volume['id'])
-
+        attachment = objects.VolumeAttachmentList.get_all_by_volume_id(
+            context.get_admin_context(), volume_id)[0]
+        self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING,
+                         attachment.attach_status)
         vol = db.volume_get(context.get_admin_context(), volume_id)
         self.assertEqual('error_attaching', vol['status'])
         self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                          vol['attach_status'])
         admin_metadata = vol['volume_admin_metadata']
@@ -2848,8 +2861,11 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                           'fake_host',
                           mountpoint,
                           'rw')
-
+        attachment = objects.VolumeAttachmentList.get_all_by_volume_id(
+            context.get_admin_context(), volume_id)[0]
+        self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING,
+                         attachment.attach_status)
         vol = db.volume_get(context.get_admin_context(), volume_id)
         self.assertEqual('error_attaching', vol['status'])
         self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                          vol['attach_status'])
         admin_metadata = vol['volume_admin_metadata']
@@ -16,8 +16,9 @@
 Unit Tests for cinder.volume.rpcapi
 """
 import copy
 
+import ddt
 import mock
 
 from oslo_config import cfg
 from oslo_serialization import jsonutils
@@ -39,6 +40,7 @@ from cinder.volume import utils
 CONF = cfg.CONF
 
 
+@ddt.ddt
 class VolumeRpcAPITestCase(test.TestCase):
 
     def setUp(self):
@@ -393,7 +395,10 @@ class VolumeRpcAPITestCase(test.TestCase):
                               unmanage_only=True,
                               version='3.0')
 
-    def test_attach_volume_to_instance(self):
+    @ddt.data('3.0', '3.3')
+    @mock.patch('oslo_messaging.RPCClient.can_send_version')
+    def test_attach_volume_to_instance(self, version, can_send_version):
+        can_send_version.return_value = (version == '3.3')
         self._test_volume_api('attach_volume',
                               rpc_method='call',
                               volume=self.fake_volume_obj,
@@ -401,9 +406,12 @@ class VolumeRpcAPITestCase(test.TestCase):
                               host_name=None,
                               mountpoint='fake_mountpoint',
                               mode='ro',
-                              version='3.0')
+                              version=version)
 
-    def test_attach_volume_to_host(self):
+    @ddt.data('3.0', '3.3')
+    @mock.patch('oslo_messaging.RPCClient.can_send_version')
+    def test_attach_volume_to_host(self, version, can_send_version):
+        can_send_version.return_value = (version == '3.3')
         self._test_volume_api('attach_volume',
                               rpc_method='call',
                               volume=self.fake_volume_obj,
@@ -411,13 +419,16 @@ class VolumeRpcAPITestCase(test.TestCase):
                               host_name='fake_host',
                               mountpoint='fake_mountpoint',
                               mode='rw',
-                              version='3.0')
+                              version=version)
 
     def _set_cluster(self):
         self.fake_volume_obj.cluster_name = 'my_cluster'
         self.fake_volume_obj.obj_reset_changes(['cluster_name'])
 
-    def test_attach_volume_to_cluster(self):
+    @ddt.data('3.0', '3.3')
+    @mock.patch('oslo_messaging.RPCClient.can_send_version')
+    def test_attach_volume_to_cluster(self, version, can_send_version):
+        can_send_version.return_value = (version == '3.3')
         self._set_cluster()
         self._test_volume_api('attach_volume',
                               rpc_method='call',
@@ -426,7 +437,7 @@ class VolumeRpcAPITestCase(test.TestCase):
                               host_name='fake_host',
                               mountpoint='fake_mountpoint',
                               mode='rw',
-                              version='3.0')
+                              version=version)
 
     def test_detach_volume(self):
         self._test_volume_api('detach_volume',
@@ -101,8 +101,10 @@ def attach_volume(ctxt, volume_id, instance_uuid, attached_host,
         values['attach_time'] = now
 
     attachment = db.volume_attach(ctxt, values)
-    return db.volume_attached(ctxt, attachment['id'], instance_uuid,
-                              attached_host, mountpoint, mode)
+    volume, updated_values = db.volume_attached(
+        ctxt, attachment['id'], instance_uuid,
+        attached_host, mountpoint, mode)
+    return volume
 
 
 def create_snapshot(ctxt,
@@ -672,7 +672,7 @@ class API(base.Base):
     @wrap_check_policy
     def attach(self, context, volume, instance_uuid, host_name,
                mountpoint, mode):
-        if volume['status'] == 'maintenance':
+        if volume.status == 'maintenance':
             LOG.info(_LI('Unable to attach volume, '
                          'because it is in maintenance.'), resource=volume)
             msg = _("The volume cannot be attached in maintenance mode.")
@@ -685,7 +685,7 @@ class API(base.Base):
                                                    update=False)['readonly']
         if readonly == 'True' and mode != 'ro':
             raise exception.InvalidVolumeAttachMode(mode=mode,
-                                                    volume_id=volume['id'])
+                                                    volume_id=volume.id)
 
         attach_results = self.volume_rpcapi.attach_volume(context,
                                                           volume,
@@ -1084,7 +1084,7 @@ class API(base.Base):
 
         """
         utils.check_metadata_properties(metadata)
-        db_meta = self.db.volume_admin_metadata_update(context, volume['id'],
+        db_meta = self.db.volume_admin_metadata_update(context, volume.id,
                                                        metadata, delete, add,
                                                        update)
 
@@ -925,70 +925,67 @@ class VolumeManager(manager.CleanableManager,
 
     @coordination.synchronized('{volume_id}')
     def attach_volume(self, context, volume_id, instance_uuid, host_name,
-                      mountpoint, mode):
+                      mountpoint, mode, volume=None):
         """Updates db to show volume is attached."""
+        # FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
+        if volume is None:
+            # For older clients, mimic the old behavior and look
+            # up the volume by its volume_id.
+            volume = objects.Volume.get_by_id(context, volume_id)
+        # Get admin_metadata. This needs admin context.
+        with volume.obj_as_admin():
+            volume_metadata = volume.admin_metadata
         # check the volume status before attaching
-        volume = self.db.volume_get(context, volume_id)
-        volume_metadata = self.db.volume_admin_metadata_get(
-            context.elevated(), volume_id)
-        if volume['status'] == 'attaching':
+        if volume.status == 'attaching':
             if (volume_metadata.get('attached_mode') and
                volume_metadata.get('attached_mode') != mode):
                 raise exception.InvalidVolume(
                     reason=_("being attached by different mode"))
 
-        if (volume['status'] == 'in-use' and not volume['multiattach']
-           and not volume['migration_status']):
+        if (volume.status == 'in-use' and not volume.multiattach
+           and not volume.migration_status):
             raise exception.InvalidVolume(
                 reason=_("volume is already attached"))
 
         host_name_sanitized = utils.sanitize_hostname(
             host_name) if host_name else None
         if instance_uuid:
-            attachments = \
-                self.db.volume_attachment_get_all_by_instance_uuid(
-                    context, instance_uuid)
+            attachments = (
+                objects.VolumeAttachmentList.get_all_by_instance_uuid(
+                    context, instance_uuid))
         else:
             attachments = (
-                self.db.volume_attachment_get_all_by_host(
-                    context,
-                    host_name_sanitized))
+                objects.VolumeAttachmentList.get_all_by_host(
+                    context, host_name_sanitized))
         if attachments:
             # check if volume<->instance mapping is already tracked in DB
             for attachment in attachments:
                 if attachment['volume_id'] == volume_id:
-                    self.db.volume_update(context, volume_id,
-                                          {'status': 'in-use'})
+                    volume.status = 'in-use'
+                    volume.save()
                     return attachment
 
         self._notify_about_volume_usage(context, volume,
                                         "attach.start")
-        values = {'volume_id': volume_id,
-                  'attach_status': 'attaching', }
 
-        attachment = self.db.volume_attach(context.elevated(), values)
-        volume_metadata = self.db.volume_admin_metadata_update(
-            context.elevated(), volume_id,
-            {"attached_mode": mode}, False)
+        attachment = volume.begin_attach(mode)
 
-        attachment_id = attachment['id']
         if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
-            self.db.volume_attachment_update(context, attachment_id,
-                                             {'attach_status':
-                                              'error_attaching'})
+            attachment.attach_status = (
+                fields.VolumeAttachStatus.ERROR_ATTACHING)
+            attachment.save()
             raise exception.InvalidUUID(uuid=instance_uuid)
 
-        volume = self.db.volume_get(context, volume_id)
-
         if volume_metadata.get('readonly') == 'True' and mode != 'ro':
-            self.db.volume_update(context, volume_id,
-                                  {'status': 'error_attaching'})
+            attachment.attach_status = (
+                fields.VolumeAttachStatus.ERROR_ATTACHING)
+            attachment.save()
             self.message_api.create(
                 context, defined_messages.ATTACH_READONLY_VOLUME,
                 context.project_id, resource_type=resource_types.VOLUME,
-                resource_uuid=volume_id)
+                resource_uuid=volume.id)
             raise exception.InvalidVolumeAttachMode(mode=mode,
-                                                    volume_id=volume_id)
+                                                    volume_id=volume.id)
 
         try:
             # NOTE(flaper87): Verify the driver is enabled
@@ -999,7 +996,7 @@ class VolumeManager(manager.CleanableManager,
             LOG.debug('Attaching volume %(volume_id)s to instance '
                       '%(instance)s at mountpoint %(mount)s on host '
                       '%(host)s.',
-                      {'volume_id': volume_id, 'instance': instance_uuid,
+                      {'volume_id': volume.id, 'instance': instance_uuid,
                        'mount': mountpoint, 'host': host_name_sanitized},
                       resource=volume)
             self.driver.attach_volume(context,
@@ -1009,20 +1006,20 @@ class VolumeManager(manager.CleanableManager,
                                       mountpoint)
         except Exception:
             with excutils.save_and_reraise_exception():
-                self.db.volume_attachment_update(
-                    context, attachment_id,
-                    {'attach_status': 'error_attaching'})
+                attachment.attach_status = (
+                    fields.VolumeAttachStatus.ERROR_ATTACHING)
+                attachment.save()
 
-        volume = self.db.volume_attached(context.elevated(),
-                                         attachment_id,
-                                         instance_uuid,
-                                         host_name_sanitized,
-                                         mountpoint,
-                                         mode)
+        volume = attachment.finish_attach(
+            instance_uuid,
+            host_name_sanitized,
+            mountpoint,
+            mode)
+
         self._notify_about_volume_usage(context, volume, "attach.end")
         LOG.info(_LI("Attach volume completed successfully."),
                  resource=volume)
-        return self.db.volume_attachment_get(context, attachment_id)
+        return attachment
 
     @coordination.synchronized('{volume_id}-{f_name}')
     def detach_volume(self, context, volume_id, attachment_id=None):
@@ -113,9 +113,10 @@ class VolumeAPI(rpc.RPCAPI):
         back in Mitaka when introducing cheesecake replication.
         3.2 - Adds support for sending objects over RPC in
               get_backup_device().
+        3.3 - Adds support for sending objects over RPC in attach_volume().
     """
 
-    RPC_API_VERSION = '3.2'
+    RPC_API_VERSION = '3.3'
     RPC_DEFAULT_VERSION = '3.0'
     TOPIC = constants.VOLUME_TOPIC
     BINARY = 'cinder-volume'
@@ -188,13 +189,16 @@ class VolumeAPI(rpc.RPCAPI):
 
     def attach_volume(self, ctxt, volume, instance_uuid, host_name,
                       mountpoint, mode):
-        cctxt = self._get_cctxt(volume.service_topic_queue)
-        return cctxt.call(ctxt, 'attach_volume',
-                          volume_id=volume.id,
-                          instance_uuid=instance_uuid,
-                          host_name=host_name,
-                          mountpoint=mountpoint,
-                          mode=mode)
+        msg_args = {'volume_id': volume.id,
+                    'instance_uuid': instance_uuid,
+                    'host_name': host_name,
+                    'mountpoint': mountpoint,
+                    'mode': mode,
+                    'volume': volume}
+        cctxt = self._get_cctxt(volume.service_topic_queue, ('3.3', '3.0'))
+        if not cctxt.can_send_version('3.3'):
+            msg_args.pop('volume')
+        return cctxt.call(ctxt, 'attach_volume', **msg_args)
 
     def detach_volume(self, ctxt, volume, attachment_id):
         cctxt = self._get_cctxt(volume.service_topic_queue)
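For orientation, a minimal sketch of how the pieces above compose on the manager side. This is illustrative only, not the real attach_volume(): the locking, notifications, UUID validation, and readonly checks are omitted, and do_driver_attach() is a hypothetical stand-in for the driver call.

    from cinder.objects import fields


    def attach(context, volume, instance_uuid, host_name, mountpoint, mode):
        # begin_attach() creates the attachment row in the 'attaching'
        # state and records attached_mode in the volume's admin metadata.
        attachment = volume.begin_attach(mode)
        try:
            do_driver_attach(volume)  # hypothetical stand-in for driver work
        except Exception:
            attachment.attach_status = (
                fields.VolumeAttachStatus.ERROR_ATTACHING)
            attachment.save()
            raise
        # finish_attach() marks the volume 'in-use' via db.volume_attached()
        # and applies the returned values onto the attachment object, so no
        # second DB read is needed.
        volume = attachment.finish_attach(instance_uuid, host_name,
                                          mountpoint, mode)
        return attachment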