Support Glance image data colocation

This feature expands the usability of Glance multiple stores for volumes
created from images.
With this change, a volume created from an image sends its base image id to
Glance during upload, so that Glance can store the new volume image where the
base image resides (whether in a single store or across multiple stores).

A conflict between the 'image_service:store_id' [1] and the
'base_image_ref' passed to Glance is expected to be handled on the Glance
side.

Implements: bp copy-image-in-multiple-stores

[1] https://review.opendev.org/#/c/661676/

Change-Id: I5dd00e8d373fd8a7dd4f97e4e5924aefe599d1d9
whoami-rajat 2019-12-06 07:22:16 +00:00 committed by Rajat Dhasmana
parent a039f5e7a5
commit 7c685f0d68
24 changed files with 214 additions and 111 deletions
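The core of the change is the header handling added to GlanceClientWrapper.call()
in the first hunk below. As a standalone illustration (the helper name
build_additional_headers and the sample values are mine, not part of the change),
the new logic amounts to:

def build_additional_headers(store_id=None, base_image_ref=None):
    # Forward only the values that are actually set, mirroring the dict
    # comprehension added to GlanceClientWrapper.call() below.
    keys = ('x-image-meta-store', 'x-openstack-base-image-ref')
    values = (store_id, base_image_ref)
    return {k: v for (k, v) in zip(keys, values) if v is not None}

# A store hint alone, a base image reference alone, or neither may be present.
assert build_additional_headers(store_id='fast') == {'x-image-meta-store': 'fast'}
assert build_additional_headers(base_image_ref='abc123') == {
    'x-openstack-base-image-ref': 'abc123'}
assert build_additional_headers() == {}

The resulting headers are attached to the glanceclient HTTP client before the
call is made, so Glance receives the base image reference alongside the image
upload.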


@@ -217,13 +217,17 @@ class GlanceClientWrapper(object):
glanceclient.exc.CommunicationError)
num_attempts = 1 + CONF.glance_num_retries
store_id = kwargs.pop('store_id', None)
base_image_ref = kwargs.pop('base_image_ref', None)
for attempt in range(1, num_attempts + 1):
client = self.client or self._create_onetime_client(context)
if store_id:
client.http_client.additional_headers = {
'x-image-meta-store': store_id
}
keys = ('x-image-meta-store', 'x-openstack-base-image-ref',)
values = (store_id, base_image_ref,)
headers = {k: v for (k, v) in zip(keys, values) if v is not None}
if headers:
client.http_client.additional_headers = headers
try:
controller = getattr(client,
@@ -395,7 +399,7 @@ class GlanceImageService(object):
def update(self, context, image_id,
image_meta, data=None, purge_props=True,
store_id=None):
store_id=None, base_image_ref=None):
"""Modify the given image with the new data."""
# For v2, _translate_to_glance stores custom properties in image meta
# directly. We need the custom properties to identify properties to
@@ -412,6 +416,8 @@ class GlanceImageService(object):
kwargs = {}
if store_id:
kwargs['store_id'] = store_id
if base_image_ref:
kwargs['base_image_ref'] = base_image_ref
try:
if data:


@@ -681,7 +681,7 @@ def _validate_file_format(image_data, expected_format):
def upload_volume(context, image_service, image_meta, volume_path,
volume_format='raw', run_as_root=True, compress=True,
store_id=None):
store_id=None, base_image_ref=None):
image_id = image_meta['id']
if image_meta.get('container_format') != 'compressed':
if (image_meta['disk_format'] == volume_format):
@@ -691,13 +691,15 @@ def upload_volume(context, image_service, image_meta, volume_path,
with open(volume_path, 'rb') as image_file:
image_service.update(context, image_id, {},
tpool.Proxy(image_file),
store_id=store_id)
store_id=store_id,
base_image_ref=base_image_ref)
else:
with utils.temporary_chown(volume_path):
with open(volume_path, 'rb') as image_file:
image_service.update(context, image_id, {},
tpool.Proxy(image_file),
store_id=store_id)
store_id=store_id,
base_image_ref=base_image_ref)
return
with temporary_file() as tmp:
@@ -740,7 +742,8 @@ def upload_volume(context, image_service, image_meta, volume_path,
with open(tmp, 'rb') as image_file:
image_service.update(context, image_id, {},
tpool.Proxy(image_file),
store_id=store_id)
store_id=store_id,
base_image_ref=base_image_ref)
def check_virtual_size(virtual_size, volume_size, image_id):


@@ -212,7 +212,7 @@ class _FakeImageService(object):
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False, store_id=None):
purge_props=False, store_id=None, base_image_ref=None):
"""Replace the contents of the given image with the new data.
:raises ImageNotFound: if the image does not exist.


@@ -537,6 +537,36 @@ class TestGlanceImageService(test.TestCase):
client.call.assert_called_once_with(
self.context, 'update', image_id, k1='v1', remove_props=['k2'])
@mock.patch.object(glance.GlanceImageService, '_translate_from_glance')
@mock.patch.object(glance.GlanceImageService, 'show')
def test_update_base_image_ref(self, show, translate_from_glance):
image_id = mock.sentinel.image_id
client = mock.Mock(call=mock.Mock())
service = glance.GlanceImageService(client=client)
data = '*' * 256
show.return_value = {}
translate_from_glance.return_value = {}
service.update(self.context, image_id, {}, data,
base_image_ref=123)
calls = [mock.call.call(
self.context, 'upload', image_id, data, base_image_ref=123),
mock.call.call(self.context, 'get', image_id)]
client.assert_has_calls(calls, any_order=True)
def test_call_with_additional_headers(self):
glance_wrapper = glance.GlanceClientWrapper()
fake_client = mock.Mock()
self.mock_object(glance_wrapper, 'client', fake_client)
glance_wrapper.call(self.context, 'upload',
{},
store_id='xyz',
base_image_ref=123)
self.assertDictEqual({
'x-image-meta-store': 'xyz',
'x-openstack-base-image-ref': 123},
fake_client.http_client.additional_headers)
def test_delete(self):
fixture1 = self._make_fixture(name='test image 1')
fixture2 = self._make_fixture(name='test image 2')


@@ -763,7 +763,7 @@ class TestUploadVolume(test.TestCase):
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value,
store_id=None)
store_id=None, base_image_ref=None)
@mock.patch('eventlet.tpool.Proxy')
@mock.patch('cinder.image.image_utils.utils.temporary_chown')
@@ -796,7 +796,7 @@ class TestUploadVolume(test.TestCase):
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value,
store_id=None)
store_id=None, base_image_ref=None)
@mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
@mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
@@ -852,7 +852,7 @@ class TestUploadVolume(test.TestCase):
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value,
store_id=None)
store_id=None, base_image_ref=None)
mock_engine.compress_img.assert_called()
@mock.patch('eventlet.tpool.Proxy')
@@ -886,7 +886,7 @@ class TestUploadVolume(test.TestCase):
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value,
store_id=None)
store_id=None, base_image_ref=None)
@mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
@mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
@@ -943,7 +943,7 @@ class TestUploadVolume(test.TestCase):
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value,
store_id=None)
store_id=None, base_image_ref=None)
mock_engine.compress_img.assert_called()
@mock.patch('cinder.image.image_utils.CONF')
@@ -978,6 +978,32 @@ class TestUploadVolume(test.TestCase):
self.assertEqual(2, mock_info.call_count)
self.assertFalse(image_service.update.called)
@mock.patch('eventlet.tpool.Proxy')
@mock.patch('cinder.image.image_utils.utils.temporary_chown')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.os')
def test_base_image_ref(self, mock_os, mock_temp, mock_convert, mock_info,
mock_open, mock_conf, mock_chown, mock_proxy):
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_meta = {'id': 'test_id',
'disk_format': 'raw',
'container_format': mock.sentinel.container_format}
volume_path = mock.sentinel.volume_path
mock_os.name = 'posix'
mock_os.access.return_value = False
image_utils.upload_volume(ctxt, image_service, image_meta,
volume_path, base_image_ref='xyz')
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value,
store_id=None, base_image_ref='xyz')
class TestFetchToVhd(test.TestCase):
@mock.patch('cinder.image.image_utils.fetch_to_volume_format')


@@ -1271,7 +1271,8 @@ class QuobyteDriverTestCase(test.TestCase):
self.assertEqual(self.TEST_MNT_POINT_BASE,
conn_info['mount_point_base'])
def test_copy_volume_to_image_raw_image(self):
@mock.patch('cinder.db.volume_glance_metadata_get', return_value={})
def test_copy_volume_to_image_raw_image(self, vol_glance_metadata):
drv = self._driver
volume_type_id = db.volume_type_create(
@@ -1315,10 +1316,12 @@ class QuobyteDriverTestCase(test.TestCase):
run_as_root=False)
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False,
store_id=None)
store_id=None, base_image_ref=None, compress=True,
volume_format='raw')
self.assertTrue(mock_create_temporary_file.called)
def test_copy_volume_to_image_qcow2_image(self):
@mock.patch('cinder.db.volume_glance_metadata_get', return_value={})
def test_copy_volume_to_image_qcow2_image(self, vol_glance_metadata):
"""Upload a qcow2 image file which has to be converted to raw first."""
drv = self._driver
@@ -1367,10 +1370,12 @@ class QuobyteDriverTestCase(test.TestCase):
volume_path, upload_path, 'raw', run_as_root=False)
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False,
store_id=None)
store_id=None, base_image_ref=None, compress=True,
volume_format='raw')
self.assertTrue(mock_create_temporary_file.called)
def test_copy_volume_to_image_snapshot_exists(self):
@mock.patch('cinder.db.volume_glance_metadata_get', return_value={})
def test_copy_volume_to_image_snapshot_exists(self, vol_glance_metadata):
"""Upload an active snapshot which has to be converted to raw first."""
drv = self._driver
@@ -1421,7 +1426,8 @@ class QuobyteDriverTestCase(test.TestCase):
volume_path, upload_path, 'raw', run_as_root=False)
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False,
store_id=None)
store_id=None, base_image_ref=None, compress=True,
volume_format='raw')
self.assertTrue(mock_create_temporary_file.called)
def test_set_nas_security_options_default(self):


@@ -451,7 +451,8 @@ class VMwareVStorageObjectDriverTestCase(test.TestCase):
vmdk_file_path=vmdk_file_path,
vmdk_size=volume.size * units.Gi,
image_name=image_meta['name'],
store_id='fake-store')
store_id='fake-store',
base_image_ref=None)
vops.detach_fcd.assert_called_once_with(backing, fcd_loc)
delete_temp_backing.assert_called_once_with(backing)


@@ -1291,6 +1291,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
host=self._config.vmware_host_ip,
port=self._config.vmware_host_port,
store_id='fake-store',
base_image_ref=None,
vm=backing,
vmdk_file_path=vmdk_file_path,
vmdk_size=volume['size'] * units.Gi,


@@ -415,8 +415,9 @@ class TestWindowsISCSIDriver(test.TestCase):
expected_tmp_vhd_path)
mock_upload_volume.assert_called_once_with(
mock.sentinel.context, mock.sentinel.image_service,
fake_image_meta, expected_tmp_vhd_path, 'vhd',
store_id='fake-store')
fake_image_meta, expected_tmp_vhd_path, volume_format='vhd',
store_id='fake-store', base_image_ref=None,
compress=True, run_as_root=True)
mock_delete_if_exists.assert_called_once_with(
expected_tmp_vhd_path)


@@ -788,8 +788,9 @@ class WindowsSmbFsTestCase(test.TestCase):
fake_upload_volume.assert_called_once_with(
mock.sentinel.context, mock.sentinel.image_service,
fake_image_meta, upload_path, fake_img_format,
store_id='fake-store')
fake_image_meta, upload_path, volume_format=fake_img_format,
store_id='fake-store', base_image_ref=None, compress=True,
run_as_root=True)
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type')
def test_copy_image_to_volume(self, mock_get_vhd_type):


@@ -36,6 +36,7 @@ from cinder.volume import configuration
from cinder.volume import driver_utils
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import throttling
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
@@ -902,16 +903,13 @@ class BaseVD(object):
enforce_multipath)
attach_info, volume = self._attach_volume(context, volume, properties)
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
try:
image_utils.upload_volume(context,
volume_utils.upload_volume(context,
image_service,
image_meta,
attach_info['device']['path'],
compress=True,
store_id=store_id)
volume,
compress=True)
finally:
# Since attached volume was not used for writing we can force
# detach it


@@ -1244,14 +1244,12 @@ class VxFlexOSDriver(driver.VolumeDriver):
"service": image_service,
"meta": image_meta,
})
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
try:
image_utils.upload_volume(context,
volume_utils.upload_volume(context,
image_service,
image_meta,
self._sio_attach_volume(volume),
store_id=store_id)
volume)
finally:
self._sio_detach_volume(volume)


@@ -994,13 +994,11 @@ class GPFSDriver(driver.CloneableImageVD,
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
image_utils.upload_volume(context,
volume_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume),
store_id=store_id)
volume)
def _migrate_volume(self, volume, host):
"""Migrate vol if source and dest are managed by same GPFS cluster."""


@@ -33,6 +33,7 @@ from cinder.image import image_utils
from cinder import interface
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume import volume_utils
try:
import linstor
@@ -662,13 +663,13 @@ class LinstorBaseDriver(driver.VolumeDriver):
return lin_drv.all_api_responses_success(api_response)
def _copy_vol_to_image(self, context, image_service, image_meta, rsc_path,
store_id=None):
volume):
return image_utils.upload_volume(context,
return volume_utils.upload_volume(context,
image_service,
image_meta,
rsc_path,
store_id=store_id)
volume)
#
# Snapshot
@@ -980,13 +981,11 @@ class LinstorBaseDriver(driver.VolumeDriver):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume)
rsc_path = str(self._get_rsc_path(full_rsc_name))
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
self._copy_vol_to_image(context,
image_service,
image_meta,
rsc_path,
store_id=store_id)
volume)
return {}
# Not supported currently


@@ -524,14 +524,11 @@ class LVMVolumeDriver(driver.VolumeDriver):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
image_utils.upload_volume(context,
volume_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume),
store_id=store_id)
volume)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""


@@ -1626,9 +1626,6 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
volume_id=volume.id)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
tmp_dir = volume_utils.image_conversion_dir()
tmp_file = os.path.join(tmp_dir,
volume.name + '-' + image_meta['id'])
@@ -1638,9 +1635,9 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
volume.name, tmp_file]
args.extend(self._ceph_args())
self._try_execute(*args)
image_utils.upload_volume(context, image_service,
volume_utils.upload_volume(context, image_service,
image_meta, tmp_file,
store_id=store_id)
volume)
os.unlink(tmp_file)
def extend_volume(self, volume, new_size):


@@ -474,13 +474,12 @@ class RemoteFSDriver(driver.BaseVD):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
image_utils.upload_volume(context,
volume_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume),
run_as_root=self._execute_as_root,
store_id=store_id)
volume,
run_as_root=self._execute_as_root)
def _read_config_file(self, config_file):
# Returns list of lines in file
@@ -975,15 +974,12 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
else:
upload_path = active_file_path
if not store_id:
store_id = volume.volume_type.extra_specs.get(
'image_service:store_id')
image_utils.upload_volume(context,
volume_utils.upload_volume(context,
image_service,
image_meta,
upload_path,
run_as_root=self._execute_as_root,
store_id=store_id)
volume,
run_as_root=self._execute_as_root)
def get_active_image_from_info(self, volume):
"""Returns filename of the active image from the info file."""


@@ -26,6 +26,7 @@ from cinder.image import image_utils
from cinder import interface
from cinder import utils
from cinder.volume import driver
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
@@ -358,8 +359,6 @@ class SPDKDriver(driver.VolumeDriver):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
volume['provider_location'] = (
self.create_export(context, volume, None)['provider_location'])
connection_data = self.initialize_connection(volume, None)['data']
@@ -376,12 +375,11 @@ class SPDKDriver(driver.VolumeDriver):
connection_data['device_path'] = device_info['path']
try:
image_utils.upload_volume(context,
volume_utils.upload_volume(context,
image_service,
image_meta,
device_info['path'],
store_id=store_id)
volume)
finally:
target_connector.disconnect_volume(connection_data, volume)


@@ -33,6 +33,7 @@ from cinder import interface
from cinder.volume.drivers.vmware import datastore as hub
from cinder.volume.drivers.vmware import vmdk
from cinder.volume.drivers.vmware import volumeops as vops
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
@@ -262,6 +263,9 @@ class VMwareVStorageObjectDriver(vmdk.VMwareVcVmdkDriver):
store_id = volume.volume_type.extra_specs.get(
'image_service:store_id')
# TODO (whoami-rajat): Remove store_id and base_image_ref
# parameters when oslo.vmware calls volume_utils wrapper of
# upload_volume instead of image_utils.upload_volume
image_transfer.upload_image(
context,
conf.vmware_image_transfer_timeout_secs,
@@ -275,7 +279,8 @@ class VMwareVStorageObjectDriver(vmdk.VMwareVcVmdkDriver):
vmdk_file_path=vmdk_file_path,
vmdk_size=volume.size * units.Gi,
image_name=image_meta['name'],
store_id=store_id)
store_id=store_id,
base_image_ref=volume_utils.get_base_image_ref(volume))
finally:
if attached:
self.volumeops.detach_fcd(backing, fcd_loc)


@@ -47,6 +47,7 @@ from cinder.volume.drivers.vmware import datastore as hub
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
from cinder.volume.drivers.vmware import volumeops
from cinder.volume import volume_types
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
@@ -1539,6 +1540,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
# TODO (whoami-rajat): Remove store_id and base_image_ref
# parameters when oslo.vmware calls volume_utils wrapper of
# upload_volume instead of image_utils.upload_volume
image_transfer.upload_image(context,
timeout,
image_service,
@@ -1552,7 +1556,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
vmdk_size=volume['size'] * units.Gi,
image_name=image_meta['name'],
image_version=1,
store_id=store_id)
store_id=store_id,
base_image_ref=
volume_utils.get_base_image_ref(volume))
LOG.info("Done copying volume %(vol)s to a new image %(img)s",
{'vol': volume['name'], 'img': image_meta['name']})


@@ -285,8 +285,6 @@ class WindowsISCSIDriver(driver.ISCSIDriver):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
disk_format = self._tgt_utils.get_supported_disk_format()
temp_vhd_path = os.path.join(CONF.image_conversion_dir,
str(image_meta['id']) + '.' + disk_format)
@@ -296,9 +294,9 @@ class WindowsISCSIDriver(driver.ISCSIDriver):
# qemu-img cannot access VSS snapshots, for which reason it
# must be exported first.
self._tgt_utils.export_snapshot(tmp_snap_name, temp_vhd_path)
image_utils.upload_volume(context, image_service, image_meta,
temp_vhd_path, 'vhd',
store_id=store_id)
volume_utils.upload_volume(
context, image_service, image_meta, temp_vhd_path, volume,
'vhd')
finally:
fileutils.delete_if_exists(temp_vhd_path)


@@ -35,6 +35,7 @@ from cinder import objects
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers import remotefs as remotefs_drv
from cinder.volume import volume_utils
VERSION = '1.1.0'
@@ -553,8 +554,6 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
# If snapshots exist, flatten to a temporary image, and upload it
@@ -580,12 +579,12 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
else:
upload_path = active_file_path
image_utils.upload_volume(context,
volume_utils.upload_volume(context,
image_service,
image_meta,
upload_path,
root_file_fmt,
store_id=store_id)
volume,
root_file_fmt)
finally:
if temp_path:
self._delete(temp_path)


@@ -51,6 +51,7 @@ from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import objects
from cinder.objects import fields
from cinder import rpc
@@ -1252,3 +1253,32 @@ def update_backup_error(backup, err, status=fields.BackupStatus.ERROR):
backup.status = status
backup.fail_reason = err
backup.save()
# TODO (whoami-rajat): Remove this method when oslo.vmware calls volume_utils
# wrapper of upload_volume instead of image_utils.upload_volume
def get_base_image_ref(volume):
# This method fetches the image_id from volume glance metadata and passes
# it to the driver calling it during the upload volume to image operation
base_image_ref = None
if volume.glance_metadata:
base_image_ref = volume.glance_metadata.get('image_id')
return base_image_ref
def upload_volume(context, image_service, image_meta, volume_path,
volume, volume_format='raw', run_as_root=True,
compress=True):
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
# This fetches the image_id from volume glance metadata and passes
# it to the driver calling it during the upload volume to image operation
base_image_ref = None
if volume.glance_metadata:
base_image_ref = volume.glance_metadata.get('image_id')
image_utils.upload_volume(context, image_service, image_meta, volume_path,
volume_format=volume_format,
run_as_root=run_as_root,
compress=compress, store_id=store_id,
base_image_ref=base_image_ref)
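For driver authors, the net effect of the new wrapper is that
copy_volume_to_image() no longer needs to dig store_id out of the volume type
extra specs or the base image out of glance metadata. A hedged usage sketch
follows (ExampleDriver and its local_path() are illustrative, not part of this
change, and the import assumes a Cinder environment):

from cinder.volume import volume_utils


class ExampleDriver(object):

    def local_path(self, volume):
        # Illustrative only: the block device or file backing the volume.
        return '/dev/mapper/%s' % volume.name

    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""
        # The wrapper looks up store_id in the volume type extra specs and
        # base_image_ref in the volume's glance metadata, then delegates to
        # image_utils.upload_volume().
        volume_utils.upload_volume(context,
                                   image_service,
                                   image_meta,
                                   self.local_path(volume),
                                   volume)

This mirrors the driver changes above (LVM, GPFS, RBD, RemoteFS, SPDK, VxFlexOS
and the Windows drivers).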


@@ -0,0 +1,9 @@
---
features:
- |
This release includes support for Glance automatic image colocation.
When a volume originally created from an image is uploaded to the
Image service, Cinder passes Glance a reference to the original
image. Glance may use this information to colocate the new image data
in the same image store(s) as the original image data. Consult the
Glance documentation for more information.
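
To make the release note concrete: Cinder only sends a base image reference for
volumes that actually carry one in their glance metadata. A minimal,
self-contained sketch of that lookup (FakeVolume and the sample ids are
illustrative; the real logic is get_base_image_ref()/upload_volume() in
cinder/volume/volume_utils.py above):

class FakeVolume(object):
    # Stand-in for a Cinder volume object that carries glance metadata.
    def __init__(self, glance_metadata):
        self.glance_metadata = glance_metadata


def get_base_image_ref(volume):
    # A volume that was not created from an image has no image_id in its
    # glance metadata, so no base image reference (and no header) is sent.
    if volume.glance_metadata:
        return volume.glance_metadata.get('image_id')
    return None


assert get_base_image_ref(FakeVolume({'image_id': 'abc123'})) == 'abc123'
assert get_base_image_ref(FakeVolume({})) is None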