Support multiple stores of Glance

Glance now supports configuring multiple stores at a time. To use
this feature in Cinder when uploading a volume to an image, the
operator needs to define a new field named 'image_service:store_id'
in the volume-type extra-specs. When an upload-volume-to-image
request is made, if 'image_service:store_id' is present in the
associated volume type, the image is uploaded to the specified
store. The 'store_id' value is simply a store identifier defined in
glance-api.conf. If 'image_service:store_id' is null or not set in
the volume type, the image is uploaded to the default store in
Glance.

Co-authored-by: Sagar Waghmare <sawaghma@redhat.com>
Co-authored-by: Abhishek Kekane <akekane@redhat.com>

DocImpact
Implements: bp support-glance-multiple-backend

Change-Id: Ica56833a1d9fb9f47b922dbbc6558901bb3a2800
Abhishek Kekane 2019-05-16 08:53:29 +00:00
parent 9e3ce1427f
commit 350973f3dd
29 changed files with 461 additions and 55 deletions
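
For illustration only, a minimal sketch (not part of this change) of how an
operator could set the new extra-spec through the Block Storage API. The
endpoint, token, project and volume-type IDs, and the store id 'cheap' are
placeholders; the request body matches what the extra-specs controller in
this change validates.

import json

import requests

# Assumed deployment values -- substitute real ones.
CINDER_ENDPOINT = 'http://controller:8776/v3/<project-id>'
TOKEN = '<keystone-token>'
VOLUME_TYPE_ID = '<volume-type-uuid>'

resp = requests.post(
    '%s/types/%s/extra_specs' % (CINDER_ENDPOINT, VOLUME_TYPE_ID),
    headers={'X-Auth-Token': TOKEN, 'Content-Type': 'application/json'},
    data=json.dumps({'extra_specs': {'image_service:store_id': 'cheap'}}))
resp.raise_for_status()
print(resp.json())  # echoes the stored extra-specs on success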


@ -26,6 +26,7 @@ from cinder import context as ctxt
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder.policies import type_extra_specs as policy
from cinder import rpc
from cinder.volume import volume_types
@ -71,6 +72,10 @@ class VolumeTypeExtraSpecsController(wsgi.Controller):
        self._check_type(context, type_id)
        specs = body['extra_specs']
        if 'image_service:store_id' in specs:
            image_service_store_id = specs['image_service:store_id']
            image_utils.validate_stores_id(context, image_service_store_id)
        db.volume_type_extra_specs_update_or_create(context,
                                                    type_id,
                                                    specs)
@ -95,6 +100,10 @@ class VolumeTypeExtraSpecsController(wsgi.Controller):
            expl = _('Request body and URI mismatch')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        if 'image_service:store_id' in body:
            image_service_store_id = body['image_service:store_id']
            image_utils.validate_stores_id(context, image_service_store_id)
        db.volume_type_extra_specs_update_or_create(context,
                                                    type_id,
                                                    body)


@ -331,6 +331,14 @@ class NotFound(CinderException):
    safe = True


class GlanceStoreNotFound(NotFound):
    message = _("Store %(store_id)s not enabled in glance.")


class GlanceStoreReadOnly(Invalid):
    message = _("Store %(store_id)s is read-only in glance.")


class VolumeNotFound(NotFound):
    message = _("Volume %(volume_id)s could not be found.")


@ -216,9 +216,15 @@ class GlanceClientWrapper(object):
                      glanceclient.exc.InvalidEndpoint,
                      glanceclient.exc.CommunicationError)
        num_attempts = 1 + CONF.glance_num_retries
        store_id = kwargs.pop('store_id', None)
        for attempt in range(1, num_attempts + 1):
            client = self.client or self._create_onetime_client(context)
            if store_id:
                client.http_client.additional_headers = {
                    'x-image-meta-store': store_id
                }
            try:
                controller = getattr(client,
                                     kwargs.pop('controller', 'images'))
@ -287,6 +293,14 @@ class GlanceImageService(object):
        except Exception:
            _reraise_translated_image_exception(image_id)

    def get_stores(self, context):
        """Returns a list of dicts with stores information."""
        try:
            return self._client.call(context,
                                     'get_stores_info')
        except Exception:
            _reraise_translated_exception()

    def show(self, context, image_id):
        """Returns a dict with image data for the given opaque image id."""
        try:
@ -380,7 +394,8 @@ class GlanceImageService(object):
return self._translate_from_glance(context, recv_service_image_meta)
def update(self, context, image_id,
image_meta, data=None, purge_props=True):
image_meta, data=None, purge_props=True,
store_id=None):
"""Modify the given image with the new data."""
# For v2, _translate_to_glance stores custom properties in image meta
# directly. We need the custom properties to identify properties to
@ -394,9 +409,13 @@ class GlanceImageService(object):
# NOTE(bcwaldon): id is not an editable field, but it is likely to be
# passed in by calling code. Let's be nice and ignore it.
image_meta.pop('id', None)
kwargs = {}
if store_id:
kwargs['store_id'] = store_id
try:
if data:
self._client.call(context, 'upload', image_id, data)
self._client.call(context, 'upload', image_id, data, **kwargs)
if image_meta:
if purge_props:
# Properties to remove are those not specified in
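
For illustration only, a rough sketch of the raw HTTP request the client
wrapper above effectively issues when a store_id is supplied: the target
store is selected solely through the x-image-meta-store header on the
image-data upload call. The endpoint, token, image id, data file and the
store id 'cheap' are placeholders.

import requests

GLANCE_ENDPOINT = 'http://controller:9292'  # assumed Glance endpoint
headers = {
    'X-Auth-Token': '<keystone-token>',
    'Content-Type': 'application/octet-stream',
    'x-image-meta-store': 'cheap',  # store id defined in glance-api.conf
}
with open('/tmp/volume-data.raw', 'rb') as data:
    resp = requests.put(
        '%s/v2/images/<image-uuid>/file' % GLANCE_ENDPOINT,
        headers=headers, data=data)
resp.raise_for_status()  # Glance returns 204 No Content on success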


@ -48,6 +48,7 @@ import six
from cinder import exception
from cinder.i18n import _
from cinder.image import accelerator
from cinder.image import glance
from cinder import utils
from cinder.volume import throttling
from cinder.volume import volume_utils
@ -88,6 +89,19 @@ QEMU_IMG_MIN_CONVERT_LUKS_VERSION = '2.10'
COMPRESSIBLE_IMAGE_FORMATS = ('qcow2',)
def validate_stores_id(context, image_service_store_id):
    image_service = glance.get_default_image_service()
    stores_info = image_service.get_stores(context)['stores']
    for info in stores_info:
        if image_service_store_id == info['id']:
            if info.get('read-only') == "true":
                raise exception.GlanceStoreReadOnly(
                    store_id=image_service_store_id)
            return
    raise exception.GlanceStoreNotFound(store_id=image_service_store_id)


def fixup_disk_format(disk_format):
    """Return the format to be provided to qemu-img convert."""
@ -660,7 +674,8 @@ def _validate_file_format(image_data, expected_format):
def upload_volume(context, image_service, image_meta, volume_path,
volume_format='raw', run_as_root=True, compress=True):
volume_format='raw', run_as_root=True, compress=True,
store_id=None):
image_id = image_meta['id']
if image_meta.get('container_format') != 'compressed':
if (image_meta['disk_format'] == volume_format):
@ -669,12 +684,14 @@ def upload_volume(context, image_service, image_meta, volume_path,
if os.name == 'nt' or os.access(volume_path, os.R_OK):
with open(volume_path, 'rb') as image_file:
image_service.update(context, image_id, {},
tpool.Proxy(image_file))
tpool.Proxy(image_file),
store_id=store_id)
else:
with utils.temporary_chown(volume_path):
with open(volume_path, 'rb') as image_file:
image_service.update(context, image_id, {},
tpool.Proxy(image_file))
tpool.Proxy(image_file),
store_id=store_id)
return
with temporary_file() as tmp:
@ -716,7 +733,8 @@ def upload_volume(context, image_service, image_meta, volume_path,
accel.compress_img(run_as_root=run_as_root)
with open(tmp, 'rb') as image_file:
image_service.update(context, image_id, {},
tpool.Proxy(image_file))
tpool.Proxy(image_file),
store_id=store_id)
def check_virtual_size(virtual_size, volume_size, image_id):
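
As a standalone illustration (sample data only, not the function above),
these are the validation rules validate_stores_id() applies to the 'stores'
payload returned by Glance: an unknown id is rejected outright, and a
read-only store is not a valid upload target.

SAMPLE_STORES = [
    {'id': 'cheap', 'default': 'true'},
    {'id': 'read_only_store', 'read-only': 'true'},
]


def check(store_id, stores=SAMPLE_STORES):
    for info in stores:
        if store_id == info['id']:
            # Glance reports the flag as the string "true", not a boolean.
            if info.get('read-only') == 'true':
                return 'rejected: read-only'
            return 'accepted'
    return 'rejected: not found'


print(check('cheap'))            # accepted
print(check('read_only_store'))  # rejected: read-only
print(check('fast'))             # rejected: not found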


@ -24,6 +24,7 @@ import webob
from cinder.api.contrib import types_extra_specs
from cinder import exception
from cinder.image import glance as image_store
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -147,6 +148,65 @@ class VolumeTypesExtraSpecsTest(test.TestCase):
self.assertIn('updated_at', self.notifier.notifications[0]['payload'])
self.assertEqual('value1', res_dict['extra_specs']['key1'])
@mock.patch.object(image_store.GlanceImageService, 'get_stores')
def test_create_valid_image_store(self, mock_get_stores):
mock_get_stores.return_value = {
'stores': [{
'default': 'true',
'id': 'cheap'
}, {
'id': 'read_only_store',
'read-only': 'true'
}]
}
self.mock_object(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"extra_specs": {"image_service:store_id": "cheap"}}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.create(req, fake.VOLUME_ID, body=body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertIn('created_at', self.notifier.notifications[0]['payload'])
self.assertIn('updated_at', self.notifier.notifications[0]['payload'])
self.assertEqual(
'cheap', res_dict['extra_specs']['image_service:store_id'])
@mock.patch.object(image_store.GlanceImageService, 'get_stores')
def test_create_invalid_image_store(self, mock_get_stores):
mock_get_stores.return_value = {
'stores': [{
'default': 'true',
'id': 'cheap'
}, {
'id': 'read_only_store',
'read-only': 'true'
}]
}
body = {"extra_specs": {"image_service:store_id": "fast"}}
req = fakes.HTTPRequest.blank(self.api_path)
self.assertRaises(cinder.exception.GlanceStoreNotFound,
self.controller.create,
req, fake.VOLUME_ID, body=body)
@mock.patch.object(image_store.GlanceImageService, 'get_stores')
def test_create_read_only_image_store(self, mock_get_stores):
mock_get_stores.return_value = {
'stores': [{
'default': 'true',
'id': 'cheap'
}, {
'id': 'read_only_store',
'read-only': 'true'
}]
}
body = {"extra_specs": {"image_service:store_id": "read_only_store"}}
req = fakes.HTTPRequest.blank(self.api_path)
self.assertRaises(cinder.exception.GlanceStoreReadOnly,
self.controller.create,
req, fake.VOLUME_ID, body=body)
@mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create')
def test_create_key_allowed_chars(
self, volume_type_extra_specs_update_or_create):
@ -195,6 +255,93 @@ class VolumeTypesExtraSpecsTest(test.TestCase):
self.assertEqual('value3',
res_dict['extra_specs']['other3_alphanum.-_:'])
@mock.patch.object(image_store.GlanceImageService, 'get_stores')
def test_update_valid_image_store(self, mock_get_stores):
mock_get_stores.return_value = {
'stores': [{
'default': 'true',
'id': 'cheap'
}, {
'id': 'fast',
},
{
'id': 'read_only_store',
'read-only': 'true'
}]
}
self.mock_object(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"image_service:store_id": "fast"}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(
self.api_path + "/image_service:store_id")
res_dict = self.controller.update(req, fake.VOLUME_ID,
"image_service:store_id",
body=body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertIn('created_at', self.notifier.notifications[0]['payload'])
self.assertIn('updated_at', self.notifier.notifications[0]['payload'])
self.assertEqual(
'fast', res_dict['image_service:store_id'])
@mock.patch.object(image_store.GlanceImageService, 'get_stores')
def test_update_invalid_image_store(self, mock_get_stores):
mock_get_stores.return_value = {
'stores': [{
'default': 'true',
'id': 'cheap'
}, {
'id': 'fast',
},
{
'id': 'read_only_store',
'read-only': 'true'
}]
}
self.mock_object(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"image_service:store_id": "very_fast"}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(
self.api_path + "/image_service:store_id")
self.assertRaises(cinder.exception.GlanceStoreNotFound,
self.controller.update,
req, fake.VOLUME_ID,
"image_service:store_id",
body=body)
@mock.patch.object(image_store.GlanceImageService, 'get_stores')
def test_update_read_only_image_store(self, mock_get_stores):
mock_get_stores.return_value = {
'stores': [{
'default': 'true',
'id': 'cheap'
}, {
'id': 'fast',
},
{
'id': 'read_only_store',
'read-only': 'true'
}]
}
self.mock_object(cinder.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"image_service:store_id": "read_only_store"}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(
self.api_path + "/image_service:store_id")
self.assertRaises(cinder.exception.GlanceStoreReadOnly,
self.controller.update,
req, fake.VOLUME_ID,
"image_service:store_id",
body=body)
def test_update_item(self):
self.mock_object(cinder.db,
'volume_type_extra_specs_update_or_create',


@ -212,7 +212,7 @@ class _FakeImageService(object):
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
purge_props=False, store_id=None):
"""Replace the contents of the given image with the new data.
:raises ImageNotFound: if the image does not exist.


@ -762,7 +762,8 @@ class TestUploadVolume(test.TestCase):
mock_proxy.assert_called_once_with(
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value)
ctxt, image_meta['id'], {}, mock_proxy.return_value,
store_id=None)
@mock.patch('eventlet.tpool.Proxy')
@mock.patch('cinder.image.image_utils.utils.temporary_chown')
@ -794,7 +795,8 @@ class TestUploadVolume(test.TestCase):
mock_proxy.assert_called_once_with(
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value)
ctxt, image_meta['id'], {}, mock_proxy.return_value,
store_id=None)
@mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
@mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
@ -849,7 +851,8 @@ class TestUploadVolume(test.TestCase):
mock_proxy.assert_called_once_with(
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value)
ctxt, image_meta['id'], {}, mock_proxy.return_value,
store_id=None)
mock_engine.compress_img.assert_called()
@mock.patch('eventlet.tpool.Proxy')
@ -882,7 +885,8 @@ class TestUploadVolume(test.TestCase):
mock_proxy.assert_called_once_with(
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value)
ctxt, image_meta['id'], {}, mock_proxy.return_value,
store_id=None)
@mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
@mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
@ -938,7 +942,8 @@ class TestUploadVolume(test.TestCase):
mock_proxy.assert_called_once_with(
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value)
ctxt, image_meta['id'], {}, mock_proxy.return_value,
store_id=None)
mock_engine.compress_img.assert_called()
@mock.patch('cinder.image.image_utils.CONF')


@ -19,6 +19,7 @@ from unittest import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import units
from cinder import context
@ -27,6 +28,7 @@ from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils as test_utils
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import gpfs
@ -86,6 +88,7 @@ class GPFSDriverTestCase(test.TestCase):
self.context = context.get_admin_context()
self.context.user_id = 'fake'
self.context.project_id = 'fake'
self.updated_at = timeutils.utcnow()
CONF.gpfs_images_dir = self.images_dir
def _cleanup(self, images_dir, volumes_path):
@ -1384,7 +1387,16 @@ class GPFSDriverTestCase(test.TestCase):
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
@mock.patch('cinder.image.image_utils.upload_volume')
def test_copy_volume_to_image(self, mock_upload_volume, mock_local_path):
volume = self._fake_volume()
volume = test_utils.create_volume(
self.context, volume_type_id=fake.VOLUME_TYPE_ID,
updated_at=self.updated_at)
extra_specs = {
'image_service:store_id': 'fake-store'
}
test_utils.create_volume_type(
self.context.elevated(), id=fake.VOLUME_TYPE_ID,
name="test_type", extra_specs=extra_specs)
self.driver.copy_volume_to_image('', volume, '', '')
@mock.patch('cinder.utils.execute')


@ -30,6 +30,7 @@ import psutil
import six
from cinder import context
from cinder import db
from cinder import exception
from cinder.image import image_utils
from cinder import test
@ -1273,7 +1274,9 @@ class QuobyteDriverTestCase(test.TestCase):
def test_copy_volume_to_image_raw_image(self):
drv = self._driver
volume = self._simple_volume()
volume_type_id = db.volume_type_create(
self.context, {'name': 'quo_type', 'extra_specs': {}}).get('id')
volume = self._simple_volume(volume_type_id=volume_type_id)
volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name'])
image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
@ -1311,14 +1314,17 @@ class QuobyteDriverTestCase(test.TestCase):
force_share=True,
run_as_root=False)
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False)
mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False,
store_id=None)
self.assertTrue(mock_create_temporary_file.called)
def test_copy_volume_to_image_qcow2_image(self):
"""Upload a qcow2 image file which has to be converted to raw first."""
drv = self._driver
volume = self._simple_volume()
volume_type_id = db.volume_type_create(
self.context, {'name': 'quo_type', 'extra_specs': {}}).get('id')
volume = self._simple_volume(volume_type_id=volume_type_id)
volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name'])
image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
@ -1360,14 +1366,17 @@ class QuobyteDriverTestCase(test.TestCase):
mock_convert_image.assert_called_once_with(
volume_path, upload_path, 'raw', run_as_root=False)
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False)
mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False,
store_id=None)
self.assertTrue(mock_create_temporary_file.called)
def test_copy_volume_to_image_snapshot_exists(self):
"""Upload an active snapshot which has to be converted to raw first."""
drv = self._driver
volume = self._simple_volume()
volume_type_id = db.volume_type_create(
self.context, {'name': 'quo_type', 'extra_specs': {}}).get('id')
volume = self._simple_volume(volume_type_id=volume_type_id)
volume_path = '%s/volume-%s' % (self.TEST_MNT_POINT, self.VOLUME_UUID)
volume_filename = 'volume-%s' % self.VOLUME_UUID
image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
@ -1411,7 +1420,8 @@ class QuobyteDriverTestCase(test.TestCase):
mock_convert_image.assert_called_once_with(
volume_path, upload_path, 'raw', run_as_root=False)
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False)
mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False,
store_id=None)
self.assertTrue(mock_create_temporary_file.called)
def test_set_nas_security_options_default(self):


@ -16,12 +16,15 @@ from unittest import mock
from os_brick import initiator
from os_brick.initiator import connector
from oslo_utils import timeutils
from oslo_utils import units
from cinder import context
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as test_utils
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import spdk as spdk_driver
@ -513,6 +516,8 @@ class SpdkDriverTestCase(test.TestCase):
self.jsonrpcclient = JSONRPCClient()
self.driver = spdk_driver.SPDKDriver(configuration=
self.configuration)
self._context = context.get_admin_context()
self.updated_at = timeutils.utcnow()
def test__update_volume_stats(self):
with mock.patch.object(self.driver, "_rpc_call",
@ -733,16 +738,24 @@ class SpdkDriverTestCase(test.TestCase):
def test_copy_volume_to_image(self, volume_get):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
db_volume = fake_volume.fake_db_volume()
db_volume['provider_location'] = "127.0.0.1:3262 RDMA " \
"2016-06.io.spdk:cnode2"
provider_location = "127.0.0.1:3262 RDMA 2016-06.io.spdk:cnode2"
volume = test_utils.create_volume(
self._context, volume_type_id=fake.VOLUME_TYPE_ID,
updated_at=self.updated_at,
provider_location=provider_location)
extra_specs = {
'image_service:store_id': 'fake-store'
}
test_utils.create_volume_type(self._context.elevated(),
id=fake.VOLUME_TYPE_ID,
name="test_type",
extra_specs=extra_specs)
ctxt = context.get_admin_context()
db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(),
db_volume)
volume_get.return_value = db_volume
volume_get.return_value = volume
with mock.patch.object(self.driver.target_driver, "_rpc_call",
self.jsonrpcclient.call):
self.driver.copy_volume_to_image(ctxt, db_volume, None, None)
self.driver.copy_volume_to_image(ctxt, volume, None, None)
def test_extend_volume(self):
with mock.patch.object(self.driver, "_rpc_call",


@ -18,6 +18,7 @@
from unittest import mock
import ddt
from oslo_utils import timeutils
from oslo_utils import units
from oslo_vmware import image_transfer
from oslo_vmware.objects import datastore
@ -26,8 +27,10 @@ from oslo_vmware import vim_util
from cinder import context
from cinder import exception as cinder_exceptions
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as test_utils
from cinder.volume import configuration
from cinder.volume.drivers.vmware import datastore as hub
from cinder.volume.drivers.vmware import fcd
@ -66,6 +69,7 @@ class VMwareVStorageObjectDriverTestCase(test.TestCase):
self._driver._vc_version = self.VC_VERSION
self._driver._storage_policy_enabled = True
self._context = context.get_admin_context()
self.updated_at = timeutils.utcnow()
@mock.patch.object(VMDK_DRIVER, 'do_setup')
@mock.patch.object(FCD_DRIVER, 'volumeops')
@ -386,7 +390,16 @@ class VMwareVStorageObjectDriverTestCase(test.TestCase):
vops.get_vmdk_path.return_value = vmdk_file_path
vops.get_backing_by_uuid.return_value = backing
volume = self._create_volume_obj()
volume = test_utils.create_volume(
self._context, volume_type_id=fake.VOLUME_TYPE_ID,
updated_at=self.updated_at)
extra_specs = {
'image_service:store_id': 'fake-store'
}
test_utils.create_volume_type(
self._context.elevated(), id=fake.VOLUME_TYPE_ID,
name="test_type", extra_specs=extra_specs)
image_service = mock.sentinel.image_service
image_meta = self._create_image_meta()
self._driver.copy_volume_to_image(
@ -411,7 +424,8 @@ class VMwareVStorageObjectDriverTestCase(test.TestCase):
vm=backing,
vmdk_file_path=vmdk_file_path,
vmdk_size=volume.size * units.Gi,
image_name=image_meta['name'])
image_name=image_meta['name'],
store_id='fake-store')
vops.detach_fcd.assert_called_once_with(backing, fcd_loc)
delete_temp_backing.assert_called_once_with(backing)


@ -19,6 +19,7 @@ import re
from unittest import mock
import ddt
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import versionutils
from oslo_vmware import exceptions
@ -32,6 +33,7 @@ from cinder import test
from cinder.tests.unit import fake_constants
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as test_utils
from cinder.volume import configuration
from cinder.volume.drivers.vmware import datastore as hub
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
@ -105,6 +107,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
db=self._db)
self._context = context.get_admin_context()
self.updated_at = timeutils.utcnow()
def test_get_volume_stats(self):
stats = self._driver.get_volume_stats()
@ -1182,7 +1185,17 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
vops.get_vmdk_path.return_value = vmdk_file_path
context = mock.sentinel.context
volume = self._create_volume_dict()
volume = test_utils.create_volume(
self._context, volume_type_id = fake_constants.VOLUME_TYPE_ID,
updated_at = self.updated_at)
extra_specs = {
'image_service:store_id': 'fake-store'
}
test_utils.create_volume_type(self._context.elevated(),
id=fake_constants.VOLUME_TYPE_ID,
name="test_type",
extra_specs=extra_specs)
image_service = mock.sentinel.image_service
image_meta = self._create_image_meta()
self._driver.copy_volume_to_image(
@ -1202,6 +1215,7 @@ class VMwareVcVmdkDriverTestCase(test.TestCase):
session=session,
host=self._config.vmware_host_ip,
port=self._config.vmware_host_port,
store_id='fake-store',
vm=backing,
vmdk_file_path=vmdk_file_path,
vmdk_size=volume['size'] * units.Gi,


@ -89,6 +89,11 @@ class CopyVolumeToImageTestCase(base.BaseVolumeTestCase):
def test_copy_volume_to_image_status_available(self):
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
volume_type_id = db.volume_type_create(
self.context, {'name': 'test', 'extra_specs': {
'image_service:store_id': 'fake_store'
}}).get('id')
self.volume_attrs['volume_type_id'] = volume_type_id
db.volume_create(self.context, self.volume_attrs)
# start test
@ -129,6 +134,11 @@ class CopyVolumeToImageTestCase(base.BaseVolumeTestCase):
# Creating volume testdata
self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \
'45b1161abb02'
volume_type_id = db.volume_type_create(
self.context, {'name': 'test', 'extra_specs': {
'image_service:store_id': 'fake_store'
}}).get('id')
self.volume_attrs['volume_type_id'] = volume_type_id
db.volume_create(self.context, self.volume_attrs)
method = 'volume_update_status_based_on_attachment'
@ -150,6 +160,11 @@ class CopyVolumeToImageTestCase(base.BaseVolumeTestCase):
def test_copy_volume_to_image_status_use(self):
self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
# creating volume testdata
volume_type_id = db.volume_type_create(
self.context, {'name': 'test', 'extra_specs': {
'image_service:store_id': 'fake_store'
}}).get('id')
self.volume_attrs['volume_type_id'] = volume_type_id
db.volume_create(self.context, self.volume_attrs)
# start test
@ -163,6 +178,11 @@ class CopyVolumeToImageTestCase(base.BaseVolumeTestCase):
def test_copy_volume_to_image_exception(self):
self.image_meta['id'] = NON_EXISTENT_IMAGE_ID
# creating volume testdata
volume_type_id = db.volume_type_create(
self.context, {'name': 'test', 'extra_specs': {
'image_service:store_id': 'fake_store'
}}).get('id')
self.volume_attrs['volume_type_id'] = volume_type_id
self.volume_attrs['status'] = 'in-use'
db.volume_create(self.context, self.volume_attrs)
@ -296,6 +316,11 @@ class CopyVolumeToImageTestCase(base.BaseVolumeTestCase):
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
self.volume_attrs['snapshot_id'] = fake.SNAPSHOT_ID
volume_type_id = db.volume_type_create(
self.context, {'name': 'test', 'extra_specs': {
'image_service:store_id': 'fake_store'
}}).get('id')
self.volume_attrs['volume_type_id'] = volume_type_id
db.volume_create(self.context, self.volume_attrs)
def fake_create(context, volume, **kwargs):


@ -23,14 +23,17 @@ from unittest import mock
import ddt
from oslo_utils import fileutils
from oslo_utils import timeutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as test_utils
from cinder.tests.unit.windows import db_fakes
from cinder.volume import configuration as conf
from cinder.volume.drivers.windows import iscsi as windows_iscsi
@ -49,6 +52,9 @@ class TestWindowsISCSIDriver(test.TestCase):
self._driver = windows_iscsi.WindowsISCSIDriver(
configuration=self.configuration)
self._context = context.get_admin_context()
self.updated_at = timeutils.utcnow()
@mock.patch.object(fileutils, 'ensure_tree')
def test_do_setup(self, mock_ensure_tree):
self._driver.do_setup(mock.sentinel.context)
@ -375,7 +381,18 @@ class TestWindowsISCSIDriver(test.TestCase):
disk_format = 'vhd'
fake_image_meta = db_fakes.get_fake_image_meta()
volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context)
fake_volume = test_utils.create_volume(
self._context, volume_type_id=fake.VOLUME_TYPE_ID,
updated_at=self.updated_at)
extra_specs = {
'image_service:store_id': 'fake-store'
}
test_utils.create_volume_type(self._context.elevated(),
id=fake.VOLUME_TYPE_ID, name="test_type",
extra_specs=extra_specs)
fake_img_conv_dir = 'fake_img_conv_dir'
self.flags(image_conversion_dir=fake_img_conv_dir)
@ -388,17 +405,18 @@ class TestWindowsISCSIDriver(test.TestCase):
fake_image_meta['id'] + '.' + disk_format)
self._driver.copy_volume_to_image(
mock.sentinel.context, volume,
mock.sentinel.context, fake_volume,
mock.sentinel.image_service,
fake_image_meta)
mock_tmp_snap.assert_called_once_with(volume.name)
mock_tmp_snap.assert_called_once_with(fake_volume.name)
tgt_utils.export_snapshot.assert_called_once_with(
mock.sentinel.tmp_snap_name,
expected_tmp_vhd_path)
mock_upload_volume.assert_called_once_with(
mock.sentinel.context, mock.sentinel.image_service,
fake_image_meta, expected_tmp_vhd_path, 'vhd')
fake_image_meta, expected_tmp_vhd_path, 'vhd',
store_id='fake-store')
mock_delete_if_exists.assert_called_once_with(
expected_tmp_vhd_path)


@ -17,6 +17,7 @@ import os
from unittest import mock
import ddt
from oslo_utils import timeutils
from oslo_utils import units
from cinder import context
@ -24,6 +25,7 @@ from cinder import exception
from cinder.image import image_utils
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as test_utils
from cinder.volume.drivers import remotefs
@ -78,6 +80,9 @@ class WindowsSmbFsTestCase(test.TestCase):
self.volume = self._simple_volume()
self.snapshot = self._simple_snapshot(volume=self.volume)
self._context = context.get_admin_context()
self.updated_at = timeutils.utcnow()
def _simple_volume(self, **kwargs):
updates = {'id': self._FAKE_VOLUME_ID,
'size': self._FAKE_VOLUME_SIZE,
@ -722,6 +727,17 @@ class WindowsSmbFsTestCase(test.TestCase):
def test_copy_volume_to_image(self, has_parent=False):
drv = self._smbfs_driver
volume = test_utils.create_volume(
self._context, volume_type_id=fake.VOLUME_TYPE_ID,
updated_at=self.updated_at)
extra_specs = {
'image_service:store_id': 'fake-store'
}
test_utils.create_volume_type(self._context.elevated(),
id=fake.VOLUME_TYPE_ID, name="test_type",
extra_specs=extra_specs)
fake_image_meta = {'id': 'fake-image-id'}
fake_img_format = self._smbfs_driver._DISK_FORMAT_VHDX
@ -746,12 +762,12 @@ class WindowsSmbFsTestCase(test.TestCase):
with mock.patch.object(image_utils, 'upload_volume') as (
fake_upload_volume):
drv.copy_volume_to_image(
mock.sentinel.context, self.volume,
mock.sentinel.context, volume,
mock.sentinel.image_service, fake_image_meta)
if has_parent:
fake_temp_image_name = '%s.temp_image.%s.%s' % (
self.volume.id,
volume.id,
fake_image_meta['id'],
fake_img_format)
fake_temp_image_path = os.path.join(
@ -772,7 +788,8 @@ class WindowsSmbFsTestCase(test.TestCase):
fake_upload_volume.assert_called_once_with(
mock.sentinel.context, mock.sentinel.image_service,
fake_image_meta, upload_path, fake_img_format)
fake_image_meta, upload_path, fake_img_format,
store_id='fake-store')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type')
def test_copy_image_to_volume(self, mock_get_vhd_type):


@ -899,12 +899,16 @@ class BaseVD(object):
enforce_multipath)
attach_info, volume = self._attach_volume(context, volume, properties)
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
try:
image_utils.upload_volume(context,
image_service,
image_meta,
attach_info['device']['path'],
compress=True)
compress=True,
store_id=store_id)
finally:
# Since attached volume was not used for writing we can force
# detach it


@ -1096,11 +1096,14 @@ class VxFlexOSDriver(driver.VolumeDriver):
{'vol': volume,
'service': six.text_type(image_service),
'meta': six.text_type(image_meta)})
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
try:
image_utils.upload_volume(context,
image_service,
image_meta,
self._sio_attach_volume(volume))
self._sio_attach_volume(volume),
store_id=store_id)
finally:
self._sio_detach_volume(volume)


@ -994,10 +994,13 @@ class GPFSDriver(driver.CloneableImageVD,
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
self.local_path(volume),
store_id=store_id)
def _migrate_volume(self, volume, host):
"""Migrate vol if source and dest are managed by same GPFS cluster."""


@ -661,12 +661,14 @@ class LinstorBaseDriver(driver.VolumeDriver):
# Check if all replies are success
return lin_drv.all_api_responses_success(api_response)
def _copy_vol_to_image(self, context, image_service, image_meta, rsc_path):
def _copy_vol_to_image(self, context, image_service, image_meta, rsc_path,
store_id=None):
return image_utils.upload_volume(context,
image_service,
image_meta,
rsc_path)
rsc_path,
store_id=store_id)
#
# Snapshot
@ -978,11 +980,13 @@ class LinstorBaseDriver(driver.VolumeDriver):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume)
rsc_path = str(self._get_rsc_path(full_rsc_name))
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
self._copy_vol_to_image(context,
image_service,
image_meta,
rsc_path)
rsc_path,
store_id=store_id)
return {}
# Not supported currently


@ -508,10 +508,14 @@ class LVMVolumeDriver(driver.VolumeDriver):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
self.local_path(volume),
store_id=store_id)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""


@ -1583,6 +1583,9 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
volume_id=volume.id)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
tmp_dir = volume_utils.image_conversion_dir()
tmp_file = os.path.join(tmp_dir,
volume.name + '-' + image_meta['id'])
@ -1593,7 +1596,8 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
args.extend(self._ceph_args())
self._try_execute(*args)
image_utils.upload_volume(context, image_service,
image_meta, tmp_file)
image_meta, tmp_file,
store_id=store_id)
os.unlink(tmp_file)
def extend_volume(self, volume, new_size):


@ -474,11 +474,13 @@ class RemoteFSDriver(driver.BaseVD):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume),
run_as_root=self._execute_as_root)
run_as_root=self._execute_as_root,
store_id=store_id)
def _read_config_file(self, config_file):
# Returns list of lines in file
@ -945,7 +947,7 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
return self.base
def _copy_volume_to_image(self, context, volume, image_service,
image_meta):
image_meta, store_id=None):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
@ -973,11 +975,15 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
else:
upload_path = active_file_path
if not store_id:
store_id = volume.volume_type.extra_specs.get(
'image_service:store_id')
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path,
run_as_root=self._execute_as_root)
run_as_root=self._execute_as_root,
store_id=store_id)
def get_active_image_from_info(self, volume):
"""Returns filename of the active image from the info file."""


@ -358,7 +358,8 @@ class SPDKDriver(driver.VolumeDriver):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
volume['provider_location'] = (
self.create_export(context, volume, None)['provider_location'])
connection_data = self.initialize_connection(volume, None)['data']
@ -378,7 +379,8 @@ class SPDKDriver(driver.VolumeDriver):
image_utils.upload_volume(context,
image_service,
image_meta,
device_info['path'])
device_info['path'],
store_id=store_id)
finally:
target_connector.disconnect_volume(connection_data, volume)


@ -257,6 +257,11 @@ class VMwareVStorageObjectDriver(vmdk.VMwareVcVmdkDriver):
vmdk_file_path = self.volumeops.get_vmdk_path(backing)
conf = self.configuration
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get(
'image_service:store_id')
image_transfer.upload_image(
context,
conf.vmware_image_transfer_timeout_secs,
@ -269,7 +274,8 @@ class VMwareVStorageObjectDriver(vmdk.VMwareVcVmdkDriver):
vm=backing,
vmdk_file_path=vmdk_file_path,
vmdk_size=volume.size * units.Gi,
image_name=image_meta['name'])
image_name=image_meta['name'],
store_id=store_id)
finally:
if attached:
self.volumeops.detach_fcd(backing, fcd_loc)


@ -1404,6 +1404,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
host_ip = self.configuration.vmware_host_ip
port = self.configuration.vmware_host_port
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
image_transfer.upload_image(context,
timeout,
image_service,
@ -1416,7 +1419,8 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
vmdk_file_path=vmdk_file_path,
vmdk_size=volume['size'] * units.Gi,
image_name=image_meta['name'],
image_version=1)
image_version=1,
store_id=store_id)
LOG.info("Done copying volume %(vol)s to a new image %(img)s",
{'vol': volume['name'], 'img': image_meta['name']})


@ -285,6 +285,8 @@ class WindowsISCSIDriver(driver.ISCSIDriver):
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
disk_format = self._tgt_utils.get_supported_disk_format()
temp_vhd_path = os.path.join(CONF.image_conversion_dir,
str(image_meta['id']) + '.' + disk_format)
@ -295,7 +297,8 @@ class WindowsISCSIDriver(driver.ISCSIDriver):
# must be exported first.
self._tgt_utils.export_snapshot(tmp_snap_name, temp_vhd_path)
image_utils.upload_volume(context, image_service, image_meta,
temp_vhd_path, 'vhd')
temp_vhd_path, 'vhd',
store_id=store_id)
finally:
fileutils.delete_if_exists(temp_vhd_path)


@ -553,6 +553,8 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
# If snapshots exist, flatten to a temporary image, and upload it
@ -582,7 +584,8 @@ class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin,
image_service,
image_meta,
upload_path,
root_file_fmt)
root_file_fmt,
store_id=store_id)
finally:
if temp_path:
self._delete(temp_path)


@ -1607,9 +1607,17 @@ class VolumeManager(manager.CleanableManager,
uri = 'cinder://%s' % image_volume.id
image_registered = None
# retrieve store information from extra-specs
store_id = volume.volume_type.extra_specs.get('image_service:store_id')
location_metadata = {}
if store_id:
location_metadata['store'] = store_id
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
ctx, image_meta['id'], uri, location_metadata)
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception('Failed to register image volume location '
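
For context, a hedged sketch (python-glanceclient assumed; endpoint, token
and ids are placeholders) of the location registration the manager performs
here when an image volume is used instead of a byte upload: the store id
read from the volume-type extra-specs is passed as the 'store' key in the
location metadata so Glance can associate the cinder:// location with that
store.

import glanceclient

client = glanceclient.Client('2', endpoint='http://controller:9292',
                             token='<keystone-token>')
image_id = '<image-uuid>'
image_volume_id = '<image-volume-uuid>'

location_metadata = {}
store_id = 'cheap'  # as read from the volume-type extra-specs
if store_id:
    location_metadata['store'] = store_id

# Register the volume-backed location rather than uploading the bytes.
client.images.add_location(image_id, 'cinder://%s' % image_volume_id,
                           location_metadata)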


@ -0,0 +1,23 @@
---
features:
  - |
    This release includes support for Glance multiple stores. An
    operator may now specify which Glance store will be used when
    a volume is uploaded to Glance as an image. Some details
    about this feature:

    * This feature is not directly user-facing. To enable it, an
      operator must add the field ``image_service:store_id`` in the
      volume-type extra-specs. The value of the field is a valid
      store identifier (``id``) configured in Glance, which may be
      discovered by making a ``GET /v2/info/stores`` call to the
      Image Service API.
    * If ``image_service:store_id`` is not set in the extra-specs for a
      volume-type, then any volume of that type uploaded as an image will
      be uploaded to the default store in Glance.
    * The ``image_service:store_id`` can only be set in the extra-specs
      for a volume-type when multiple glance stores are configured.
    * Cinder validates proposed Glance store identifiers by contacting
      Glance at the time the ``image_service:store_id`` is added to a
      volume-type's extra-specs. Thus the Image Service API must be
      available when a volume-type is updated.
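
For reference, a small sketch (placeholder endpoint and token) of the
``GET /v2/info/stores`` discovery call the note above refers to; the 'id'
values returned by this call are what can go into the
image_service:store_id extra-spec.

import requests

GLANCE_ENDPOINT = 'http://controller:9292'  # assumed Glance endpoint
resp = requests.get(
    '%s/v2/info/stores' % GLANCE_ENDPOINT,
    headers={'X-Auth-Token': '<keystone-token>'})
resp.raise_for_status()
for store in resp.json()['stores']:
    # Each entry carries at least an 'id'; read-only stores are rejected
    # by Cinder's validation when used as an upload target.
    print(store['id'], store)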