From 87114c8ec70364f2690fdec875180657002c33ab Mon Sep 17 00:00:00 2001
From: Abhishek Kekane
Date: Fri, 8 Jun 2018 09:50:32 +0000
Subject: [PATCH] Enable multi store support for glance

Added supporting logic to configure, manage and use multiple stores of
the same or different type/scheme. Added a new config option
'default_backend' which specifies the default store in which images
will be stored. Added support for the file and rbd stores. The default
behavior is maintained for backward compatibility.

DocImpact
Partial-Implements: bp multi-store
Change-Id: I1f2e8fa61d6dfecd8395a1f894f74ec5bcb5573c
---
 glance_store/__init__.py                          |   1 +
 glance_store/_drivers/filesystem.py               |  64 +-
 glance_store/_drivers/rbd.py                      |  32 +-
 glance_store/driver.py                            |  24 +-
 glance_store/location.py                          |  53 +-
 glance_store/multi_backend.py                     | 438 ++++++++++
 glance_store/tests/base.py                        |  43 +
 .../tests/unit/test_multistore_filesystem.py      | 821 ++++++++++++++++++
 .../tests/unit/test_multistore_rbd.py             | 467 ++++++++++
 9 files changed, 1914 insertions(+), 29 deletions(-)
 create mode 100644 glance_store/multi_backend.py
 create mode 100644 glance_store/tests/unit/test_multistore_filesystem.py
 create mode 100644 glance_store/tests/unit/test_multistore_rbd.py
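For context while reviewing, here is a minimal sketch (not part of the diff)
of how a consumer wires up two backends with the new entry points, mirroring
what the new unit tests below do; the backend names and data directories are
invented for illustration:

    from oslo_config import cfg

    import glance_store as store

    conf = cfg.ConfigOpts()
    conf(args=[])
    # 'enabled_backends' maps a backend identifier to a store type; the
    # identifiers 'fast' and 'cheap' are invented for this sketch.
    conf.register_opt(cfg.DictOpt('enabled_backends'))
    conf.set_override('enabled_backends', {'fast': 'file', 'cheap': 'file'})

    # Register the new multi-store options, then choose the default backend.
    store.register_store_opts(conf)
    conf.set_override('default_backend', 'fast', group='glance_store')
    conf.set_override('filesystem_store_datadir', '/var/lib/images-fast',
                      group='fast')
    conf.set_override('filesystem_store_datadir', '/var/lib/images-cheap',
                      group='cheap')

    # Instantiate every enabled store and register its schemes per backend.
    store.create_multi_stores(conf)

diff --git a/glance_store/__init__.py b/glance_store/__init__.py
index 388d0292..851e55f0 100644
--- a/glance_store/__init__.py
+++ b/glance_store/__init__.py
@@ -13,6 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from .multi_backend import * # noqa
 from .backend import * # noqa
 from .driver import * # noqa
 from .exceptions import * # noqa
diff --git a/glance_store/_drivers/filesystem.py b/glance_store/_drivers/filesystem.py
index b32b8976..182fce4f 100644
--- a/glance_store/_drivers/filesystem.py
+++ b/glance_store/_drivers/filesystem.py
@@ -273,13 +273,18 @@ class Store(glance_store.driver.Store):
 
         :datadir is a directory path in which glance writes image files.
         """
-        if self.conf.glance_store.filesystem_store_file_perm <= 0:
+        if self.backend_group:
+            fstore_perm = getattr(
+                self.conf, self.backend_group).filesystem_store_file_perm
+        else:
+            fstore_perm = self.conf.glance_store.filesystem_store_file_perm
+
+        if fstore_perm <= 0:
             return
 
         try:
             mode = os.stat(datadir)[stat.ST_MODE]
-            perm = int(str(self.conf.glance_store.filesystem_store_file_perm),
-                       8)
+            perm = int(str(fstore_perm), 8)
             if perm & stat.S_IRWXO > 0:
                 if not mode & stat.S_IXOTH:
                     # chmod o+x
@@ -378,26 +383,37 @@ class Store(glance_store.driver.Store):
         this method.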
If the store was not able to successfully configure itself, it should raise `exceptions.BadStoreConfiguration` """ - if not (self.conf.glance_store.filesystem_store_datadir or - self.conf.glance_store.filesystem_store_datadirs): + if self.backend_group: + fdir = getattr( + self.conf, self.backend_group).filesystem_store_datadir + fdirs = getattr( + self.conf, self.backend_group).filesystem_store_datadirs + fstore_perm = getattr( + self.conf, self.backend_group).filesystem_store_file_perm + meta_file = getattr( + self.conf, self.backend_group).filesystem_store_metadata_file + else: + fdir = self.conf.glance_store.filesystem_store_datadir + fdirs = self.conf.glance_store.filesystem_store_datadirs + fstore_perm = self.conf.glance_store.filesystem_store_file_perm + meta_file = self.conf.glance_store.filesystem_store_metadata_file + + if not (fdir or fdirs): reason = (_("Specify at least 'filesystem_store_datadir' or " "'filesystem_store_datadirs' option")) LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name="filesystem", reason=reason) - if (self.conf.glance_store.filesystem_store_datadir and - self.conf.glance_store.filesystem_store_datadirs): - + if fdir and fdirs: reason = (_("Specify either 'filesystem_store_datadir' or " "'filesystem_store_datadirs' option")) LOG.error(reason) raise exceptions.BadStoreConfiguration(store_name="filesystem", reason=reason) - if self.conf.glance_store.filesystem_store_file_perm > 0: - perm = int(str(self.conf.glance_store.filesystem_store_file_perm), - 8) + if fstore_perm > 0: + perm = int(str(fstore_perm), 8) if not perm & stat.S_IRUSR: reason = _LE("Specified an invalid " "'filesystem_store_file_perm' option which " @@ -410,13 +426,13 @@ class Store(glance_store.driver.Store): self.multiple_datadirs = False directory_paths = set() - if self.conf.glance_store.filesystem_store_datadir: - self.datadir = self.conf.glance_store.filesystem_store_datadir + if fdir: + self.datadir = fdir directory_paths.add(self.datadir) else: self.multiple_datadirs = True self.priority_data_map = {} - for datadir in self.conf.glance_store.filesystem_store_datadirs: + for datadir in fdirs: (datadir_path, priority) = self._get_datadir_path_and_priority(datadir) priority_paths = self.priority_data_map.setdefault( @@ -431,9 +447,8 @@ class Store(glance_store.driver.Store): self._create_image_directories(directory_paths) - metadata_file = self.conf.glance_store.filesystem_store_metadata_file - if metadata_file: - self._validate_metadata(metadata_file) + if meta_file: + self._validate_metadata(meta_file) def _check_directory_paths(self, datadir_path, directory_paths, priority_paths): @@ -705,15 +720,24 @@ class Store(glance_store.driver.Store): 'filepath': filepath, 'checksum_hex': checksum_hex}) - if self.conf.glance_store.filesystem_store_file_perm > 0: - perm = int(str(self.conf.glance_store.filesystem_store_file_perm), - 8) + if self.backend_group: + fstore_perm = getattr( + self.conf, self.backend_group).filesystem_store_file_perm + else: + fstore_perm = self.conf.glance_store.filesystem_store_file_perm + + if fstore_perm > 0: + perm = int(str(fstore_perm), 8) try: os.chmod(filepath, perm) except (IOError, OSError): LOG.warning(_LW("Unable to set permission to image: %s") % filepath) + # Add store backend information to location metadata + if self.backend_group: + metadata['backend'] = u"%s" % self.backend_group + return ('file://%s' % filepath, bytes_written, checksum_hex, metadata) @staticmethod diff --git a/glance_store/_drivers/rbd.py 
b/glance_store/_drivers/rbd.py
index 23ba765b..7847b254 100644
--- a/glance_store/_drivers/rbd.py
+++ b/glance_store/_drivers/rbd.py
@@ -281,17 +281,32 @@ class Store(driver.Store):
         itself, it should raise `exceptions.BadStoreConfiguration`
         """
         try:
-            chunk = self.conf.glance_store.rbd_store_chunk_size
+            if self.backend_group:
+                chunk = getattr(self.conf,
+                                self.backend_group).rbd_store_chunk_size
+                pool = getattr(self.conf, self.backend_group).rbd_store_pool
+                user = getattr(self.conf, self.backend_group).rbd_store_user
+                conf_file = getattr(self.conf,
+                                    self.backend_group).rbd_store_ceph_conf
+                connect_timeout = getattr(
+                    self.conf, self.backend_group).rados_connect_timeout
+            else:
+                chunk = self.conf.glance_store.rbd_store_chunk_size
+                pool = self.conf.glance_store.rbd_store_pool
+                user = self.conf.glance_store.rbd_store_user
+                conf_file = self.conf.glance_store.rbd_store_ceph_conf
+                connect_timeout = self.conf.glance_store.rados_connect_timeout
+
             self.chunk_size = chunk * units.Mi
             self.READ_CHUNKSIZE = self.chunk_size
             self.WRITE_CHUNKSIZE = self.READ_CHUNKSIZE
 
             # these must not be unicode since they will be passed to a
             # non-unicode-aware C library
-            self.pool = str(self.conf.glance_store.rbd_store_pool)
-            self.user = str(self.conf.glance_store.rbd_store_user)
-            self.conf_file = str(self.conf.glance_store.rbd_store_ceph_conf)
-            self.connect_timeout = self.conf.glance_store.rados_connect_timeout
+            self.pool = str(pool)
+            self.user = str(user)
+            self.conf_file = str(conf_file)
+            self.connect_timeout = connect_timeout
         except cfg.ConfigFileValueError as e:
             reason = _("Error in store configuration: %s") % e
             LOG.error(reason)
@@ -514,7 +529,12 @@ class Store(driver.Store):
         if image_size == 0:
             image_size = bytes_written
 
-        return (loc.get_uri(), image_size, checksum.hexdigest(), {})
+        # Add store backend information to location metadata
+        metadata = {}
+        if self.backend_group:
+            metadata['backend'] = u"%s" % self.backend_group
+
+        return (loc.get_uri(), image_size, checksum.hexdigest(), metadata)
 
     @capabilities.check
     def delete(self, location, context=None):
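Each driver repeats the same option-resolution pattern shown above; a
self-contained distillation (the SimpleNamespace stand-ins and the 'ceph1'
backend name are invented for illustration, not part of the patch):

    from types import SimpleNamespace

    def store_opt(conf, backend_group, name):
        # Mirrors the pattern the patch adds to each driver: prefer the
        # backend's own option group, fall back to [glance_store].
        group = backend_group or 'glance_store'
        return getattr(getattr(conf, group), name)

    # Stand-in config object with a legacy group and one backend group.
    conf = SimpleNamespace(
        glance_store=SimpleNamespace(rbd_store_pool='images'),
        ceph1=SimpleNamespace(rbd_store_pool='images-ssd'),
    )
    assert store_opt(conf, None, 'rbd_store_pool') == 'images'
    assert store_opt(conf, 'ceph1', 'rbd_store_pool') == 'images-ssd'

diff --git a/glance_store/driver.py b/glance_store/driver.py
index 2462fd48..abe9f287 100644
--- a/glance_store/driver.py
+++ b/glance_store/driver.py
@@ -30,13 +30,25 @@ from glance_store.i18n import _
 
 LOG = logging.getLogger(__name__)
 
 
+_MULTI_BACKEND_OPTS = [
+    cfg.StrOpt('store_description',
+               help=_("""
+This option provides descriptive information about the store backend
+to end users. Using the /v2/stores-info call, users can request more
+information on all available backends.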
+ +""")) +] + + class Store(capabilities.StoreCapability): OPTIONS = None + MULTI_BACKEND_OPTIONS = _MULTI_BACKEND_OPTS READ_CHUNKSIZE = 4 * units.Mi # 4M WRITE_CHUNKSIZE = READ_CHUNKSIZE - def __init__(self, conf): + def __init__(self, conf, backend=None): """ Initialize the Store """ @@ -44,11 +56,19 @@ class Store(capabilities.StoreCapability): super(Store, self).__init__() self.conf = conf + self.backend_group = backend self.store_location_class = None try: if self.OPTIONS is not None: - self.conf.register_opts(self.OPTIONS, group='glance_store') + group = 'glance_store' + if self.backend_group: + group = self.backend_group + if self.MULTI_BACKEND_OPTIONS is not None: + self.conf.register_opts( + self.MULTI_BACKEND_OPTIONS, group=group) + + self.conf.register_opts(self.OPTIONS, group=group) except cfg.DuplicateOptError: pass diff --git a/glance_store/location.py b/glance_store/location.py index 42127684..4084179a 100644 --- a/glance_store/location.py +++ b/glance_store/location.py @@ -49,6 +49,7 @@ CONF = cfg.CONF LOG = logging.getLogger(__name__) SCHEME_TO_CLS_MAP = {} +SCHEME_TO_CLS_BACKEND_MAP = {} def get_location_from_uri(uri, conf=CONF): @@ -62,7 +63,7 @@ def get_location_from_uri(uri, conf=CONF): Example URIs: https://user:pass@example.com:80/images/some-id - http://images.oracle.com/123456 + http://example.com/123456 swift://example.com/container/obj-id swift://user:account:pass@authurl.com/container/obj-id swift+http://user:account:pass@authurl.com/container/obj-id @@ -77,6 +78,56 @@ def get_location_from_uri(uri, conf=CONF): conf, uri=uri) +def get_location_from_uri_and_backend(uri, backend, conf=CONF): + """ + Given a URI, return a Location object that has had an appropriate + store parse the URI. + + :param uri: A URI that could come from the end-user in the Location + attribute/header. + :param backend: A backend name for the store. + :param conf: The global configuration. + + Example URIs: + https://user:pass@example.com:80/images/some-id + http://example.com/123456 + swift://example.com/container/obj-id + swift://user:account:pass@authurl.com/container/obj-id + swift+http://user:account:pass@authurl.com/container/obj-id + file:///var/lib/glance/images/1 + cinder://volume-id + """ + + pieces = urllib.parse.urlparse(uri) + + if pieces.scheme not in SCHEME_TO_CLS_BACKEND_MAP.keys(): + raise exceptions.UnknownScheme(scheme=pieces.scheme) + try: + scheme_info = SCHEME_TO_CLS_BACKEND_MAP[pieces.scheme][backend] + except KeyError: + raise exceptions.UnknownScheme(scheme=backend) + + return Location(pieces.scheme, scheme_info['location_class'], + conf, uri=uri) + + +def register_scheme_backend_map(scheme_map): + """ + Given a mapping of 'scheme' to store_name, adds the mapping to the + known list of schemes. + + This function overrides existing stores. + """ + + for (k, v) in scheme_map.items(): + if k not in SCHEME_TO_CLS_BACKEND_MAP: + SCHEME_TO_CLS_BACKEND_MAP[k] = {} + + LOG.debug("Registering scheme %s with %s", k, v) + for key, value in v.items(): + SCHEME_TO_CLS_BACKEND_MAP[k][key] = value + + def register_scheme_map(scheme_map): """ Given a mapping of 'scheme' to store_name, adds the mapping to the diff --git a/glance_store/multi_backend.py b/glance_store/multi_backend.py new file mode 100644 index 00000000..f8b37085 --- /dev/null +++ b/glance_store/multi_backend.py @@ -0,0 +1,438 @@ +# Copyright 2018 RedHat Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging + +from oslo_config import cfg +from oslo_utils import encodeutils +import six +from stevedore import driver +from stevedore import extension + +from glance_store import capabilities +from glance_store import exceptions +from glance_store.i18n import _, _LW +from glance_store import location + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + +_STORE_OPTS = [ + cfg.StrOpt('default_backend', + help=_(""" +The default scheme to use for storing images. + +Provide a string value representing the default scheme to use for +storing images. If not set, Glance API service will fail to start. + +Related Options: + * enabled_backends + +""")), + cfg.IntOpt('store_capabilities_update_min_interval', + default=0, + min=0, + deprecated_for_removal=True, + deprecated_since='Rocky', + deprecated_reason=_(""" +This option configures a stub method that has not been implemented +for any existing store drivers. Hence it is non-operational, and +giving it a value does absolutely nothing. + +This option is scheduled for removal early in the Stein development +cycle. +"""), + help=_(""" +Minimum interval in seconds to execute updating dynamic storage +capabilities based on current backend status. + +Provide an integer value representing time in seconds to set the +minimum interval before an update of dynamic storage capabilities +for a storage backend can be attempted. Setting +``store_capabilities_update_min_interval`` does not mean updates +occur periodically based on the set interval. Rather, the update +is performed at the elapse of this interval set, if an operation +of the store is triggered. + +By default, this option is set to zero and is disabled. Provide an +integer value greater than zero to enable this option. + +NOTE 1: For more information on store capabilities and their updates, +please visit: https://specs.openstack.org/openstack/glance-specs/\ +specs/kilo/store-capabilities.html + +For more information on setting up a particular store in your +deployment and help with the usage of this feature, please contact +the storage driver maintainers listed here: +https://docs.openstack.org/glance_store/latest/user/drivers.html + +NOTE 2: The dynamic store update capability described above is not +implemented by any current store drivers. Thus, this option DOES +NOT DO ANYTHING (and it never has). It is DEPRECATED and scheduled +for removal early in the Stein development cycle. + +Possible values: + * Zero + * Positive integer + +Related Options: + * None + +""")), +] + +_STORE_CFG_GROUP = 'glance_store' + + +def _list_driver_opts(): + driver_opts = {} + mgr = extension.ExtensionManager('glance_store.drivers') + # NOTE(zhiyan): Handle available drivers entry_points provided + # NOTE(nikhil): Return a sorted list of drivers to ensure that the sample + # configuration files generated by oslo config generator retain the order + # in which the config opts appear across different runs. 
If this order of + # config opts is not preserved, some downstream packagers may see a long + # diff of the changes though not relevant as only order has changed. See + # some more details at bug 1619487. + drivers = sorted([ext.name for ext in mgr]) + handled_drivers = [] # Used to handle backwards-compatible entries + for store_entry in drivers: + driver_cls = _load_multi_store(None, store_entry, False) + if driver_cls and driver_cls not in handled_drivers: + if getattr(driver_cls, 'OPTIONS', None) is not None: + driver_opts[store_entry] = driver_cls.OPTIONS + handled_drivers.append(driver_cls) + + # NOTE(zhiyan): This separated approach could list + # store options before all driver ones, which easier + # to read and configure by operator. + return driver_opts + + +def register_store_opts(conf): + LOG.debug("Registering options for group %s" % _STORE_CFG_GROUP) + conf.register_opts(_STORE_OPTS, group=_STORE_CFG_GROUP) + + driver_opts = _list_driver_opts() + enabled_backends = conf.enabled_backends + for backend in enabled_backends: + for opt_list in driver_opts: + if enabled_backends[backend] not in opt_list: + continue + + LOG.debug("Registering options for group %s" % backend) + conf.register_opts(driver_opts[opt_list], group=backend) + + +def _load_multi_store(conf, store_entry, + invoke_load=True, + backend=None): + if backend: + invoke_args = [conf, backend] + else: + invoke_args = [conf] + try: + LOG.debug("Attempting to import store %s", store_entry) + mgr = driver.DriverManager('glance_store.drivers', + store_entry, + invoke_args=invoke_args, + invoke_on_load=invoke_load) + return mgr.driver + except RuntimeError as e: + LOG.warning("Failed to load driver %(driver)s. The " + "driver will be disabled" % dict(driver=str([driver, e]))) + + +def _load_multi_stores(conf): + enabled_backends = conf.enabled_backends + for backend, store_entry in enabled_backends.items(): + try: + # FIXME(flaper87): Don't hide BadStoreConfiguration + # exceptions. These exceptions should be propagated + # to the user of the library. + store_instance = _load_multi_store(conf, store_entry, + backend=backend) + + if not store_instance: + continue + + yield (store_entry, store_instance, backend) + + except exceptions.BadStoreConfiguration: + continue + + +def create_multi_stores(conf=CONF): + """ + Registers all store modules and all schemes + from the given config. + """ + store_count = 0 + scheme_map = {} + for (store_entry, store_instance, + store_identifier) in _load_multi_stores(conf): + try: + schemes = store_instance.get_schemes() + store_instance.configure(re_raise_bsc=False) + except NotImplementedError: + continue + + if not schemes: + raise exceptions.BackendException('Unable to register store %s. ' + 'No schemes associated with it.' 
+                                              % store_entry)
+        else:
+            LOG.debug("Registering store %s with schemes %s",
+                      store_entry, schemes)
+
+            loc_cls = store_instance.get_store_location_class()
+            for scheme in schemes:
+                if scheme not in scheme_map:
+                    scheme_map[scheme] = {}
+                scheme_map[scheme][store_identifier] = {
+                    'store': store_instance,
+                    'location_class': loc_cls,
+                    'store_entry': store_entry
+                }
+                location.register_scheme_backend_map(scheme_map)
+                store_count += 1
+
+    return store_count
+
+
+def verify_store():
+    store_id = CONF.glance_store.default_backend
+    if not store_id:
+        msg = _("'default_backend' config option is not set.")
+        raise RuntimeError(msg)
+
+    try:
+        get_store_from_store_identifier(store_id)
+    except exceptions.UnknownScheme:
+        msg = _("Store for identifier %s not found") % store_id
+        raise RuntimeError(msg)
+
+
+def get_store_from_store_identifier(store_identifier):
+    """
+    Given a store identifier, return the appropriate store object
+    for handling that identifier.
+    """
+    scheme_map = {}
+    enabled_backends = CONF.enabled_backends
+    try:
+        scheme = enabled_backends[store_identifier]
+    except KeyError:
+        msg = _("Store for identifier %s not found") % store_identifier
+        raise exceptions.UnknownScheme(msg)
+
+    if scheme not in location.SCHEME_TO_CLS_BACKEND_MAP:
+        raise exceptions.UnknownScheme(scheme=scheme)
+
+    scheme_info = location.SCHEME_TO_CLS_BACKEND_MAP[scheme][store_identifier]
+    store = scheme_info['store']
+
+    if not store.is_capable(capabilities.BitMasks.DRIVER_REUSABLE):
+        # Driver instance isn't stateless so it can't
+        # be reused safely and needs recreation.
+        store_entry = scheme_info['store_entry']
+        store = _load_multi_store(store.conf, store_entry, invoke_load=True,
+                                  backend=store_identifier)
+        store.configure()
+        try:
+            loc_cls = store.get_store_location_class()
+            for new_scheme in store.get_schemes():
+                if new_scheme not in scheme_map:
+                    scheme_map[new_scheme] = {}
+
+                scheme_map[new_scheme][store_identifier] = {
+                    'store': store,
+                    'location_class': loc_cls,
+                    'store_entry': store_entry
+                }
+                location.register_scheme_backend_map(scheme_map)
+        except NotImplementedError:
+            scheme_info['store'] = store
+
+    return store
+
+
+def add(conf, image_id, data, size, backend, context=None,
+        verifier=None):
+    if not backend:
+        backend = conf.glance_store.default_backend
+
+    store = get_store_from_store_identifier(backend)
+    return store_add_to_backend(image_id, data, size, store, context,
+                                verifier)
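A sketch of the new module-level API in use, assuming the configuration from
the earlier sketch has been applied and create_multi_stores() has run
('fast' remains an invented backend name):

    import io
    import uuid

    from glance_store import multi_backend

    image_id = str(uuid.uuid4())
    data = io.BytesIO(b'*' * 10)

    # With backend=None, add() falls back to [glance_store]/default_backend.
    uri, size, checksum, metadata = multi_backend.add(
        multi_backend.CONF, image_id, data, 10, backend='fast')

    # The returned location metadata records the owning backend.
    assert metadata['backend'] == 'fast'

+
+
+def store_add_to_backend(image_id, data, size, store, context=None,
+                         verifier=None):
+    """
+    A wrapper around a call to each store's add() method. This gives glance
+    a common place to check the output.
+
+    :param image_id: The image ID to which data is being added
+    :param data: The data to be stored
+    :param size: The length of the data in bytes
+    :param store: The store to which the data is being added
+    :param context: The request context
+    :param verifier: An object used to verify signatures for images
+    :return: The url location of the file,
+             the size amount of data,
+             the checksum of the data
+             the storage system's metadata dictionary for the location
+    """
+    (location, size, checksum, metadata) = store.add(image_id,
+                                                     data,
+                                                     size,
+                                                     context=context,
+                                                     verifier=verifier)
+
+    if metadata is not None:
+        if not isinstance(metadata, dict):
+            msg = (_("The storage driver %(driver)s returned invalid "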
+                     "metadata %(metadata)s. This must be a dictionary "
+                     "type") % dict(driver=str(store),
+                                    metadata=str(metadata)))
+            LOG.error(msg)
+            raise exceptions.BackendException(msg)
+        try:
+            check_location_metadata(metadata)
+        except exceptions.BackendException as e:
+            e_msg = (_("A bad metadata structure was returned from the "
+                       "%(driver)s storage driver: %(metadata)s. %(e)s.") %
+                     dict(driver=encodeutils.exception_to_unicode(store),
+                          metadata=encodeutils.exception_to_unicode(metadata),
+                          e=encodeutils.exception_to_unicode(e)))
+            LOG.error(e_msg)
+            raise exceptions.BackendException(e_msg)
+    return (location, size, checksum, metadata)
+
+
+def check_location_metadata(val, key=''):
+    if isinstance(val, dict):
+        for key in val:
+            check_location_metadata(val[key], key=key)
+    elif isinstance(val, list):
+        ndx = 0
+        for v in val:
+            check_location_metadata(v, key='%s[%d]' % (key, ndx))
+            ndx = ndx + 1
+    elif not isinstance(val, six.text_type):
+        raise exceptions.BackendException(_("The image metadata key %(key)s "
+                                            "has an invalid type of %(type)s. "
+                                            "Only dict, list, and unicode are "
+                                            "supported.")
+                                          % dict(key=key, type=type(val)))
+
+
+def delete(uri, backend, context=None):
+    """Removes chunks of data from backend specified by uri."""
+    if backend:
+        loc = location.get_location_from_uri_and_backend(
+            uri, backend, conf=CONF)
+        store = get_store_from_store_identifier(backend)
+        return store.delete(loc, context=context)
+
+    msg = _LW('Backend is not set to image, searching '
+              'all backends based on location URI.')
+    LOG.warn(msg)
+
+    backends = CONF.enabled_backends
+    for backend in backends:
+        try:
+            if not uri.startswith(backends[backend]):
+                continue
+
+            loc = location.get_location_from_uri_and_backend(
+                uri, backend, conf=CONF)
+            store = get_store_from_store_identifier(backend)
+            return store.delete(loc, context=context)
+        except (exceptions.NotFound, exceptions.UnknownScheme):
+            continue
+
+    raise exceptions.NotFound(_("Image not found in any configured backend"))
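Continuing the previous sketch, the deletion path looks like this from a
caller's perspective (the URI and backend name are invented; with
backend=None the library falls back to the URI-prefix search shown above):

    from glance_store import multi_backend

    # Deleting with a known backend goes straight to that store.
    multi_backend.delete('file:///var/lib/images-fast/%s' % image_id,
                         backend='fast')

+
+
+def set_acls_for_multi_store(location_uri, backend, public=False,
+                             read_tenants=[],
+                             write_tenants=None, context=None):
+
+    if write_tenants is None:
+        write_tenants = []
+
+    loc = location.get_location_from_uri_and_backend(
+        location_uri, backend, conf=CONF)
+    store = get_store_from_store_identifier(backend)
+    try:
+        store.set_acls(loc, public=public,
+                       read_tenants=read_tenants,
+                       write_tenants=write_tenants,
+                       context=context)
+    except NotImplementedError:
+        LOG.debug("Skipping store.set_acls...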
not implemented") + + +def get(uri, backend, offset=0, chunk_size=None, context=None): + """Yields chunks of data from backend specified by uri.""" + + if backend: + loc = location.get_location_from_uri_and_backend(uri, backend, + conf=CONF) + store = get_store_from_store_identifier(backend) + + return store.get(loc, offset=offset, + chunk_size=chunk_size, + context=context) + + msg = _LW('Backend is not set to image, searching ' + 'all backends based on location URI.') + LOG.warn(msg) + + backends = CONF.enabled_backends + for backend in backends: + try: + if not uri.startswith(backends[backend]): + continue + + loc = location.get_location_from_uri_and_backend( + uri, backend, conf=CONF) + store = get_store_from_store_identifier(backend) + data, size = store.get(loc, offset=offset, + chunk_size=chunk_size, + context=context) + if data: + return data, size + except (exceptions.NotFound, exceptions.UnknownScheme): + continue + + raise exceptions.NotFound(_("Image not found in any configured backend")) + + +def get_known_schemes_for_multi_store(): + """Returns list of known schemes.""" + return location.SCHEME_TO_CLS_BACKEND_MAP.keys() + + +def get_size_from_uri_and_backend(uri, backend, context=None): + """Retrieves image size from backend specified by uri.""" + + loc = location.get_location_from_uri_and_backend( + uri, backend, conf=CONF) + store = get_store_from_store_identifier(backend) + return store.get_size(loc, context=context) diff --git a/glance_store/tests/base.py b/glance_store/tests/base.py index 15086a1b..977cd814 100644 --- a/glance_store/tests/base.py +++ b/glance_store/tests/base.py @@ -81,3 +81,46 @@ class StoreBaseTest(base.BaseTestCase): 'store_entry': store_entry } location.register_scheme_map(scheme_map) + + +class MultiStoreBaseTest(base.BaseTestCase): + + def copy_data_file(self, file_name, dst_dir): + src_file_name = os.path.join('glance_store/tests/etc', file_name) + shutil.copy(src_file_name, dst_dir) + dst_file_name = os.path.join(dst_dir, file_name) + return dst_file_name + + def config(self, **kw): + """Override some configuration values. + + The keyword arguments are the names of configuration options to + override and their values. + + If a group argument is supplied, the overrides are applied to + the specified configuration option group. + + All overrides are automatically cleared at the end of the current + test by the fixtures cleanup process. + """ + group = kw.pop('group', None) + for k, v in kw.items(): + if group: + self.conf.set_override(k, v, group) + else: + self.conf.set_override(k, v) + + def register_store_backend_schemes(self, store, store_entry, + store_identifier): + schemes = store.get_schemes() + scheme_map = {} + + loc_cls = store.get_store_location_class() + for scheme in schemes: + scheme_map[scheme] = {} + scheme_map[scheme][store_identifier] = { + 'store': store, + 'location_class': loc_cls, + 'store_entry': store_entry + } + location.register_scheme_backend_map(scheme_map) diff --git a/glance_store/tests/unit/test_multistore_filesystem.py b/glance_store/tests/unit/test_multistore_filesystem.py new file mode 100644 index 00000000..abf99c4d --- /dev/null +++ b/glance_store/tests/unit/test_multistore_filesystem.py @@ -0,0 +1,821 @@ +# Copyright 2018 RedHat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests the filesystem backend store""" + +import errno +import hashlib +import json +import mock +import os +import stat +import uuid + +import fixtures +from oslo_config import cfg +from oslo_utils import units +import six +from six.moves import builtins +# NOTE(jokke): simplified transition to py3, behaves like py2 xrange +from six.moves import range + +import glance_store as store +from glance_store._drivers import filesystem +from glance_store import exceptions +from glance_store import location +from glance_store.tests import base +from glance_store.tests.unit import test_store_capabilities + + +class TestMultiStore(base.MultiStoreBaseTest, + test_store_capabilities.TestStoreCapabilitiesChecking): + + # NOTE(flaper87): temporary until we + # can move to a fully-local lib. + # (Swift store's fault) + _CONF = cfg.ConfigOpts() + + def setUp(self): + """Establish a clean test environment.""" + super(TestMultiStore, self).setUp() + enabled_backends = { + "file1": "file", + "file2": "file", + } + self.conf = self._CONF + self.conf(args=[]) + self.conf.register_opt(cfg.DictOpt('enabled_backends')) + self.config(enabled_backends=enabled_backends) + store.register_store_opts(self.conf) + self.config(default_backend='file1', group='glance_store') + + # Ensure stores + locations cleared + location.SCHEME_TO_CLS_BACKEND_MAP = {} + + store.create_multi_stores(self.conf) + self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP', + dict()) + self.test_dir = self.useFixture(fixtures.TempDir()).path + self.addCleanup(self.conf.reset) + + self.orig_chunksize = filesystem.Store.READ_CHUNKSIZE + filesystem.Store.READ_CHUNKSIZE = 10 + self.store = filesystem.Store(self.conf, backend='file1') + self.config(filesystem_store_datadir=self.test_dir, + group="file1") + self.store.configure() + self.register_store_backend_schemes(self.store, 'file', 'file1') + + def tearDown(self): + """Clear the test environment.""" + super(TestMultiStore, self).tearDown() + filesystem.ChunkedFile.CHUNKSIZE = self.orig_chunksize + + def _create_metadata_json_file(self, metadata): + expected_image_id = str(uuid.uuid4()) + jsonfilename = os.path.join(self.test_dir, + "storage_metadata.%s" % expected_image_id) + + self.config(filesystem_store_metadata_file=jsonfilename, + group="file1") + with open(jsonfilename, 'w') as fptr: + json.dump(metadata, fptr) + + def _store_image(self, in_metadata): + expected_image_id = str(uuid.uuid4()) + expected_file_size = 10 + expected_file_contents = b"*" * expected_file_size + image_file = six.BytesIO(expected_file_contents) + self.store.FILESYSTEM_STORE_METADATA = in_metadata + return self.store.add(expected_image_id, image_file, + expected_file_size) + + def test_get(self): + """Test a "normal" retrieval of an image in chunks.""" + # First add an image... 
+ image_id = str(uuid.uuid4()) + file_contents = b"chunk00000remainder" + image_file = six.BytesIO(file_contents) + + loc, size, checksum, metadata = self.store.add( + image_id, image_file, len(file_contents)) + # Check metadata contains 'file1' as a backend + self.assertEqual(u"file1", metadata['backend']) + + # Now read it back... + uri = "file:///%s/%s" % (self.test_dir, image_id) + loc = location.get_location_from_uri_and_backend(uri, 'file1', + conf=self.conf) + (image_file, image_size) = self.store.get(loc) + + expected_data = b"chunk00000remainder" + expected_num_chunks = 2 + data = b"" + num_chunks = 0 + + for chunk in image_file: + num_chunks += 1 + data += chunk + self.assertEqual(expected_data, data) + self.assertEqual(expected_num_chunks, num_chunks) + + def test_get_random_access(self): + """Test a "normal" retrieval of an image in chunks.""" + # First add an image... + image_id = str(uuid.uuid4()) + file_contents = b"chunk00000remainder" + image_file = six.BytesIO(file_contents) + + loc, size, checksum, metadata = self.store.add(image_id, + image_file, + len(file_contents)) + # Check metadata contains 'file1' as a backend + self.assertEqual(u"file1", metadata['backend']) + + # Now read it back... + uri = "file:///%s/%s" % (self.test_dir, image_id) + loc = location.get_location_from_uri_and_backend(uri, 'file1', + conf=self.conf) + + data = b"" + for offset in range(len(file_contents)): + (image_file, image_size) = self.store.get(loc, + offset=offset, + chunk_size=1) + for chunk in image_file: + data += chunk + + self.assertEqual(file_contents, data) + + data = b"" + chunk_size = 5 + (image_file, image_size) = self.store.get(loc, + offset=chunk_size, + chunk_size=chunk_size) + for chunk in image_file: + data += chunk + + self.assertEqual(b'00000', data) + self.assertEqual(chunk_size, image_size) + + def test_get_non_existing(self): + """ + Test that trying to retrieve a file that doesn't exist + raises an error + """ + loc = location.get_location_from_uri_and_backend( + "file:///%s/non-existing" % self.test_dir, 'file1', conf=self.conf) + self.assertRaises(exceptions.NotFound, + self.store.get, + loc) + + def test_get_non_existing_identifier(self): + """ + Test that trying to retrieve a store that doesn't exist + raises an error + """ + self.assertRaises(exceptions.UnknownScheme, + location.get_location_from_uri_and_backend, + "file:///%s/non-existing" % self.test_dir, + 'file3', conf=self.conf) + + def test_add(self): + """Test that we can add an image via the filesystem backend.""" + filesystem.ChunkedFile.CHUNKSIZE = units.Ki + expected_image_id = str(uuid.uuid4()) + expected_file_size = 5 * units.Ki # 5K + expected_file_contents = b"*" * expected_file_size + expected_checksum = hashlib.md5(expected_file_contents).hexdigest() + expected_location = "file://%s/%s" % (self.test_dir, + expected_image_id) + image_file = six.BytesIO(expected_file_contents) + + loc, size, checksum, metadata = self.store.add(expected_image_id, + image_file, + expected_file_size) + + self.assertEqual(expected_location, loc) + self.assertEqual(expected_file_size, size) + self.assertEqual(expected_checksum, checksum) + self.assertEqual(u"file1", metadata['backend']) + + uri = "file:///%s/%s" % (self.test_dir, expected_image_id) + loc = location.get_location_from_uri_and_backend( + uri, 'file1', conf=self.conf) + (new_image_file, new_image_size) = self.store.get(loc) + new_image_contents = b"" + new_image_file_size = 0 + + for chunk in new_image_file: + new_image_file_size += len(chunk) + 
new_image_contents += chunk
+
+        self.assertEqual(expected_file_contents, new_image_contents)
+        self.assertEqual(expected_file_size, new_image_file_size)
+
+    def test_add_to_different_backend(self):
+        """Test that we can add an image via a second filesystem backend."""
+        self.store = filesystem.Store(self.conf, backend='file2')
+        self.config(filesystem_store_datadir=self.test_dir,
+                    group="file2")
+        self.store.configure()
+        self.register_store_backend_schemes(self.store, 'file', 'file2')
+
+        filesystem.ChunkedFile.CHUNKSIZE = units.Ki
+        expected_image_id = str(uuid.uuid4())
+        expected_file_size = 5 * units.Ki  # 5K
+        expected_file_contents = b"*" * expected_file_size
+        expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
+        expected_location = "file://%s/%s" % (self.test_dir,
+                                              expected_image_id)
+        image_file = six.BytesIO(expected_file_contents)
+
+        loc, size, checksum, metadata = self.store.add(expected_image_id,
+                                                       image_file,
+                                                       expected_file_size)
+
+        self.assertEqual(expected_location, loc)
+        self.assertEqual(expected_file_size, size)
+        self.assertEqual(expected_checksum, checksum)
+        self.assertEqual(u"file2", metadata['backend'])
+
+        uri = "file:///%s/%s" % (self.test_dir, expected_image_id)
+        loc = location.get_location_from_uri_and_backend(
+            uri, 'file2', conf=self.conf)
+        (new_image_file, new_image_size) = self.store.get(loc)
+        new_image_contents = b""
+        new_image_file_size = 0
+
+        for chunk in new_image_file:
+            new_image_file_size += len(chunk)
+            new_image_contents += chunk
+
+        self.assertEqual(expected_file_contents, new_image_contents)
+        self.assertEqual(expected_file_size, new_image_file_size)
+
+    def test_add_check_metadata_with_invalid_mountpoint_location(self):
+        in_metadata = [{'id': 'abcdefg',
+                        'mountpoint': '/xyz/images'}]
+        location, size, checksum, metadata = self._store_image(in_metadata)
+        self.assertEqual({'backend': u'file1'}, metadata)
+
+    def test_add_check_metadata_list_with_invalid_mountpoint_locations(self):
+        in_metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
+                       {'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
+        location, size, checksum, metadata = self._store_image(in_metadata)
+        self.assertEqual({'backend': u'file1'}, metadata)
+
+    def test_add_check_metadata_list_with_valid_mountpoint_locations(self):
+        in_metadata = [{'id': 'abcdefg', 'mountpoint': '/tmp'},
+                       {'id': 'xyz1234', 'mountpoint': '/xyz'}]
+        location, size, checksum, metadata = self._store_image(in_metadata)
+        self.assertEqual(in_metadata[0], metadata)
+        self.assertEqual(u"file1", metadata["backend"])
+
+    def test_add_check_metadata_bad_nosuch_file(self):
+        expected_image_id = str(uuid.uuid4())
+        jsonfilename = os.path.join(self.test_dir,
+                                    "storage_metadata.%s" % expected_image_id)
+
+        self.config(filesystem_store_metadata_file=jsonfilename,
+                    group="file1")
+        expected_file_size = 10
+        expected_file_contents = b"*" * expected_file_size
+        image_file = six.BytesIO(expected_file_contents)
+
+        location, size, checksum, metadata = self.store.add(expected_image_id,
+                                                            image_file,
+                                                            expected_file_size)
+
+        self.assertEqual({'backend': u'file1'}, metadata)
+
+    def test_add_already_existing(self):
+        """
+        Tests that adding an image with an existing identifier
+        raises an appropriate exception
+        """
+        filesystem.ChunkedFile.CHUNKSIZE = units.Ki
+        image_id = str(uuid.uuid4())
+        file_size = 5 * units.Ki  # 5K
+        file_contents = b"*" * file_size
+        image_file = six.BytesIO(file_contents)
+
+        location, size, checksum, metadata = self.store.add(image_id,
+                                                            image_file,
+                                                            file_size)
+
self.assertEqual(u"file1", metadata["backend"]) + + image_file = six.BytesIO(b"nevergonnamakeit") + self.assertRaises(exceptions.Duplicate, + self.store.add, + image_id, image_file, 0) + + def _do_test_add_write_failure(self, errno, exception): + filesystem.ChunkedFile.CHUNKSIZE = units.Ki + image_id = str(uuid.uuid4()) + file_size = 5 * units.Ki # 5K + file_contents = b"*" * file_size + path = os.path.join(self.test_dir, image_id) + image_file = six.BytesIO(file_contents) + + with mock.patch.object(builtins, 'open') as popen: + e = IOError() + e.errno = errno + popen.side_effect = e + + self.assertRaises(exception, + self.store.add, + image_id, image_file, 0) + self.assertFalse(os.path.exists(path)) + + def test_add_storage_full(self): + """ + Tests that adding an image without enough space on disk + raises an appropriate exception + """ + self._do_test_add_write_failure(errno.ENOSPC, exceptions.StorageFull) + + def test_add_file_too_big(self): + """ + Tests that adding an excessively large image file + raises an appropriate exception + """ + self._do_test_add_write_failure(errno.EFBIG, exceptions.StorageFull) + + def test_add_storage_write_denied(self): + """ + Tests that adding an image with insufficient filestore permissions + raises an appropriate exception + """ + self._do_test_add_write_failure(errno.EACCES, + exceptions.StorageWriteDenied) + + def test_add_other_failure(self): + """ + Tests that a non-space-related IOError does not raise a + StorageFull exceptions. + """ + self._do_test_add_write_failure(errno.ENOTDIR, IOError) + + def test_add_cleanup_on_read_failure(self): + """ + Tests the partial image file is cleaned up after a read + failure. + """ + filesystem.ChunkedFile.CHUNKSIZE = units.Ki + image_id = str(uuid.uuid4()) + file_size = 5 * units.Ki # 5K + file_contents = b"*" * file_size + path = os.path.join(self.test_dir, image_id) + image_file = six.BytesIO(file_contents) + + def fake_Error(size): + raise AttributeError() + + with mock.patch.object(image_file, 'read') as mock_read: + mock_read.side_effect = fake_Error + + self.assertRaises(AttributeError, + self.store.add, + image_id, image_file, 0) + self.assertFalse(os.path.exists(path)) + + def test_delete(self): + """ + Test we can delete an existing image in the filesystem store + """ + # First add an image + image_id = str(uuid.uuid4()) + file_size = 5 * units.Ki # 5K + file_contents = b"*" * file_size + image_file = six.BytesIO(file_contents) + + loc, size, checksum, metadata = self.store.add(image_id, + image_file, + file_size) + self.assertEqual(u"file1", metadata["backend"]) + + # Now check that we can delete it + uri = "file:///%s/%s" % (self.test_dir, image_id) + loc = location.get_location_from_uri_and_backend(uri, "file1", + conf=self.conf) + self.store.delete(loc) + + self.assertRaises(exceptions.NotFound, self.store.get, loc) + + def test_delete_non_existing(self): + """ + Test that trying to delete a file that doesn't exist + raises an error + """ + loc = location.get_location_from_uri_and_backend( + "file:///tmp/glance-tests/non-existing", "file1", conf=self.conf) + self.assertRaises(exceptions.NotFound, + self.store.delete, + loc) + + def test_delete_forbidden(self): + """ + Tests that trying to delete a file without permissions + raises the correct error + """ + # First add an image + image_id = str(uuid.uuid4()) + file_size = 5 * units.Ki # 5K + file_contents = b"*" * file_size + image_file = six.BytesIO(file_contents) + + loc, size, checksum, metadata = self.store.add(image_id, + image_file, + 
file_size) + self.assertEqual(u"file1", metadata["backend"]) + + uri = "file:///%s/%s" % (self.test_dir, image_id) + loc = location.get_location_from_uri_and_backend(uri, "file1", + conf=self.conf) + + # Mock unlink to raise an OSError for lack of permissions + # and make sure we can't delete the image + with mock.patch.object(os, 'unlink') as unlink: + e = OSError() + e.errno = errno + unlink.side_effect = e + + self.assertRaises(exceptions.Forbidden, + self.store.delete, + loc) + + # Make sure the image didn't get deleted + loc = location.get_location_from_uri_and_backend(uri, "file1", + conf=self.conf) + self.store.get(loc) + + def test_configure_add_with_multi_datadirs(self): + """ + Tests multiple filesystem specified by filesystem_store_datadirs + are parsed correctly. + """ + store_map = [self.useFixture(fixtures.TempDir()).path, + self.useFixture(fixtures.TempDir()).path] + self.conf.set_override('filesystem_store_datadir', + override=None, + group='file1') + self.conf.set_override('filesystem_store_datadirs', + [store_map[0] + ":100", + store_map[1] + ":200"], + group='file1') + self.store.configure_add() + + expected_priority_map = {100: [store_map[0]], 200: [store_map[1]]} + expected_priority_list = [200, 100] + self.assertEqual(expected_priority_map, self.store.priority_data_map) + self.assertEqual(expected_priority_list, self.store.priority_list) + + def test_configure_add_with_metadata_file_success(self): + metadata = {'id': 'asdf1234', + 'mountpoint': '/tmp'} + self._create_metadata_json_file(metadata) + self.store.configure_add() + self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA) + + def test_configure_add_check_metadata_list_of_dicts_success(self): + metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'}, + {'id': 'xyz1234', 'mountpoint': '/tmp/'}] + self._create_metadata_json_file(metadata) + self.store.configure_add() + self.assertEqual(metadata, self.store.FILESYSTEM_STORE_METADATA) + + def test_configure_add_check_metadata_success_list_val_for_some_key(self): + metadata = {'akey': ['value1', 'value2'], 'id': 'asdf1234', + 'mountpoint': '/tmp'} + self._create_metadata_json_file(metadata) + self.store.configure_add() + self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA) + + def test_configure_add_check_metadata_bad_data(self): + metadata = {'akey': 10, 'id': 'asdf1234', + 'mountpoint': '/tmp'} # only unicode is allowed + self._create_metadata_json_file(metadata) + self.assertRaises(exceptions.BadStoreConfiguration, + self.store.configure_add) + + def test_configure_add_check_metadata_with_no_id_or_mountpoint(self): + metadata = {'mountpoint': '/tmp'} + self._create_metadata_json_file(metadata) + self.assertRaises(exceptions.BadStoreConfiguration, + self.store.configure_add) + + metadata = {'id': 'asdfg1234'} + self._create_metadata_json_file(metadata) + self.assertRaises(exceptions.BadStoreConfiguration, + self.store.configure_add) + + def test_configure_add_check_metadata_id_or_mountpoint_is_not_string(self): + metadata = {'id': 10, 'mountpoint': '/tmp'} + self._create_metadata_json_file(metadata) + self.assertRaises(exceptions.BadStoreConfiguration, + self.store.configure_add) + + metadata = {'id': 'asdf1234', 'mountpoint': 12345} + self._create_metadata_json_file(metadata) + self.assertRaises(exceptions.BadStoreConfiguration, + self.store.configure_add) + + def test_configure_add_check_metadata_list_with_no_id_or_mountpoint(self): + metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'}, + {'mountpoint': '/pqr/images'}] + 
self._create_metadata_json_file(metadata) + self.assertRaises(exceptions.BadStoreConfiguration, + self.store.configure_add) + + metadata = [{'id': 'abcdefg'}, + {'id': 'xyz1234', 'mountpoint': '/pqr/images'}] + self._create_metadata_json_file(metadata) + self.assertRaises(exceptions.BadStoreConfiguration, + self.store.configure_add) + + def test_add_check_metadata_list_id_or_mountpoint_is_not_string(self): + metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'}, + {'id': 1234, 'mountpoint': '/pqr/images'}] + self._create_metadata_json_file(metadata) + self.assertRaises(exceptions.BadStoreConfiguration, + self.store.configure_add) + + metadata = [{'id': 'abcdefg', 'mountpoint': 1234}, + {'id': 'xyz1234', 'mountpoint': '/pqr/images'}] + self._create_metadata_json_file(metadata) + self.assertRaises(exceptions.BadStoreConfiguration, + self.store.configure_add) + + def test_configure_add_same_dir_multiple_times(self): + """ + Tests BadStoreConfiguration exception is raised if same directory + is specified multiple times in filesystem_store_datadirs. + """ + store_map = [self.useFixture(fixtures.TempDir()).path, + self.useFixture(fixtures.TempDir()).path] + self.conf.clear_override('filesystem_store_datadir', + group='file1') + self.conf.set_override('filesystem_store_datadirs', + [store_map[0] + ":100", + store_map[1] + ":200", + store_map[0] + ":300"], + group='file1') + self.assertRaises(exceptions.BadStoreConfiguration, + self.store.configure_add) + + def test_configure_add_same_dir_multiple_times_same_priority(self): + """ + Tests BadStoreConfiguration exception is raised if same directory + is specified multiple times in filesystem_store_datadirs. + """ + store_map = [self.useFixture(fixtures.TempDir()).path, + self.useFixture(fixtures.TempDir()).path] + self.conf.set_override('filesystem_store_datadir', + override=None, + group='file1') + self.conf.set_override('filesystem_store_datadirs', + [store_map[0] + ":100", + store_map[1] + ":200", + store_map[0] + ":100"], + group='file1') + try: + self.store.configure() + except exceptions.BadStoreConfiguration: + self.fail("configure() raised BadStoreConfiguration unexpectedly!") + + # Test that we can add an image via the filesystem backend + filesystem.ChunkedFile.CHUNKSIZE = 1024 + expected_image_id = str(uuid.uuid4()) + expected_file_size = 5 * units.Ki # 5K + expected_file_contents = b"*" * expected_file_size + expected_checksum = hashlib.md5(expected_file_contents).hexdigest() + expected_location = "file://%s/%s" % (store_map[1], + expected_image_id) + image_file = six.BytesIO(expected_file_contents) + + loc, size, checksum, metadata = self.store.add(expected_image_id, + image_file, + expected_file_size) + self.assertEqual(u"file1", metadata["backend"]) + + self.assertEqual(expected_location, loc) + self.assertEqual(expected_file_size, size) + self.assertEqual(expected_checksum, checksum) + + loc = location.get_location_from_uri_and_backend( + expected_location, "file1", conf=self.conf) + (new_image_file, new_image_size) = self.store.get(loc) + new_image_contents = b"" + new_image_file_size = 0 + + for chunk in new_image_file: + new_image_file_size += len(chunk) + new_image_contents += chunk + + self.assertEqual(expected_file_contents, new_image_contents) + self.assertEqual(expected_file_size, new_image_file_size) + + def test_add_with_multiple_dirs(self): + """Test adding multiple filesystem directories.""" + store_map = [self.useFixture(fixtures.TempDir()).path, + self.useFixture(fixtures.TempDir()).path] + 
self.conf.set_override('filesystem_store_datadir', + override=None, + group='file1') + + self.conf.set_override('filesystem_store_datadirs', + [store_map[0] + ":100", + store_map[1] + ":200"], + group='file1') + + self.store.configure() + + # Test that we can add an image via the filesystem backend + filesystem.ChunkedFile.CHUNKSIZE = units.Ki + expected_image_id = str(uuid.uuid4()) + expected_file_size = 5 * units.Ki # 5K + expected_file_contents = b"*" * expected_file_size + expected_checksum = hashlib.md5(expected_file_contents).hexdigest() + expected_location = "file://%s/%s" % (store_map[1], + expected_image_id) + image_file = six.BytesIO(expected_file_contents) + + loc, size, checksum, metadata = self.store.add(expected_image_id, + image_file, + expected_file_size) + self.assertEqual(u"file1", metadata["backend"]) + + self.assertEqual(expected_location, loc) + self.assertEqual(expected_file_size, size) + self.assertEqual(expected_checksum, checksum) + + loc = location.get_location_from_uri_and_backend( + expected_location, "file1", conf=self.conf) + (new_image_file, new_image_size) = self.store.get(loc) + new_image_contents = b"" + new_image_file_size = 0 + + for chunk in new_image_file: + new_image_file_size += len(chunk) + new_image_contents += chunk + + self.assertEqual(expected_file_contents, new_image_contents) + self.assertEqual(expected_file_size, new_image_file_size) + + def test_add_with_multiple_dirs_storage_full(self): + """ + Test StorageFull exception is raised if no filesystem directory + is found that can store an image. + """ + store_map = [self.useFixture(fixtures.TempDir()).path, + self.useFixture(fixtures.TempDir()).path] + self.conf.set_override('filesystem_store_datadir', + override=None, + group='file1') + self.conf.set_override('filesystem_store_datadirs', + [store_map[0] + ":100", + store_map[1] + ":200"], + group='file1') + + self.store.configure_add() + + def fake_get_capacity_info(mount_point): + return 0 + + with mock.patch.object(self.store, '_get_capacity_info') as capacity: + capacity.return_value = 0 + + filesystem.ChunkedFile.CHUNKSIZE = units.Ki + expected_image_id = str(uuid.uuid4()) + expected_file_size = 5 * units.Ki # 5K + expected_file_contents = b"*" * expected_file_size + image_file = six.BytesIO(expected_file_contents) + + self.assertRaises(exceptions.StorageFull, self.store.add, + expected_image_id, image_file, + expected_file_size) + + def test_configure_add_with_file_perm(self): + """ + Tests filesystem specified by filesystem_store_file_perm + are parsed correctly. + """ + store = self.useFixture(fixtures.TempDir()).path + self.conf.set_override('filesystem_store_datadir', store, + group='file1') + self.conf.set_override('filesystem_store_file_perm', 700, # -rwx------ + group='file1') + self.store.configure_add() + self.assertEqual(self.store.datadir, store) + + def test_configure_add_with_unaccessible_file_perm(self): + """ + Tests BadStoreConfiguration exception is raised if an invalid + file permission specified in filesystem_store_file_perm. + """ + store = self.useFixture(fixtures.TempDir()).path + self.conf.set_override('filesystem_store_datadir', store, + group='file1') + self.conf.set_override('filesystem_store_file_perm', 7, # -------rwx + group='file1') + self.assertRaises(exceptions.BadStoreConfiguration, + self.store.configure_add) + + def test_add_with_file_perm_for_group_other_users_access(self): + """ + Test that we can add an image via the filesystem backend with a + required image file permission. 
+ """ + store = self.useFixture(fixtures.TempDir()).path + self.conf.set_override('filesystem_store_datadir', store, + group='file1') + self.conf.set_override('filesystem_store_file_perm', 744, # -rwxr--r-- + group='file1') + + # -rwx------ + os.chmod(store, 0o700) + self.assertEqual(0o700, stat.S_IMODE(os.stat(store)[stat.ST_MODE])) + + self.store.configure_add() + + filesystem.Store.WRITE_CHUNKSIZE = units.Ki + expected_image_id = str(uuid.uuid4()) + expected_file_size = 5 * units.Ki # 5K + expected_file_contents = b"*" * expected_file_size + expected_checksum = hashlib.md5(expected_file_contents).hexdigest() + expected_location = "file://%s/%s" % (store, + expected_image_id) + image_file = six.BytesIO(expected_file_contents) + + location, size, checksum, metadata = self.store.add(expected_image_id, + image_file, + expected_file_size) + self.assertEqual(u"file1", metadata["backend"]) + + self.assertEqual(expected_location, location) + self.assertEqual(expected_file_size, size) + self.assertEqual(expected_checksum, checksum) + + # -rwx--x--x for store directory + self.assertEqual(0o711, stat.S_IMODE(os.stat(store)[stat.ST_MODE])) + # -rwxr--r-- for image file + mode = os.stat(expected_location[len('file:/'):])[stat.ST_MODE] + perm = int(str(getattr(self.conf, + "file1").filesystem_store_file_perm), 8) + self.assertEqual(perm, stat.S_IMODE(mode)) + + def test_add_with_file_perm_for_owner_users_access(self): + """ + Test that we can add an image via the filesystem backend with a + required image file permission. + """ + store = self.useFixture(fixtures.TempDir()).path + self.conf.set_override('filesystem_store_datadir', store, + group='file1') + self.conf.set_override('filesystem_store_file_perm', 600, # -rw------- + group='file1') + + # -rwx------ + os.chmod(store, 0o700) + self.assertEqual(0o700, stat.S_IMODE(os.stat(store)[stat.ST_MODE])) + + self.store.configure_add() + + filesystem.Store.WRITE_CHUNKSIZE = units.Ki + expected_image_id = str(uuid.uuid4()) + expected_file_size = 5 * units.Ki # 5K + expected_file_contents = b"*" * expected_file_size + expected_checksum = hashlib.md5(expected_file_contents).hexdigest() + expected_location = "file://%s/%s" % (store, + expected_image_id) + image_file = six.BytesIO(expected_file_contents) + + location, size, checksum, metadata = self.store.add(expected_image_id, + image_file, + expected_file_size) + self.assertEqual(u"file1", metadata["backend"]) + + self.assertEqual(expected_location, location) + self.assertEqual(expected_file_size, size) + self.assertEqual(expected_checksum, checksum) + + # -rwx------ for store directory + self.assertEqual(0o700, stat.S_IMODE(os.stat(store)[stat.ST_MODE])) + # -rw------- for image file + mode = os.stat(expected_location[len('file:/'):])[stat.ST_MODE] + perm = int(str(getattr(self.conf, + "file1").filesystem_store_file_perm), 8) + self.assertEqual(perm, stat.S_IMODE(mode)) diff --git a/glance_store/tests/unit/test_multistore_rbd.py b/glance_store/tests/unit/test_multistore_rbd.py new file mode 100644 index 00000000..492c64b1 --- /dev/null +++ b/glance_store/tests/unit/test_multistore_rbd.py @@ -0,0 +1,467 @@ +# Copyright 2018 RedHat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg +from oslo_utils import units +import six + +import glance_store as store +from glance_store._drivers import rbd as rbd_store +from glance_store import exceptions +from glance_store import location as g_location +from glance_store.tests import base +from glance_store.tests.unit import test_store_capabilities + + +class TestException(Exception): + pass + + +class MockRados(object): + + class Error(Exception): + pass + + class ioctx(object): + def __init__(self, *args, **kwargs): + pass + + def __enter__(self, *args, **kwargs): + return self + + def __exit__(self, *args, **kwargs): + return False + + def close(self, *args, **kwargs): + pass + + class Rados(object): + + def __init__(self, *args, **kwargs): + pass + + def __enter__(self, *args, **kwargs): + return self + + def __exit__(self, *args, **kwargs): + return False + + def connect(self, *args, **kwargs): + pass + + def open_ioctx(self, *args, **kwargs): + return MockRados.ioctx() + + def shutdown(self, *args, **kwargs): + pass + + def conf_get(self, *args, **kwargs): + pass + + +class MockRBD(object): + + class ImageExists(Exception): + pass + + class ImageHasSnapshots(Exception): + pass + + class ImageBusy(Exception): + pass + + class ImageNotFound(Exception): + pass + + class InvalidArgument(Exception): + pass + + class Image(object): + + def __init__(self, *args, **kwargs): + pass + + def __enter__(self, *args, **kwargs): + return self + + def __exit__(self, *args, **kwargs): + pass + + def create_snap(self, *args, **kwargs): + pass + + def remove_snap(self, *args, **kwargs): + pass + + def protect_snap(self, *args, **kwargs): + pass + + def unprotect_snap(self, *args, **kwargs): + pass + + def read(self, *args, **kwargs): + raise NotImplementedError() + + def write(self, *args, **kwargs): + raise NotImplementedError() + + def resize(self, *args, **kwargs): + raise NotImplementedError() + + def discard(self, offset, length): + raise NotImplementedError() + + def close(self): + pass + + def list_snaps(self): + raise NotImplementedError() + + def parent_info(self): + raise NotImplementedError() + + def size(self): + raise NotImplementedError() + + class RBD(object): + + def __init__(self, *args, **kwargs): + pass + + def __enter__(self, *args, **kwargs): + return self + + def __exit__(self, *args, **kwargs): + return False + + def create(self, *args, **kwargs): + pass + + def remove(self, *args, **kwargs): + pass + + def list(self, *args, **kwargs): + raise NotImplementedError() + + def clone(self, *args, **kwargs): + raise NotImplementedError() + + RBD_FEATURE_LAYERING = 1 + + +class TestMultiStore(base.MultiStoreBaseTest, + test_store_capabilities.TestStoreCapabilitiesChecking): + + # NOTE(flaper87): temporary until we + # can move to a fully-local lib. 
+    # (Swift store's fault)
+    _CONF = cfg.ConfigOpts()
+
+    def setUp(self):
+        """Establish a clean test environment."""
+        super(TestMultiStore, self).setUp()
+        enabled_backends = {
+            "ceph1": "rbd",
+            "ceph2": "rbd"
+        }
+        self.conf = self._CONF
+        self.conf(args=[])
+        self.conf.register_opt(cfg.DictOpt('enabled_backends'))
+        self.config(enabled_backends=enabled_backends)
+        store.register_store_opts(self.conf)
+        self.config(default_backend='ceph1', group='glance_store')
+
+        # Ensure stores + locations cleared
+        g_location.SCHEME_TO_CLS_BACKEND_MAP = {}
+
+        store.create_multi_stores(self.conf)
+        self.addCleanup(setattr, g_location, 'SCHEME_TO_CLS_BACKEND_MAP',
+                        dict())
+        self.addCleanup(self.conf.reset)
+
+        rbd_store.rados = MockRados
+        rbd_store.rbd = MockRBD
+
+        self.store = rbd_store.Store(self.conf, backend="ceph1")
+        self.store.configure()
+        self.store.chunk_size = 2
+        self.called_commands_actual = []
+        self.called_commands_expected = []
+        self.store_specs = {'pool': 'fake_pool',
+                            'image': 'fake_image',
+                            'snapshot': 'fake_snapshot'}
+        self.location = rbd_store.StoreLocation(self.store_specs,
+                                                self.conf)
+        # Provide enough data to get more than one chunk iteration.
+        self.data_len = 3 * units.Ki
+        self.data_iter = six.BytesIO(b'*' * self.data_len)
+
+    def test_add_w_image_size_zero(self):
+        """Assert that correct size is returned even though 0 was provided."""
+        self.store.chunk_size = units.Ki
+        with mock.patch.object(rbd_store.rbd.Image, 'resize') as resize:
+            with mock.patch.object(rbd_store.rbd.Image, 'write') as write:
+                ret = self.store.add('fake_image_id', self.data_iter, 0)
+
+                self.assertTrue(resize.called)
+                self.assertTrue(write.called)
+                self.assertEqual(self.data_len, ret[1])
+                self.assertEqual("ceph1", ret[3]['backend'])
+
+    def test_add_w_image_size_zero_to_different_backend(self):
+        """Assert that correct size is returned even though 0 was provided
+        and that the image goes to the explicitly requested backend.
+        """
+        self.store = rbd_store.Store(self.conf, backend="ceph2")
+        self.store.configure()
+        self.called_commands_actual = []
+        self.called_commands_expected = []
+        self.store_specs = {'pool': 'fake_pool_1',
+                            'image': 'fake_image_1',
+                            'snapshot': 'fake_snapshot_1'}
+        self.location = rbd_store.StoreLocation(self.store_specs,
+                                                self.conf)
+        # Provide enough data to get more than one chunk iteration.
+        self.data_len = 3 * units.Ki
+        self.data_iter = six.BytesIO(b'*' * self.data_len)
+        self.store.chunk_size = units.Ki
+        with mock.patch.object(rbd_store.rbd.Image, 'resize') as resize:
+            with mock.patch.object(rbd_store.rbd.Image, 'write') as write:
+                ret = self.store.add('fake_image_id', self.data_iter, 0)
+
+                self.assertTrue(resize.called)
+                self.assertTrue(write.called)
+                self.assertEqual(self.data_len, ret[1])
+                self.assertEqual("ceph2", ret[3]['backend'])
+
+    @mock.patch.object(MockRBD.Image, '__enter__')
+    @mock.patch.object(rbd_store.Store, '_create_image')
+    @mock.patch.object(rbd_store.Store, '_delete_image')
+    def test_add_w_rbd_image_exception(self, delete, create, enter):
+        def _fake_create_image(*args, **kwargs):
+            self.called_commands_actual.append('create')
+            return self.location
+
+        def _fake_delete_image(target_pool, image_name, snapshot_name=None):
+            self.assertEqual(self.location.pool, target_pool)
+            self.assertEqual(self.location.image, image_name)
+            self.assertEqual(self.location.snapshot, snapshot_name)
+            self.called_commands_actual.append('delete')
+
+        def _fake_enter(*args, **kwargs):
+            raise exceptions.NotFound(image="fake_image_id")
+
+        create.side_effect = _fake_create_image
+        delete.side_effect = _fake_delete_image
+        enter.side_effect = _fake_enter
+
+        self.assertRaises(exceptions.NotFound, self.store.add,
+                          'fake_image_id', self.data_iter, self.data_len)
+
+        self.called_commands_expected = ['create', 'delete']
+
+    def test_add_duplicate_image(self):
+
+        def _fake_create_image(*args, **kwargs):
+            self.called_commands_actual.append('create')
+            raise MockRBD.ImageExists()
+
+        with mock.patch.object(self.store, '_create_image') as create_image:
+            create_image.side_effect = _fake_create_image
+
+            self.assertRaises(exceptions.Duplicate, self.store.add,
+                              'fake_image_id', self.data_iter, self.data_len)
+            self.called_commands_expected = ['create']
+
+    def test_delete(self):
+        def _fake_remove(*args, **kwargs):
+            self.called_commands_actual.append('remove')
+
+        with mock.patch.object(MockRBD.RBD, 'remove') as remove_image:
+            remove_image.side_effect = _fake_remove
+
+            self.store.delete(g_location.Location('test_rbd_store',
+                                                  rbd_store.StoreLocation,
+                                                  self.conf,
+                                                  uri=self.location.get_uri()))
+            self.called_commands_expected = ['remove']
+
+    def test_delete_image(self):
+        def _fake_remove(*args, **kwargs):
+            self.called_commands_actual.append('remove')
+
+        with mock.patch.object(MockRBD.RBD, 'remove') as remove_image:
+            remove_image.side_effect = _fake_remove
+
+            self.store._delete_image('fake_pool', self.location.image)
+            self.called_commands_expected = ['remove']
+
+    def test_delete_image_exc_image_not_found(self):
+        def _fake_remove(*args, **kwargs):
+            self.called_commands_actual.append('remove')
+            raise MockRBD.ImageNotFound()
+
+        with mock.patch.object(MockRBD.RBD, 'remove') as remove:
+            remove.side_effect = _fake_remove
+            self.assertRaises(exceptions.NotFound, self.store._delete_image,
+                              'fake_pool', self.location.image)
+
+            self.called_commands_expected = ['remove']
+
+    @mock.patch.object(MockRBD.RBD, 'remove')
+    @mock.patch.object(MockRBD.Image, 'remove_snap')
+    @mock.patch.object(MockRBD.Image, 'unprotect_snap')
+    def test_delete_image_w_snap(self, unprotect, remove_snap, remove):
+        def _fake_unprotect_snap(*args, **kwargs):
+            self.called_commands_actual.append('unprotect_snap')
+
+        def _fake_remove_snap(*args, **kwargs):
+            self.called_commands_actual.append('remove_snap')
+
+        def _fake_remove(*args, **kwargs):
+            self.called_commands_actual.append('remove')
+
+        remove.side_effect = _fake_remove
+        unprotect.side_effect = _fake_unprotect_snap
+        remove_snap.side_effect = _fake_remove_snap
+        self.store._delete_image('fake_pool', self.location.image,
+                                 snapshot_name='snap')
+
+        self.called_commands_expected = ['unprotect_snap', 'remove_snap',
+                                         'remove']
+
+    @mock.patch.object(MockRBD.RBD, 'remove')
+    @mock.patch.object(MockRBD.Image, 'remove_snap')
+    @mock.patch.object(MockRBD.Image, 'unprotect_snap')
+    def test_delete_image_w_unprotected_snap(self, unprotect, remove_snap,
+                                             remove):
+        def _fake_unprotect_snap(*args, **kwargs):
+            self.called_commands_actual.append('unprotect_snap')
+            raise MockRBD.InvalidArgument()
+
+        def _fake_remove_snap(*args, **kwargs):
+            self.called_commands_actual.append('remove_snap')
+
+        def _fake_remove(*args, **kwargs):
+            self.called_commands_actual.append('remove')
+
+        remove.side_effect = _fake_remove
+        unprotect.side_effect = _fake_unprotect_snap
+        remove_snap.side_effect = _fake_remove_snap
+        self.store._delete_image('fake_pool', self.location.image,
+                                 snapshot_name='snap')
+
+        self.called_commands_expected = ['unprotect_snap', 'remove_snap',
+                                         'remove']
+
+    @mock.patch.object(MockRBD.RBD, 'remove')
+    @mock.patch.object(MockRBD.Image, 'remove_snap')
+    @mock.patch.object(MockRBD.Image, 'unprotect_snap')
+    def test_delete_image_w_snap_with_error(self, unprotect, remove_snap,
+                                            remove):
+        def _fake_unprotect_snap(*args, **kwargs):
+            self.called_commands_actual.append('unprotect_snap')
+            raise TestException()
+
+        def _fake_remove_snap(*args, **kwargs):
+            self.called_commands_actual.append('remove_snap')
+
+        def _fake_remove(*args, **kwargs):
+            self.called_commands_actual.append('remove')
+
+        remove.side_effect = _fake_remove
+        unprotect.side_effect = _fake_unprotect_snap
+        remove_snap.side_effect = _fake_remove_snap
+        self.assertRaises(TestException, self.store._delete_image,
+                          'fake_pool', self.location.image,
+                          snapshot_name='snap')
+
+        self.called_commands_expected = ['unprotect_snap']
+
+    def test_delete_image_w_snap_exc_image_busy(self):
+        def _fake_unprotect_snap(*args, **kwargs):
+            self.called_commands_actual.append('unprotect_snap')
+            raise MockRBD.ImageBusy()
+
+        with mock.patch.object(MockRBD.Image, 'unprotect_snap') as mocked:
+            mocked.side_effect = _fake_unprotect_snap
+
+            self.assertRaises(exceptions.InUseByStore,
+                              self.store._delete_image,
+                              'fake_pool', self.location.image,
+                              snapshot_name='snap')
+
+            self.called_commands_expected = ['unprotect_snap']
+
+    def test_delete_image_w_snap_exc_image_has_snap(self):
+        def _fake_remove(*args, **kwargs):
+            self.called_commands_actual.append('remove')
+            raise MockRBD.ImageHasSnapshots()
+
+        with mock.patch.object(MockRBD.RBD, 'remove') as remove:
+            remove.side_effect = _fake_remove
+            self.assertRaises(exceptions.HasSnapshot,
+                              self.store._delete_image,
+                              'fake_pool', self.location.image)
+
+            self.called_commands_expected = ['remove']
+
+    def test_get_partial_image(self):
+        loc = g_location.Location('test_rbd_store', rbd_store.StoreLocation,
+                                  self.conf, store_specs=self.store_specs)
+        self.assertRaises(exceptions.StoreRandomGetNotSupported,
+                          self.store.get, loc, chunk_size=1)
+
+    @mock.patch.object(MockRados.Rados, 'connect')
+    def test_rados_connect_timeout(self, mock_rados_connect):
+        socket_timeout = 1
+        self.config(rados_connect_timeout=socket_timeout, group="ceph1")
+        self.store.configure()
+        with self.store.get_connection('conffile', 'rados_id'):
+            mock_rados_connect.assert_called_with(timeout=socket_timeout)
+
+    @mock.patch.object(MockRados.Rados, 'connect',
+                       side_effect=MockRados.Error)
+    def test_rados_connect_error(self, _):
+        rbd_store.rados.Error = MockRados.Error
+
+        def test():
+            with self.store.get_connection('conffile', 'rados_id'):
+                pass
+        self.assertRaises(exceptions.BackendException, test)
+
+    def test_create_image_conf_features(self):
+        # Tests that we use non-0 features from ceph.conf and cast to int.
+        fsid = 'fake'
+        features = '3'
+        conf_get_mock = mock.Mock(return_value=features)
+        conn = mock.Mock(conf_get=conf_get_mock)
+        ioctxt = mock.sentinel.ioctxt
+        name = '1'
+        size = 1024
+        order = 3
+        with mock.patch.object(rbd_store.rbd.RBD, 'create') as create_mock:
+            location = self.store._create_image(
+                fsid, conn, ioctxt, name, size, order)
+            self.assertEqual(fsid, location.specs['fsid'])
+            self.assertEqual(rbd_store.DEFAULT_POOL, location.specs['pool'])
+            self.assertEqual(name, location.specs['image'])
+            self.assertEqual(rbd_store.DEFAULT_SNAPNAME,
+                             location.specs['snapshot'])
+
+        create_mock.assert_called_once_with(ioctxt, name, size, order,
+                                            old_format=False, features=3)
+
+    def tearDown(self):
+        self.assertEqual(self.called_commands_expected,
+                         self.called_commands_actual)
+        super(TestMultiStore, self).tearDown()
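
For orientation, the tests above drive the new multi-store API roughly as
follows. This is a minimal sketch distilled from setUp() and the add() tests,
using only names introduced by this patch (enabled_backends,
register_store_opts, create_multi_stores, default_backend, the backend=
argument to a store driver, and the 'backend' key in the metadata returned
by add()); it is an illustration, not shipped code.

    from oslo_config import cfg

    import glance_store as store
    from glance_store._drivers import rbd as rbd_store

    conf = cfg.ConfigOpts()
    conf(args=[])
    conf.register_opt(cfg.DictOpt('enabled_backends'))
    # Backend identifier -> store type, mirroring setUp() above.
    conf.set_override('enabled_backends', {'ceph1': 'rbd', 'ceph2': 'rbd'})
    # Registers one option group per backend identifier, plus the
    # 'glance_store' group carrying the new 'default_backend' option.
    store.register_store_opts(conf)
    conf.set_override('default_backend', 'ceph1', group='glance_store')

    # Instantiate every enabled backend and populate the scheme map.
    store.create_multi_stores(conf)

    # A driver can also be bound to one backend group directly; add()
    # then records which backend received the image in its metadata.
    s = rbd_store.Store(conf, backend='ceph2')
    s.configure()
    # location, size, checksum, metadata = s.add(image_id, data, size)
    # metadata['backend'] == 'ceph2'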
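
The per-backend option groups also carry ordinary driver tuning, as
test_rados_connect_timeout shows with its group="ceph1" override. A short
sketch, assuming only option names the rbd driver already exposes
(rados_connect_timeout, rbd_store_pool); pool names are placeholders:

    # Each enabled backend gets its own option group, so the same driver
    # option can take a different value per backend.
    conf.set_override('rados_connect_timeout', 5, group='ceph1')
    conf.set_override('rbd_store_pool', 'images', group='ceph1')
    conf.set_override('rbd_store_pool', 'images_cold', group='ceph2')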