Merge "Enable multi store support for glance"
commit
3e1cc3e5e6
|
@ -13,6 +13,7 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from .multi_backend import * # noqa
|
||||
from .backend import * # noqa
|
||||
from .driver import * # noqa
|
||||
from .exceptions import * # noqa
|
||||
|
|
|
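The multi-backend API is re-exported at the package root alongside the existing single-store API, so callers reach the new entry points without importing submodules. A minimal bootstrap sketch, assuming an `enabled_backends` layout like the one the new unit tests use (the backend names "file1"/"file2" are illustrative, not fixed values):

    # Hedged sketch; mirrors the test setUp() further down in this change.
    from oslo_config import cfg

    import glance_store as store

    conf = cfg.ConfigOpts()
    conf(args=[])
    conf.register_opt(cfg.DictOpt('enabled_backends'))
    conf.set_override('enabled_backends', {'file1': 'file', 'file2': 'file'})

    store.register_store_opts(conf)   # adds one option group per backend
    conf.set_override('default_backend', 'file1', group='glance_store')
    store.create_multi_stores(conf)   # loads, configures, registers stores
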
@@ -273,13 +273,18 @@ class Store(glance_store.driver.Store):
         :datadir is a directory path in which glance writes image files.
         """
 
-        if self.conf.glance_store.filesystem_store_file_perm <= 0:
+        if self.backend_group:
+            fstore_perm = getattr(
+                self.conf, self.backend_group).filesystem_store_file_perm
+        else:
+            fstore_perm = self.conf.glance_store.filesystem_store_file_perm
+
+        if fstore_perm <= 0:
             return
 
         try:
             mode = os.stat(datadir)[stat.ST_MODE]
-            perm = int(str(self.conf.glance_store.filesystem_store_file_perm),
-                       8)
+            perm = int(str(fstore_perm), 8)
             if perm & stat.S_IRWXO > 0:
                 if not mode & stat.S_IXOTH:
                     # chmod o+x
@@ -378,26 +383,37 @@ class Store(glance_store.driver.Store):
         this method. If the store was not able to successfully configure
         itself, it should raise `exceptions.BadStoreConfiguration`
         """
-        if not (self.conf.glance_store.filesystem_store_datadir or
-                self.conf.glance_store.filesystem_store_datadirs):
+        if self.backend_group:
+            fdir = getattr(
+                self.conf, self.backend_group).filesystem_store_datadir
+            fdirs = getattr(
+                self.conf, self.backend_group).filesystem_store_datadirs
+            fstore_perm = getattr(
+                self.conf, self.backend_group).filesystem_store_file_perm
+            meta_file = getattr(
+                self.conf, self.backend_group).filesystem_store_metadata_file
+        else:
+            fdir = self.conf.glance_store.filesystem_store_datadir
+            fdirs = self.conf.glance_store.filesystem_store_datadirs
+            fstore_perm = self.conf.glance_store.filesystem_store_file_perm
+            meta_file = self.conf.glance_store.filesystem_store_metadata_file
+
+        if not (fdir or fdirs):
             reason = (_("Specify at least 'filesystem_store_datadir' or "
                         "'filesystem_store_datadirs' option"))
             LOG.error(reason)
             raise exceptions.BadStoreConfiguration(store_name="filesystem",
                                                    reason=reason)
 
-        if (self.conf.glance_store.filesystem_store_datadir and
-                self.conf.glance_store.filesystem_store_datadirs):
-
+        if fdir and fdirs:
             reason = (_("Specify either 'filesystem_store_datadir' or "
                         "'filesystem_store_datadirs' option"))
             LOG.error(reason)
             raise exceptions.BadStoreConfiguration(store_name="filesystem",
                                                    reason=reason)
 
-        if self.conf.glance_store.filesystem_store_file_perm > 0:
-            perm = int(str(self.conf.glance_store.filesystem_store_file_perm),
-                       8)
+        if fstore_perm > 0:
+            perm = int(str(fstore_perm), 8)
             if not perm & stat.S_IRUSR:
                 reason = _LE("Specified an invalid "
                              "'filesystem_store_file_perm' option which "
@@ -410,13 +426,13 @@ class Store(glance_store.driver.Store):
 
         self.multiple_datadirs = False
         directory_paths = set()
-        if self.conf.glance_store.filesystem_store_datadir:
-            self.datadir = self.conf.glance_store.filesystem_store_datadir
+        if fdir:
+            self.datadir = fdir
             directory_paths.add(self.datadir)
         else:
             self.multiple_datadirs = True
             self.priority_data_map = {}
-            for datadir in self.conf.glance_store.filesystem_store_datadirs:
+            for datadir in fdirs:
                 (datadir_path,
                  priority) = self._get_datadir_path_and_priority(datadir)
                 priority_paths = self.priority_data_map.setdefault(
@@ -431,9 +447,8 @@ class Store(glance_store.driver.Store):
 
         self._create_image_directories(directory_paths)
 
-        metadata_file = self.conf.glance_store.filesystem_store_metadata_file
-        if metadata_file:
-            self._validate_metadata(metadata_file)
+        if meta_file:
+            self._validate_metadata(meta_file)
 
     def _check_directory_paths(self, datadir_path, directory_paths,
                                priority_paths):
@@ -705,15 +720,24 @@ class Store(glance_store.driver.Store):
                    'filepath': filepath,
                    'checksum_hex': checksum_hex})
 
-        if self.conf.glance_store.filesystem_store_file_perm > 0:
-            perm = int(str(self.conf.glance_store.filesystem_store_file_perm),
-                       8)
+        if self.backend_group:
+            fstore_perm = getattr(
+                self.conf, self.backend_group).filesystem_store_file_perm
+        else:
+            fstore_perm = self.conf.glance_store.filesystem_store_file_perm
+
+        if fstore_perm > 0:
+            perm = int(str(fstore_perm), 8)
             try:
                 os.chmod(filepath, perm)
             except (IOError, OSError):
                 LOG.warning(_LW("Unable to set permission to image: %s") %
                             filepath)
 
+        # Add store backend information to location metadata
+        if self.backend_group:
+            metadata['backend'] = u"%s" % self.backend_group
+
         return ('file://%s' % filepath, bytes_written, checksum_hex, metadata)
 
     @staticmethod

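Throughout the filesystem driver the pattern is the same: when the store was constructed with a backend name, every `filesystem_store_*` option is read from that backend's own option group via `getattr(self.conf, self.backend_group)`; otherwise it falls back to the legacy `[glance_store]` group. Factored out, the lookup amounts to the following sketch (the helper name `_resolve_opt` is hypothetical, not part of the driver):

    # Hypothetical helper illustrating the per-backend option resolution.
    def _resolve_opt(conf, backend_group, opt_name):
        if backend_group:
            # e.g. conf.file1.filesystem_store_datadir
            return getattr(getattr(conf, backend_group), opt_name)
        # legacy single-store path: conf.glance_store.<opt_name>
        return getattr(conf.glance_store, opt_name)
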
@@ -281,17 +281,32 @@ class Store(driver.Store):
         itself, it should raise `exceptions.BadStoreConfiguration`
         """
         try:
-            chunk = self.conf.glance_store.rbd_store_chunk_size
+            if self.backend_group:
+                chunk = getattr(self.conf,
+                                self.backend_group).rbd_store_chunk_size
+                pool = getattr(self.conf, self.backend_group).rbd_store_pool
+                user = getattr(self.conf, self.backend_group).rbd_store_user
+                conf_file = getattr(self.conf,
+                                    self.backend_group).rbd_store_ceph_conf
+                connect_timeout = getattr(
+                    self.conf, self.backend_group).rados_connect_timeout
+            else:
+                chunk = self.conf.glance_store.rbd_store_chunk_size
+                pool = self.conf.glance_store.rbd_store_pool
+                user = self.conf.glance_store.rbd_store_user
+                conf_file = self.conf.glance_store.rbd_store_ceph_conf
+                connect_timeout = self.conf.glance_store.rados_connect_timeout
+
             self.chunk_size = chunk * units.Mi
             self.READ_CHUNKSIZE = self.chunk_size
             self.WRITE_CHUNKSIZE = self.READ_CHUNKSIZE
 
             # these must not be unicode since they will be passed to a
             # non-unicode-aware C library
-            self.pool = str(self.conf.glance_store.rbd_store_pool)
-            self.user = str(self.conf.glance_store.rbd_store_user)
-            self.conf_file = str(self.conf.glance_store.rbd_store_ceph_conf)
-            self.connect_timeout = self.conf.glance_store.rados_connect_timeout
+            self.pool = str(pool)
+            self.user = str(user)
+            self.conf_file = str(conf_file)
+            self.connect_timeout = connect_timeout
         except cfg.ConfigFileValueError as e:
             reason = _("Error in store configuration: %s") % e
             LOG.error(reason)
@@ -514,7 +529,12 @@ class Store(driver.Store):
             if image_size == 0:
                 image_size = bytes_written
 
-        return (loc.get_uri(), image_size, checksum.hexdigest(), {})
+        # Add store backend information to location metadata
+        metadata = {}
+        if self.backend_group:
+            metadata['backend'] = u"%s" % self.backend_group
+
+        return (loc.get_uri(), image_size, checksum.hexdigest(), metadata)
 
     @capabilities.check
     def delete(self, location, context=None):

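Both the filesystem and rbd drivers now stamp the location metadata returned by add() with the backend identifier, which is what lets Glance record where an image landed. A rough caller-side sketch (store_obj stands in for any configured driver instance; the variable names are illustrative):

    # Hedged example of consuming the new 'backend' hint from add().
    uri, size, checksum, metadata = store_obj.add(image_id, image_data, length)
    backend_hint = metadata.get('backend')  # e.g. u"file1"; unset in legacy mode
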
@@ -30,13 +30,25 @@ from glance_store.i18n import _
 LOG = logging.getLogger(__name__)
 
 
+_MULTI_BACKEND_OPTS = [
+    cfg.StrOpt('store_description',
+               help=_("""
+This option will be used to provide a constructive information about
+the store backend to end users. Using /v2/stores-info call user can
+seek more information on all available backends.
+
+"""))
+]
+
+
 class Store(capabilities.StoreCapability):
 
     OPTIONS = None
+    MULTI_BACKEND_OPTIONS = _MULTI_BACKEND_OPTS
     READ_CHUNKSIZE = 4 * units.Mi  # 4M
     WRITE_CHUNKSIZE = READ_CHUNKSIZE
 
-    def __init__(self, conf):
+    def __init__(self, conf, backend=None):
         """
         Initialize the Store
         """
@@ -44,11 +56,19 @@ class Store(capabilities.StoreCapability):
         super(Store, self).__init__()
 
         self.conf = conf
+        self.backend_group = backend
         self.store_location_class = None
 
         try:
             if self.OPTIONS is not None:
-                self.conf.register_opts(self.OPTIONS, group='glance_store')
+                group = 'glance_store'
+                if self.backend_group:
+                    group = self.backend_group
+                    if self.MULTI_BACKEND_OPTIONS is not None:
+                        self.conf.register_opts(
+                            self.MULTI_BACKEND_OPTIONS, group=group)
+
+                self.conf.register_opts(self.OPTIONS, group=group)
         except cfg.DuplicateOptError:
             pass
 

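The base class now picks the option group once per instance: an unnamed store keeps registering its options under [glance_store], while a named backend registers the driver options, plus the shared store_description option, under its own group. Instantiation then looks roughly like this (the backend name 'file1' is illustrative):

    from glance_store._drivers import filesystem

    legacy = filesystem.Store(conf)                   # options in [glance_store]
    named = filesystem.Store(conf, backend='file1')   # options in [file1]
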
@@ -49,6 +49,7 @@ CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 SCHEME_TO_CLS_MAP = {}
+SCHEME_TO_CLS_BACKEND_MAP = {}
 
 
 def get_location_from_uri(uri, conf=CONF):
@@ -62,7 +63,7 @@
 
     Example URIs:
         https://user:pass@example.com:80/images/some-id
-        http://images.oracle.com/123456
+        http://example.com/123456
         swift://example.com/container/obj-id
         swift://user:account:pass@authurl.com/container/obj-id
         swift+http://user:account:pass@authurl.com/container/obj-id
@@ -77,6 +78,56 @@
                     conf, uri=uri)
 
 
+def get_location_from_uri_and_backend(uri, backend, conf=CONF):
+    """
+    Given a URI, return a Location object that has had an appropriate
+    store parse the URI.
+
+    :param uri: A URI that could come from the end-user in the Location
+                attribute/header.
+    :param backend: A backend name for the store.
+    :param conf: The global configuration.
+
+    Example URIs:
+        https://user:pass@example.com:80/images/some-id
+        http://example.com/123456
+        swift://example.com/container/obj-id
+        swift://user:account:pass@authurl.com/container/obj-id
+        swift+http://user:account:pass@authurl.com/container/obj-id
+        file:///var/lib/glance/images/1
+        cinder://volume-id
+    """
+
+    pieces = urllib.parse.urlparse(uri)
+
+    if pieces.scheme not in SCHEME_TO_CLS_BACKEND_MAP.keys():
+        raise exceptions.UnknownScheme(scheme=pieces.scheme)
+    try:
+        scheme_info = SCHEME_TO_CLS_BACKEND_MAP[pieces.scheme][backend]
+    except KeyError:
+        raise exceptions.UnknownScheme(scheme=backend)
+
+    return Location(pieces.scheme, scheme_info['location_class'],
+                    conf, uri=uri)
+
+
+def register_scheme_backend_map(scheme_map):
+    """
+    Given a mapping of 'scheme' to store_name, adds the mapping to the
+    known list of schemes.
+
+    This function overrides existing stores.
+    """
+
+    for (k, v) in scheme_map.items():
+        if k not in SCHEME_TO_CLS_BACKEND_MAP:
+            SCHEME_TO_CLS_BACKEND_MAP[k] = {}
+
+        LOG.debug("Registering scheme %s with %s", k, v)
+        for key, value in v.items():
+            SCHEME_TO_CLS_BACKEND_MAP[k][key] = value
+
+
 def register_scheme_map(scheme_map):
     """
     Given a mapping of 'scheme' to store_name, adds the mapping to the

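Compared to SCHEME_TO_CLS_MAP, the new registry nests one level deeper: scheme, then backend identifier, then the store info. The shape, with illustrative names:

    # 'file1' is an example backend identifier; store/loc_cls stand in for
    # the configured driver instance and its location class.
    def example_scheme_map(store, loc_cls):
        return {
            'file': {                       # URI scheme
                'file1': {                  # backend identifier
                    'store': store,         # configured driver instance
                    'location_class': loc_cls,
                    'store_entry': 'file',  # stevedore entry-point name
                },
            },
        }
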
@@ -0,0 +1,438 @@
+# Copyright 2018 RedHat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from oslo_config import cfg
+from oslo_utils import encodeutils
+import six
+from stevedore import driver
+from stevedore import extension
+
+from glance_store import capabilities
+from glance_store import exceptions
+from glance_store.i18n import _, _LW
+from glance_store import location
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+_STORE_OPTS = [
+    cfg.StrOpt('default_backend',
+               help=_("""
+The default scheme to use for storing images.
+
+Provide a string value representing the default scheme to use for
+storing images. If not set, Glance API service will fail to start.
+
+Related Options:
+    * enabled_backends
+
+""")),
+    cfg.IntOpt('store_capabilities_update_min_interval',
+               default=0,
+               min=0,
+               deprecated_for_removal=True,
+               deprecated_since='Rocky',
+               deprecated_reason=_("""
+This option configures a stub method that has not been implemented
+for any existing store drivers. Hence it is non-operational, and
+giving it a value does absolutely nothing.
+
+This option is scheduled for removal early in the Stein development
+cycle.
+"""),
+               help=_("""
+Minimum interval in seconds to execute updating dynamic storage
+capabilities based on current backend status.
+
+Provide an integer value representing time in seconds to set the
+minimum interval before an update of dynamic storage capabilities
+for a storage backend can be attempted. Setting
+``store_capabilities_update_min_interval`` does not mean updates
+occur periodically based on the set interval. Rather, the update
+is performed at the elapse of this interval set, if an operation
+of the store is triggered.
+
+By default, this option is set to zero and is disabled. Provide an
+integer value greater than zero to enable this option.
+
+NOTE 1: For more information on store capabilities and their updates,
+please visit: https://specs.openstack.org/openstack/glance-specs/\
+specs/kilo/store-capabilities.html
+
+For more information on setting up a particular store in your
+deployment and help with the usage of this feature, please contact
+the storage driver maintainers listed here:
+https://docs.openstack.org/glance_store/latest/user/drivers.html
+
+NOTE 2: The dynamic store update capability described above is not
+implemented by any current store drivers. Thus, this option DOES
+NOT DO ANYTHING (and it never has). It is DEPRECATED and scheduled
+for removal early in the Stein development cycle.
+
+Possible values:
+    * Zero
+    * Positive integer
+
+Related Options:
+    * None
+
+""")),
+]
+
+_STORE_CFG_GROUP = 'glance_store'
+
+
+def _list_driver_opts():
+    driver_opts = {}
+    mgr = extension.ExtensionManager('glance_store.drivers')
+    # NOTE(zhiyan): Handle available drivers entry_points provided
+    # NOTE(nikhil): Return a sorted list of drivers to ensure that the sample
+    # configuration files generated by oslo config generator retain the order
+    # in which the config opts appear across different runs. If this order of
+    # config opts is not preserved, some downstream packagers may see a long
+    # diff of the changes though not relevant as only order has changed. See
+    # some more details at bug 1619487.
+    drivers = sorted([ext.name for ext in mgr])
+    handled_drivers = []  # Used to handle backwards-compatible entries
+    for store_entry in drivers:
+        driver_cls = _load_multi_store(None, store_entry, False)
+        if driver_cls and driver_cls not in handled_drivers:
+            if getattr(driver_cls, 'OPTIONS', None) is not None:
+                driver_opts[store_entry] = driver_cls.OPTIONS
+            handled_drivers.append(driver_cls)
+
+    # NOTE(zhiyan): This separated approach could list
+    # store options before all driver ones, which easier
+    # to read and configure by operator.
+    return driver_opts
+
+
+def register_store_opts(conf):
+    LOG.debug("Registering options for group %s" % _STORE_CFG_GROUP)
+    conf.register_opts(_STORE_OPTS, group=_STORE_CFG_GROUP)
+
+    driver_opts = _list_driver_opts()
+    enabled_backends = conf.enabled_backends
+    for backend in enabled_backends:
+        for opt_list in driver_opts:
+            if enabled_backends[backend] not in opt_list:
+                continue
+
+            LOG.debug("Registering options for group %s" % backend)
+            conf.register_opts(driver_opts[opt_list], group=backend)
+
+
+def _load_multi_store(conf, store_entry,
+                      invoke_load=True,
+                      backend=None):
+    if backend:
+        invoke_args = [conf, backend]
+    else:
+        invoke_args = [conf]
+    try:
+        LOG.debug("Attempting to import store %s", store_entry)
+        mgr = driver.DriverManager('glance_store.drivers',
+                                   store_entry,
+                                   invoke_args=invoke_args,
+                                   invoke_on_load=invoke_load)
+        return mgr.driver
+    except RuntimeError as e:
+        LOG.warning("Failed to load driver %(driver)s. The "
+                    "driver will be disabled" % dict(driver=str([driver, e])))
+
+
+def _load_multi_stores(conf):
+    enabled_backends = conf.enabled_backends
+    for backend, store_entry in enabled_backends.items():
+        try:
+            # FIXME(flaper87): Don't hide BadStoreConfiguration
+            # exceptions. These exceptions should be propagated
+            # to the user of the library.
+            store_instance = _load_multi_store(conf, store_entry,
+                                               backend=backend)
+
+            if not store_instance:
+                continue
+
+            yield (store_entry, store_instance, backend)
+
+        except exceptions.BadStoreConfiguration:
+            continue
+
+
+def create_multi_stores(conf=CONF):
+    """
+    Registers all store modules and all schemes
+    from the given config.
+    """
+    store_count = 0
+    scheme_map = {}
+    for (store_entry, store_instance,
+         store_identifier) in _load_multi_stores(conf):
+        try:
+            schemes = store_instance.get_schemes()
+            store_instance.configure(re_raise_bsc=False)
+        except NotImplementedError:
+            continue
+
+        if not schemes:
+            raise exceptions.BackendException('Unable to register store %s. '
+                                              'No schemes associated with it.'
+                                              % store_entry)
+        else:
+            LOG.debug("Registering store %s with schemes %s",
+                      store_entry, schemes)
+
+            loc_cls = store_instance.get_store_location_class()
+            for scheme in schemes:
+                if scheme not in scheme_map:
+                    scheme_map[scheme] = {}
+                scheme_map[scheme][store_identifier] = {
+                    'store': store_instance,
+                    'location_class': loc_cls,
+                    'store_entry': store_entry
+                }
+                location.register_scheme_backend_map(scheme_map)
+            store_count += 1
+
+    return store_count
+
+
+def verify_store():
+    store_id = CONF.glance_store.default_backend
+    if not store_id:
+        msg = _("'default_backend' config option is not set.")
+        raise RuntimeError(msg)
+
+    try:
+        get_store_from_store_identifier(store_id)
+    except exceptions.UnknownScheme:
+        msg = _("Store for identifier %s not found") % store_id
+        raise RuntimeError(msg)
+
+
+def get_store_from_store_identifier(store_identifier):
+    """
+    Given a store identifier, return the appropriate store object
+    for handling that scheme.
+    """
+    scheme_map = {}
+    enabled_backends = CONF.enabled_backends
+    try:
+        scheme = enabled_backends[store_identifier]
+    except KeyError:
+        msg = _("Store for identifier %s not found") % store_identifier
+        raise exceptions.UnknownScheme(msg)
+
+    if scheme not in location.SCHEME_TO_CLS_BACKEND_MAP:
+        raise exceptions.UnknownScheme(scheme=scheme)
+
+    scheme_info = location.SCHEME_TO_CLS_BACKEND_MAP[scheme][store_identifier]
+    store = scheme_info['store']
+
+    if not store.is_capable(capabilities.BitMasks.DRIVER_REUSABLE):
+        # Driver instance isn't stateless so it can't
+        # be reused safely and need recreation.
+        store_entry = scheme_info['store_entry']
+        store = _load_multi_store(store.conf, store_entry, invoke_load=True,
+                                  backend=store_identifier)
+        store.configure()
+        try:
+            loc_cls = store.get_store_location_class()
+            for new_scheme in store.get_schemes():
+                if new_scheme not in scheme_map:
+                    scheme_map[new_scheme] = {}
+
+                scheme_map[new_scheme][store_identifier] = {
+                    'store': store,
+                    'location_class': loc_cls,
+                    'store_entry': store_entry
+                }
+                location.register_scheme_backend_map(scheme_map)
+        except NotImplementedError:
+            scheme_info['store'] = store
+
+    return store
+
+
+def add(conf, image_id, data, size, backend, context=None,
+        verifier=None):
+    if not backend:
+        backend = conf.glance_store.default_backend
+
+    store = get_store_from_store_identifier(backend)
+    return store_add_to_backend(image_id, data, size, store, context,
+                                verifier)
+
+
+def store_add_to_backend(image_id, data, size, store, context=None,
+                         verifier=None):
+    """
+    A wrapper around a call to each stores add() method. This gives glance
+    a common place to check the output
+
+    :param image_id: The image add to which data is added
+    :param data: The data to be stored
+    :param size: The length of the data in bytes
+    :param store: The store to which the data is being added
+    :param context: The request context
+    :param verifier: An object used to verify signatures for images
+    :param backend: Name of the backend to store the image
+    :return: The url location of the file,
+             the size amount of data,
+             the checksum of the data
+             the storage systems metadata dictionary for the location
+    """
+    (location, size, checksum, metadata) = store.add(image_id,
+                                                     data,
+                                                     size,
+                                                     context=context,
+                                                     verifier=verifier)
+
+    if metadata is not None:
+        if not isinstance(metadata, dict):
+            msg = (_("The storage driver %(driver)s returned invalid "
+                     " metadata %(metadata)s. This must be a dictionary type")
+                   % dict(driver=str(store), metadata=str(metadata)))
+            LOG.error(msg)
+            raise exceptions.BackendException(msg)
+        try:
+            check_location_metadata(metadata)
+        except exceptions.BackendException as e:
+            e_msg = (_("A bad metadata structure was returned from the "
+                       "%(driver)s storage driver: %(metadata)s. %(e)s.") %
+                     dict(driver=encodeutils.exception_to_unicode(store),
+                          metadata=encodeutils.exception_to_unicode(metadata),
+                          e=encodeutils.exception_to_unicode(e)))
+            LOG.error(e_msg)
+            raise exceptions.BackendException(e_msg)
+    return (location, size, checksum, metadata)
+
+
+def check_location_metadata(val, key=''):
+    if isinstance(val, dict):
+        for key in val:
+            check_location_metadata(val[key], key=key)
+    elif isinstance(val, list):
+        ndx = 0
+        for v in val:
+            check_location_metadata(v, key='%s[%d]' % (key, ndx))
+            ndx = ndx + 1
+    elif not isinstance(val, six.text_type):
+        raise exceptions.BackendException(_("The image metadata key %(key)s "
+                                            "has an invalid type of %(type)s. "
+                                            "Only dict, list, and unicode are "
+                                            "supported.")
+                                          % dict(key=key, type=type(val)))
+
+
+def delete(uri, backend, context=None):
+    """Removes chunks of data from backend specified by uri."""
+    if backend:
+        loc = location.get_location_from_uri_and_backend(
+            uri, backend, conf=CONF)
+        store = get_store_from_store_identifier(backend)
+        return store.delete(loc, context=context)
+
+    msg = _LW('Backend is not set to image, searching '
+              'all backends based on location URI.')
+    LOG.warn(msg)
+
+    backends = CONF.enabled_backends
+    for backend in backends:
+        try:
+            if not uri.startswith(backends[backend]):
+                continue
+
+            loc = location.get_location_from_uri_and_backend(
+                uri, backend, conf=CONF)
+            store = get_store_from_store_identifier(backend)
+            return store.delete(loc, context=context)
+        except (exceptions.NotFound, exceptions.UnknownScheme):
+            continue
+
+    raise exceptions.NotFound(_("Image not found in any configured backend"))
+
+
+def set_acls_for_multi_store(location_uri, backend, public=False,
+                             read_tenants=[],
+                             write_tenants=None, context=None):
+
+    if write_tenants is None:
+        write_tenants = []
+
+    loc = location.get_location_from_uri_and_backend(
+        location_uri, backend, conf=CONF)
+    store = get_store_from_store_identifier(backend)
+    try:
+        store.set_acls(loc, public=public,
+                       read_tenants=read_tenants,
+                       write_tenants=write_tenants,
+                       context=context)
+    except NotImplementedError:
+        LOG.debug("Skipping store.set_acls... not implemented")
+
+
+def get(uri, backend, offset=0, chunk_size=None, context=None):
+    """Yields chunks of data from backend specified by uri."""
+
+    if backend:
+        loc = location.get_location_from_uri_and_backend(uri, backend,
+                                                         conf=CONF)
+        store = get_store_from_store_identifier(backend)
+
+        return store.get(loc, offset=offset,
+                         chunk_size=chunk_size,
+                         context=context)
+
+    msg = _LW('Backend is not set to image, searching '
+              'all backends based on location URI.')
+    LOG.warn(msg)
+
+    backends = CONF.enabled_backends
+    for backend in backends:
+        try:
+            if not uri.startswith(backends[backend]):
+                continue
+
+            loc = location.get_location_from_uri_and_backend(
+                uri, backend, conf=CONF)
+            store = get_store_from_store_identifier(backend)
+            data, size = store.get(loc, offset=offset,
+                                   chunk_size=chunk_size,
+                                   context=context)
+            if data:
+                return data, size
+        except (exceptions.NotFound, exceptions.UnknownScheme):
+            continue
+
+    raise exceptions.NotFound(_("Image not found in any configured backend"))
+
+
+def get_known_schemes_for_multi_store():
+    """Returns list of known schemes."""
+    return location.SCHEME_TO_CLS_BACKEND_MAP.keys()
+
+
+def get_size_from_uri_and_backend(uri, backend, context=None):
+    """Retrieves image size from backend specified by uri."""
+
+    loc = location.get_location_from_uri_and_backend(
+        uri, backend, conf=CONF)
+    store = get_store_from_store_identifier(backend)
+    return store.get_size(loc, context=context)

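The new module mirrors the classic backend.py entry points but takes an explicit backend identifier, falling back to default_backend (or a URI-based search across all enabled backends) when none is given. A rough end-to-end sketch, assuming conf was bootstrapped as in the earlier example (the UUID and sizes are illustrative):

    import io

    from glance_store import multi_backend

    image_id = '71c675ab-d94f-49cd-a114-e12490b328d9'  # example UUID
    data = io.BytesIO(b'*' * 1024)

    # Store 1 KiB of data in the 'file1' backend, then read it back and
    # delete it through the same backend identifier.
    uri, size, checksum, metadata = multi_backend.add(
        conf, image_id, data, 1024, backend='file1')
    chunks, length = multi_backend.get(uri, 'file1')
    multi_backend.delete(uri, 'file1')
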
@@ -81,3 +81,46 @@ class StoreBaseTest(base.BaseTestCase):
                 'store_entry': store_entry
             }
         location.register_scheme_map(scheme_map)
+
+
+class MultiStoreBaseTest(base.BaseTestCase):
+
+    def copy_data_file(self, file_name, dst_dir):
+        src_file_name = os.path.join('glance_store/tests/etc', file_name)
+        shutil.copy(src_file_name, dst_dir)
+        dst_file_name = os.path.join(dst_dir, file_name)
+        return dst_file_name
+
+    def config(self, **kw):
+        """Override some configuration values.
+
+        The keyword arguments are the names of configuration options to
+        override and their values.
+
+        If a group argument is supplied, the overrides are applied to
+        the specified configuration option group.
+
+        All overrides are automatically cleared at the end of the current
+        test by the fixtures cleanup process.
+        """
+        group = kw.pop('group', None)
+        for k, v in kw.items():
+            if group:
+                self.conf.set_override(k, v, group)
+            else:
+                self.conf.set_override(k, v)
+
+    def register_store_backend_schemes(self, store, store_entry,
+                                       store_identifier):
+        schemes = store.get_schemes()
+        scheme_map = {}
+
+        loc_cls = store.get_store_location_class()
+        for scheme in schemes:
+            scheme_map[scheme] = {}
+            scheme_map[scheme][store_identifier] = {
+                'store': store,
+                'location_class': loc_cls,
+                'store_entry': store_entry
+            }
+            location.register_scheme_backend_map(scheme_map)

@@ -0,0 +1,821 @@
+# Copyright 2018 RedHat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests the filesystem backend store"""
+
+import errno
+import hashlib
+import json
+import mock
+import os
+import stat
+import uuid
+
+import fixtures
+from oslo_config import cfg
+from oslo_utils import units
+import six
+from six.moves import builtins
+# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
+from six.moves import range
+
+import glance_store as store
+from glance_store._drivers import filesystem
+from glance_store import exceptions
+from glance_store import location
+from glance_store.tests import base
+from glance_store.tests.unit import test_store_capabilities
+
+
+class TestMultiStore(base.MultiStoreBaseTest,
+                     test_store_capabilities.TestStoreCapabilitiesChecking):
+
+    # NOTE(flaper87): temporary until we
+    # can move to a fully-local lib.
+    # (Swift store's fault)
+    _CONF = cfg.ConfigOpts()
+
+    def setUp(self):
+        """Establish a clean test environment."""
+        super(TestMultiStore, self).setUp()
+        enabled_backends = {
+            "file1": "file",
+            "file2": "file",
+        }
+        self.conf = self._CONF
+        self.conf(args=[])
+        self.conf.register_opt(cfg.DictOpt('enabled_backends'))
+        self.config(enabled_backends=enabled_backends)
+        store.register_store_opts(self.conf)
+        self.config(default_backend='file1', group='glance_store')
+
+        # Ensure stores + locations cleared
+        location.SCHEME_TO_CLS_BACKEND_MAP = {}
+
+        store.create_multi_stores(self.conf)
+        self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP',
+                        dict())
+        self.test_dir = self.useFixture(fixtures.TempDir()).path
+        self.addCleanup(self.conf.reset)
+
+        self.orig_chunksize = filesystem.Store.READ_CHUNKSIZE
+        filesystem.Store.READ_CHUNKSIZE = 10
+        self.store = filesystem.Store(self.conf, backend='file1')
+        self.config(filesystem_store_datadir=self.test_dir,
+                    group="file1")
+        self.store.configure()
+        self.register_store_backend_schemes(self.store, 'file', 'file1')
+
+    def tearDown(self):
+        """Clear the test environment."""
+        super(TestMultiStore, self).tearDown()
+        filesystem.ChunkedFile.CHUNKSIZE = self.orig_chunksize
+
+    def _create_metadata_json_file(self, metadata):
+        expected_image_id = str(uuid.uuid4())
+        jsonfilename = os.path.join(self.test_dir,
+                                    "storage_metadata.%s" % expected_image_id)
+
+        self.config(filesystem_store_metadata_file=jsonfilename,
+                    group="file1")
+        with open(jsonfilename, 'w') as fptr:
+            json.dump(metadata, fptr)
+
+    def _store_image(self, in_metadata):
+        expected_image_id = str(uuid.uuid4())
+        expected_file_size = 10
+        expected_file_contents = b"*" * expected_file_size
+        image_file = six.BytesIO(expected_file_contents)
+        self.store.FILESYSTEM_STORE_METADATA = in_metadata
+        return self.store.add(expected_image_id, image_file,
+                              expected_file_size)
+
+    def test_get(self):
+        """Test a "normal" retrieval of an image in chunks."""
+        # First add an image...
+        image_id = str(uuid.uuid4())
+        file_contents = b"chunk00000remainder"
+        image_file = six.BytesIO(file_contents)
+
+        loc, size, checksum, metadata = self.store.add(
+            image_id, image_file, len(file_contents))
+        # Check metadata contains 'file1' as a backend
+        self.assertEqual(u"file1", metadata['backend'])
+
+        # Now read it back...
+        uri = "file:///%s/%s" % (self.test_dir, image_id)
+        loc = location.get_location_from_uri_and_backend(uri, 'file1',
+                                                         conf=self.conf)
+        (image_file, image_size) = self.store.get(loc)
+
+        expected_data = b"chunk00000remainder"
+        expected_num_chunks = 2
+        data = b""
+        num_chunks = 0
+
+        for chunk in image_file:
+            num_chunks += 1
+            data += chunk
+        self.assertEqual(expected_data, data)
+        self.assertEqual(expected_num_chunks, num_chunks)
+
+    def test_get_random_access(self):
+        """Test a "normal" retrieval of an image in chunks."""
+        # First add an image...
+        image_id = str(uuid.uuid4())
+        file_contents = b"chunk00000remainder"
+        image_file = six.BytesIO(file_contents)
+
+        loc, size, checksum, metadata = self.store.add(image_id,
+                                                       image_file,
+                                                       len(file_contents))
+        # Check metadata contains 'file1' as a backend
+        self.assertEqual(u"file1", metadata['backend'])
+
+        # Now read it back...
+        uri = "file:///%s/%s" % (self.test_dir, image_id)
+        loc = location.get_location_from_uri_and_backend(uri, 'file1',
+                                                         conf=self.conf)
+
+        data = b""
+        for offset in range(len(file_contents)):
+            (image_file, image_size) = self.store.get(loc,
+                                                      offset=offset,
+                                                      chunk_size=1)
+            for chunk in image_file:
+                data += chunk
+
+        self.assertEqual(file_contents, data)
+
+        data = b""
+        chunk_size = 5
+        (image_file, image_size) = self.store.get(loc,
+                                                  offset=chunk_size,
+                                                  chunk_size=chunk_size)
+        for chunk in image_file:
+            data += chunk
+
+        self.assertEqual(b'00000', data)
+        self.assertEqual(chunk_size, image_size)
+
+    def test_get_non_existing(self):
+        """
+        Test that trying to retrieve a file that doesn't exist
+        raises an error
+        """
+        loc = location.get_location_from_uri_and_backend(
+            "file:///%s/non-existing" % self.test_dir, 'file1', conf=self.conf)
+        self.assertRaises(exceptions.NotFound,
+                          self.store.get,
+                          loc)
+
+    def test_get_non_existing_identifier(self):
+        """
+        Test that trying to retrieve a store that doesn't exist
+        raises an error
+        """
+        self.assertRaises(exceptions.UnknownScheme,
+                          location.get_location_from_uri_and_backend,
+                          "file:///%s/non-existing" % self.test_dir,
+                          'file3', conf=self.conf)
+
+    def test_add(self):
+        """Test that we can add an image via the filesystem backend."""
+        filesystem.ChunkedFile.CHUNKSIZE = units.Ki
+        expected_image_id = str(uuid.uuid4())
+        expected_file_size = 5 * units.Ki  # 5K
+        expected_file_contents = b"*" * expected_file_size
+        expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
+        expected_location = "file://%s/%s" % (self.test_dir,
+                                              expected_image_id)
+        image_file = six.BytesIO(expected_file_contents)
+
+        loc, size, checksum, metadata = self.store.add(expected_image_id,
+                                                       image_file,
+                                                       expected_file_size)
+
+        self.assertEqual(expected_location, loc)
+        self.assertEqual(expected_file_size, size)
+        self.assertEqual(expected_checksum, checksum)
+        self.assertEqual(u"file1", metadata['backend'])
+
+        uri = "file:///%s/%s" % (self.test_dir, expected_image_id)
+        loc = location.get_location_from_uri_and_backend(
+            uri, 'file1', conf=self.conf)
+        (new_image_file, new_image_size) = self.store.get(loc)
+        new_image_contents = b""
+        new_image_file_size = 0
+
+        for chunk in new_image_file:
+            new_image_file_size += len(chunk)
+            new_image_contents += chunk
+
+        self.assertEqual(expected_file_contents, new_image_contents)
+        self.assertEqual(expected_file_size, new_image_file_size)
+
+    def test_add_to_different_backend(self):
+        """Test that we can add an image via the filesystem backend."""
+        self.store = filesystem.Store(self.conf, backend='file2')
+        self.config(filesystem_store_datadir=self.test_dir,
+                    group="file2")
+        self.store.configure()
+        self.register_store_backend_schemes(self.store, 'file', 'file2')
+
+        filesystem.ChunkedFile.CHUNKSIZE = units.Ki
+        expected_image_id = str(uuid.uuid4())
+        expected_file_size = 5 * units.Ki  # 5K
+        expected_file_contents = b"*" * expected_file_size
+        expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
+        expected_location = "file://%s/%s" % (self.test_dir,
+                                              expected_image_id)
+        image_file = six.BytesIO(expected_file_contents)
+
+        loc, size, checksum, metadata = self.store.add(expected_image_id,
+                                                       image_file,
+                                                       expected_file_size)
+
+        self.assertEqual(expected_location, loc)
+        self.assertEqual(expected_file_size, size)
+        self.assertEqual(expected_checksum, checksum)
+        self.assertEqual(u"file2", metadata['backend'])
+
+        uri = "file:///%s/%s" % (self.test_dir, expected_image_id)
+        loc = location.get_location_from_uri_and_backend(
+            uri, 'file2', conf=self.conf)
+        (new_image_file, new_image_size) = self.store.get(loc)
+        new_image_contents = b""
+        new_image_file_size = 0
+
+        for chunk in new_image_file:
+            new_image_file_size += len(chunk)
+            new_image_contents += chunk
+
+        self.assertEqual(expected_file_contents, new_image_contents)
+        self.assertEqual(expected_file_size, new_image_file_size)
+
+    def test_add_check_metadata_with_invalid_mountpoint_location(self):
+        in_metadata = [{'id': 'abcdefg',
+                        'mountpoint': '/xyz/images'}]
+        location, size, checksum, metadata = self._store_image(in_metadata)
+        self.assertEqual({'backend': u'file1'}, metadata)
+
+    def test_add_check_metadata_list_with_invalid_mountpoint_locations(self):
+        in_metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
+                       {'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
+        location, size, checksum, metadata = self._store_image(in_metadata)
+        self.assertEqual({'backend': u'file1'}, metadata)
+
+    def test_add_check_metadata_list_with_valid_mountpoint_locations(self):
+        in_metadata = [{'id': 'abcdefg', 'mountpoint': '/tmp'},
+                       {'id': 'xyz1234', 'mountpoint': '/xyz'}]
+        location, size, checksum, metadata = self._store_image(in_metadata)
+        self.assertEqual(in_metadata[0], metadata)
+        self.assertEqual(u"file1", metadata["backend"])
+
+    def test_add_check_metadata_bad_nosuch_file(self):
+        expected_image_id = str(uuid.uuid4())
+        jsonfilename = os.path.join(self.test_dir,
+                                    "storage_metadata.%s" % expected_image_id)
+
+        self.config(filesystem_store_metadata_file=jsonfilename,
+                    group="file1")
+        expected_file_size = 10
+        expected_file_contents = b"*" * expected_file_size
+        image_file = six.BytesIO(expected_file_contents)
+
+        location, size, checksum, metadata = self.store.add(expected_image_id,
+                                                            image_file,
+                                                            expected_file_size)
+
+        self.assertEqual({'backend': u'file1'}, metadata)
+
+    def test_add_already_existing(self):
+        """
+        Tests that adding an image with an existing identifier
+        raises an appropriate exception
+        """
+        filesystem.ChunkedFile.CHUNKSIZE = units.Ki
+        image_id = str(uuid.uuid4())
+        file_size = 5 * units.Ki  # 5K
+        file_contents = b"*" * file_size
+        image_file = six.BytesIO(file_contents)
+
+        location, size, checksum, metadata = self.store.add(image_id,
+                                                            image_file,
+                                                            file_size)
+        self.assertEqual(u"file1", metadata["backend"])
+
+        image_file = six.BytesIO(b"nevergonnamakeit")
+        self.assertRaises(exceptions.Duplicate,
+                          self.store.add,
+                          image_id, image_file, 0)
+
+    def _do_test_add_write_failure(self, errno, exception):
+        filesystem.ChunkedFile.CHUNKSIZE = units.Ki
+        image_id = str(uuid.uuid4())
+        file_size = 5 * units.Ki  # 5K
+        file_contents = b"*" * file_size
+        path = os.path.join(self.test_dir, image_id)
+        image_file = six.BytesIO(file_contents)
+
+        with mock.patch.object(builtins, 'open') as popen:
+            e = IOError()
+            e.errno = errno
+            popen.side_effect = e
+
+            self.assertRaises(exception,
+                              self.store.add,
+                              image_id, image_file, 0)
+            self.assertFalse(os.path.exists(path))
+
+    def test_add_storage_full(self):
+        """
+        Tests that adding an image without enough space on disk
+        raises an appropriate exception
+        """
+        self._do_test_add_write_failure(errno.ENOSPC, exceptions.StorageFull)
+
+    def test_add_file_too_big(self):
+        """
+        Tests that adding an excessively large image file
+        raises an appropriate exception
+        """
+        self._do_test_add_write_failure(errno.EFBIG, exceptions.StorageFull)
+
+    def test_add_storage_write_denied(self):
+        """
+        Tests that adding an image with insufficient filestore permissions
+        raises an appropriate exception
+        """
+        self._do_test_add_write_failure(errno.EACCES,
+                                        exceptions.StorageWriteDenied)
+
+    def test_add_other_failure(self):
+        """
+        Tests that a non-space-related IOError does not raise a
+        StorageFull exceptions.
+        """
+        self._do_test_add_write_failure(errno.ENOTDIR, IOError)
+
+    def test_add_cleanup_on_read_failure(self):
+        """
+        Tests the partial image file is cleaned up after a read
+        failure.
+        """
+        filesystem.ChunkedFile.CHUNKSIZE = units.Ki
+        image_id = str(uuid.uuid4())
+        file_size = 5 * units.Ki  # 5K
+        file_contents = b"*" * file_size
+        path = os.path.join(self.test_dir, image_id)
+        image_file = six.BytesIO(file_contents)
+
+        def fake_Error(size):
+            raise AttributeError()
+
+        with mock.patch.object(image_file, 'read') as mock_read:
+            mock_read.side_effect = fake_Error
+
+            self.assertRaises(AttributeError,
+                              self.store.add,
+                              image_id, image_file, 0)
+            self.assertFalse(os.path.exists(path))
+
+    def test_delete(self):
+        """
+        Test we can delete an existing image in the filesystem store
+        """
+        # First add an image
+        image_id = str(uuid.uuid4())
+        file_size = 5 * units.Ki  # 5K
+        file_contents = b"*" * file_size
+        image_file = six.BytesIO(file_contents)
+
+        loc, size, checksum, metadata = self.store.add(image_id,
+                                                       image_file,
+                                                       file_size)
+        self.assertEqual(u"file1", metadata["backend"])
+
+        # Now check that we can delete it
+        uri = "file:///%s/%s" % (self.test_dir, image_id)
+        loc = location.get_location_from_uri_and_backend(uri, "file1",
+                                                         conf=self.conf)
+        self.store.delete(loc)
+
+        self.assertRaises(exceptions.NotFound, self.store.get, loc)
+
+    def test_delete_non_existing(self):
+        """
+        Test that trying to delete a file that doesn't exist
+        raises an error
+        """
+        loc = location.get_location_from_uri_and_backend(
+            "file:///tmp/glance-tests/non-existing", "file1", conf=self.conf)
+        self.assertRaises(exceptions.NotFound,
+                          self.store.delete,
+                          loc)
+
+    def test_delete_forbidden(self):
+        """
+        Tests that trying to delete a file without permissions
+        raises the correct error
+        """
+        # First add an image
+        image_id = str(uuid.uuid4())
+        file_size = 5 * units.Ki  # 5K
+        file_contents = b"*" * file_size
+        image_file = six.BytesIO(file_contents)
+
+        loc, size, checksum, metadata = self.store.add(image_id,
+                                                       image_file,
+                                                       file_size)
+        self.assertEqual(u"file1", metadata["backend"])
+
+        uri = "file:///%s/%s" % (self.test_dir, image_id)
+        loc = location.get_location_from_uri_and_backend(uri, "file1",
+                                                         conf=self.conf)
+
+        # Mock unlink to raise an OSError for lack of permissions
+        # and make sure we can't delete the image
+        with mock.patch.object(os, 'unlink') as unlink:
+            e = OSError()
+            e.errno = errno.EACCES
+            unlink.side_effect = e
+
+            self.assertRaises(exceptions.Forbidden,
+                              self.store.delete,
+                              loc)
+
+            # Make sure the image didn't get deleted
+            loc = location.get_location_from_uri_and_backend(uri, "file1",
+                                                             conf=self.conf)
+            self.store.get(loc)
+
+    def test_configure_add_with_multi_datadirs(self):
+        """
+        Tests multiple filesystem specified by filesystem_store_datadirs
+        are parsed correctly.
+        """
+        store_map = [self.useFixture(fixtures.TempDir()).path,
+                     self.useFixture(fixtures.TempDir()).path]
+        self.conf.set_override('filesystem_store_datadir',
+                               override=None,
+                               group='file1')
+        self.conf.set_override('filesystem_store_datadirs',
+                               [store_map[0] + ":100",
+                                store_map[1] + ":200"],
+                               group='file1')
+        self.store.configure_add()
+
+        expected_priority_map = {100: [store_map[0]], 200: [store_map[1]]}
+        expected_priority_list = [200, 100]
+        self.assertEqual(expected_priority_map, self.store.priority_data_map)
+        self.assertEqual(expected_priority_list, self.store.priority_list)
+
+    def test_configure_add_with_metadata_file_success(self):
+        metadata = {'id': 'asdf1234',
+                    'mountpoint': '/tmp'}
+        self._create_metadata_json_file(metadata)
+        self.store.configure_add()
+        self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA)
+
+    def test_configure_add_check_metadata_list_of_dicts_success(self):
+        metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
+                    {'id': 'xyz1234', 'mountpoint': '/tmp/'}]
+        self._create_metadata_json_file(metadata)
+        self.store.configure_add()
+        self.assertEqual(metadata, self.store.FILESYSTEM_STORE_METADATA)
+
+    def test_configure_add_check_metadata_success_list_val_for_some_key(self):
+        metadata = {'akey': ['value1', 'value2'], 'id': 'asdf1234',
+                    'mountpoint': '/tmp'}
+        self._create_metadata_json_file(metadata)
+        self.store.configure_add()
+        self.assertEqual([metadata], self.store.FILESYSTEM_STORE_METADATA)
+
+    def test_configure_add_check_metadata_bad_data(self):
+        metadata = {'akey': 10, 'id': 'asdf1234',
+                    'mountpoint': '/tmp'}  # only unicode is allowed
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+    def test_configure_add_check_metadata_with_no_id_or_mountpoint(self):
+        metadata = {'mountpoint': '/tmp'}
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+        metadata = {'id': 'asdfg1234'}
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+    def test_configure_add_check_metadata_id_or_mountpoint_is_not_string(self):
+        metadata = {'id': 10, 'mountpoint': '/tmp'}
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+        metadata = {'id': 'asdf1234', 'mountpoint': 12345}
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+    def test_configure_add_check_metadata_list_with_no_id_or_mountpoint(self):
+        metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
+                    {'mountpoint': '/pqr/images'}]
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+        metadata = [{'id': 'abcdefg'},
+                    {'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+    def test_add_check_metadata_list_id_or_mountpoint_is_not_string(self):
+        metadata = [{'id': 'abcdefg', 'mountpoint': '/xyz/images'},
+                    {'id': 1234, 'mountpoint': '/pqr/images'}]
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+        metadata = [{'id': 'abcdefg', 'mountpoint': 1234},
+                    {'id': 'xyz1234', 'mountpoint': '/pqr/images'}]
+        self._create_metadata_json_file(metadata)
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+    def test_configure_add_same_dir_multiple_times(self):
+        """
+        Tests BadStoreConfiguration exception is raised if same directory
+        is specified multiple times in filesystem_store_datadirs.
+        """
+        store_map = [self.useFixture(fixtures.TempDir()).path,
+                     self.useFixture(fixtures.TempDir()).path]
+        self.conf.clear_override('filesystem_store_datadir',
+                                 group='file1')
+        self.conf.set_override('filesystem_store_datadirs',
+                               [store_map[0] + ":100",
+                                store_map[1] + ":200",
+                                store_map[0] + ":300"],
+                               group='file1')
+        self.assertRaises(exceptions.BadStoreConfiguration,
+                          self.store.configure_add)
+
+    def test_configure_add_same_dir_multiple_times_same_priority(self):
+        """
+        Tests that no BadStoreConfiguration exception is raised if the same
+        directory is specified multiple times in filesystem_store_datadirs
+        with the same priority.
+        """
+        store_map = [self.useFixture(fixtures.TempDir()).path,
+                     self.useFixture(fixtures.TempDir()).path]
+        self.conf.set_override('filesystem_store_datadir',
+                               override=None,
+                               group='file1')
+        self.conf.set_override('filesystem_store_datadirs',
+                               [store_map[0] + ":100",
+                                store_map[1] + ":200",
+                                store_map[0] + ":100"],
+                               group='file1')
+        try:
+            self.store.configure()
+        except exceptions.BadStoreConfiguration:
+            self.fail("configure() raised BadStoreConfiguration unexpectedly!")
+
+        # Test that we can add an image via the filesystem backend
+        filesystem.ChunkedFile.CHUNKSIZE = 1024
+        expected_image_id = str(uuid.uuid4())
+        expected_file_size = 5 * units.Ki  # 5K
+        expected_file_contents = b"*" * expected_file_size
+        expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
+        expected_location = "file://%s/%s" % (store_map[1],
+                                              expected_image_id)
+        image_file = six.BytesIO(expected_file_contents)
+
+        loc, size, checksum, metadata = self.store.add(expected_image_id,
+                                                       image_file,
+                                                       expected_file_size)
+        self.assertEqual(u"file1", metadata["backend"])
+
+        self.assertEqual(expected_location, loc)
+        self.assertEqual(expected_file_size, size)
+        self.assertEqual(expected_checksum, checksum)
+
+        loc = location.get_location_from_uri_and_backend(
+            expected_location, "file1", conf=self.conf)
+        (new_image_file, new_image_size) = self.store.get(loc)
+        new_image_contents = b""
+        new_image_file_size = 0
+
+        for chunk in new_image_file:
+            new_image_file_size += len(chunk)