Implement get, add and delete for cinder store
This implements the get, add and delete methods for the cinder storage driver so that users can upload images to and download images from volumes. It is also useful for users who want to take advantage of storage features such as copy-on-write cloning to efficiently create a new volume from an image volume.

Change-Id: Ie228d1d95365e81881691af4cb8b170335a73868
Implements: blueprint cinder-store-upload-download
parent d4eb2c9ed2
commit 410747822c
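For reviewers who want to exercise the new driver end to end, a rough usage sketch follows. It is not part of this change, it only completes against a reachable Cinder deployment, and the context fields, catalog entry and identifiers are made-up example values modelled on the new unit tests.

    # Usage sketch (illustrative only, not part of this commit).
    import io
    from types import SimpleNamespace

    from oslo_config import cfg

    import glance_store
    from glance_store._drivers import cinder

    conf = cfg.ConfigOpts()
    glance_store.register_opts(conf)
    conf(args=[])

    store = cinder.Store(conf)
    store.configure()

    # The driver reads user, tenant, auth_token and service_catalog from the
    # request context (see _check_context() in the driver below); these are
    # placeholder values.
    context = SimpleNamespace(
        user='demo', tenant='demo-project', auth_token='fake-token',
        service_catalog=[{'type': 'volumev2', 'name': 'cinder',
                          'endpoints': [{'publicURL':
                                         'http://cinder.example.com:8776/v2/demo'}]}])

    # add() creates an "image-<image id>" volume and returns a cinder:// URI;
    # get() returns a (data iterator, size) tuple; delete() removes the volume.
    uri, size, checksum, _ = store.add('fake-image-id',
                                       io.BytesIO(b'image data'), 10,
                                       context=context)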
etc/rootwrap.conf (new file, 27 lines)
@@ -0,0 +1,27 @@
# Configuration for glance-rootwrap
# This file should be owned by (and only-writeable by) the root user

[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/glance/rootwrap.d,/usr/share/glance/rootwrap

# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin

# Enable logging to syslog
# Default value is False
use_syslog=False

# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog

# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
etc/rootwrap.d/glance_cinder_store.filters (new file, 29 lines)
@@ -0,0 +1,29 @@
# glance-rootwrap command filters for glance cinder store
# This file should be owned by (and only-writeable by) the root user

[Filters]
# cinder store driver
disk_chown: RegExpFilter, chown, root, chown, \d+, /dev/(?!.*/\.\.).*

# os-brick
mount: CommandFilter, mount, root
blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*
tee: CommandFilter, tee, root
mkdir: CommandFilter, mkdir, root
chown: RegExpFilter, chown, root, chown root:root /etc/pstorage/clusters/(?!.*/\.\.).*
ip: CommandFilter, ip, root
dd: CommandFilter, dd, root
iscsiadm: CommandFilter, iscsiadm, root
aoe-revalidate: CommandFilter, aoe-revalidate, root
aoe-discover: CommandFilter, aoe-discover, root
aoe-flush: CommandFilter, aoe-flush, root
read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi
multipath: CommandFilter, multipath, root
multipathd: CommandFilter, multipathd, root
systool: CommandFilter, systool, root
sg_scan: CommandFilter, sg_scan, root
cp: CommandFilter, cp, root
drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid
sds_cli: CommandFilter, /usr/local/bin/sds/sds_cli, root
vgc-cluster: CommandFilter, vgc-cluster, root
scsi_id: CommandFilter, /lib/udev/scsi_id, root
glance_store/_drivers/cinder.py
@@ -12,8 +12,15 @@

"""Storage backend for Cinder"""

import contextlib
import errno
import hashlib
import logging
import os
import socket
import time

from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import units

@@ -21,86 +28,129 @@ from glance_store import capabilities
from glance_store.common import utils
import glance_store.driver
from glance_store import exceptions
from glance_store.i18n import _
from glance_store.i18n import _, _LE, _LW, _LI
import glance_store.location
from keystoneclient import exceptions as keystone_exc
from keystoneclient import service_catalog as keystone_sc

try:
    from cinderclient import exceptions as cinder_exception
    from cinderclient import service_catalog
    from cinderclient.v2 import client as cinderclient
    from os_brick.initiator import connector
except ImportError:
    cinder_exception = None
    service_catalog = None
    cinderclient = None
    connector = None


CONF = cfg.CONF
LOG = logging.getLogger(__name__)

_CINDER_OPTS = [
    cfg.StrOpt('cinder_catalog_info',
               default='volume:cinder:publicURL',
               help='Info to match when looking for cinder in the service '
               default='volumev2::publicURL',
               help=_('Info to match when looking for cinder in the service '
                      'catalog. Format is : separated values of the form: '
                      '<service_type>:<service_name>:<endpoint_type>'),
                      '<service_type>:<service_name>:<endpoint_type>')),
    cfg.StrOpt('cinder_endpoint_template',
               help='Override service catalog lookup with template for cinder '
                    'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
    cfg.StrOpt('os_region_name',
               help='Region name of this node'),
               help=_('Override service catalog lookup with template for '
                      'cinder endpoint e.g. '
                      'http://localhost:8776/v2/%(tenant)s')),
    cfg.StrOpt('cinder_os_region_name', deprecated_name='os_region_name',
               help=_('Region name of this node. If specified, it will be '
                      'used to locate OpenStack services for stores.')),
    cfg.StrOpt('cinder_ca_certificates_file',
               help='Location of ca certificates file to use for cinder client '
                    'requests.'),
               help=_('Location of ca certificates file to use for cinder '
                      'client requests.')),
    cfg.IntOpt('cinder_http_retries',
               default=3,
               help='Number of cinderclient retries on failed http calls'),
               help=_('Number of cinderclient retries on failed http calls')),
    cfg.IntOpt('cinder_state_transition_timeout',
               default=300,
               help=_('Time period in seconds to wait for a cinder volume '
                      'transition to complete.')),
    cfg.BoolOpt('cinder_api_insecure',
                default=False,
                help='Allow to perform insecure SSL requests to cinder'),
                help=_('Allow to perform insecure SSL requests to cinder')),
    cfg.StrOpt('cinder_store_auth_address',
               default=None,
               help=_('The address where the Cinder authentication service '
                      'is listening. If <None>, the cinder endpoint in the '
                      'service catalog is used.')),
    cfg.StrOpt('cinder_store_user_name',
               default=None,
               help=_('User name to authenticate against Cinder. If <None>, '
                      'the user of current context is used.')),
    cfg.StrOpt('cinder_store_password', secret=True,
               default=None,
               help=_('Password for the user authenticating against Cinder. '
                      'If <None>, the current context auth token is used.')),
    cfg.StrOpt('cinder_store_project_name',
               default=None,
               help=_('Project name where the image is stored in Cinder. '
                      'If <None>, the project in current context is used.')),
    cfg.StrOpt('rootwrap_config',
               default='/etc/glance/rootwrap.conf',
               help=_('Path to the rootwrap configuration file to use for '
                      'running commands as root.')),
]


def get_cinderclient(conf, context):
    if conf.glance_store.cinder_endpoint_template:
        url = conf.glance_store.cinder_endpoint_template % context.to_dict()
def get_root_helper():
    return 'sudo glance-rootwrap %s' % CONF.glance_store.rootwrap_config


def is_user_overriden(conf):
    return all([conf.glance_store.get('cinder_store_' + key)
                for key in ['user_name', 'password',
                            'project_name', 'auth_address']])


def get_cinderclient(conf, context=None):
    glance_store = conf.glance_store
    user_overriden = is_user_overriden(conf)
    if user_overriden:
        username = glance_store.cinder_store_user_name
        password = glance_store.cinder_store_password
        project = glance_store.cinder_store_project_name
        url = glance_store.cinder_store_auth_address
    else:
        info = conf.glance_store.cinder_catalog_info
        username = context.user
        password = context.auth_token
        project = context.tenant

        if glance_store.cinder_endpoint_template:
            url = glance_store.cinder_endpoint_template % context.to_dict()
        else:
            info = glance_store.cinder_catalog_info
            service_type, service_name, endpoint_type = info.split(':')

        # extract the region if set in configuration
        if conf.glance_store.os_region_name:
            attr = 'region'
            filter_value = conf.glance_store.os_region_name
        else:
            attr = None
            filter_value = None

        # FIXME: the cinderclient ServiceCatalog object is mis-named.
        # It actually contains the entire access blob.
        # Only needed parts of the service catalog are passed in, see
        # nova/context.py.
        compat_catalog = {
            'access': {'serviceCatalog': context.service_catalog or []}}
        sc = service_catalog.ServiceCatalog(compat_catalog)

        url = sc.url_for(attr=attr,
                         filter_value=filter_value,
            sc = {'serviceCatalog': context.service_catalog}
            try:
                url = keystone_sc.ServiceCatalogV2(sc).url_for(
                    region_name=glance_store.cinder_os_region_name,
                    service_type=service_type,
                    service_name=service_name,
                    endpoint_type=endpoint_type)
            except keystone_exc.EndpointNotFound:
                reason = _("Failed to find Cinder from a service catalog.")
                raise exceptions.BadStoreConfiguration(store_name="cinder",
                                                       reason=reason)

    LOG.debug(_('Cinderclient connection created using URL: %s') % url)

    glance_store = conf.glance_store
    c = cinderclient.Client(context.user,
                            context.auth_token,
                            project_id=context.tenant,
    c = cinderclient.Client(username,
                            password,
                            project,
                            auth_url=url,
                            insecure=glance_store.cinder_api_insecure,
                            retries=glance_store.cinder_http_retries,
                            cacert=glance_store.cinder_ca_certificates_file)

    LOG.debug('Cinderclient connection created for user %(user)s using URL: '
              '%(url)s.', {'user': username, 'url': url})

    # noauth extracts user_id:project_id from auth_token
    c.client.auth_token = context.auth_token or '%s:%s' % (context.user,
                                                           context.tenant)
    if not user_overriden:
        c.client.auth_token = context.auth_token or '%s:%s' % (username,
                                                                project)
    c.client.management_url = url
    return c

@@ -131,34 +181,206 @@ class StoreLocation(glance_store.location.StoreLocation):
        raise exceptions.BadStoreUri(message=reason)


@contextlib.contextmanager
def temporary_chown(path):
    owner_uid = os.getuid()
    orig_uid = os.stat(path).st_uid

    if orig_uid != owner_uid:
        processutils.execute('chown', owner_uid, path,
                             run_as_root=True,
                             root_helper=get_root_helper())
    try:
        yield
    finally:
        if orig_uid != owner_uid:
            processutils.execute('chown', orig_uid, path,
                                 run_as_root=True,
                                 root_helper=get_root_helper())


class Store(glance_store.driver.Store):

    """Cinder backend store adapter."""

    _CAPABILITIES = capabilities.BitMasks.DRIVER_REUSABLE
    _CAPABILITIES = (capabilities.BitMasks.READ_RANDOM |
                     capabilities.BitMasks.WRITE_ACCESS |
                     capabilities.BitMasks.DRIVER_REUSABLE)
    OPTIONS = _CINDER_OPTS
    EXAMPLE_URL = "cinder://<VOLUME_ID>"

    def __init__(self, *args, **kargs):
        super(Store, self).__init__(*args, **kargs)
        LOG.warning(_LW("Cinder store is considered experimental. "
                        "Current deployers should be aware that the use "
                        "of it in production right now may be risky."))

    def get_schemes(self):
        return ('cinder',)

    def _check_context(self, context):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exceptions.BadStoreConfiguration`
        """

    def _check_context(self, context, require_tenant=False):
        user_overriden = is_user_overriden(self.conf)
        if user_overriden and not require_tenant:
            return
        if context is None:
            reason = _("Cinder storage requires a context.")
            raise exceptions.BadStoreConfiguration(store_name="cinder",
                                                   reason=reason)
        if context.service_catalog is None:
        if not user_overriden and context.service_catalog is None:
            reason = _("Cinder storage requires a service catalog.")
            raise exceptions.BadStoreConfiguration(store_name="cinder",
                                                   reason=reason)

    def _wait_volume_status(self, volume, status_transition, status_expected):
        max_recheck_wait = 15
        timeout = self.conf.glance_store.cinder_state_transition_timeout
        volume = volume.manager.get(volume.id)
        tries = 0
        elapsed = 0
        while volume.status == status_transition:
            if elapsed >= timeout:
                msg = (_('Timeout while waiting while volume %(volume_id)s '
                         'status is %(status)s.')
                       % {'volume_id': volume.id, 'status': status_transition})
                LOG.error(msg)
                raise exceptions.BackendException(msg)

            wait = min(0.5 * 2 ** tries, max_recheck_wait)
            time.sleep(wait)
            tries += 1
            elapsed += wait
            volume = volume.manager.get(volume.id)
        if volume.status != status_expected:
            msg = (_('The status of volume %(volume_id)s is unexpected: '
                     'status = %(status)s, expected = %(expected)s.')
                   % {'volume_id': volume.id, 'status': volume.status,
                      'expected': status_expected})
            LOG.error(msg)
            raise exceptions.BackendException(msg)
        return volume

    @contextlib.contextmanager
    def _open_cinder_volume(self, client, volume, mode):
        attach_mode = 'rw' if mode == 'wb' else 'ro'
        device = None
        root_helper = get_root_helper()
        host = socket.gethostname()
        properties = connector.get_connector_properties(root_helper, host,
                                                        False, False)

        try:
            volume.reserve(volume)
        except cinder_exception.ClientException as e:
            msg = (_('Failed to reserve volume %(volume_id)s: %(error)s')
                   % {'volume_id': volume.id, 'error': e})
            LOG.error(msg)
            raise exceptions.BackendException(msg)

        try:
            connection_info = volume.initialize_connection(volume, properties)
            conn = connector.InitiatorConnector.factory(
                connection_info['driver_volume_type'], root_helper)
            device = conn.connect_volume(connection_info['data'])
            volume.attach(None, None, attach_mode, host_name=host)
            volume = self._wait_volume_status(volume, 'attaching', 'in-use')
            LOG.debug('Opening host device "%s"', device['path'])
            with temporary_chown(device['path']), \
                    open(device['path'], mode) as f:
                yield f
        except Exception:
            LOG.exception(_LE('Exception while accessing to cinder volume '
                              '%(volume_id)s.'), {'volume_id': volume.id})
            raise
        finally:
            if volume.status == 'in-use':
                volume.begin_detaching(volume)
            elif volume.status == 'attaching':
                volume.unreserve(volume)

            if device:
                try:
                    conn.disconnect_volume(connection_info['data'], device)
                except Exception:
                    LOG.exception(_LE('Failed to disconnect volume '
                                      '%(volume_id)s.'),
                                  {'volume_id': volume.id})

            try:
                volume.terminate_connection(volume, properties)
            except Exception:
                LOG.exception(_LE('Failed to terminate connection of volume '
                                  '%(volume_id)s.'), {'volume_id': volume.id})

            try:
                client.volumes.detach(volume)
            except Exception:
                LOG.exception(_LE('Failed to detach volume %(volume_id)s.'),
                              {'volume_id': volume.id})

    def _cinder_volume_data_iterator(self, client, volume, max_size, offset=0,
                                     chunk_size=None, partial_length=None):
        chunk_size = chunk_size if chunk_size else self.READ_CHUNKSIZE
        partial = partial_length is not None
        with self._open_cinder_volume(client, volume, 'rb') as fp:
            if offset:
                fp.seek(offset)
                max_size -= offset
            while True:
                if partial:
                    size = min(chunk_size, partial_length, max_size)
                else:
                    size = min(chunk_size, max_size)

                chunk = fp.read(size)
                if chunk:
                    yield chunk
                    max_size -= len(chunk)
                    if max_size <= 0:
                        break
                    if partial:
                        partial_length -= len(chunk)
                        if partial_length <= 0:
                            break
                else:
                    break

    @capabilities.check
    def get(self, location, offset=0, chunk_size=None, context=None):
        """
        Takes a `glance_store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `glance_store.location.Location` object, supplied
                        from glance_store.location.get_location_from_uri()
        :param offset: offset to start reading
        :param chunk_size: size to read, or None to get all the image
        :param context: Request context
        :raises `glance_store.exceptions.NotFound` if image does not exist
        """

        loc = location.store_location
        self._check_context(context)
        try:
            client = get_cinderclient(self.conf, context)
            volume = client.volumes.get(loc.volume_id)
            size = int(volume.metadata.get('image_size',
                                           volume.size * units.Gi))
            iterator = self._cinder_volume_data_iterator(
                client, volume, size, offset=offset,
                chunk_size=self.READ_CHUNKSIZE, partial_length=chunk_size)
            return (iterator, chunk_size or size)
        except cinder_exception.NotFound:
            reason = _("Failed to get image size due to "
                       "volume can not be found: %s") % loc.volume_id
            LOG.error(reason)
            raise exceptions.NotFound(reason)
        except cinder_exception.ClientException as e:
            msg = (_('Failed to get image volume %(volume_id)s: %(error)s')
                   % {'volume_id': loc.volume_id, 'error': e})
            LOG.error(msg)
            raise exceptions.BackendException(msg)

    def get_size(self, location, context=None):
        """
        Takes a `glance_store.location.Location` object that indicates
@@ -178,12 +400,145 @@ class Store(glance_store.driver.Store):
                                      context).volumes.get(loc.volume_id)
            # GB unit convert to byte
            return volume.size * units.Gi
        except cinder_exception.NotFound as e:
            reason = _("Failed to get image size due to "
                       "volume can not be found: %s") % self.volume_id
            LOG.error(reason)
            raise exceptions.NotFound(reason)
        except Exception as e:
            LOG.exception(_("Failed to get image size due to "
                            "internal error: %s") % e)
        except cinder_exception.NotFound:
            raise exceptions.NotFound(image=loc.volume_id)
        except Exception:
            LOG.exception(_LE("Failed to get image size due to "
                              "internal error."))
            return 0

    @capabilities.check
    def add(self, image_id, image_file, image_size, context=None,
            verifier=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param context: The request context
        :param verifier: An object used to verify signatures for images

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance_store.exceptions.Duplicate` if the image already
                existed
        """

        self._check_context(context, require_tenant=True)
        client = get_cinderclient(self.conf, context)

        checksum = hashlib.md5()
        bytes_written = 0
        size_gb = int((image_size + units.Gi - 1) / units.Gi)
        if size_gb == 0:
            size_gb = 1
        name = "image-%s" % image_id
        owner = context.tenant
        metadata = {'glance_image_id': image_id,
                    'image_size': str(image_size),
                    'image_owner': owner}
        LOG.debug('Creating a new volume: image_size=%d size_gb=%d',
                  image_size, size_gb)
        if image_size == 0:
            LOG.info(_LI("Since image size is zero, we will be doing "
                         "resize-before-write for each GB which "
                         "will be considerably slower than normal."))
        volume = client.volumes.create(size_gb, name=name, metadata=metadata)
        volume = self._wait_volume_status(volume, 'creating', 'available')

        failed = True
        need_extend = True
        buf = None
        try:
            while need_extend:
                with self._open_cinder_volume(client, volume, 'wb') as f:
                    f.seek(bytes_written)
                    if buf:
                        f.write(buf)
                        bytes_written += len(buf)
                    while True:
                        buf = image_file.read(self.WRITE_CHUNKSIZE)
                        if not buf:
                            need_extend = False
                            break
                        checksum.update(buf)
                        if verifier:
                            verifier.update(buf)
                        if (bytes_written + len(buf) > size_gb * units.Gi and
                                image_size == 0):
                            break
                        f.write(buf)
                        bytes_written += len(buf)

                if need_extend:
                    size_gb += 1
                    LOG.debug("Extending volume %(volume_id)s to %(size)s GB.",
                              {'volume_id': volume.id, 'size': size_gb})
                    volume.extend(volume, size_gb)
                    try:
                        volume = self._wait_volume_status(volume,
                                                          'extending',
                                                          'available')
                    except exceptions.BackendException:
                        raise exceptions.StorageFull()

            failed = False
        except IOError as e:
            # Convert IOError reasons to Glance Store exceptions
            errors = {errno.EFBIG: exceptions.StorageFull(),
                      errno.ENOSPC: exceptions.StorageFull(),
                      errno.EACCES: exceptions.StorageWriteDenied()}
            raise errors.get(e.errno, e)
        finally:
            if failed:
                LOG.error(_LE("Failed to write to volume %(volume_id)s."),
                          {'volume_id': volume.id})
                try:
                    volume.delete()
                except Exception:
                    LOG.exception(_LE('Failed to delete volume '
                                      '%(volume_id)s.'),
                                  {'volume_id': volume.id})

        if image_size == 0:
            metadata.update({'image_size': str(bytes_written)})
            volume.update_all_metadata(metadata)
        volume.update_readonly_flag(volume, True)

        checksum_hex = checksum.hexdigest()

        LOG.debug("Wrote %(bytes_written)d bytes to volume %(volume_id)s "
                  "with checksum %(checksum_hex)s.",
                  {'bytes_written': bytes_written,
                   'volume_id': volume.id,
                   'checksum_hex': checksum_hex})

        return ('cinder://%s' % volume.id, bytes_written, checksum_hex, {})

    @capabilities.check
    def delete(self, location, context=None):
        """
        Takes a `glance_store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance_store.location.Location` object, supplied
                  from glance_store.location.get_location_from_uri()

        :raises NotFound if image does not exist
        :raises Forbidden if cannot delete because of permissions
        """
        loc = location.store_location
        self._check_context(context)
        try:
            volume = get_cinderclient(self.conf,
                                      context).volumes.get(loc.volume_id)
            volume.delete()
        except cinder_exception.NotFound:
            raise exceptions.NotFound(image=loc.volume_id)
        except cinder_exception.ClientException as e:
            msg = (_('Failed to delete volume %(volume_id)s: %(error)s') %
                   {'volume_id': loc.volume_id, 'error': e})
            raise exceptions.BackendException(msg)
@@ -13,11 +13,21 @@
# License for the specific language governing permissions and limitations
# under the License.

import contextlib
import errno
import hashlib
import mock
from oslo_utils import units
import os
import six
import socket
import tempfile
import time
import uuid

from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_utils import units

import glance_store
from glance_store._drivers import cinder
from glance_store import exceptions
from glance_store import location
@@ -39,6 +49,140 @@ class TestCinderStore(base.StoreBaseTest,
        self.store = cinder.Store(self.conf)
        self.store.configure()
        self.register_store_schemes(self.store, 'cinder')
        self.store.READ_CHUNKSIZE = 4096
        self.store.WRITE_CHUNKSIZE = 4096

        fake_sc = [{u'endpoints': [{u'publicURL': u'http://foo/public_url'}],
                    u'endpoints_links': [],
                    u'name': u'cinder',
                    u'type': u'volumev2'}]
        self.context = FakeObject(service_catalog=fake_sc,
                                  user='fake_user',
                                  auth_token='fake_token',
                                  tenant='fake_tenant')

    def test_get_cinderclient(self):
        cc = cinder.get_cinderclient(self.conf, self.context)
        self.assertEqual('fake_token', cc.client.auth_token)
        self.assertEqual('http://foo/public_url', cc.client.management_url)

    def test_get_cinderclient_with_user_overriden(self):
        self.config(cinder_store_user_name='test_user')
        self.config(cinder_store_password='test_password')
        self.config(cinder_store_project_name='test_project')
        self.config(cinder_store_auth_address='test_address')
        cc = cinder.get_cinderclient(self.conf, self.context)
        self.assertIsNone(cc.client.auth_token)
        self.assertEqual('test_address', cc.client.management_url)

    def test_temporary_chown(self):
        class fake_stat(object):
            st_uid = 1

        with mock.patch.object(os, 'stat', return_value=fake_stat()), \
                mock.patch.object(os, 'getuid', return_value=2), \
                mock.patch.object(processutils, 'execute') as mock_execute, \
                mock.patch.object(cinder, 'get_root_helper',
                                  return_value='sudo'):
            with cinder.temporary_chown('test'):
                pass
            expected_calls = [mock.call('chown', 2, 'test', run_as_root=True,
                                        root_helper='sudo'),
                              mock.call('chown', 1, 'test', run_as_root=True,
                                        root_helper='sudo')]
            self.assertEqual(expected_calls, mock_execute.call_args_list)

    @mock.patch.object(time, 'sleep')
    def test_wait_volume_status(self, mock_sleep):
        fake_manager = FakeObject(get=mock.Mock())
        volume_available = FakeObject(manager=fake_manager,
                                      id='fake-id',
                                      status='available')
        volume_in_use = FakeObject(manager=fake_manager,
                                   id='fake-id',
                                   status='in-use')
        fake_manager.get.side_effect = [volume_available, volume_in_use]
        self.assertEqual(volume_in_use,
                         self.store._wait_volume_status(
                             volume_available, 'available', 'in-use'))
        fake_manager.get.assert_called_with('fake-id')
        mock_sleep.assert_called_once_with(0.5)

    @mock.patch.object(time, 'sleep')
    def test_wait_volume_status_unexpected(self, mock_sleep):
        fake_manager = FakeObject(get=mock.Mock())
        volume_available = FakeObject(manager=fake_manager,
                                      id='fake-id',
                                      status='error')
        fake_manager.get.return_value = volume_available
        self.assertRaises(exceptions.BackendException,
                          self.store._wait_volume_status,
                          volume_available, 'available', 'in-use')
        fake_manager.get.assert_called_with('fake-id')

    @mock.patch.object(time, 'sleep')
    def test_wait_volume_status_timeout(self, mock_sleep):
        fake_manager = FakeObject(get=mock.Mock())
        volume_available = FakeObject(manager=fake_manager,
                                      id='fake-id',
                                      status='available')
        fake_manager.get.return_value = volume_available
        self.assertRaises(exceptions.BackendException,
                          self.store._wait_volume_status,
                          volume_available, 'available', 'in-use')
        fake_manager.get.assert_called_with('fake-id')

    def _test_open_cinder_volume(self, open_mode, attach_mode, error):
        fake_volume = mock.MagicMock(id=str(uuid.uuid4()), status='available')
        fake_volumes = FakeObject(get=lambda id: fake_volume,
                                  detach=mock.Mock())
        fake_client = FakeObject(volumes=fake_volumes)
        _, fake_dev_path = tempfile.mkstemp(dir=self.test_dir)
        fake_devinfo = {'path': fake_dev_path}
        fake_connector = FakeObject(
            connect_volume=mock.Mock(return_value=fake_devinfo),
            disconnect_volume=mock.Mock())

        @contextlib.contextmanager
        def fake_chown(path):
            yield

        def do_open():
            with self.store._open_cinder_volume(
                    fake_client, fake_volume, open_mode):
                if error:
                    raise error

        with mock.patch.object(cinder.Store,
                               '_wait_volume_status',
                               return_value=fake_volume), \
                mock.patch.object(cinder, 'temporary_chown',
                                  side_effect=fake_chown), \
                mock.patch.object(cinder, 'get_root_helper'), \
                mock.patch.object(connector, 'get_connector_properties'), \
                mock.patch.object(connector.InitiatorConnector, 'factory',
                                  return_value=fake_connector):

            if error:
                self.assertRaises(error, do_open)
            else:
                do_open()

            fake_connector.connect_volume.assert_called_once_with(mock.ANY)
            fake_connector.disconnect_volume.assert_called_once_with(
                mock.ANY, fake_devinfo)
            fake_volume.attach.assert_called_once_with(
                None, None, attach_mode, host_name=socket.gethostname())
            fake_volumes.detach.assert_called_once_with(fake_volume)

    def test_open_cinder_volume_rw(self):
        self._test_open_cinder_volume('wb', 'rw', None)

    def test_open_cinder_volume_ro(self):
        self._test_open_cinder_volume('rb', 'ro', None)

    def test_open_cinder_volume_error(self):
        self._test_open_cinder_volume('wb', 'rw', IOError)

    def test_cinder_configure_add(self):
        self.assertRaises(exceptions.BadStoreConfiguration,
@@ -50,9 +194,46 @@ class TestCinderStore(base.StoreBaseTest,

        self.store._check_context(FakeObject(service_catalog='fake'))

    def test_cinder_get(self):
        expected_size = 5 * units.Ki
        expected_file_contents = b"*" * expected_size
        volume_file = six.BytesIO(expected_file_contents)
        fake_client = FakeObject(auth_token=None, management_url=None)
        fake_volume_uuid = str(uuid.uuid4())
        fake_volume = mock.MagicMock(id=fake_volume_uuid,
                                     metadata={'image_size': expected_size},
                                     status='available')
        fake_volume.manager.get.return_value = fake_volume
        fake_volumes = FakeObject(get=lambda id: fake_volume)

        @contextlib.contextmanager
        def fake_open(client, volume, mode):
            self.assertEqual(mode, 'rb')
            yield volume_file

        with mock.patch.object(cinder, 'get_cinderclient') as mock_cc, \
                mock.patch.object(self.store, '_open_cinder_volume',
                                  side_effect=fake_open):
            mock_cc.return_value = FakeObject(client=fake_client,
                                              volumes=fake_volumes)
            uri = "cinder://%s" % fake_volume_uuid
            loc = location.get_location_from_uri(uri, conf=self.conf)
            (image_file, image_size) = self.store.get(loc,
                                                      context=self.context)

            expected_num_chunks = 2
            data = b""
            num_chunks = 0

            for chunk in image_file:
                num_chunks += 1
                data += chunk
            self.assertEqual(expected_num_chunks, num_chunks)
            self.assertEqual(expected_file_contents, data)

    def test_cinder_get_size(self):
        fake_client = FakeObject(auth_token=None, management_url=None)
        fake_volume_uuid = '12345678-9012-3455-6789-012345678901'
        fake_volume_uuid = str(uuid.uuid4())
        fake_volume = FakeObject(size=5)
        fake_volumes = {fake_volume_uuid: fake_volume}

@@ -60,31 +241,81 @@ class TestCinderStore(base.StoreBaseTest,
            mocked_cc.return_value = FakeObject(client=fake_client,
                                                volumes=fake_volumes)

            fake_sc = [{u'endpoints': [{u'publicURL': u'foo_public_url'}],
                        u'endpoints_links': [],
                        u'name': u'cinder',
                        u'type': u'volume'}]
            fake_context = FakeObject(service_catalog=fake_sc,
                                      user='fake_uer',
                                      auth_tok='fake_token',
                                      tenant='fake_tenant')
            uri = 'cinder://%s' % fake_volume_uuid
            loc = location.get_location_from_uri(uri, conf=self.conf)
            image_size = self.store.get_size(loc, context=self.context)
            self.assertEqual(image_size, fake_volume.size * units.Gi)

    def _test_cinder_add(self, fake_volume, volume_file, size_kb=5,
                         verifier=None):
        expected_image_id = str(uuid.uuid4())
        expected_size = size_kb * units.Ki
        expected_file_contents = b"*" * expected_size
        image_file = six.BytesIO(expected_file_contents)
        expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
        expected_location = 'cinder://%s' % fake_volume.id
        fake_client = FakeObject(auth_token=None, management_url=None)
        fake_volume.manager.get.return_value = fake_volume
        fake_volumes = FakeObject(create=mock.Mock(return_value=fake_volume))

        @contextlib.contextmanager
        def fake_open(client, volume, mode):
            self.assertEqual(mode, 'wb')
            yield volume_file

        with mock.patch.object(cinder, 'get_cinderclient') as mock_cc, \
                mock.patch.object(self.store, '_open_cinder_volume',
                                  side_effect=fake_open):
            mock_cc.return_value = FakeObject(client=fake_client,
                                              volumes=fake_volumes)
            loc, size, checksum, _ = self.store.add(expected_image_id,
                                                    image_file,
                                                    expected_size,
                                                    self.context,
                                                    verifier)
            self.assertEqual(expected_location, loc)
            self.assertEqual(expected_size, size)
            self.assertEqual(expected_checksum, checksum)
            fake_volumes.create.assert_called_once_with(
                1,
                name='image-%s' % expected_image_id,
                metadata={'image_owner': self.context.tenant,
                          'glance_image_id': expected_image_id,
                          'image_size': str(expected_size)})

    def test_cinder_add(self):
        fake_volume = mock.MagicMock(id=str(uuid.uuid4()), status='available')
        volume_file = six.BytesIO()
        self._test_cinder_add(fake_volume, volume_file)

    def test_cinder_add_with_verifier(self):
        fake_volume = mock.MagicMock(id=str(uuid.uuid4()), status='available')
        volume_file = six.BytesIO()
        verifier = mock.MagicMock()
        self._test_cinder_add(fake_volume, volume_file, 1, verifier)
        verifier.update.assert_called_with(b"*" * units.Ki)

    def test_cinder_add_volume_full(self):
        e = IOError()
        volume_file = six.BytesIO()
        e.errno = errno.ENOSPC
        fake_volume = mock.MagicMock(id=str(uuid.uuid4()), status='available')
        with mock.patch.object(volume_file, 'write', side_effect=e):
            self.assertRaises(exceptions.StorageFull,
                              self._test_cinder_add, fake_volume, volume_file)
        fake_volume.delete.assert_called_once_with()

    def test_cinder_delete(self):
        fake_client = FakeObject(auth_token=None, management_url=None)
        fake_volume_uuid = str(uuid.uuid4())
        fake_volume = FakeObject(delete=mock.Mock())
        fake_volumes = {fake_volume_uuid: fake_volume}

        with mock.patch.object(cinder, 'get_cinderclient') as mocked_cc:
            mocked_cc.return_value = FakeObject(client=fake_client,
                                                volumes=fake_volumes)

            uri = 'cinder://%s' % fake_volume_uuid
            loc = location.get_location_from_uri(uri, conf=self.conf)
            image_size = self.store.get_size(loc, context=fake_context)
            self.assertEqual(image_size, fake_volume.size * units.Gi)

    def test_cinder_delete_raise_error(self):
        uri = 'cinder://12345678-9012-3455-6789-012345678901'
        loc = location.get_location_from_uri(uri, conf=self.conf)
        self.assertRaises(exceptions.StoreDeleteNotSupported,
                          self.store.delete, loc)
        self.assertRaises(exceptions.StoreDeleteNotSupported,
                          glance_store.delete_from_backend, uri, {})

    def test_cinder_add_raise_error(self):
        self.assertRaises(exceptions.StoreAddDisabled,
                          self.store.add, None, None, None, None)
        self.assertRaises(exceptions.StoreAddDisabled,
                          glance_store.add_to_backend, None, None,
                          None, None, 'cinder')
            self.store.delete(loc, context=self.context)
            fake_volume.delete.assert_called_once_with()

@@ -64,17 +64,23 @@ class OptsTestCase(base.StoreBaseTest):
            'cinder_catalog_info',
            'cinder_endpoint_template',
            'cinder_http_retries',
            'cinder_os_region_name',
            'cinder_state_transition_timeout',
            'cinder_store_auth_address',
            'cinder_store_user_name',
            'cinder_store_password',
            'cinder_store_project_name',
            'default_swift_reference',
            'filesystem_store_datadir',
            'filesystem_store_datadirs',
            'filesystem_store_file_perm',
            'filesystem_store_metadata_file',
            'os_region_name',
            'rbd_store_ceph_conf',
            'rbd_store_chunk_size',
            'rbd_store_pool',
            'rbd_store_user',
            'rados_connect_timeout',
            'rootwrap_config',
            's3_store_access_key',
            's3_store_bucket',
            's3_store_bucket_url_format',
@@ -0,0 +1,8 @@
---
features:
  - Implemented image uploading, downloading and deletion for the cinder store.
    It also supports new settings to put image volumes into a specific project
    to hide them from users and to control them based on ACL of the images.
    Note that the cinder store is currently considered experimental, so
    current deployers should be aware that the use of it in production right
    now may be risky.
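The settings referred to above are the cinder_store_* options added in this change. A rough glance-api.conf sketch is below; the [glance_store] section is how glance normally consumes these options, while the stores/default_store lines and all credential values are illustrative placeholders rather than part of this change.

    [glance_store]
    stores = cinder
    default_store = cinder
    # Keep image volumes in a dedicated project, hidden from end users;
    # access is then governed by the ACL of the images in Glance.
    cinder_store_auth_address = http://keystone.example.com:5000/v2.0
    cinder_store_user_name = glance
    cinder_store_password = secret
    cinder_store_project_name = glance-images
    rootwrap_config = /etc/glance/rootwrap.conf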
@@ -50,6 +50,9 @@ glance_store.drivers =
oslo.config.opts =
    glance.store = glance_store.backend:_list_opts

console_scripts =
    glance-rootwrap = oslo_rootwrap.cmd:main

[extras]
# Dependencies for each of the optional stores
s3 =
@@ -61,6 +64,8 @@ swift =
    python-swiftclient>=2.2.0 # Apache-2.0
cinder =
    python-cinderclient>=1.3.1 # Apache-2.0
    os-brick>=1.0.0 # Apache-2.0
    oslo.rootwrap>=2.0.0 # Apache-2.0

[build_sphinx]
source-dir = doc/source