Remove S3 driver

As per the deprecation indicated in change
I139c36c2d591a55f0f91ea32efa963f80c28e444, we are on track to remove
the S3 driver from glance_store in Newton.

The process of requiring maintainers for individual drivers was
initiated in [1]. However, the S3 driver has no maintainer and was
deprecated in Mitaka.

This commit removes the S3 driver completely from the glance_store
source tree. If you still need this driver, we recommend maintaining it
outside of the glance_store source tree until you, or someone you know,
intends to support the code for the long term. In the latter case, we
hope you will use our spec process to help plan the project priorities.
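
If someone does pick up maintenance out of tree, a separately packaged
driver could re-register the same entry points that this commit drops
from glance_store's setup.cfg. A minimal sketch under that assumption is
below; the package and module names are placeholders, not an existing
project:

    # setup.cfg of a hypothetical out-of-tree package (e.g. glance-store-s3-driver).
    # The module path glance_store_s3_driver.s3 is a placeholder; the second
    # entry mirrors the deprecated alias that glance_store itself used to ship.
    [entry_points]
    glance_store.drivers =
        s3 = glance_store_s3_driver.s3:Store
        glance.store.s3.Store = glance_store_s3_driver.s3:Store

With such a package installed and 's3' listed in the enabled stores,
existing s3 image locations could keep resolving to the out-of-tree
Store class.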

DocImpact
UpgradeImpact

[1] http://lists.openstack.org/pipermail/openstack-dev/2015-December/081966.html

Change-Id: I032b0fc16400cbd2112687d38e010128be699221
Nikhil Komawar 2016-07-27 00:15:43 -04:00
parent a4b43bcd01
commit 4432e60af2
8 changed files with 17 additions and 1530 deletions


@@ -1,832 +0,0 @@
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for S3 or Storage Servers that follow the S3 Protocol"""
import hashlib
import logging
import math
import re
import tempfile
import debtcollector
import eventlet
from oslo_config import cfg
from oslo_utils import encodeutils
from oslo_utils import netutils
from oslo_utils import units
import six
from six.moves import http_client
from six.moves import urllib
import glance_store
from glance_store import capabilities
from glance_store.common import utils
import glance_store.driver
from glance_store import exceptions
from glance_store.i18n import _, _LE, _LI
import glance_store.location
LOG = logging.getLogger(__name__)
DEFAULT_LARGE_OBJECT_SIZE = 100 # 100M
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 10 # 10M
DEFAULT_LARGE_OBJECT_MIN_CHUNK_SIZE = 5 # 5M
DEFAULT_THREAD_POOLS = 10 # 10 pools
MAX_PART_NUM = 10000 # 10000 upload parts
_S3_OPTS = [
cfg.StrOpt('s3_store_host',
help=_('The host where the S3 server is listening.')),
cfg.StrOpt('s3_store_access_key', secret=True,
help=_('The S3 query token access key.')),
cfg.StrOpt('s3_store_secret_key', secret=True,
help=_('The S3 query token secret key.')),
cfg.StrOpt('s3_store_bucket',
help=_('The S3 bucket to be used to store the Glance data.')),
cfg.StrOpt('s3_store_object_buffer_dir',
help=_('The local directory where uploads will be staged '
'before they are transferred into S3.')),
cfg.BoolOpt('s3_store_create_bucket_on_put', default=False,
help=_('A boolean to determine if the S3 bucket should be '
'created on upload if it does not exist or if '
'an error should be returned to the user.')),
cfg.StrOpt('s3_store_bucket_url_format', default='subdomain',
help=_('The S3 calling format used to determine the bucket. '
'Either subdomain or path can be used.')),
cfg.IntOpt('s3_store_large_object_size',
default=DEFAULT_LARGE_OBJECT_SIZE,
help=_('What size, in MB, should S3 start chunking image files '
'and do a multipart upload in S3.')),
cfg.IntOpt('s3_store_large_object_chunk_size',
default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE,
help=_('What multipart upload part size, in MB, should S3 use '
'when uploading parts. The size must be greater than or '
'equal to 5M.')),
cfg.IntOpt('s3_store_thread_pools', default=DEFAULT_THREAD_POOLS,
help=_('The number of thread pools to perform a multipart '
'upload in S3.')),
cfg.BoolOpt('s3_store_enable_proxy', default=False,
help=_('Enable the use of a proxy.')),
cfg.StrOpt('s3_store_proxy_host',
help=_('Address or hostname for the proxy server.')),
cfg.IntOpt('s3_store_proxy_port', default=8080,
help=_('The port to use when connecting over a proxy.')),
cfg.StrOpt('s3_store_proxy_user',
help=_('The username to connect to the proxy.')),
cfg.StrOpt('s3_store_proxy_password', secret=True,
help=_('The password to use when connecting over a proxy.'))
]
class UploadPart(object):
"""
The class for the upload part
"""
def __init__(self, mpu, fp, partnum, chunks):
self.mpu = mpu
self.partnum = partnum
self.fp = fp
self.size = 0
self.chunks = chunks
self.etag = {} # partnum -> etag
self.success = True
def run_upload(part):
"""
Upload the upload part into S3 and set returned etag and size
to its part info.
"""
# We defer importing boto until now since it is an optional dependency.
import boto.exception
pnum = part.partnum
bsize = part.chunks
LOG.info(_LI("Uploading upload part in S3 partnum=%(pnum)d, "
"size=%(bsize)d, key=%(key)s, UploadId=%(UploadId)s") %
{'pnum': pnum,
'bsize': bsize,
'key': part.mpu.key_name,
'UploadId': part.mpu.id})
try:
key = part.mpu.upload_part_from_file(part.fp,
part_num=part.partnum,
size=bsize)
part.etag[part.partnum] = key.etag
part.size = key.size
except boto.exception.BotoServerError as e:
status = e.status
reason = e.reason
LOG.error(_LE("Failed to upload part in S3 partnum=%(pnum)d, "
"size=%(bsize)d, status=%(status)d, "
"reason=%(reason)s") %
{'pnum': pnum,
'bsize': bsize,
'status': status,
'reason': reason})
part.success = False
except Exception as e:
LOG.error(_LE("Failed to upload part in S3 partnum=%(pnum)d, "
"size=%(bsize)d due to internal error: %(err)s") %
{'pnum': pnum,
'bsize': bsize,
'err': e})
part.success = False
finally:
part.fp.close()
class StoreLocation(glance_store.location.StoreLocation):
"""
Class describing an S3 URI. An S3 URI can look like any of
the following:
s3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
s3+http://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
s3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
The s3+https:// URIs indicate there is an HTTPS S3 service URL
"""
def process_specs(self):
self.scheme = self.specs.get('scheme', 's3')
self.accesskey = self.specs.get('accesskey')
self.secretkey = self.specs.get('secretkey')
s3_host = self.specs.get('s3serviceurl')
self.bucket = self.specs.get('bucket')
self.key = self.specs.get('key')
if s3_host.startswith('https://'):
self.scheme = 's3+https'
s3_host = s3_host[8:].strip('/')
elif s3_host.startswith('http://'):
s3_host = s3_host[7:].strip('/')
self.s3serviceurl = s3_host.strip('/')
def _get_credstring(self):
if self.accesskey:
return '%s:%s@' % (self.accesskey, self.secretkey)
return ''
def get_uri(self):
return "%s://%s%s/%s/%s" % (
self.scheme,
self._get_credstring(),
self.s3serviceurl,
self.bucket,
self.key)
def parse_uri(self, uri):
"""
Parse URLs. This method fixes an issue where credentials specified
in the URL are interpreted differently in Python 2.6.1+ than prior
versions of Python.
Note that an Amazon AWS secret key can contain the forward slash,
which breaks urlparse miserably.
This function works around that issue.
"""
# Make sure that URIs that contain multiple schemes, such as:
# s3://accesskey:secretkey@https://s3.amazonaws.com/bucket/key-id
# are immediately rejected.
if uri.count('://') != 1:
reason = _("URI cannot contain more than one occurrence "
"of a scheme. If you have specified a URI like "
"s3://accesskey:secretkey@"
"https://s3.amazonaws.com/bucket/key-id"
", you need to change it to use the "
"s3+https:// scheme, like so: "
"s3+https://accesskey:secretkey@"
"s3.amazonaws.com/bucket/key-id")
LOG.info(_LI("Invalid store uri: %s") % reason)
raise exceptions.BadStoreUri(message=reason)
pieces = urllib.parse.urlparse(uri)
assert pieces.scheme in ('s3', 's3+http', 's3+https')
self.scheme = pieces.scheme
path = pieces.path.strip('/')
netloc = pieces.netloc.strip('/')
entire_path = (netloc + '/' + path).strip('/')
if '@' in uri:
creds, path = entire_path.split('@')
cred_parts = creds.split(':')
try:
access_key = cred_parts[0]
secret_key = cred_parts[1]
if six.PY2:
# NOTE(jaypipes): Need to encode to UTF-8 here because of a
# bug in the HMAC library that boto uses.
# See: http://bugs.python.org/issue5285
# See: http://trac.edgewall.org/ticket/8083
access_key = access_key.encode('utf-8')
secret_key = secret_key.encode('utf-8')
self.accesskey = access_key
self.secretkey = secret_key
except IndexError:
reason = _("Badly formed S3 credentials")
LOG.info(reason)
raise exceptions.BadStoreUri(message=reason)
else:
self.accesskey = None
path = entire_path
try:
path_parts = path.split('/')
self.key = path_parts.pop()
self.bucket = path_parts.pop()
if path_parts:
self.s3serviceurl = '/'.join(path_parts).strip('/')
else:
reason = _("Badly formed S3 URI. Missing s3 service URL.")
raise exceptions.BadStoreUri(message=reason)
except IndexError:
reason = _("Badly formed S3 URI")
LOG.info(reason)
raise exceptions.BadStoreUri(message=reason)
class ChunkedFile(object):
"""
We send this back to the Glance API server as
something that can iterate over a ``boto.s3.key.Key``
"""
def __init__(self, fp, chunk_size):
self.fp = fp
self.chunk_size = chunk_size
def __iter__(self):
"""Return an iterator over the image file."""
try:
if self.fp:
while True:
chunk = self.fp.read(self.chunk_size)
if chunk:
yield chunk
else:
break
finally:
self.close()
def getvalue(self):
"""Return entire string value... used in testing."""
data = b""
self.len = 0
for chunk in self:
read_bytes = len(chunk)
data = data + chunk
self.len = self.len + read_bytes
return data
def close(self):
"""Close the internal file pointer."""
if self.fp:
self.fp.close()
self.fp = None
@debtcollector.removals.remove(message=("This store has been marked as "
"deprecated due to the lack of "
"support and maintenance. Its removal "
"is tentatively scheduled for the "
"N-2 milestone."))
class Store(glance_store.driver.Store):
"""An implementation of the s3 adapter."""
_CAPABILITIES = capabilities.BitMasks.RW_ACCESS
OPTIONS = _S3_OPTS
EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>"
READ_CHUNKSIZE = 64 * units.Ki
WRITE_CHUNKSIZE = READ_CHUNKSIZE
def get_schemes(self):
return ('s3', 's3+http', 's3+https')
def configure_add(self):
"""
Configure the Store to use the stored configuration options
Any store that needs special configuration should implement
this method. If the store was not able to successfully configure
itself, it should raise `exceptions.BadStoreConfiguration`
"""
self.s3_host = self._option_get('s3_store_host')
access_key = self._option_get('s3_store_access_key')
secret_key = self._option_get('s3_store_secret_key')
if six.PY2:
# NOTE(jaypipes): Need to encode to UTF-8 here because of a
# bug in the HMAC library that boto uses.
# See: http://bugs.python.org/issue5285
# See: http://trac.edgewall.org/ticket/8083
access_key = access_key.encode('utf-8')
secret_key = secret_key.encode('utf-8')
self.access_key = access_key
self.secret_key = secret_key
self.bucket = self._option_get('s3_store_bucket')
self.scheme = 's3'
if self.s3_host.startswith('https://'):
self.scheme = 's3+https'
self.full_s3_host = self.s3_host
elif self.s3_host.startswith('http://'):
self.full_s3_host = self.s3_host
else: # Defaults http
self.full_s3_host = 'http://' + self.s3_host
buffer_dir = self.conf.glance_store.s3_store_object_buffer_dir
self.s3_store_object_buffer_dir = buffer_dir
_s3_obj_size = self._option_get('s3_store_large_object_size')
self.s3_store_large_object_size = _s3_obj_size * units.Mi
_s3_ck_size = self._option_get('s3_store_large_object_chunk_size')
_s3_ck_min = DEFAULT_LARGE_OBJECT_MIN_CHUNK_SIZE
if _s3_ck_size < _s3_ck_min:
reason = (_("s3_store_large_object_chunk_size must be at "
"least %(_s3_ck_min)d MB. "
"You configured it as %(_s3_ck_size)d MB") %
{'_s3_ck_min': _s3_ck_min,
'_s3_ck_size': _s3_ck_size})
LOG.error(reason)
raise exceptions.BadStoreConfiguration(store_name="s3",
reason=reason)
self.s3_store_large_object_chunk_size = _s3_ck_size * units.Mi
self.s3_store_thread_pools = self._option_get('s3_store_thread_pools')
if self.s3_store_thread_pools <= 0:
reason = (_("s3_store_thread_pools must be a positive "
"integer. %s") % self.s3_store_thread_pools)
LOG.error(reason)
raise exceptions.BadStoreConfiguration(store_name="s3",
reason=reason)
def _option_get(self, param):
result = getattr(self.conf.glance_store, param)
if not result:
reason = ("Could not find %(param)s in configuration "
"options." % {'param': param})
LOG.debug(reason)
raise exceptions.BadStoreConfiguration(store_name="s3",
reason=reason)
return result
def _create_connection(self, loc):
from boto.s3.connection import S3Connection
s3host, s3port = netutils.parse_host_port(loc.s3serviceurl, 80)
uformat = self.conf.glance_store.s3_store_bucket_url_format
calling_format = get_calling_format(s3_store_bucket_url_format=uformat)
use_proxy = self.conf.glance_store.s3_store_enable_proxy
if use_proxy:
proxy_host = self._option_get('s3_store_proxy_host')
proxy_user = self.conf.glance_store.s3_store_proxy_user
proxy_pass = self.conf.glance_store.s3_store_proxy_password
proxy_port = self.conf.glance_store.s3_store_proxy_port
return S3Connection(loc.accesskey, loc.secretkey,
proxy=proxy_host,
proxy_port=proxy_port,
proxy_user=proxy_user,
proxy_pass=proxy_pass,
is_secure=(loc.scheme == 's3+https'),
calling_format=calling_format)
return S3Connection(loc.accesskey, loc.secretkey,
host=s3host, port=s3port,
is_secure=(loc.scheme == 's3+https'),
calling_format=calling_format)
@capabilities.check
def get(self, location, offset=0, chunk_size=None, context=None):
"""
Takes a `glance_store.location.Location` object that indicates
where to find the image file, and returns a tuple of generator
(for reading the image file) and image_size
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
:raises: `glance_store.exceptions.NotFound` if image does not exist
"""
key = self._retrieve_key(location)
cs = self.READ_CHUNKSIZE
key.BufferSize = cs
class ChunkedIndexable(glance_store.Indexable):
def another(self):
return (self.wrapped.fp.read(cs)
if self.wrapped.fp else None)
return (ChunkedIndexable(ChunkedFile(key, cs), key.size), key.size)
def get_size(self, location, context=None):
"""
Takes a `glance_store.location.Location` object that indicates
where to find the image file, and returns the image_size (or 0
if unavailable)
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
"""
try:
key = self._retrieve_key(location)
return key.size
except Exception:
return 0
def _retrieve_key(self, location):
loc = location.store_location
s3_conn = self._create_connection(loc)
bucket_obj = get_bucket(s3_conn, loc.bucket)
key = get_key(bucket_obj, loc.key)
msg = ("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
"access_key=%(accesskey)s, bucket=%(bucket)s, "
"key=%(obj_name)s)" % ({'s3_host': loc.s3serviceurl,
'accesskey': loc.accesskey,
'bucket': loc.bucket,
'obj_name': loc.key}))
LOG.debug(msg)
return key
@capabilities.check
def add(self, image_id, image_file, image_size, context=None,
verifier=None):
"""
Stores an image file with supplied identifier to the backend
storage system and returns a tuple containing information
about the stored image.
:param image_id: The opaque image identifier
:param image_file: The image data to write, as a file-like object
:param image_size: The size of the image data to write, in bytes
:param verifier: An object used to verify signatures for images
:retval: tuple of URL in backing store, bytes written, checksum
and a dictionary with storage system specific information
:raises: `glance_store.exceptions.Duplicate` if the image already
existed
S3 writes the image data using the scheme:
s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
where:
<ACCESS_KEY> = ``s3_store_access_key``
<SECRET_KEY> = ``s3_store_secret_key``
<S3_URL> = ``s3_store_host``
<BUCKET> = ``s3_store_bucket``
<OBJ> = The id of the image being added
"""
loc = StoreLocation({'scheme': self.scheme,
'bucket': self.bucket,
'key': image_id,
's3serviceurl': self.full_s3_host,
'accesskey': self.access_key,
'secretkey': self.secret_key}, self.conf)
s3_conn = self._create_connection(loc)
create_bucket_if_missing(self.conf, self.bucket, s3_conn)
bucket_obj = get_bucket(s3_conn, self.bucket)
obj_name = str(image_id)
key = bucket_obj.get_key(obj_name)
if key and key.exists():
raise exceptions.Duplicate(message=_("S3 already has an image at "
"location %s") %
self._sanitize(loc.get_uri()))
msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
"access_key=%(access_key)s, bucket=%(bucket)s, "
"key=%(obj_name)s)") % ({'s3_host': self.s3_host,
'access_key': self.access_key,
'bucket': self.bucket,
'obj_name': obj_name})
LOG.debug(msg)
LOG.debug("Uploading an image file to S3 for %s" %
self._sanitize(loc.get_uri()))
if image_size < self.s3_store_large_object_size:
return self.add_singlepart(image_file, bucket_obj, obj_name, loc,
verifier)
else:
return self.add_multipart(image_file, image_size, bucket_obj,
obj_name, loc, verifier)
def _sanitize(self, uri):
return re.sub('//.*:.*@',
'//s3_store_secret_key:s3_store_access_key@',
uri)
def add_singlepart(self, image_file, bucket_obj, obj_name, loc, verifier):
"""
Stores an image file with a single part upload to S3 backend
:param image_file: The image data to write, as a file-like object
:param bucket_obj: S3 bucket object
:param obj_name: The object name to be stored (image identifier)
:param verifier: An object used to verify signatures for images
:param loc: The Store Location Info
"""
key = bucket_obj.new_key(obj_name)
# We need to wrap image_file, which is a reference to the
# webob.Request.body_file, with a seekable file-like object,
# otherwise the call to set_contents_from_file() will die
# with an error about Input object has no method 'seek'. We
# might want to call webob.Request.make_body_seekable(), but
# unfortunately, that method copies the entire image into
# memory and results in LP Bug #818292 occurring. So, here
we write a temporary file in as memory-efficient a manner as
# possible and then supply the temporary file to S3. We also
# take this opportunity to calculate the image checksum while
# writing the tempfile, so we don't need to call key.compute_md5()
msg = ("Writing request body file to temporary file "
"for %s") % self._sanitize(loc.get_uri())
LOG.debug(msg)
tmpdir = self.s3_store_object_buffer_dir
temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
checksum = hashlib.md5()
for chunk in utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE):
checksum.update(chunk)
if verifier:
verifier.update(chunk)
temp_file.write(chunk)
temp_file.flush()
msg = ("Uploading temporary file to S3 "
"for %s") % self._sanitize(loc.get_uri())
LOG.debug(msg)
# OK, now upload the data into the key
key.set_contents_from_file(open(temp_file.name, 'rb'),
replace=False)
size = key.size
checksum_hex = checksum.hexdigest()
LOG.debug("Wrote %(size)d bytes to S3 key named %(obj_name)s "
"with checksum %(checksum_hex)s" %
{'size': size,
'obj_name': obj_name,
'checksum_hex': checksum_hex})
return (loc.get_uri(), size, checksum_hex, {})
def add_multipart(self, image_file, image_size, bucket_obj, obj_name, loc,
verifier):
"""
Stores an image file with a multi part upload to S3 backend
:param image_file: The image data to write, as a file-like object
:param bucket_obj: S3 bucket object
:param obj_name: The object name to be stored (image identifier)
:param verifier: An object used to verify signatures for images
:param loc: The Store Location Info
"""
checksum = hashlib.md5()
pool_size = self.s3_store_thread_pools
pool = eventlet.greenpool.GreenPool(size=pool_size)
mpu = bucket_obj.initiate_multipart_upload(obj_name)
LOG.debug("Multipart initiate key=%(obj_name)s, "
"UploadId=%(UploadId)s" %
{'obj_name': obj_name,
'UploadId': mpu.id})
cstart = 0
plist = []
chunk_size = int(math.ceil(float(image_size) / MAX_PART_NUM))
write_chunk_size = max(self.s3_store_large_object_chunk_size,
chunk_size)
it = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
buffered_chunk = b''
while True:
try:
buffered_clen = len(buffered_chunk)
if buffered_clen < write_chunk_size:
# keep reading data
read_chunk = next(it)
buffered_chunk += read_chunk
continue
else:
write_chunk = buffered_chunk[:write_chunk_size]
remained_data = buffered_chunk[write_chunk_size:]
checksum.update(write_chunk)
if verifier:
verifier.update(write_chunk)
fp = six.BytesIO(write_chunk)
fp.seek(0)
part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
pool.spawn_n(run_upload, part)
plist.append(part)
cstart += 1
buffered_chunk = remained_data
except StopIteration:
if len(buffered_chunk) > 0:
# Write the last chunk data
write_chunk = buffered_chunk
checksum.update(write_chunk)
if verifier:
verifier.update(write_chunk)
fp = six.BytesIO(write_chunk)
fp.seek(0)
part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
pool.spawn_n(run_upload, part)
plist.append(part)
break
pedict = {}
total_size = 0
pool.waitall()
for part in plist:
pedict.update(part.etag)
total_size += part.size
success = True
for part in plist:
if not part.success:
success = False
if success:
# Complete
xml = get_mpu_xml(pedict)
bucket_obj.complete_multipart_upload(obj_name,
mpu.id,
xml)
checksum_hex = checksum.hexdigest()
LOG.info(_LI("Multipart complete key=%(obj_name)s "
"UploadId=%(UploadId)s "
"Wrote %(total_size)d bytes to S3 key"
"named %(obj_name)s "
"with checksum %(checksum_hex)s") %
{'obj_name': obj_name,
'UploadId': mpu.id,
'total_size': total_size,
'checksum_hex': checksum_hex})
return (loc.get_uri(), total_size, checksum_hex, {})
else:
# Abort
bucket_obj.cancel_multipart_upload(obj_name, mpu.id)
LOG.error(_LE("Some parts failed to upload to S3. "
"Aborted the object key=%(obj_name)s") %
{'obj_name': obj_name})
msg = (_("Failed to add image object to S3. "
"key=%(obj_name)s") % {'obj_name': obj_name})
raise glance_store.BackendException(msg)
@capabilities.check
def delete(self, location, context=None):
"""
Takes a `glance_store.location.Location` object that indicates
where to find the image file to delete
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
:raises: NotFound if image does not exist
"""
loc = location.store_location
s3_conn = self._create_connection(loc)
bucket_obj = get_bucket(s3_conn, loc.bucket)
# Close the key when we're through.
key = get_key(bucket_obj, loc.key)
msg = _("Deleting image object from S3 using (s3_host=%(s3_host)s, "
"access_key=%(accesskey)s, bucket=%(bucket)s, "
"key=%(obj_name)s)") % ({'s3_host': loc.s3serviceurl,
'accesskey': loc.accesskey,
'bucket': loc.bucket,
'obj_name': loc.key})
LOG.debug(msg)
return key.delete()
def get_bucket(conn, bucket_id):
"""
Get a bucket from an s3 connection
:param conn: The ``boto.s3.connection.S3Connection``
:param bucket_id: ID of the bucket to fetch
:raises: ``glance_store.exceptions.NotFound`` if bucket is not found.
"""
bucket = conn.get_bucket(bucket_id)
if not bucket:
msg = _("Could not find bucket with ID %s") % bucket_id
LOG.debug(msg)
raise exceptions.NotFound(msg)
return bucket
def get_s3_location(s3_host):
from boto.s3.connection import Location
locations = {
's3.amazonaws.com': Location.DEFAULT,
's3-eu-west-1.amazonaws.com': Location.EU,
's3-us-west-1.amazonaws.com': Location.USWest,
's3-ap-southeast-1.amazonaws.com': Location.APSoutheast,
's3-ap-northeast-1.amazonaws.com': Location.APNortheast,
}
# strip off scheme and port if present
key = re.sub('^(https?://)?(?P<host>[^:]+)(:[0-9]+)?$',
'\g<host>',
s3_host)
return locations.get(key, Location.DEFAULT)
def create_bucket_if_missing(conf, bucket, s3_conn):
"""
Creates a missing bucket in S3 if the
``s3_store_create_bucket_on_put`` option is set.
:param conf: Configuration
:param bucket: Name of bucket to create
:param s3_conn: Connection to S3
"""
from boto.exception import S3ResponseError
try:
s3_conn.get_bucket(bucket)
except S3ResponseError as e:
if e.status == http_client.NOT_FOUND:
if conf.glance_store.s3_store_create_bucket_on_put:
host = conf.glance_store.s3_store_host
location = get_s3_location(host)
try:
s3_conn.create_bucket(bucket, location=location)
except S3ResponseError as e:
msg = (_("Failed to add bucket to S3.\n"
"Got error from S3: %s.") %
encodeutils.exception_to_unicode(e))
raise glance_store.BackendException(msg)
else:
msg = (_("The bucket %(bucket)s does not exist in "
"S3. Please set the "
"s3_store_create_bucket_on_put option "
"to add bucket to S3 automatically.")
% {'bucket': bucket})
raise glance_store.BackendException(msg)
def get_key(bucket, obj):
"""
Get a key from a bucket
:param bucket: The ``boto.s3.Bucket``
:param obj: Object to get the key for
:raises: ``glance_store.exceptions.NotFound`` if key is not found.
"""
key = bucket.get_key(obj)
if not key or not key.exists():
msg = (_("Could not find key %(obj)s in bucket %(bucket)s") %
{'obj': obj, 'bucket': bucket})
LOG.debug(msg)
raise exceptions.NotFound(message=msg)
return key
def get_calling_format(bucket_format=None,
s3_store_bucket_url_format='subdomain'):
import boto.s3.connection
if bucket_format is None:
bucket_format = s3_store_bucket_url_format
if bucket_format.lower() == 'path':
return boto.s3.connection.OrdinaryCallingFormat()
else:
return boto.s3.connection.SubdomainCallingFormat()
def get_mpu_xml(pedict):
xml = '<CompleteMultipartUpload>\n'
for pnum, etag in six.iteritems(pedict):
xml += ' <Part>\n'
xml += ' <PartNumber>%d</PartNumber>\n' % pnum
xml += ' <ETag>%s</ETag>\n' % etag
xml += ' </Part>\n'
xml += '</CompleteMultipartUpload>'
return xml


@@ -34,7 +34,7 @@ _STORE_OPTS = [
cfg.ListOpt('stores', default=['file', 'http'],
help=_("List of stores enabled. Valid stores are: "
"cinder, file, http, rbd, sheepdog, swift, "
"s3, vsphere")),
"vsphere")),
cfg.StrOpt('default_store', default='file',
help=_("Default scheme to use to store image data. The "
"scheme must be registered by one of the stores "


@@ -65,8 +65,6 @@ def get_location_from_uri(uri, conf=CONF):
swift://example.com/container/obj-id
swift://user:account:pass@authurl.com/container/obj-id
swift+http://user:account:pass@authurl.com/container/obj-id
s3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
s3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
file:///var/lib/glance/images/1
cinder://volume-id
"""


@@ -84,22 +84,7 @@ class OptsTestCase(base.StoreBaseTest):
'rbd_store_user',
'rados_connect_timeout',
'rootwrap_config',
's3_store_access_key',
's3_store_bucket',
's3_store_bucket_url_format',
's3_store_create_bucket_on_put',
's3_store_host',
's3_store_object_buffer_dir',
's3_store_secret_key',
's3_store_large_object_size',
's3_store_large_object_chunk_size',
's3_store_thread_pools',
's3_store_enable_proxy',
'swift_store_expire_soon_interval',
's3_store_proxy_host',
's3_store_proxy_port',
's3_store_proxy_user',
's3_store_proxy_password',
'sheepdog_store_address',
'sheepdog_store_chunk_size',
'sheepdog_store_port',


@@ -1,675 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the S3 backend store"""
import hashlib
import uuid
import xml.etree.ElementTree
import boto.s3.connection
import mock
from oslo_utils import units
import six
from glance_store._drivers import s3
from glance_store import capabilities
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
from glance_store.tests.unit import test_store_capabilities
FAKE_UUID = str(uuid.uuid4())
FIVE_KB = 5 * units.Ki
S3_CONF = {'s3_store_access_key': 'user',
's3_store_secret_key': 'key',
's3_store_host': 'localhost:8080',
's3_store_bucket': 'glance',
's3_store_large_object_size': 5, # over 5MB is large
's3_store_large_object_chunk_size': 6, # part size is 6MB
's3_store_enable_proxy': False,
's3_store_proxy_host': None,
's3_store_proxy_port': 8080,
's3_store_proxy_user': 'user',
's3_store_proxy_password': 'foobar'}
# ensure that mpu api is used and parts are uploaded as expected
mpu_parts_uploaded = 0
class FakeKey(object):
"""
Acts like a ``boto.s3.key.Key``
"""
def __init__(self, bucket, name):
self.bucket = bucket
self.name = name
self.data = None
self.size = 0
self.etag = None
self.BufferSize = units.Ki
def close(self):
pass
def exists(self):
return self.bucket.exists(self.name)
def delete(self):
self.bucket.delete(self.name)
def compute_md5(self, data):
chunk = data.read(self.BufferSize)
checksum = hashlib.md5()
while chunk:
checksum.update(chunk)
chunk = data.read(self.BufferSize)
checksum_hex = checksum.hexdigest()
return checksum_hex, None
def set_contents_from_file(self, fp, replace=False, **kwargs):
max_read = kwargs.get('size')
self.data = six.BytesIO()
checksum = hashlib.md5()
while True:
if max_read is None or max_read > self.BufferSize:
read_size = self.BufferSize
elif max_read <= 0:
break
else:
read_size = max_read
chunk = fp.read(read_size)
if not chunk:
break
checksum.update(chunk)
self.data.write(chunk)
if max_read is not None:
max_read -= len(chunk)
self.size = self.data.tell()
# Reset the buffer to start
self.data.seek(0)
self.etag = checksum.hexdigest()
self.read = self.data.read
def get_file(self):
return self.data
class FakeMPU(object):
"""
Acts like a ``boto.s3.multipart.MultiPartUpload``
"""
def __init__(self, bucket, key_name):
self.bucket = bucket
self.id = str(uuid.uuid4())
self.key_name = key_name
self.parts = {} # pnum -> FakeKey
global mpu_parts_uploaded
mpu_parts_uploaded = 0
def upload_part_from_file(self, fp, part_num, **kwargs):
size = kwargs.get('size')
part = FakeKey(self.bucket, self.key_name)
part.set_contents_from_file(fp, size=size)
self.parts[part_num] = part
global mpu_parts_uploaded
mpu_parts_uploaded += 1
return part
def verify_xml(self, xml_body):
"""
Verify xml matches our part info.
"""
xmlparts = {}
cmuroot = xml.etree.ElementTree.fromstring(xml_body)
for cmupart in cmuroot:
pnum = int(cmupart.findtext('PartNumber'))
etag = cmupart.findtext('ETag')
xmlparts[pnum] = etag
if len(xmlparts) != len(self.parts):
return False
for pnum in xmlparts.keys():
if self.parts[pnum] is None:
return False
if xmlparts[pnum] != self.parts[pnum].etag:
return False
return True
def complete_key(self):
"""
Complete the parts into one big FakeKey
"""
key = FakeKey(self.bucket, self.key_name)
key.data = six.BytesIO()
checksum = hashlib.md5()
cnt = 0
for pnum in sorted(self.parts.keys()):
cnt += 1
part = self.parts[pnum]
chunk = part.data.read(key.BufferSize)
while chunk:
checksum.update(chunk)
key.data.write(chunk)
chunk = part.data.read(key.BufferSize)
key.size = key.data.tell()
key.data.seek(0)
key.etag = checksum.hexdigest() + '-%d' % cnt
key.read = key.data.read
return key
class FakeBucket(object):
"""Acts like a ``boto.s3.bucket.Bucket``."""
def __init__(self, name, keys=None):
self.name = name
self.keys = keys or {}
self.mpus = {} # {key_name -> {id -> FakeMPU}}
def __str__(self):
return self.name
def exists(self, key):
return key in self.keys
def delete(self, key):
del self.keys[key]
def get_key(self, key_name, **kwargs):
key = self.keys.get(key_name)
if not key:
return FakeKey(self, key_name)
return key
def new_key(self, key_name):
new_key = FakeKey(self, key_name)
self.keys[key_name] = new_key
return new_key
def initiate_multipart_upload(self, key_name, **kwargs):
mpu = FakeMPU(self, key_name)
if key_name not in self.mpus:
self.mpus[key_name] = {}
self.mpus[key_name][mpu.id] = mpu
return mpu
def cancel_multipart_upload(self, key_name, upload_id, **kwargs):
if key_name in self.mpus:
if upload_id in self.mpus[key_name]:
del self.mpus[key_name][upload_id]
if not self.mpus[key_name]:
del self.mpus[key_name]
def complete_multipart_upload(self, key_name, upload_id,
xml_body, **kwargs):
if key_name in self.mpus:
if upload_id in self.mpus[key_name]:
mpu = self.mpus[key_name][upload_id]
if mpu.verify_xml(xml_body):
key = mpu.complete_key()
self.cancel_multipart_upload(key_name, upload_id)
self.keys[key_name] = key
cmpu = mock.Mock()
cmpu.bucket = self
cmpu.bucket_name = self.name
cmpu.key_name = key_name
cmpu.etag = key.etag
return cmpu
return None # tho raising an exception might be better
def fakers():
fixture_buckets = {'glance': FakeBucket('glance')}
b = fixture_buckets['glance']
k = b.new_key(FAKE_UUID)
k.set_contents_from_file(six.BytesIO(b"*" * FIVE_KB))
def fake_connection_constructor(self, *args, **kwargs):
host = kwargs.get('host')
if host.startswith('http://') or host.startswith('https://'):
raise exceptions.UnsupportedBackend(host)
def fake_get_bucket(bucket_id):
bucket = fixture_buckets.get(bucket_id)
if not bucket:
bucket = FakeBucket(bucket_id)
return bucket
return fake_connection_constructor, fake_get_bucket
def format_s3_location(user, key, authurl, bucket, obj):
"""
Helper method that returns an S3 store URI given
the component pieces.
"""
scheme = 's3'
if authurl.startswith('https://'):
scheme = 's3+https'
authurl = authurl[8:]
elif authurl.startswith('http://'):
authurl = authurl[7:]
authurl = authurl.strip('/')
return "%s://%s:%s@%s/%s/%s" % (scheme, user, key, authurl,
bucket, obj)
class TestStore(base.StoreBaseTest,
test_store_capabilities.TestStoreCapabilitiesChecking):
def setUp(self):
"""Establish a clean test environment."""
super(TestStore, self).setUp()
self.store = s3.Store(self.conf)
self.config(**S3_CONF)
self.store.configure()
self.register_store_schemes(self.store, 's3')
fctor, fbucket = fakers()
init = mock.patch.object(boto.s3.connection.S3Connection,
'__init__').start()
init.side_effect = fctor
self.addCleanup(init.stop)
bucket = mock.patch.object(boto.s3.connection.S3Connection,
'get_bucket').start()
bucket.side_effect = fbucket
self.addCleanup(bucket.stop)
def test_get(self):
"""Test a "normal" retrieval of an image in chunks."""
loc = location.get_location_from_uri(
"s3://user:key@auth_address/glance/%s" % FAKE_UUID,
conf=self.conf)
(image_s3, image_size) = self.store.get(loc)
self.assertEqual(FIVE_KB, image_size)
expected_data = b"*" * FIVE_KB
data = b""
for chunk in image_s3:
data += chunk
self.assertEqual(expected_data, data)
def test_partial_get(self):
"""Test a "normal" retrieval of an image in chunks."""
loc = location.get_location_from_uri(
"s3://user:key@auth_address/glance/%s" % FAKE_UUID,
conf=self.conf)
self.assertRaises(exceptions.StoreRandomGetNotSupported,
self.store.get, loc, chunk_size=1)
def test_get_calling_format_path(self):
"""Test a "normal" retrieval of an image in chunks."""
self.config(s3_store_bucket_url_format='path')
def fake_S3Connection_init(*args, **kwargs):
expected_cls = boto.s3.connection.OrdinaryCallingFormat
self.assertIsInstance(kwargs.get('calling_format'), expected_cls)
s3_connection = boto.s3.connection.S3Connection
with mock.patch.object(s3_connection, '__init__') as m:
m.side_effect = fake_S3Connection_init
loc = location.get_location_from_uri(
"s3://user:key@auth_address/glance/%s" % FAKE_UUID,
conf=self.conf)
(image_s3, image_size) = self.store.get(loc)
def test_get_calling_format_default(self):
"""Test a "normal" retrieval of an image in chunks."""
def fake_S3Connection_init(*args, **kwargs):
expected_cls = boto.s3.connection.SubdomainCallingFormat
self.assertIsInstance(kwargs.get('calling_format'), expected_cls)
s3_connection = boto.s3.connection.S3Connection
with mock.patch.object(s3_connection, '__init__') as m:
m.side_effect = fake_S3Connection_init
loc = location.get_location_from_uri(
"s3://user:key@auth_address/glance/%s" % FAKE_UUID,
conf=self.conf)
(image_s3, image_size) = self.store.get(loc)
def test_get_non_existing(self):
"""
Test that trying to retrieve an s3 image that doesn't exist
raises an error
"""
uri = "s3://user:key@auth_address/badbucket/%s" % FAKE_UUID
loc = location.get_location_from_uri(uri, conf=self.conf)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
uri = "s3://user:key@auth_address/glance/noexist"
loc = location.get_location_from_uri(uri, conf=self.conf)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_add(self):
"""Test that we can add an image via the s3 backend."""
expected_image_id = str(uuid.uuid4())
expected_s3_size = FIVE_KB
expected_s3_contents = b"*" * expected_s3_size
expected_checksum = hashlib.md5(expected_s3_contents).hexdigest()
expected_location = format_s3_location(
S3_CONF['s3_store_access_key'],
S3_CONF['s3_store_secret_key'],
S3_CONF['s3_store_host'],
S3_CONF['s3_store_bucket'],
expected_image_id)
image_s3 = six.BytesIO(expected_s3_contents)
loc, size, checksum, _ = self.store.add(expected_image_id,
image_s3,
expected_s3_size)
self.assertEqual(expected_location, loc)
self.assertEqual(expected_s3_size, size)
self.assertEqual(expected_checksum, checksum)
loc = location.get_location_from_uri(expected_location,
conf=self.conf)
(new_image_s3, new_image_size) = self.store.get(loc)
new_image_contents = six.BytesIO()
for chunk in new_image_s3:
new_image_contents.write(chunk)
new_image_s3_size = new_image_contents.tell()
self.assertEqual(expected_s3_contents, new_image_contents.getvalue())
self.assertEqual(expected_s3_size, new_image_s3_size)
def test_add_size_variations(self):
"""
Test adding images of various sizes, exercising both the S3
single upload and multipart upload APIs. We've configured
the large upload threshold to 5MB and the part size to 6MB.
"""
variations = [(FIVE_KB, 0), # simple put (5KB < 5MB)
(5242880, 1), # 1 part (5MB <= 5MB < 6MB)
(6291456, 1), # 1 part exact (5MB <= 6MB <= 6MB)
(7340032, 2)] # 2 parts (6MB < 7MB <= 12MB)
for (vsize, vcnt) in variations:
expected_image_id = str(uuid.uuid4())
expected_s3_size = vsize
expected_s3_contents = b"12345678" * (expected_s3_size // 8)
expected_chksum = hashlib.md5(expected_s3_contents).hexdigest()
expected_location = format_s3_location(
S3_CONF['s3_store_access_key'],
S3_CONF['s3_store_secret_key'],
S3_CONF['s3_store_host'],
S3_CONF['s3_store_bucket'],
expected_image_id)
image_s3 = six.BytesIO(expected_s3_contents)
# add image
loc, size, chksum, _ = self.store.add(expected_image_id,
image_s3,
expected_s3_size)
self.assertEqual(expected_location, loc)
self.assertEqual(expected_s3_size, size)
self.assertEqual(expected_chksum, chksum)
self.assertEqual(vcnt, mpu_parts_uploaded)
# get image
loc = location.get_location_from_uri(expected_location,
conf=self.conf)
(new_image_s3, new_image_s3_size) = self.store.get(loc)
new_image_contents = six.BytesIO()
for chunk in new_image_s3:
new_image_contents.write(chunk)
new_image_size = new_image_contents.tell()
self.assertEqual(expected_s3_size, new_image_s3_size)
self.assertEqual(expected_s3_size, new_image_size)
self.assertEqual(expected_s3_contents,
new_image_contents.getvalue())
def test_add_with_verifier(self):
"""
Assert 'verifier.update' is called when verifier is provided, both
for multipart and for single uploads.
"""
one_part_max = 6 * units.Mi
variations = [(FIVE_KB, 1), # simple put (5KB < 5MB)
(5 * units.Mi, 1), # 1 part (5MB <= 5MB < 6MB)
(one_part_max, 1), # 1 part exact (5MB <= 6MB <= 6MB)
(one_part_max + one_part_max // 2, 2), # 1.5 parts
(one_part_max * 2, 2)] # 2 parts exact
for (s3_size, update_calls) in variations:
image_id = str(uuid.uuid4())
base_byte = b"12345678"
s3_contents = base_byte * (s3_size // 8)
image_s3 = six.BytesIO(s3_contents)
verifier = mock.MagicMock(name='mock_verifier')
# add image
self.store.add(image_id, image_s3, s3_size, verifier=verifier)
# confirm update called expected number of times
self.assertEqual(verifier.update.call_count, update_calls)
if (update_calls <= 1):
# the contents weren't broken into pieces
verifier.update.assert_called_with(s3_contents)
else:
# most calls to update should be with the max one part size
s3_contents_max_part = base_byte * (one_part_max // 8)
# the last call to verify.update should be with what's left
s3_contents_last_part = base_byte * ((s3_size - one_part_max)
// 8)
# confirm all expected calls to update have occurred
calls = [mock.call(s3_contents_max_part),
mock.call(s3_contents_last_part)]
verifier.update.assert_has_calls(calls)
def test_add_host_variations(self):
"""
Test that having http(s):// in the s3serviceurl in config
options works as expected.
"""
variations = ['http://localhost:80',
'http://localhost',
'https://localhost',
'https://localhost:8080',
'localhost',
'localhost:8080']
for variation in variations:
expected_image_id = str(uuid.uuid4())
expected_s3_size = FIVE_KB
expected_s3_contents = b"*" * expected_s3_size
expected_checksum = hashlib.md5(expected_s3_contents).hexdigest()
new_conf = S3_CONF.copy()
new_conf['s3_store_host'] = variation
expected_location = format_s3_location(
new_conf['s3_store_access_key'],
new_conf['s3_store_secret_key'],
new_conf['s3_store_host'],
new_conf['s3_store_bucket'],
expected_image_id)
image_s3 = six.BytesIO(expected_s3_contents)
self.config(**new_conf)
self.store = s3.Store(self.conf)
self.store.configure()
loc, size, checksum, _ = self.store.add(expected_image_id,
image_s3,
expected_s3_size)
self.assertEqual(expected_location, loc)
self.assertEqual(expected_s3_size, size)
self.assertEqual(expected_checksum, checksum)
loc = location.get_location_from_uri(expected_location,
conf=self.conf)
(new_image_s3, new_image_size) = self.store.get(loc)
new_image_contents = new_image_s3.getvalue()
new_image_s3_size = len(new_image_s3)
self.assertEqual(expected_s3_contents, new_image_contents)
self.assertEqual(expected_s3_size, new_image_s3_size)
def test_add_already_existing(self):
"""
Tests that adding an image with an existing identifier
raises an appropriate exception
"""
image_s3 = six.BytesIO(b"nevergonnamakeit")
self.assertRaises(exceptions.Duplicate,
self.store.add,
FAKE_UUID, image_s3, 0)
def _option_required(self, key):
conf = S3_CONF.copy()
conf[key] = None
try:
self.config(**conf)
self.store = s3.Store(self.conf)
self.store.configure()
return not self.store.is_capable(
capabilities.BitMasks.WRITE_ACCESS)
except Exception:
return False
def test_no_access_key(self):
"""
Tests that options without an access key disable the add method
"""
self.assertTrue(self._option_required('s3_store_access_key'))
def test_no_secret_key(self):
"""
Tests that options without a secret key disable the add method
"""
self.assertTrue(self._option_required('s3_store_secret_key'))
def test_no_host(self):
"""
Tests that options without a host disable the add method
"""
self.assertTrue(self._option_required('s3_store_host'))
def test_delete(self):
"""
Test we can delete an existing image in the s3 store
"""
uri = "s3://user:key@auth_address/glance/%s" % FAKE_UUID
loc = location.get_location_from_uri(uri, conf=self.conf)
self.store.delete(loc)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_delete_non_existing(self):
"""
Test that trying to delete an s3 image that doesn't exist
raises an error
"""
uri = "s3://user:key@auth_address/glance/noexist"
loc = location.get_location_from_uri(uri, conf=self.conf)
self.assertRaises(exceptions.NotFound, self.store.delete, loc)
def _do_test_get_s3_location(self, host, loc):
self.assertEqual(s3.get_s3_location(host), loc)
self.assertEqual(s3.get_s3_location(host + ':80'), loc)
self.assertEqual(s3.get_s3_location('http://' + host), loc)
self.assertEqual(s3.get_s3_location('http://' + host + ':80'), loc)
self.assertEqual(s3.get_s3_location('https://' + host), loc)
self.assertEqual(s3.get_s3_location('https://' + host + ':80'), loc)
def test_get_s3_good_location(self):
"""
Test that the s3 location can be derived from the host
"""
good_locations = [
('s3.amazonaws.com', ''),
('s3-eu-west-1.amazonaws.com', 'EU'),
('s3-us-west-1.amazonaws.com', 'us-west-1'),
('s3-ap-southeast-1.amazonaws.com', 'ap-southeast-1'),
('s3-ap-northeast-1.amazonaws.com', 'ap-northeast-1'),
]
for (url, expected) in good_locations:
self._do_test_get_s3_location(url, expected)
def test_get_s3_bad_location(self):
"""
Test that the s3 location cannot be derived from an unexpected host
"""
bad_locations = [
('', ''),
('s3.amazon.co.uk', ''),
('s3-govcloud.amazonaws.com', ''),
('cloudfiles.rackspace.com', ''),
]
for (url, expected) in bad_locations:
self._do_test_get_s3_location(url, expected)
def test_calling_format_path(self):
cf = s3.get_calling_format(s3_store_bucket_url_format='path')
self.assertIsInstance(cf, boto.s3.connection.OrdinaryCallingFormat)
def test_calling_format_subdomain(self):
cf = s3.get_calling_format(s3_store_bucket_url_format='subdomain')
self.assertIsInstance(cf, boto.s3.connection.SubdomainCallingFormat)
def test_calling_format_default(self):
self.assertIsInstance(s3.get_calling_format(),
boto.s3.connection.SubdomainCallingFormat)
def test_image_get_with_proxy_without_host(self):
"""Test s3 backend with unconfigured proxy connection."""
self.config(s3_store_enable_proxy=True)
loc = location.get_location_from_uri(
"s3://user:key@auth_address/glance/%s" % FAKE_UUID,
conf=self.conf)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.get, loc)
def test_image_get_with_proxy(self):
"""Test s3 get with proxy connection."""
self.config(s3_store_enable_proxy=True)
proxy_host = '127.0.0.1'
self.config(s3_store_proxy_host=proxy_host)
with mock.patch.object(boto.s3.connection, 'S3Connection') as m:
cf = s3.get_calling_format(bucket_format=None,
s3_store_bucket_url_format='subdomain')
with mock.patch.object(s3, 'get_calling_format') as gcf:
gcf.return_value = cf
loc = location.get_location_from_uri(
"s3://user:key@auth_address/glance/%s" % FAKE_UUID,
conf=self.conf)
self.store.get(loc)
accesskey = S3_CONF['s3_store_access_key']
secretkey = S3_CONF['s3_store_secret_key']
proxy_port = S3_CONF['s3_store_proxy_port']
proxy_pass = S3_CONF['s3_store_proxy_password']
proxy_user = S3_CONF['s3_store_proxy_user']
m.assert_called_with(accesskey, secretkey,
calling_format=cf,
is_secure=False,
proxy=proxy_host,
proxy_pass=proxy_pass,
proxy_port=proxy_port,
proxy_user=proxy_user)


@@ -0,0 +1,15 @@
---
prelude: >
glance_store._drivers.s3 removed from tree.
upgrade:
- The S3 driver has been removed completely from the
glance_store source tree. Any environment that still
uses the S3 driver and has not been migrated will stop
working after the upgrade. We recommend that you use a
different storage backend that is still supported by
Glance. The standard deprecation path has been followed
for this removal. The process requiring store driver
maintainers was initiated at
http://lists.openstack.org/pipermail/openstack-dev/2015-December/081966.html .
Since the S3 driver did not get a maintainer, it was
decided to remove it.
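
For operators acting on this recommendation, a minimal sketch of the
relevant glance_store configuration after moving images off S3 is shown
below, assuming the file backend as the replacement; the section name,
values and data directory are illustrative, and any other supported
store works the same way:

    [glance_store]
    # 's3' is no longer a valid entry in either option
    stores = file,http
    default_store = file
    filesystem_store_datadir = /var/lib/glance/images/

Before upgrading, it is also worth confirming that no image locations
still point at s3://, s3+http:// or s3+https:// URIs, since those
locations become unusable once the driver is gone.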


@@ -29,7 +29,6 @@ glance_store.drivers =
http = glance_store._drivers.http:Store
swift = glance_store._drivers.swift:Store
rbd = glance_store._drivers.rbd:Store
s3 = glance_store._drivers.s3:Store
sheepdog = glance_store._drivers.sheepdog:Store
cinder = glance_store._drivers.cinder:Store
vmware = glance_store._drivers.vmware_datastore:Store
@@ -42,7 +41,6 @@ glance_store.drivers =
glance.store.http.Store = glance_store._drivers.http:Store
glance.store.swift.Store = glance_store._drivers.swift:Store
glance.store.rbd.Store = glance_store._drivers.rbd:Store
glance.store.s3.Store = glance_store._drivers.s3:Store
glance.store.sheepdog.Store = glance_store._drivers.sheepdog:Store
glance.store.cinder.Store = glance_store._drivers.cinder:Store
glance.store.vmware_datastore.Store = glance_store._drivers.vmware_datastore:Store
@@ -55,8 +53,6 @@ console_scripts =
[extras]
# Dependencies for each of the optional stores
s3 =
boto>=2.32.1 # MIT
vmware =
oslo.vmware>=2.11.0 # Apache-2.0
swift =


@@ -9,7 +9,7 @@ usedevelop = True
install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} --allow-all-external --allow-insecure netaddr -U {opts} {packages}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
.[s3,vmware,swift,cinder]
.[vmware,swift,cinder]
passenv = OS_TEST_*
commands = ostestr --slowest {posargs}