Merge "Delete even more glance code"
@@ -1,46 +0,0 @@
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import six

from daisy.common import exception


class Showlevel(object):
    # None - do not show additional properties and blobs with locations;
    # Basic - show all artifact fields except dependencies;
    # Direct - show all artifact fields with only direct dependencies;
    # Transitive - show all artifact fields with all dependencies.
    NONE = 0
    BASIC = 1
    DIRECT = 2
    TRANSITIVE = 3

    _level_map = {'none': NONE, 'basic': BASIC, 'direct': DIRECT,
                  'transitive': TRANSITIVE}
    _inverted_level_map = {v: k for k, v in six.iteritems(_level_map)}

    @staticmethod
    def to_str(n):
        try:
            return Showlevel._inverted_level_map[n]
        except KeyError:
            raise exception.ArtifactUnsupportedShowLevel()

    @staticmethod
    def from_str(str_value):
        try:
            return Showlevel._level_map[str_value]
        except KeyError:
            raise exception.ArtifactUnsupportedShowLevel()
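
A minimal usage sketch of the deleted Showlevel helper (not part of this commit; it assumes the module above is importable and that the REST layer maps the exception to a client error):

    level = Showlevel.from_str('direct')        # -> Showlevel.DIRECT == 2
    assert Showlevel.to_str(level) == 'direct'  # round-trips back to the name
    try:
        Showlevel.from_str('bogus')             # unknown names are rejected
    except exception.ArtifactUnsupportedShowLevel:
        pass                                    # e.g. translated to 400 Bad Request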
@@ -159,11 +159,6 @@ common_opts = [
     cfg.IntOpt('pydev_worker_debug_port', default=5678,
                help=_('The port on which a pydev process is listening for '
                       'connections.')),
-    cfg.StrOpt('metadata_encryption_key', secret=True,
-               help=_('AES key for encrypting store \'location\' metadata. '
-                      'This includes, if used, Swift or S3 credentials. '
-                      'Should be set to a random string of length 16, 24 or '
-                      '32 bytes')),
     cfg.StrOpt('digest_algorithm', default='sha1',
                help=_('Digest algorithm which will be used for digital '
                       'signature; the default is sha1 the default in Kilo '
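
The hunk above drops the metadata_encryption_key option from common_opts. For context, a small sketch of how such oslo.config options are registered and read (standard oslo.config usage; the option shown is copied from the context lines above):

    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.register_opts([
        cfg.IntOpt('pydev_worker_debug_port', default=5678,
                   help='The port on which a pydev process is listening '
                        'for connections.'),
    ])

    print(CONF.pydev_worker_debug_port)  # 5678 unless overridden in a config file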
@@ -1,158 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

import pep8

"""
Guidelines for writing new hacking checks

 - Use only for Glance-specific tests. OpenStack general tests
   should be submitted to the common 'hacking' module.
 - Pick numbers in the range G3xx. Find the current test with
   the highest allocated number and then pick the next value.
   If nova has an N3xx code for that test, use the same number.
 - Keep the test method code in the source file ordered based
   on the G3xx value.
 - List the new rule in the top level HACKING.rst file
 - Add test cases for each new rule to glance/tests/test_hacking.py

"""


asse_trueinst_re = re.compile(
    r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
    "(\w|\.|\'|\"|\[|\])+\)\)")
asse_equal_type_re = re.compile(
    r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
    "(\w|\.|\'|\"|\[|\])+\)")
asse_equal_end_with_none_re = re.compile(
    r"(.)*assertEqual\((\w|\.|\'|\"|\[|\])+, None\)")
asse_equal_start_with_none_re = re.compile(
    r"(.)*assertEqual\(None, (\w|\.|\'|\"|\[|\])+\)")
unicode_func_re = re.compile(r"(\s|\W|^)unicode\(")
log_translation = re.compile(
    r"(.)*LOG\.(audit)\(\s*('|\")")
log_translation_info = re.compile(
    r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_exception = re.compile(
    r"(.)*LOG\.(exception)\(\s*(_\(|'|\")")
log_translation_error = re.compile(
    r"(.)*LOG\.(error)\(\s*(_\(|'|\")")
log_translation_critical = re.compile(
    r"(.)*LOG\.(critical)\(\s*(_\(|'|\")")
log_translation_warning = re.compile(
    r"(.)*LOG\.(warning)\(\s*(_\(|'|\")")


def assert_true_instance(logical_line):
    """Check for assertTrue(isinstance(a, b)) sentences

    G316
    """
    if asse_trueinst_re.match(logical_line):
        yield (0, "G316: assertTrue(isinstance(a, b)) sentences not allowed")


def assert_equal_type(logical_line):
    """Check for assertEqual(type(A), B) sentences

    G317
    """
    if asse_equal_type_re.match(logical_line):
        yield (0, "G317: assertEqual(type(A), B) sentences not allowed")


def assert_equal_none(logical_line):
    """Check for assertEqual(A, None) or assertEqual(None, A) sentences

    G318
    """
    res = (asse_equal_start_with_none_re.match(logical_line) or
           asse_equal_end_with_none_re.match(logical_line))
    if res:
        yield (0, "G318: assertEqual(A, None) or assertEqual(None, A) "
               "sentences not allowed")


def no_translate_debug_logs(logical_line, filename):
    dirs = [
        "glance/api",
        "glance/cmd",
        "glance/common",
        "glance/db",
        "glance/domain",
        "glance/image_cache",
        "glance/quota",
        "glance/registry",
        "glance/store",
        "glance/tests",
    ]

    if max([name in filename for name in dirs]):
        if logical_line.startswith("LOG.debug(_("):
            yield(0, "G319: Don't translate debug level logs")


def no_direct_use_of_unicode_function(logical_line):
    """Check for use of unicode() builtin

    G320
    """
    if unicode_func_re.match(logical_line):
        yield(0, "G320: Use six.text_type() instead of unicode()")


def validate_log_translations(logical_line, physical_line, filename):
    # Translations are not required in the test directory
    if pep8.noqa(physical_line):
        return
    msg = "G322: LOG.info messages require translations `_LI()`!"
    if log_translation_info.match(logical_line):
        yield (0, msg)
    msg = "G323: LOG.exception messages require translations `_LE()`!"
    if log_translation_exception.match(logical_line):
        yield (0, msg)
    msg = "G324: LOG.error messages require translations `_LE()`!"
    if log_translation_error.match(logical_line):
        yield (0, msg)
    msg = "G325: LOG.critical messages require translations `_LC()`!"
    if log_translation_critical.match(logical_line):
        yield (0, msg)
    msg = "G326: LOG.warning messages require translations `_LW()`!"
    if log_translation_warning.match(logical_line):
        yield (0, msg)
    msg = "G321: Log messages require translations!"
    if log_translation.match(logical_line):
        yield (0, msg)


def check_no_contextlib_nested(logical_line):
    msg = ("G327: contextlib.nested is deprecated since Python 2.7. See "
           "https://docs.python.org/2/library/contextlib.html#contextlib."
           "nested for more information.")
    if ("with contextlib.nested(" in logical_line or
            "with nested(" in logical_line):
        yield(0, msg)


def factory(register):
    register(assert_true_instance)
    register(assert_equal_type)
    register(assert_equal_none)
    register(no_translate_debug_logs)
    register(no_direct_use_of_unicode_function)
    register(validate_log_translations)
    register(check_no_contextlib_nested)
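
A quick sketch of how one of the deleted checks behaves when driven directly, the way the hacking/flake8 plugin machinery calls it per logical line (illustrative, not part of this commit):

    line = "self.assertEqual(None, result)"
    errors = list(assert_equal_none(line))   # each check is a generator
    # errors == [(0, "G318: assertEqual(A, None) or assertEqual(None, A) "
    #                "sentences not allowed")]
    assert errors and errors[0][1].startswith("G318")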
@@ -1,341 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
LRU Cache for Image Data
"""

import hashlib

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units

from daisy.common import exception
from daisy.common import utils
from daisy import i18n

LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW

image_cache_opts = [
    cfg.StrOpt('image_cache_driver', default='sqlite',
               help=_('The driver to use for image cache management.')),
    cfg.IntOpt('image_cache_max_size', default=10 * units.Gi,  # 10 GB
               help=_('The upper limit (the maximum size of accumulated '
                      'cache in bytes) beyond which the pruner, if running, '
                      'starts cleaning the image cache.')),
    cfg.IntOpt('image_cache_stall_time', default=86400,  # 24 hours
               help=_('The amount of time to let an image remain in the '
                      'cache without being accessed.')),
    cfg.StrOpt('image_cache_dir',
               help=_('Base directory that the Image Cache uses.')),
]

CONF = cfg.CONF
CONF.register_opts(image_cache_opts)


class ImageCache(object):

    """Provides an LRU cache for image data."""

    def __init__(self):
        self.init_driver()

    def init_driver(self):
        """
        Create the driver for the cache
        """
        driver_name = CONF.image_cache_driver
        driver_module = (__name__ + '.drivers.' + driver_name + '.Driver')
        try:
            self.driver_class = importutils.import_class(driver_module)
            LOG.info(_LI("Image cache loaded driver '%s'.") %
                     driver_name)
        except ImportError as import_err:
            LOG.warn(_LW("Image cache driver "
                         "'%(driver_name)s' failed to load. "
                         "Got error: '%(import_err)s."),
                     {'driver_name': driver_name,
                      'import_err': import_err})
            driver_module = __name__ + '.drivers.sqlite.Driver'
            LOG.info(_LI("Defaulting to SQLite driver."))
            self.driver_class = importutils.import_class(driver_module)
        self.configure_driver()

    def configure_driver(self):
        """
        Configure the driver for the cache and, if it fails to configure,
        fall back to using the SQLite driver which has no odd dependencies
        """
        try:
            self.driver = self.driver_class()
            self.driver.configure()
        except exception.BadDriverConfiguration as config_err:
            driver_module = self.driver_class.__module__
            LOG.warn(_LW("Image cache driver "
                         "'%(driver_module)s' failed to configure. "
                         "Got error: '%(config_err)s"),
                     {'driver_module': driver_module,
                      'config_err': config_err})
            LOG.info(_LI("Defaulting to SQLite driver."))
            default_module = __name__ + '.drivers.sqlite.Driver'
            self.driver_class = importutils.import_class(default_module)
            self.driver = self.driver_class()
            self.driver.configure()

    def is_cached(self, image_id):
        """
        Returns True if the image with the supplied ID has its image
        file cached.

        :param image_id: Image ID
        """
        return self.driver.is_cached(image_id)

    def is_queued(self, image_id):
        """
        Returns True if the image identifier is in our cache queue.

        :param image_id: Image ID
        """
        return self.driver.is_queued(image_id)

    def get_cache_size(self):
        """
        Returns the total size in bytes of the image cache.
        """
        return self.driver.get_cache_size()

    def get_hit_count(self, image_id):
        """
        Return the number of hits that an image has

        :param image_id: Opaque image identifier
        """
        return self.driver.get_hit_count(image_id)

    def get_cached_images(self):
        """
        Returns a list of records about cached images.
        """
        return self.driver.get_cached_images()

    def delete_all_cached_images(self):
        """
        Removes all cached image files and any attributes about the images
        and returns the number of cached image files that were deleted.
        """
        return self.driver.delete_all_cached_images()

    def delete_cached_image(self, image_id):
        """
        Removes a specific cached image file and any attributes about the image

        :param image_id: Image ID
        """
        self.driver.delete_cached_image(image_id)

    def delete_all_queued_images(self):
        """
        Removes all queued image files and any attributes about the images
        and returns the number of queued image files that were deleted.
        """
        return self.driver.delete_all_queued_images()

    def delete_queued_image(self, image_id):
        """
        Removes a specific queued image file and any attributes about the image

        :param image_id: Image ID
        """
        self.driver.delete_queued_image(image_id)

    def prune(self):
        """
        Removes all cached image files above the cache's maximum
        size. Returns a tuple containing the total number of cached
        files removed and the total size of all pruned image files.
        """
        max_size = CONF.image_cache_max_size
        current_size = self.driver.get_cache_size()
        if max_size > current_size:
            LOG.debug("Image cache has free space, skipping prune...")
            return (0, 0)

        overage = current_size - max_size
        LOG.debug("Image cache currently %(overage)d bytes over max "
                  "size. Starting prune to max size of %(max_size)d ",
                  {'overage': overage, 'max_size': max_size})

        total_bytes_pruned = 0
        total_files_pruned = 0
        entry = self.driver.get_least_recently_accessed()
        while entry and current_size > max_size:
            image_id, size = entry
            LOG.debug("Pruning '%(image_id)s' to free %(size)d bytes",
                      {'image_id': image_id, 'size': size})
            self.driver.delete_cached_image(image_id)
            total_bytes_pruned = total_bytes_pruned + size
            total_files_pruned = total_files_pruned + 1
            current_size = current_size - size
            entry = self.driver.get_least_recently_accessed()

        LOG.debug("Finished pruning. "
                  "Pruned %(total_files_pruned)d files and "
                  "%(total_bytes_pruned)d bytes.",
                  {'total_files_pruned': total_files_pruned,
                   'total_bytes_pruned': total_bytes_pruned})
        return total_files_pruned, total_bytes_pruned

    def clean(self, stall_time=None):
        """
        Cleans up any invalid or incomplete cached images. The cache driver
        decides what that means...
        """
        self.driver.clean(stall_time)

    def queue_image(self, image_id):
        """
        Adds an image to the cache queue.

        If the image already exists in the queue or has already been
        cached, we return False, True otherwise.

        :param image_id: Image ID
        """
        return self.driver.queue_image(image_id)

    def get_caching_iter(self, image_id, image_checksum, image_iter):
        """
        Returns an iterator that caches the contents of an image
        while the image contents are read through the supplied
        iterator.

        :param image_id: Image ID
        :param image_checksum: checksum expected to be generated while
                               iterating over image data
        :param image_iter: Iterator that will read image contents
        """
        if not self.driver.is_cacheable(image_id):
            return image_iter

        LOG.debug("Tee'ing image '%s' into cache", image_id)

        return self.cache_tee_iter(image_id, image_iter, image_checksum)

    def cache_tee_iter(self, image_id, image_iter, image_checksum):
        try:
            current_checksum = hashlib.md5()

            with self.driver.open_for_write(image_id) as cache_file:
                for chunk in image_iter:
                    try:
                        cache_file.write(chunk)
                    finally:
                        current_checksum.update(chunk)
                        yield chunk
                cache_file.flush()

                if (image_checksum and
                        image_checksum != current_checksum.hexdigest()):
                    msg = _("Checksum verification failed. Aborted "
                            "caching of image '%s'.") % image_id
                    raise exception.DaisyException(msg)

        except exception.DaisyException as e:
            with excutils.save_and_reraise_exception():
                # image_iter has given us bad data (size_checked_iter has
                # found a bad length) or corrupt data (checksum is wrong).
                LOG.exception(utils.exception_to_str(e))
        except Exception as e:
            LOG.exception(_LE("Exception encountered while tee'ing "
                              "image '%(image_id)s' into cache: %(error)s. "
                              "Continuing with response.") %
                          {'image_id': image_id,
                           'error': utils.exception_to_str(e)})

            # If no checksum provided continue responding even if
            # caching failed.
            for chunk in image_iter:
                yield chunk

    def cache_image_iter(self, image_id, image_iter, image_checksum=None):
        """
        Cache an image with supplied iterator.

        :param image_id: Image ID
        :param image_file: Iterator retrieving image chunks
        :param image_checksum: Checksum of image

        :retval True if image file was cached, False otherwise
        """
        if not self.driver.is_cacheable(image_id):
            return False

        for chunk in self.get_caching_iter(image_id, image_checksum,
                                           image_iter):
            pass
        return True

    def cache_image_file(self, image_id, image_file):
        """
        Cache an image file.

        :param image_id: Image ID
        :param image_file: Image file to cache

        :retval True if image file was cached, False otherwise
        """
        CHUNKSIZE = 64 * units.Mi

        return self.cache_image_iter(image_id,
                                     utils.chunkiter(image_file, CHUNKSIZE))

    def open_for_read(self, image_id):
        """
        Open and yield file for reading the image file for an image
        with supplied identifier.

        :note Upon successful reading of the image file, the image's
              hit count will be incremented.

        :param image_id: Image ID
        """
        return self.driver.open_for_read(image_id)

    def get_image_size(self, image_id):
        """
        Return the size of the image file for an image with supplied
        identifier.

        :param image_id: Image ID
        """
        return self.driver.get_image_size(image_id)

    def get_queued_images(self):
        """
        Returns a list of image IDs that are in the queue. The
        list should be sorted by the time the image ID was inserted
        into the queue.
        """
        return self.driver.get_queued_images()
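
A usage sketch for the deleted cache (an assumption, not part of this commit; it presumes image_cache_dir is configured so ImageCache() can initialize its driver):

    cache = ImageCache()

    def stream_image(image_id, checksum, source_iter):
        # get_caching_iter yields the same chunks it is given; as a side
        # effect each chunk is also written into the cache, and the checksum
        # is verified once the source iterator is exhausted.
        for chunk in cache.get_caching_iter(image_id, checksum, source_iter):
            yield chunk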
@@ -1,21 +0,0 @@
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from daisy.image_cache import ImageCache


class CacheApp(object):

    def __init__(self):
        self.cache = ImageCache()
@@ -1,27 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""
Cleans up any invalid cache entries
"""

from daisy.image_cache import base


class Cleaner(base.CacheApp):

    def run(self):
        self.cache.clean()
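
A sketch of how this class was likely wired into a command-line entry point (the module path is hypothetical):

    from daisy.image_cache.cleaner import Cleaner

    app = Cleaner()   # CacheApp.__init__ builds an ImageCache
    app.run()         # delegates to ImageCache.clean()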
@@ -1,133 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from oslo_serialization import jsonutils as json

from daisy.common import client as base_client
from daisy.common import exception
from daisy import i18n

_ = i18n._


class CacheClient(base_client.BaseClient):

    DEFAULT_PORT = 9292
    DEFAULT_DOC_ROOT = '/v1'

    def delete_cached_image(self, image_id):
        """
        Delete a specified image from the cache
        """
        self.do_request("DELETE", "/cached_images/%s" % image_id)
        return True

    def get_cached_images(self, **kwargs):
        """
        Returns a list of images stored in the image cache.
        """
        res = self.do_request("GET", "/cached_images")
        data = json.loads(res.read())['cached_images']
        return data

    def get_queued_images(self, **kwargs):
        """
        Returns a list of images queued for caching
        """
        res = self.do_request("GET", "/queued_images")
        data = json.loads(res.read())['queued_images']
        return data

    def delete_all_cached_images(self):
        """
        Delete all cached images
        """
        res = self.do_request("DELETE", "/cached_images")
        data = json.loads(res.read())
        num_deleted = data['num_deleted']
        return num_deleted

    def queue_image_for_caching(self, image_id):
        """
        Queue an image for prefetching into cache
        """
        self.do_request("PUT", "/queued_images/%s" % image_id)
        return True

    def delete_queued_image(self, image_id):
        """
        Delete a specified image from the cache queue
        """
        self.do_request("DELETE", "/queued_images/%s" % image_id)
        return True

    def delete_all_queued_images(self):
        """
        Delete all queued images
        """
        res = self.do_request("DELETE", "/queued_images")
        data = json.loads(res.read())
        num_deleted = data['num_deleted']
        return num_deleted


def get_client(host, port=None, timeout=None, use_ssl=False, username=None,
               password=None, tenant=None,
               auth_url=None, auth_strategy=None,
               auth_token=None, region=None,
               is_silent_upload=False, insecure=False):
    """
    Returns a new Glance client object based on common kwargs.
    If an option isn't specified, it falls back to common environment
    variable defaults.
    """

    if auth_url or os.getenv('OS_AUTH_URL'):
        force_strategy = 'keystone'
    else:
        force_strategy = None

    creds = {
        'username': username or
        os.getenv('OS_AUTH_USER', os.getenv('OS_USERNAME')),
        'password': password or
        os.getenv('OS_AUTH_KEY', os.getenv('OS_PASSWORD')),
        'tenant': tenant or
        os.getenv('OS_AUTH_TENANT', os.getenv('OS_TENANT_NAME')),
        'auth_url': auth_url or
        os.getenv('OS_AUTH_URL'),
        'strategy': force_strategy or
        auth_strategy or
        os.getenv('OS_AUTH_STRATEGY', 'noauth'),
        'region': region or
        os.getenv('OS_REGION_NAME'),
    }

    if creds['strategy'] == 'keystone' and not creds['auth_url']:
        msg = _("--os_auth_url option or OS_AUTH_URL environment variable "
                "required when keystone authentication strategy is enabled\n")
        raise exception.ClientConfigurationError(msg)

    return CacheClient(
        host=host,
        port=port,
        timeout=timeout,
        use_ssl=use_ssl,
        auth_token=auth_token or
        os.getenv('OS_TOKEN'),
        creds=creds,
        insecure=insecure)
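
A usage sketch for the deleted client (an assumption, not part of this commit; the image ID is a placeholder):

    client = get_client(host='127.0.0.1', port=9292, auth_strategy='noauth')
    client.queue_image_for_caching('some-image-id')   # hypothetical ID
    for record in client.get_queued_images():
        print(record)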
@@ -1,219 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Base attribute driver class
"""

import os.path

from oslo_config import cfg
from oslo_log import log as logging

from daisy.common import exception
from daisy.common import utils
from daisy import i18n

LOG = logging.getLogger(__name__)
_ = i18n._

CONF = cfg.CONF


class Driver(object):

    def configure(self):
        """
        Configure the driver to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadDriverConfiguration`
        """
        # Here we set up the various file-based image cache paths
        # that we need in order to find the files in different states
        # of cache management.
        self.set_paths()

    def set_paths(self):
        """
        Creates all necessary directories under the base cache directory
        """

        self.base_dir = CONF.image_cache_dir
        if self.base_dir is None:
            msg = _('Failed to read %s from config') % 'image_cache_dir'
            LOG.error(msg)
            driver = self.__class__.__module__
            raise exception.BadDriverConfiguration(driver_name=driver,
                                                   reason=msg)

        self.incomplete_dir = os.path.join(self.base_dir, 'incomplete')
        self.invalid_dir = os.path.join(self.base_dir, 'invalid')
        self.queue_dir = os.path.join(self.base_dir, 'queue')

        dirs = [self.incomplete_dir, self.invalid_dir, self.queue_dir]

        for path in dirs:
            utils.safe_mkdirs(path)

    def get_cache_size(self):
        """
        Returns the total size in bytes of the image cache.
        """
        raise NotImplementedError

    def get_cached_images(self):
        """
        Returns a list of records about cached images.

        The list of records shall be ordered by image ID and shall look like::

            [
              {
              'image_id': <IMAGE_ID>,
              'hits': INTEGER,
              'last_modified': ISO_TIMESTAMP,
              'last_accessed': ISO_TIMESTAMP,
              'size': INTEGER
              }, ...
            ]

        """
        raise NotImplementedError

    def is_cached(self, image_id):
        """
        Returns True if the image with the supplied ID has its image
        file cached.

        :param image_id: Image ID
        """
        raise NotImplementedError

    def is_cacheable(self, image_id):
        """
        Returns True if the image with the supplied ID can have its
        image file cached, False otherwise.

        :param image_id: Image ID
        """
        raise NotImplementedError

    def is_queued(self, image_id):
        """
        Returns True if the image identifier is in our cache queue.

        :param image_id: Image ID
        """
        raise NotImplementedError

    def delete_all_cached_images(self):
        """
        Removes all cached image files and any attributes about the images
        and returns the number of cached image files that were deleted.
        """
        raise NotImplementedError

    def delete_cached_image(self, image_id):
        """
        Removes a specific cached image file and any attributes about the image

        :param image_id: Image ID
        """
        raise NotImplementedError

    def delete_all_queued_images(self):
        """
        Removes all queued image files and any attributes about the images
        and returns the number of queued image files that were deleted.
        """
        raise NotImplementedError

    def delete_queued_image(self, image_id):
        """
        Removes a specific queued image file and any attributes about the image

        :param image_id: Image ID
        """
        raise NotImplementedError

    def queue_image(self, image_id):
        """
        Puts an image identifier in a queue for caching. Return True
        on successful add to the queue, False otherwise...

        :param image_id: Image ID
        """
        raise NotImplementedError

    def clean(self, stall_time=None):
        """
        Dependent on the driver, clean up and destroy any invalid or incomplete
        cached images
        """
        raise NotImplementedError

    def get_least_recently_accessed(self):
        """
        Return a tuple containing the image_id and size of the least recently
        accessed cached file, or None if no cached files.
        """
        raise NotImplementedError

    def open_for_write(self, image_id):
        """
        Open a file for writing the image file for an image
        with supplied identifier.

        :param image_id: Image ID
        """
        raise NotImplementedError

    def open_for_read(self, image_id):
        """
        Open and yield file for reading the image file for an image
        with supplied identifier.

        :param image_id: Image ID
        """
        raise NotImplementedError

    def get_image_filepath(self, image_id, cache_status='active'):
        """
        This crafts an absolute path to a specific entry

        :param image_id: Image ID
        :param cache_status: Status of the image in the cache
        """
        if cache_status == 'active':
            return os.path.join(self.base_dir, str(image_id))
        return os.path.join(self.base_dir, cache_status, str(image_id))

    def get_image_size(self, image_id):
        """
        Return the size of the image file for an image with supplied
        identifier.

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id)
        return os.path.getsize(path)

    def get_queued_images(self):
        """
        Returns a list of image IDs that are in the queue. The
        list should be sorted by the time the image ID was inserted
        into the queue.
        """
        raise NotImplementedError
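
The minimal surface a concrete driver builds on top of this base class, as a sketch (the in-memory driver below is illustrative only and presumes image_cache_dir is configured before configure() runs):

    class InMemoryDriver(Driver):
        """Illustrative driver that keeps attributes in a dict."""

        def configure(self):
            super(InMemoryDriver, self).configure()  # creates the cache dirs
            self._sizes = {}

        def is_cached(self, image_id):
            return image_id in self._sizes

        def get_cache_size(self):
            return sum(self._sizes.values())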
@@ -1,497 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Cache driver that uses SQLite to store information about cached images
"""

from __future__ import absolute_import
from contextlib import contextmanager
import os
import sqlite3
import stat
import time

from eventlet import sleep
from eventlet import timeout
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils

from daisy.common import exception
from daisy import i18n
from daisy.image_cache.drivers import base

LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW

sqlite_opts = [
    cfg.StrOpt('image_cache_sqlite_db', default='cache.db',
               help=_('The path to the sqlite file database that will be '
                      'used for image cache management.')),
]

CONF = cfg.CONF
CONF.register_opts(sqlite_opts)

DEFAULT_SQL_CALL_TIMEOUT = 2


class SqliteConnection(sqlite3.Connection):

    """
    SQLite DB Connection handler that plays well with eventlet,
    slightly modified from Swift's similar code.
    """

    def __init__(self, *args, **kwargs):
        self.timeout_seconds = kwargs.get('timeout', DEFAULT_SQL_CALL_TIMEOUT)
        kwargs['timeout'] = 0
        sqlite3.Connection.__init__(self, *args, **kwargs)

    def _timeout(self, call):
        with timeout.Timeout(self.timeout_seconds):
            while True:
                try:
                    return call()
                except sqlite3.OperationalError as e:
                    if 'locked' not in str(e):
                        raise
                sleep(0.05)

    def execute(self, *args, **kwargs):
        return self._timeout(lambda: sqlite3.Connection.execute(
            self, *args, **kwargs))

    def commit(self):
        return self._timeout(lambda: sqlite3.Connection.commit(self))


def dict_factory(cur, row):
    return dict(
        ((col[0], row[idx]) for idx, col in enumerate(cur.description)))


class Driver(base.Driver):

    """
    Cache driver that uses SQLite to store information about cached images.
    """

    def configure(self):
        """
        Configure the driver to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadDriverConfiguration`
        """
        super(Driver, self).configure()

        # Create the SQLite database that will hold our cache attributes
        self.initialize_db()

    def initialize_db(self):
        db = CONF.image_cache_sqlite_db
        self.db_path = os.path.join(self.base_dir, db)
        try:
            conn = sqlite3.connect(self.db_path, check_same_thread=False,
                                   factory=SqliteConnection)
            conn.executescript("""
                CREATE TABLE IF NOT EXISTS cached_images (
                    image_id TEXT PRIMARY KEY,
                    last_accessed REAL DEFAULT 0.0,
                    last_modified REAL DEFAULT 0.0,
                    size INTEGER DEFAULT 0,
                    hits INTEGER DEFAULT 0,
                    checksum TEXT
                );
            """)
            conn.close()
        except sqlite3.DatabaseError as e:
            msg = _("Failed to initialize the image cache database. "
                    "Got error: %s") % e
            LOG.error(msg)
            raise exception.BadDriverConfiguration(driver_name='sqlite',
                                                   reason=msg)

    def get_cache_size(self):
        """
        Returns the total size in bytes of the image cache.
        """
        sizes = []
        for path in self.get_cache_files(self.base_dir):
            if path == self.db_path:
                continue
            file_info = os.stat(path)
            sizes.append(file_info[stat.ST_SIZE])
        return sum(sizes)

    def get_hit_count(self, image_id):
        """
        Return the number of hits that an image has.

        :param image_id: Opaque image identifier
        """
        if not self.is_cached(image_id):
            return 0

        hits = 0
        with self.get_db() as db:
            cur = db.execute("""SELECT hits FROM cached_images
                             WHERE image_id = ?""",
                             (image_id,))
            hits = cur.fetchone()[0]
        return hits

    def get_cached_images(self):
        """
        Returns a list of records about cached images.
        """
        LOG.debug("Gathering cached image entries.")
        with self.get_db() as db:
            cur = db.execute("""SELECT
                             image_id, hits, last_accessed, last_modified, size
                             FROM cached_images
                             ORDER BY image_id""")
            cur.row_factory = dict_factory
            return [r for r in cur]

    def is_cached(self, image_id):
        """
        Returns True if the image with the supplied ID has its image
        file cached.

        :param image_id: Image ID
        """
        return os.path.exists(self.get_image_filepath(image_id))

    def is_cacheable(self, image_id):
        """
        Returns True if the image with the supplied ID can have its
        image file cached, False otherwise.

        :param image_id: Image ID
        """
        # Make sure we're not already cached or caching the image
        return not (self.is_cached(image_id) or
                    self.is_being_cached(image_id))

    def is_being_cached(self, image_id):
        """
        Returns True if the image with supplied id is currently
        in the process of having its image file cached.

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id, 'incomplete')
        return os.path.exists(path)

    def is_queued(self, image_id):
        """
        Returns True if the image identifier is in our cache queue.

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id, 'queue')
        return os.path.exists(path)

    def delete_all_cached_images(self):
        """
        Removes all cached image files and any attributes about the images
        """
        deleted = 0
        with self.get_db() as db:
            for path in self.get_cache_files(self.base_dir):
                delete_cached_file(path)
                deleted += 1
            db.execute("""DELETE FROM cached_images""")
            db.commit()
        return deleted

    def delete_cached_image(self, image_id):
        """
        Removes a specific cached image file and any attributes about the image

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id)
        with self.get_db() as db:
            delete_cached_file(path)
            db.execute("""DELETE FROM cached_images WHERE image_id = ?""",
                       (image_id, ))
            db.commit()

    def delete_all_queued_images(self):
        """
        Removes all queued image files and any attributes about the images
        """
        files = [f for f in self.get_cache_files(self.queue_dir)]
        for file in files:
            os.unlink(file)
        return len(files)

    def delete_queued_image(self, image_id):
        """
        Removes a specific queued image file and any attributes about the image

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id, 'queue')
        if os.path.exists(path):
            os.unlink(path)

    def clean(self, stall_time=None):
        """
        Delete any image files in the invalid directory and any
        files in the incomplete directory that are older than a
        configurable amount of time.
        """
        self.delete_invalid_files()

        if stall_time is None:
            stall_time = CONF.image_cache_stall_time

        now = time.time()
        older_than = now - stall_time
        self.delete_stalled_files(older_than)

    def get_least_recently_accessed(self):
        """
        Return a tuple containing the image_id and size of the least recently
        accessed cached file, or None if no cached files.
        """
        with self.get_db() as db:
            cur = db.execute("""SELECT image_id FROM cached_images
                             ORDER BY last_accessed LIMIT 1""")
            try:
                image_id = cur.fetchone()[0]
            except TypeError:
                # There are no more cached images
                return None

        path = self.get_image_filepath(image_id)
        try:
            file_info = os.stat(path)
            size = file_info[stat.ST_SIZE]
        except OSError:
            size = 0
        return image_id, size

    @contextmanager
    def open_for_write(self, image_id):
        """
        Open a file for writing the image file for an image
        with supplied identifier.

        :param image_id: Image ID
        """
        incomplete_path = self.get_image_filepath(image_id, 'incomplete')

        def commit():
            with self.get_db() as db:
                final_path = self.get_image_filepath(image_id)
                LOG.debug("Fetch finished, moving "
                          "'%(incomplete_path)s' to '%(final_path)s'",
                          dict(incomplete_path=incomplete_path,
                               final_path=final_path))
                os.rename(incomplete_path, final_path)

                # Make sure that we "pop" the image from the queue...
                if self.is_queued(image_id):
                    os.unlink(self.get_image_filepath(image_id, 'queue'))

                filesize = os.path.getsize(final_path)
                now = time.time()

                db.execute("""INSERT INTO cached_images
                           (image_id, last_accessed, last_modified, hits, size)
                           VALUES (?, ?, ?, 0, ?)""",
                           (image_id, now, now, filesize))
                db.commit()

        def rollback(e):
            with self.get_db() as db:
                if os.path.exists(incomplete_path):
                    invalid_path = self.get_image_filepath(image_id, 'invalid')

                    LOG.warn(_LW("Fetch of cache file failed (%(e)s), rolling "
                                 "back by moving '%(incomplete_path)s' to "
                                 "'%(invalid_path)s'") %
                             {'e': e,
                              'incomplete_path': incomplete_path,
                              'invalid_path': invalid_path})
                    os.rename(incomplete_path, invalid_path)

                db.execute("""DELETE FROM cached_images
                           WHERE image_id = ?""", (image_id, ))
                db.commit()

        try:
            with open(incomplete_path, 'wb') as cache_file:
                yield cache_file
        except Exception as e:
            with excutils.save_and_reraise_exception():
                rollback(e)
        else:
            commit()
        finally:
            # if the generator filling the cache file neither raises an
            # exception, nor completes fetching all data, neither rollback
            # nor commit will have been called, so the incomplete file
            # will persist - in that case remove it as it is unusable
            # example: ^c from client fetch
            if os.path.exists(incomplete_path):
                rollback('incomplete fetch')

    @contextmanager
    def open_for_read(self, image_id):
        """
        Open and yield file for reading the image file for an image
        with supplied identifier.

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id)
        with open(path, 'rb') as cache_file:
            yield cache_file
        now = time.time()
        with self.get_db() as db:
            db.execute("""UPDATE cached_images
                       SET hits = hits + 1, last_accessed = ?
                       WHERE image_id = ?""",
                       (now, image_id))
            db.commit()

    @contextmanager
    def get_db(self):
        """
        Returns a context manager that produces a database connection that
        self-closes and calls rollback if an error occurs while using the
        database connection
        """
        conn = sqlite3.connect(self.db_path, check_same_thread=False,
                               factory=SqliteConnection)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.execute('PRAGMA synchronous = NORMAL')
        conn.execute('PRAGMA count_changes = OFF')
        conn.execute('PRAGMA temp_store = MEMORY')
        try:
            yield conn
        except sqlite3.DatabaseError as e:
            msg = _LE("Error executing SQLite call. Got error: %s") % e
            LOG.error(msg)
            conn.rollback()
        finally:
            conn.close()

    def queue_image(self, image_id):
        """
        Adds an image to the cache queue.

        If the image already exists in the queue or has already been
        cached, we return False, True otherwise.

        :param image_id: Image ID
        """
        if self.is_cached(image_id):
            msg = _LI("Not queueing image '%s'. Already cached.") % image_id
            LOG.info(msg)
            return False

        if self.is_being_cached(image_id):
            msg = _LI("Not queueing image '%s'. Already being "
                      "written to cache") % image_id
            LOG.info(msg)
            return False

        if self.is_queued(image_id):
            msg = _LI("Not queueing image '%s'. Already queued.") % image_id
            LOG.info(msg)
            return False

        path = self.get_image_filepath(image_id, 'queue')

        # Touch the file to add it to the queue
        with open(path, "w"):
            pass

        return True

    def delete_invalid_files(self):
        """
        Removes any invalid cache entries
        """
        for path in self.get_cache_files(self.invalid_dir):
            os.unlink(path)
            LOG.info(_LI("Removed invalid cache file %s") % path)

    def delete_stalled_files(self, older_than):
        """
        Removes any incomplete cache entries older than a
        supplied modified time.

        :param older_than: Files written to on or before this timestamp
                           will be deleted.
        """
        for path in self.get_cache_files(self.incomplete_dir):
            if os.path.getmtime(path) < older_than:
                try:
                    os.unlink(path)
                    LOG.info(_LI("Removed stalled cache file %s") % path)
                except Exception as e:
                    msg = _LW("Failed to delete file %(path)s. "
                              "Got error: %(e)s") % dict(path=path, e=e)
                    LOG.warn(msg)

    def get_queued_images(self):
        """
        Returns a list of image IDs that are in the queue. The
        list should be sorted by the time the image ID was inserted
        into the queue.
        """
        files = [f for f in self.get_cache_files(self.queue_dir)]
        items = []
        for path in files:
            mtime = os.path.getmtime(path)
            items.append((mtime, os.path.basename(path)))

        items.sort()
        return [image_id for (modtime, image_id) in items]

    def get_cache_files(self, basepath):
        """
        Returns cache files in the supplied directory

        :param basepath: Directory to look in for cache files
        """
        for fname in os.listdir(basepath):
            path = os.path.join(basepath, fname)
            if path != self.db_path and os.path.isfile(path):
                yield path


def delete_cached_file(path):
    if os.path.exists(path):
        LOG.debug("Deleting image cache file '%s'", path)
        os.unlink(path)
    else:
        LOG.warn(_LW("Cached image file '%s' doesn't exist, unable to"
                     " delete") % path)
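
The get_db() pattern above, shown standalone for clarity (a sketch with plain sqlite3; unlike the deleted driver, which logs database errors and continues, this version re-raises them):

    import sqlite3
    from contextlib import contextmanager

    @contextmanager
    def get_db(db_path):
        conn = sqlite3.connect(db_path)
        try:
            yield conn
        except sqlite3.DatabaseError:
            conn.rollback()   # undo the in-flight transaction on failure
            raise
        finally:
            conn.close()      # always release the connection

    with get_db('cache.db') as db:
        db.execute("SELECT 1")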
@@ -1,510 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Cache driver that uses xattr file tags and requires a filesystem
that has atimes set.

Assumptions
===========

1. Cache data directory exists on a filesystem that updates atime on
   reads ('noatime' should NOT be set)

2. Cache data directory exists on a filesystem that supports xattrs.
   This is optional, but highly recommended since it allows us to
   present ops with useful information pertaining to the cache, like
   human readable filenames and statistics.

3. `glance-prune` is scheduled to run as a periodic job via cron. This
   is needed to run the LRU prune strategy to keep the cache size
   within the limits set by the config file.


Cache Directory Notes
=====================

The image cache data directory contains the main cache path, where the
active cache entries live, and subdirectories for handling partial
downloads and errored-out cache images.

The layout looks like:

$image_cache_dir/
  entry1
  entry2
  ...
  incomplete/
  invalid/
  queue/
"""
from __future__ import absolute_import
from contextlib import contextmanager
import errno
import os
import stat
import time

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import xattr

from daisy.common import exception
from daisy.common import utils
from daisy import i18n
from daisy.image_cache.drivers import base

LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW

CONF = cfg.CONF


class Driver(base.Driver):

    """
    Cache driver that uses xattr file tags and requires a filesystem
    that has atimes set.
    """

    def configure(self):
        """
        Configure the driver to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadDriverConfiguration`
        """
        # Here we set up the various file-based image cache paths
        # that we need in order to find the files in different states
        # of cache management.
        self.set_paths()

        # We do a quick attempt to write a user xattr to a temporary file
        # to check that the filesystem is even enabled to support xattrs
        image_cache_dir = self.base_dir
        fake_image_filepath = os.path.join(image_cache_dir, 'checkme')
        with open(fake_image_filepath, 'wb') as fake_file:
            fake_file.write("XXX")
            fake_file.flush()
        try:
            set_xattr(fake_image_filepath, 'hits', '1')
        except IOError as e:
            if e.errno == errno.EOPNOTSUPP:
                msg = (_("The device housing the image cache directory "
                         "%(image_cache_dir)s does not support xattr. It is"
                         " likely you need to edit your fstab and add the "
                         "user_xattr option to the appropriate line for the"
                         " device housing the cache directory.") %
                       {'image_cache_dir': image_cache_dir})
                LOG.error(msg)
                raise exception.BadDriverConfiguration(driver_name="xattr",
                                                       reason=msg)
        else:
            # Cleanup after ourselves...
            if os.path.exists(fake_image_filepath):
                os.unlink(fake_image_filepath)
    def get_cache_size(self):
        """
        Returns the total size in bytes of the image cache.
        """
        sizes = []
        for path in get_all_regular_files(self.base_dir):
            file_info = os.stat(path)
            sizes.append(file_info[stat.ST_SIZE])
        return sum(sizes)

    def get_hit_count(self, image_id):
        """
        Return the number of hits that an image has.

        :param image_id: Opaque image identifier
        """
        if not self.is_cached(image_id):
            return 0

        path = self.get_image_filepath(image_id)
        return int(get_xattr(path, 'hits', default=0))

    def get_cached_images(self):
        """
        Returns a list of records about cached images.
        """
        LOG.debug("Gathering cached image entries.")
        entries = []
        for path in get_all_regular_files(self.base_dir):
            image_id = os.path.basename(path)

            entry = {}
            entry['image_id'] = image_id

            file_info = os.stat(path)
            entry['last_modified'] = file_info[stat.ST_MTIME]
            entry['last_accessed'] = file_info[stat.ST_ATIME]
            entry['size'] = file_info[stat.ST_SIZE]
            entry['hits'] = self.get_hit_count(image_id)

            entries.append(entry)
        entries.sort()  # Order by ID
        return entries

    def is_cached(self, image_id):
        """
        Returns True if the image with the supplied ID has its image
        file cached.

        :param image_id: Image ID
        """
        return os.path.exists(self.get_image_filepath(image_id))

    def is_cacheable(self, image_id):
        """
        Returns True if the image with the supplied ID can have its
        image file cached, False otherwise.

        :param image_id: Image ID
        """
        # Make sure we're not already cached or caching the image
        return not (self.is_cached(image_id) or
                    self.is_being_cached(image_id))

    def is_being_cached(self, image_id):
        """
        Returns True if the image with supplied id is currently
        in the process of having its image file cached.

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id, 'incomplete')
        return os.path.exists(path)

    def is_queued(self, image_id):
        """
        Returns True if the image identifier is in our cache queue.
        """
        path = self.get_image_filepath(image_id, 'queue')
        return os.path.exists(path)

    def delete_all_cached_images(self):
        """
        Removes all cached image files and any attributes about the images
        """
        deleted = 0
        for path in get_all_regular_files(self.base_dir):
            delete_cached_file(path)
            deleted += 1
        return deleted

    def delete_cached_image(self, image_id):
        """
        Removes a specific cached image file and any attributes about the image

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id)
        delete_cached_file(path)

    def delete_all_queued_images(self):
        """
        Removes all queued image files and any attributes about the images
        """
        files = [f for f in get_all_regular_files(self.queue_dir)]
        for file in files:
            os.unlink(file)
        return len(files)

    def delete_queued_image(self, image_id):
        """
        Removes a specific queued image file and any attributes about the image

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id, 'queue')
        if os.path.exists(path):
            os.unlink(path)

    def get_least_recently_accessed(self):
        """
        Return a tuple containing the image_id and size of the least recently
        accessed cached file, or None if no cached files.
        """
        stats = []
        for path in get_all_regular_files(self.base_dir):
            file_info = os.stat(path)
            stats.append((file_info[stat.ST_ATIME],  # access time
                          file_info[stat.ST_SIZE],   # size in bytes
                          path))                     # absolute path

        if not stats:
            return None

        stats.sort()
        return os.path.basename(stats[0][2]), stats[0][1]
    @contextmanager
    def open_for_write(self, image_id):
        """
        Open a file for writing the image file for an image
        with supplied identifier.

        :param image_id: Image ID
        """
        incomplete_path = self.get_image_filepath(image_id, 'incomplete')

        def set_attr(key, value):
            set_xattr(incomplete_path, key, value)

        def commit():
            set_attr('hits', 0)

            final_path = self.get_image_filepath(image_id)
            LOG.debug("Fetch finished, moving "
                      "'%(incomplete_path)s' to '%(final_path)s'",
                      dict(incomplete_path=incomplete_path,
                           final_path=final_path))
            os.rename(incomplete_path, final_path)

            # Make sure that we "pop" the image from the queue...
            if self.is_queued(image_id):
                LOG.debug("Removing image '%s' from queue after "
                          "caching it." % image_id)
                os.unlink(self.get_image_filepath(image_id, 'queue'))

        def rollback(e):
            set_attr('error', utils.exception_to_str(e))

            invalid_path = self.get_image_filepath(image_id, 'invalid')
            LOG.debug("Fetch of cache file failed (%(e)s), rolling back by "
                      "moving '%(incomplete_path)s' to "
                      "'%(invalid_path)s'" %
                      {'e': utils.exception_to_str(e),
                       'incomplete_path': incomplete_path,
                       'invalid_path': invalid_path})
            os.rename(incomplete_path, invalid_path)

        try:
            with open(incomplete_path, 'wb') as cache_file:
                yield cache_file
        except Exception as e:
            with excutils.save_and_reraise_exception():
                rollback(e)
        else:
            commit()
        finally:
            # if the generator filling the cache file neither raises an
            # exception, nor completes fetching all data, neither rollback
            # nor commit will have been called, so the incomplete file
            # will persist - in that case remove it as it is unusable
            # example: ^c from client fetch
            if os.path.exists(incomplete_path):
                rollback('incomplete fetch')
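    # NOTE: a minimal usage sketch of the commit/rollback flow above,
    # assuming a configured Driver instance and an iterable of image
    # chunks (both hypothetical; not part of the original module):
    #
    #     driver = Driver()
    #     driver.configure()
    #     with driver.open_for_write('some-image-id') as cache_file:
    #         for chunk in chunks:
    #             cache_file.write(chunk)
    #
    # A clean exit moves the entry from incomplete/ into the main cache
    # directory and zeroes its 'hits' xattr; an exception diverts it into
    # invalid/ with the error message recorded in an 'error' xattr.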
    @contextmanager
    def open_for_read(self, image_id):
        """
        Open and yield file for reading the image file for an image
        with supplied identifier.

        :param image_id: Image ID
        """
        path = self.get_image_filepath(image_id)
        with open(path, 'rb') as cache_file:
            yield cache_file
        path = self.get_image_filepath(image_id)
        inc_xattr(path, 'hits', 1)
    def queue_image(self, image_id):
        """
        Adds an image to the queue of images to be cached.

        If the image already exists in the queue or has already been
        cached, we return False; True otherwise.

        :param image_id: Image ID
        """
        if self.is_cached(image_id):
            msg = _LI("Not queueing image '%s'. Already cached.") % image_id
            LOG.info(msg)
            return False

        if self.is_being_cached(image_id):
            msg = _LI("Not queueing image '%s'. Already being "
                      "written to cache") % image_id
            LOG.info(msg)
            return False

        if self.is_queued(image_id):
            msg = _LI("Not queueing image '%s'. Already queued.") % image_id
            LOG.info(msg)
            return False

        path = self.get_image_filepath(image_id, 'queue')
        LOG.debug("Queueing image '%s'.", image_id)

        # Touch the file to add it to the queue
        with open(path, "w"):
            pass

        return True
    def get_queued_images(self):
        """
        Returns a list of image IDs that are in the queue. The
        list should be sorted by the time the image ID was inserted
        into the queue.
        """
        files = [f for f in get_all_regular_files(self.queue_dir)]
        items = []
        for path in files:
            mtime = os.path.getmtime(path)
            items.append((mtime, os.path.basename(path)))

        items.sort()
        return [image_id for (modtime, image_id) in items]

    def _reap_old_files(self, dirpath, entry_type, grace=None):
        now = time.time()
        reaped = 0
        for path in get_all_regular_files(dirpath):
            mtime = os.path.getmtime(path)
            age = now - mtime
            if not grace:
                LOG.debug("No grace period, reaping '%(path)s'"
                          " immediately", {'path': path})
                delete_cached_file(path)
                reaped += 1
            elif age > grace:
                LOG.debug("Cache entry '%(path)s' exceeds grace period, "
                          "(%(age)i s > %(grace)i s)",
                          {'path': path, 'age': age, 'grace': grace})
                delete_cached_file(path)
                reaped += 1

        LOG.info(_LI("Reaped %(reaped)s %(entry_type)s cache entries"),
                 {'reaped': reaped, 'entry_type': entry_type})
        return reaped

    def reap_invalid(self, grace=None):
        """Remove any invalid cache entries

        :param grace: Number of seconds to keep an invalid entry around for
                      debugging purposes. If None, then delete immediately.
        """
        return self._reap_old_files(self.invalid_dir, 'invalid', grace=grace)

    def reap_stalled(self, grace=None):
        """Remove any stalled cache entries

        :param grace: Number of seconds to keep a stalled entry around for
                      debugging purposes. If None, then delete immediately.
        """
        return self._reap_old_files(self.incomplete_dir, 'stalled',
                                    grace=grace)

    def clean(self, stall_time=None):
        """
        Delete any image files in the invalid directory and any
        files in the incomplete directory that are older than a
        configurable amount of time.
        """
        self.reap_invalid()

        if stall_time is None:
            stall_time = CONF.image_cache_stall_time

        self.reap_stalled(stall_time)
def get_all_regular_files(basepath):
    for fname in os.listdir(basepath):
        path = os.path.join(basepath, fname)
        if os.path.isfile(path):
            yield path


def delete_cached_file(path):
    if os.path.exists(path):
        LOG.debug("Deleting image cache file '%s'" % path)
        os.unlink(path)
    else:
        LOG.warn(_LW("Cached image file '%s' doesn't exist, unable to"
                     " delete") % path)


def _make_namespaced_xattr_key(key, namespace='user'):
    """
    Create a fully-qualified xattr-key by including the intended namespace.

    Namespacing differs among OSes[1]:

        FreeBSD: user, system
        Linux: user, system, trusted, security
        MacOS X: not needed

    Mac OS X won't break if we include a namespace qualifier, so, for
    simplicity, we always include it.

    --
    [1] http://en.wikipedia.org/wiki/Extended_file_attributes
    """
    namespaced_key = ".".join([namespace, key])
    return namespaced_key


def get_xattr(path, key, **kwargs):
    """Return the value for a particular xattr

    If the key doesn't exist, or xattrs aren't supported by the file
    system, then an IOError will be raised, that is, unless you specify a
    default using kwargs.
    """
    namespaced_key = _make_namespaced_xattr_key(key)
    try:
        return xattr.getxattr(path, namespaced_key)
    except IOError:
        if 'default' in kwargs:
            return kwargs['default']
        else:
            raise


def set_xattr(path, key, value):
    """Set the value of a specified xattr.

    If xattrs aren't supported by the file-system, an IOError is raised.
    """
    namespaced_key = _make_namespaced_xattr_key(key)
    xattr.setxattr(path, namespaced_key, str(value))


def inc_xattr(path, key, n=1):
    """
    Increment the value of an xattr (assuming it is an integer).

    BEWARE, this code *does* have a RACE CONDITION, since the
    read/update/write sequence is not atomic.

    Since the use-case for this function is collecting stats--not critical--
    the benefits of simple, lock-free code outweigh the possibility of an
    occasional hit not being counted.
    """
    count = int(get_xattr(path, key))
    count += n
    set_xattr(path, key, str(count))
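A small standalone sketch of the xattr bookkeeping above, assuming the
`xattr` package and a filesystem mounted with user_xattr (string values as
under Python 2, matching the module; Python 3 would need bytes):

    import tempfile

    import xattr

    path = tempfile.mkstemp()[1]
    xattr.setxattr(path, 'user.hits', '0')            # initialize counter
    hits = int(xattr.getxattr(path, 'user.hits'))     # read-modify-write,
    xattr.setxattr(path, 'user.hits', str(hits + 1))  # not atomic, as noted
    print(xattr.getxattr(path, 'user.hits'))          # '1'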
@@ -1,86 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Prefetches images into the Image Cache
"""

import eventlet
import glance_store
from oslo_log import log as logging

from daisy.common import exception
from daisy import context
from daisy import i18n
from daisy.image_cache import base
import daisy.registry.client.v1.api as registry

LOG = logging.getLogger(__name__)
_LI = i18n._LI
_LW = i18n._LW


class Prefetcher(base.CacheApp):

    def __init__(self):
        super(Prefetcher, self).__init__()
        registry.configure_registry_client()
        registry.configure_registry_admin_creds()

    def fetch_image_into_cache(self, image_id):
        ctx = context.RequestContext(is_admin=True, show_deleted=True)

        try:
            image_meta = registry.get_image_metadata(ctx, image_id)
            if image_meta['status'] != 'active':
                LOG.warn(_LW("Image '%s' is not active. Not caching.") %
                         image_id)
                return False

        except exception.NotFound:
            LOG.warn(_LW("No metadata found for image '%s'") % image_id)
            return False

        location = image_meta['location']
        image_data, image_size = glance_store.get_from_backend(location,
                                                               context=ctx)
        LOG.debug("Caching image '%s'", image_id)
        cache_tee_iter = self.cache.cache_tee_iter(image_id, image_data,
                                                   image_meta['checksum'])
        # Image is tee'd into cache and checksum verified
        # as we iterate
        list(cache_tee_iter)
        return True

    def run(self):
        images = self.cache.get_queued_images()
        if not images:
            LOG.debug("Nothing to prefetch.")
            return True

        num_images = len(images)
        LOG.debug("Found %d images to prefetch", num_images)

        pool = eventlet.GreenPool(num_images)
        results = pool.imap(self.fetch_image_into_cache, images)
        successes = sum([1 for r in results if r is True])
        if successes != num_images:
            LOG.warn(_LW("Failed to successfully cache all "
                         "images in queue."))
            return False

        LOG.info(_LI("Successfully cached all %d images") % num_images)
        return True
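The run() method above fans fetches out across an eventlet green pool; a toy
standalone sketch of that pattern (stand-in worker instead of the registry
and store calls):

    import eventlet

    def fetch(image_id):
        # stand-in for Prefetcher.fetch_image_into_cache()
        return image_id != 'bad'

    images = ['aaa', 'bbb', 'bad']
    pool = eventlet.GreenPool(len(images))
    results = list(pool.imap(fetch, images))
    print(sum(1 for r in results if r is True))  # 2 of the 3 succeeded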
@@ -1,26 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Prunes the Image Cache
"""

from daisy.image_cache import base


class Pruner(base.CacheApp):

    def run(self):
        self.cache.prune()
@@ -1,555 +0,0 @@
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Reference implementation registry server WSGI controller
"""

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import timeutils
from webob import exc

from daisy.common import exception
from daisy.common import utils
from daisy.common import wsgi
import daisy.db
from daisy import i18n


LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW

CONF = cfg.CONF

DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'size',
                           'disk_format', 'container_format',
                           'checksum']

SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format',
                     'min_ram', 'min_disk', 'size_min', 'size_max',
                     'changes-since', 'protected']

SUPPORTED_SORT_KEYS = ('name', 'status', 'container_format', 'disk_format',
                       'size', 'id', 'created_at', 'updated_at')

SUPPORTED_SORT_DIRS = ('asc', 'desc')

SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir')
def _normalize_image_location_for_db(image_data):
    """
    This function takes the legacy 'locations' field and the newly added
    'location_data' field from the image_data values dictionary, which flows
    over the wire between the registry and API servers, and converts it
    into the location_data format only, which is then consumable by the
    Image object.

    :param image_data: a dict of values representing information in the image
    :return: a new image data dict
    """
    if 'locations' not in image_data and 'location_data' not in image_data:
        image_data['locations'] = None
        return image_data

    locations = image_data.pop('locations', [])
    location_data = image_data.pop('location_data', [])

    location_data_dict = {}
    for l in locations:
        location_data_dict[l] = {}
    for l in location_data:
        location_data_dict[l['url']] = {'metadata': l['metadata'],
                                        'status': l['status'],
                                        # Note(zhiyan): New location has no ID.
                                        'id': l['id'] if 'id' in l else None}

    # NOTE(jbresnah) preserve original order. tests assume original order,
    # should that be defined functionality
    ordered_keys = locations[:]
    for ld in location_data:
        if ld['url'] not in ordered_keys:
            ordered_keys.append(ld['url'])

    location_data = []
    for loc in ordered_keys:
        data = location_data_dict[loc]
        if data:
            location_data.append({'url': loc,
                                  'metadata': data['metadata'],
                                  'status': data['status'],
                                  'id': data['id']})
        else:
            location_data.append({'url': loc,
                                  'metadata': {},
                                  'status': 'active',
                                  'id': None})

    image_data['locations'] = location_data
    return image_data
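# For illustration, a sketch of the transformation the helper above performs
# (URLs here are hypothetical):
#
#     image_data = {
#         'locations': ['swift://acct/cont/obj'],
#         'location_data': [{'url': 'file:///var/lib/images/abc',
#                            'metadata': {}, 'status': 'active'}],
#     }
#     _normalize_image_location_for_db(image_data)
#     # image_data['locations'] ->
#     #     [{'url': 'swift://acct/cont/obj', 'metadata': {},
#     #       'status': 'active', 'id': None},
#     #      {'url': 'file:///var/lib/images/abc', 'metadata': {},
#     #       'status': 'active', 'id': None}]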
class Controller(object):

    def __init__(self):
        self.db_api = daisy.db.get_api()

    def _get_images(self, context, filters, **params):
        """Get images, wrapping in exception if necessary."""
        # NOTE(markwash): for backwards compatibility, is_public=True for
        # admins actually means "treat me as if I'm not an admin and show me
        # all my images"
        if context.is_admin and params.get('is_public') is True:
            params['admin_as_user'] = True
            del params['is_public']
        try:
            return self.db_api.image_get_all(context, filters=filters,
                                             **params)
        except exception.NotFound:
            LOG.warn(_LW("Invalid marker. Image %(id)s could not be "
                         "found.") % {'id': params.get('marker')})
            msg = _("Invalid marker. Image could not be found.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.Forbidden:
            LOG.warn(_LW("Access denied to image %(id)s but returning "
                         "'not found'") % {'id': params.get('marker')})
            msg = _("Invalid marker. Image could not be found.")
            raise exc.HTTPBadRequest(explanation=msg)
        except Exception:
            LOG.exception(_LE("Unable to get images"))
            raise
    def index(self, req):
        """Return a basic filtered list of public, non-deleted images

        :param req: the Request object coming from the wsgi layer
        :retval a mapping of the following form::

            dict(images=[image_list])

        Where image_list is a sequence of mappings::

            {
            'id': <ID>,
            'name': <NAME>,
            'size': <SIZE>,
            'disk_format': <DISK_FORMAT>,
            'container_format': <CONTAINER_FORMAT>,
            'checksum': <CHECKSUM>
            }
        """
        params = self._get_query_params(req)
        images = self._get_images(req.context, **params)

        results = []
        for image in images:
            result = {}
            for field in DISPLAY_FIELDS_IN_INDEX:
                result[field] = image[field]
            results.append(result)

        LOG.debug("Returning image list")
        return dict(images=results)

    def detail(self, req):
        """Return a filtered list of public, non-deleted images in detail

        :param req: the Request object coming from the wsgi layer
        :retval a mapping of the following form::

            dict(images=[image_list])

        Where image_list is a sequence of mappings containing
        all image model fields.
        """
        params = self._get_query_params(req)

        images = self._get_images(req.context, **params)
        image_dicts = [make_image_dict(i) for i in images]
        LOG.debug("Returning detailed image list")
        return dict(images=image_dicts)

    def _get_query_params(self, req):
        """Extract necessary query parameters from http request.

        :param req: the Request object coming from the wsgi layer
        :retval dictionary of filters to apply to list of images
        """
        params = {
            'filters': self._get_filters(req),
            'limit': self._get_limit(req),
            'sort_key': [self._get_sort_key(req)],
            'sort_dir': [self._get_sort_dir(req)],
            'marker': self._get_marker(req),
        }

        if req.context.is_admin:
            # Only admin gets to look for non-public images
            params['is_public'] = self._get_is_public(req)

        for key, value in params.items():
            if value is None:
                del params[key]

        # Fix for LP Bug #1132294
        # Ensure all shared images are returned in v1
        params['member_status'] = 'all'
        return params
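    # For illustration (a sketch, not from the original module): an admin
    # request "GET /images?limit=5&sort_key=name" with default config would
    # produce params roughly like:
    #
    #     {'filters': {'deleted': False},
    #      'limit': 5,
    #      'sort_key': ['name'],
    #      'sort_dir': ['desc'],
    #      'is_public': True,
    #      'member_status': 'all'}
    #
    # ('marker' is dropped above because its value is None.)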
    def _get_filters(self, req):
        """Return a dictionary of query param filters from the request

        :param req: the Request object coming from the wsgi layer
        :retval a dict of key/value filters
        """
        filters = {}
        properties = {}

        for param in req.params:
            if param in SUPPORTED_FILTERS:
                filters[param] = req.params.get(param)
            if param.startswith('property-'):
                _param = param[9:]
                properties[_param] = req.params.get(param)

        if 'changes-since' in filters:
            isotime = filters['changes-since']
            try:
                filters['changes-since'] = timeutils.parse_isotime(isotime)
            except ValueError:
                raise exc.HTTPBadRequest(_("Unrecognized changes-since value"))

        if 'protected' in filters:
            value = self._get_bool(filters['protected'])
            if value is None:
                raise exc.HTTPBadRequest(_("protected must be True, or "
                                           "False"))

            filters['protected'] = value

        # only allow admins to filter on 'deleted'
        if req.context.is_admin:
            deleted_filter = self._parse_deleted_filter(req)
            if deleted_filter is not None:
                filters['deleted'] = deleted_filter
            elif 'changes-since' not in filters:
                filters['deleted'] = False
        elif 'changes-since' not in filters:
            filters['deleted'] = False

        if properties:
            filters['properties'] = properties

        return filters
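    # For illustration (a sketch, not from the original module): a non-admin
    # query string "?name=cirros&property-ramdisk_id=123" filters down to:
    #
    #     {'name': 'cirros',
    #      'deleted': False,
    #      'properties': {'ramdisk_id': '123'}}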
    def _get_limit(self, req):
        """Parse a limit query param into something usable."""
        try:
            limit = int(req.params.get('limit', CONF.limit_param_default))
        except ValueError:
            raise exc.HTTPBadRequest(_("limit param must be an integer"))

        if limit < 0:
            raise exc.HTTPBadRequest(_("limit param must be positive"))

        return min(CONF.api_limit_max, limit)

    def _get_marker(self, req):
        """Parse a marker query param into something usable."""
        marker = req.params.get('marker', None)

        if marker and not utils.is_uuid_like(marker):
            msg = _('Invalid marker format')
            raise exc.HTTPBadRequest(explanation=msg)

        return marker

    def _get_sort_key(self, req):
        """Parse a sort key query param from the request object."""
        sort_key = req.params.get('sort_key', 'created_at')
        if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS:
            _keys = ', '.join(SUPPORTED_SORT_KEYS)
            msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,)
            raise exc.HTTPBadRequest(explanation=msg)
        return sort_key

    def _get_sort_dir(self, req):
        """Parse a sort direction query param from the request object."""
        sort_dir = req.params.get('sort_dir', 'desc')
        if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS:
            _keys = ', '.join(SUPPORTED_SORT_DIRS)
            msg = _("Unsupported sort_dir. Acceptable values: %s") % (_keys,)
            raise exc.HTTPBadRequest(explanation=msg)
        return sort_dir

    def _get_bool(self, value):
        value = value.lower()
        if value == 'true' or value == '1':
            return True
        elif value == 'false' or value == '0':
            return False

        return None

    def _get_is_public(self, req):
        """Parse is_public into something usable."""
        is_public = req.params.get('is_public', None)

        if is_public is None:
            # NOTE(vish): This preserves the default value of showing only
            #             public images.
            return True
        elif is_public.lower() == 'none':
            return None

        value = self._get_bool(is_public)
        if value is None:
            raise exc.HTTPBadRequest(_("is_public must be None, True, or "
                                       "False"))

        return value

    def _parse_deleted_filter(self, req):
        """Parse deleted into something usable."""
        deleted = req.params.get('deleted')
        if deleted is None:
            return None
        return strutils.bool_from_string(deleted)
    def show(self, req, id):
        """Return data about the given image id."""
        try:
            image = self.db_api.image_get(req.context, id)
            msg = "Successfully retrieved image %(id)s" % {'id': id}
            LOG.debug(msg)
        except exception.NotFound:
            msg = _LI("Image %(id)s not found") % {'id': id}
            LOG.info(msg)
            raise exc.HTTPNotFound()
        except exception.Forbidden:
            # If it's private and doesn't belong to them, don't let on
            # that it exists
            msg = _LI("Access denied to image %(id)s but returning"
                      " 'not found'") % {'id': id}
            LOG.info(msg)
            raise exc.HTTPNotFound()
        except Exception:
            LOG.exception(_LE("Unable to show image %s") % id)
            raise

        return dict(image=make_image_dict(image))

    @utils.mutating
    def delete(self, req, id):
        """Deletes an existing image with the registry.

        :param req: wsgi Request object
        :param id: The opaque internal identifier for the image

        :retval Returns 200 if delete was successful, a fault if not. On
        success, the body contains the deleted image information as a mapping.
        """
        try:
            deleted_image = self.db_api.image_destroy(req.context, id)
            msg = _LI("Successfully deleted image %(id)s") % {'id': id}
            LOG.info(msg)
            return dict(image=make_image_dict(deleted_image))
        except exception.ForbiddenPublicImage:
            msg = _LI("Delete denied for public image %(id)s") % {'id': id}
            LOG.info(msg)
            raise exc.HTTPForbidden()
        except exception.Forbidden:
            # If it's private and doesn't belong to them, don't let on
            # that it exists
            msg = _LI("Access denied to image %(id)s but returning"
                      " 'not found'") % {'id': id}
            LOG.info(msg)
            return exc.HTTPNotFound()
        except exception.NotFound:
            msg = _LI("Image %(id)s not found") % {'id': id}
            LOG.info(msg)
            return exc.HTTPNotFound()
        except Exception:
            LOG.exception(_LE("Unable to delete image %s") % id)
            raise
    @utils.mutating
    def create(self, req, body):
        """Registers a new image with the registry.

        :param req: wsgi Request object
        :param body: Dictionary of information about the image

        :retval Returns the newly-created image information as a mapping,
                which will include the newly-created image's internal id
                in the 'id' field
        """
        image_data = body['image']

        # Ensure the image has a status set
        image_data.setdefault('status', 'active')

        # Set up the image owner
        if not req.context.is_admin or 'owner' not in image_data:
            image_data['owner'] = req.context.owner

        image_id = image_data.get('id')
        if image_id and not utils.is_uuid_like(image_id):
            msg = _LI("Rejecting image creation request for invalid image "
                      "id '%(bad_id)s'") % {'bad_id': image_id}
            LOG.info(msg)
            msg = _("Invalid image id format")
            return exc.HTTPBadRequest(explanation=msg)

        if 'location' in image_data:
            image_data['locations'] = [image_data.pop('location')]

        try:
            image_data = _normalize_image_location_for_db(image_data)
            image_data = self.db_api.image_create(req.context, image_data)
            image_data = dict(image=make_image_dict(image_data))
            msg = (_LI("Successfully created image %(id)s") %
                   image_data['image'])
            LOG.info(msg)
            return image_data
        except exception.Duplicate:
            msg = _("Image with identifier %s already exists!") % image_id
            LOG.warn(msg)
            return exc.HTTPConflict(msg)
        except exception.Invalid as e:
            msg = (_("Failed to add image metadata. "
                     "Got error: %s") % utils.exception_to_str(e))
            LOG.error(msg)
            return exc.HTTPBadRequest(msg)
        except Exception:
            LOG.exception(_LE("Unable to create image %s"), image_id)
            raise
    @utils.mutating
    def update(self, req, id, body):
        """Updates an existing image with the registry.

        :param req: wsgi Request object
        :param body: Dictionary of information about the image
        :param id: The opaque internal identifier for the image

        :retval Returns the updated image information as a mapping
        """
        image_data = body['image']
        from_state = body.get('from_state', None)

        # Prohibit modification of 'owner'
        if not req.context.is_admin and 'owner' in image_data:
            del image_data['owner']

        if 'location' in image_data:
            image_data['locations'] = [image_data.pop('location')]

        purge_props = req.headers.get("X-Glance-Registry-Purge-Props", "false")
        try:
            LOG.debug("Updating image %(id)s with metadata: %(image_data)r",
                      {'id': id,
                       'image_data': dict((k, v) for k, v in image_data.items()
                                          if k != 'locations')})
            image_data = _normalize_image_location_for_db(image_data)
            if purge_props == "true":
                purge_props = True
            else:
                purge_props = False

            updated_image = self.db_api.image_update(req.context, id,
                                                     image_data,
                                                     purge_props=purge_props,
                                                     from_state=from_state)

            msg = _LI("Updating metadata for image %(id)s") % {'id': id}
            LOG.info(msg)
            return dict(image=make_image_dict(updated_image))
        except exception.Invalid as e:
            msg = (_("Failed to update image metadata. "
                     "Got error: %s") % utils.exception_to_str(e))
            LOG.error(msg)
            return exc.HTTPBadRequest(msg)
        except exception.NotFound:
            msg = _LI("Image %(id)s not found") % {'id': id}
            LOG.info(msg)
            raise exc.HTTPNotFound(body='Image not found',
                                   request=req,
                                   content_type='text/plain')
        except exception.ForbiddenPublicImage:
            msg = _LI("Update denied for public image %(id)s") % {'id': id}
            LOG.info(msg)
            raise exc.HTTPForbidden()
        except exception.Forbidden:
            # If it's private and doesn't belong to them, don't let on
            # that it exists
            msg = _LI("Access denied to image %(id)s but returning"
                      " 'not found'") % {'id': id}
            LOG.info(msg)
            raise exc.HTTPNotFound(body='Image not found',
                                   request=req,
                                   content_type='text/plain')
        except exception.Conflict as e:
            LOG.info(utils.exception_to_str(e))
            raise exc.HTTPConflict(body='Image operation conflicts',
                                   request=req,
                                   content_type='text/plain')
        except Exception:
            LOG.exception(_LE("Unable to update image %s") % id)
            raise
def _limit_locations(image):
    locations = image.pop('locations', [])
    image['location_data'] = locations
    image['location'] = None
    for loc in locations:
        if loc['status'] == 'active':
            image['location'] = loc['url']
            break
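# For illustration (hypothetical URLs): the helper above surfaces only the
# first active location through the legacy 'location' field:
#
#     image = {'locations': [{'url': 'file:///a', 'status': 'pending_delete'},
#                            {'url': 'file:///b', 'status': 'active'}]}
#     _limit_locations(image)
#     # image['location'] == 'file:///b'; the full list is preserved in
#     # image['location_data'].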
def make_image_dict(image):
    """Create a dict representation of an image which we can use to
    serialize the image.
    """

    def _fetch_attrs(d, attrs):
        return dict([(a, d[a]) for a in attrs
                     if a in d.keys()])

    # TODO(sirp): should this be a dict, or a list of dicts?
    # A plain dict is more convenient, but list of dicts would provide
    # access to created_at, etc
    properties = dict((p['name'], p['value'])
                      for p in image['properties'] if not p['deleted'])

    image_dict = _fetch_attrs(image, daisy.db.IMAGE_ATTRS)
    image_dict['properties'] = properties
    _limit_locations(image_dict)

    return image_dict


def create_resource():
    """Images resource factory method."""
    deserializer = wsgi.JSONRequestDeserializer()
    serializer = wsgi.JSONResponseSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)
@@ -59,243 +59,6 @@ class Controller(object):
        return dict(members=make_member_list(members,
                                             host_id='host_id'))

    @utils.mutating
    def update_all(self, req, image_id, body):
        """
        Replaces the members of the image with those specified in the
        body. The body is a dict with the following format::

            {"memberships": [
                {"member_id": <MEMBER_ID>,
                 ["can_share": [True|False]]}, ...
            ]}
        """
        self._check_can_access_image_members(req.context)

        # Make sure the image exists
        try:
            image = self.db_api.image_get(req.context, image_id)
        except exception.NotFound:
            msg = _("Image %(id)s not found") % {'id': image_id}
            LOG.warn(msg)
            raise webob.exc.HTTPNotFound(msg)
        except exception.Forbidden:
            # If it's private and doesn't belong to them, don't let on
            # that it exists
            msg = _LW("Access denied to image %(id)s but returning"
                      " 'not found'") % {'id': image_id}
            LOG.warn(msg)
            raise webob.exc.HTTPNotFound()

        # Can they manipulate the membership?
        if not self.is_image_sharable(req.context, image):
            msg = (_LW("User lacks permission to share image %(id)s") %
                   {'id': image_id})
            LOG.warn(msg)
            msg = _("No permission to share that image")
            raise webob.exc.HTTPForbidden(msg)

        # Get the membership list
        try:
            memb_list = body['memberships']
        except Exception as e:
            # Malformed entity...
            msg = _LW("Invalid membership association specified for "
                      "image %(id)s") % {'id': image_id}
            LOG.warn(msg)
            msg = (_("Invalid membership association: %s") %
                   utils.exception_to_str(e))
            raise webob.exc.HTTPBadRequest(explanation=msg)

        add = []
        existing = {}
        # Walk through the incoming memberships
        for memb in memb_list:
            try:
                datum = dict(image_id=image['id'],
                             member=memb['member_id'],
                             can_share=None)
            except Exception as e:
                # Malformed entity...
                msg = _LW("Invalid membership association specified for "
                          "image %(id)s") % {'id': image_id}
                LOG.warn(msg)
                msg = (_("Invalid membership association: %s") %
                       utils.exception_to_str(e))
                raise webob.exc.HTTPBadRequest(explanation=msg)

            # Figure out what can_share should be
            if 'can_share' in memb:
                datum['can_share'] = bool(memb['can_share'])

            # Try to find the corresponding membership
            members = self.db_api.image_member_find(req.context,
                                                    image_id=datum['image_id'],
                                                    member=datum['member'])
            try:
                member = members[0]
            except IndexError:
                # Default can_share
                datum['can_share'] = bool(datum['can_share'])
                add.append(datum)
            else:
                # Are we overriding can_share?
                if datum['can_share'] is None:
                    datum['can_share'] = members[0]['can_share']

                existing[member['id']] = {
                    'values': datum,
                    'membership': member,
                }

        # We now have a filtered list of memberships to add and
        # memberships to modify. Let's start by walking through all
        # the existing image memberships...
        existing_members = self.db_api.image_member_find(req.context,
                                                         image_id=image['id'])
        for member in existing_members:
            if member['id'] in existing:
                # Just update the membership in place
                update = existing[member['id']]['values']
                self.db_api.image_member_update(req.context,
                                                member['id'],
                                                update)
            else:
                # Outdated one; needs to be deleted
                self.db_api.image_member_delete(req.context, member['id'])

        # Now add the non-existent ones
        for memb in add:
            self.db_api.image_member_create(req.context, memb)

        # Make an appropriate result
        msg = (_LI("Successfully updated memberships for image %(id)s") %
               {'id': image_id})
        LOG.info(msg)
        return webob.exc.HTTPNoContent()
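    # For reference, a well-formed replace-members request body for the
    # method above would look like (hypothetical member IDs):
    #
    #     {"memberships": [
    #         {"member_id": "tenant-a"},
    #         {"member_id": "tenant-b", "can_share": True}
    #     ]}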
    @utils.mutating
    def update(self, req, image_id, id, body=None):
        """
        Adds a membership to the image, or updates an existing one.
        If a body is present, it is a dict with the following format::

            {"member": {
                "can_share": [True|False]
            }}

        If "can_share" is provided, the member's ability to share is
        set accordingly. If it is not provided, existing memberships
        remain unchanged and new memberships default to False.
        """
        self._check_can_access_image_members(req.context)

        # Make sure the image exists
        try:
            image = self.db_api.image_get(req.context, image_id)
        except exception.NotFound:
            msg = _("Image %(id)s not found") % {'id': image_id}
            LOG.warn(msg)
            raise webob.exc.HTTPNotFound(msg)
        except exception.Forbidden:
            # If it's private and doesn't belong to them, don't let on
            # that it exists
            msg = _LW("Access denied to image %(id)s but returning"
                      " 'not found'") % {'id': image_id}
            LOG.warn(msg)
            raise webob.exc.HTTPNotFound()

        # Can they manipulate the membership?
        if not self.is_image_sharable(req.context, image):
            msg = (_LW("User lacks permission to share image %(id)s") %
                   {'id': image_id})
            LOG.warn(msg)
            msg = _("No permission to share that image")
            raise webob.exc.HTTPForbidden(msg)

        # Determine the applicable can_share value
        can_share = None
        if body:
            try:
                can_share = bool(body['member']['can_share'])
            except Exception as e:
                # Malformed entity...
                msg = _LW("Invalid membership association specified for "
                          "image %(id)s") % {'id': image_id}
                LOG.warn(msg)
                msg = (_("Invalid membership association: %s") %
                       utils.exception_to_str(e))
                raise webob.exc.HTTPBadRequest(explanation=msg)

        # Look up an existing membership...
        members = self.db_api.image_member_find(req.context,
                                                image_id=image_id,
                                                member=id)
        if members:
            if can_share is not None:
                values = dict(can_share=can_share)
                self.db_api.image_member_update(req.context,
                                                members[0]['id'],
                                                values)
        else:
            values = dict(image_id=image['id'], member=id,
                          can_share=bool(can_share))
            self.db_api.image_member_create(req.context, values)

        msg = (_LI("Successfully updated a membership for image %(id)s") %
               {'id': image_id})
        LOG.info(msg)
        return webob.exc.HTTPNoContent()
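    # For reference, a PUT body granting the member sharing rights via the
    # method above (the body may also be omitted entirely, in which case
    # can_share defaults as described in the docstring):
    #
    #     {"member": {"can_share": True}}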
    @utils.mutating
    def delete(self, req, image_id, id):
        """
        Removes a membership from the image.
        """
        self._check_can_access_image_members(req.context)

        # Make sure the image exists
        try:
            image = self.db_api.image_get(req.context, image_id)
        except exception.NotFound:
            msg = _("Image %(id)s not found") % {'id': image_id}
            LOG.warn(msg)
            raise webob.exc.HTTPNotFound(msg)
        except exception.Forbidden:
            # If it's private and doesn't belong to them, don't let on
            # that it exists
            msg = _LW("Access denied to image %(id)s but returning"
                      " 'not found'") % {'id': image_id}
            LOG.warn(msg)
            raise webob.exc.HTTPNotFound()

        # Can they manipulate the membership?
        if not self.is_image_sharable(req.context, image):
            msg = (_LW("User lacks permission to share image %(id)s") %
                   {'id': image_id})
            LOG.warn(msg)
            msg = _("No permission to share that image")
            raise webob.exc.HTTPForbidden(msg)

        # Look up an existing membership
        members = self.db_api.image_member_find(req.context,
                                                image_id=image_id,
                                                member=id)
        if members:
            self.db_api.image_member_delete(req.context, members[0]['id'])
        else:
            msg = ("%(id)s is not a member of image %(image_id)s" %
                   {'id': id, 'image_id': image_id})
            LOG.debug(msg)
            msg = _("Membership could not be found.")
            raise webob.exc.HTTPNotFound(explanation=msg)

        # Make an appropriate result
        msg = (_LI("Successfully deleted a membership from image %(id)s") %
               {'id': image_id})
        LOG.info(msg)
        return webob.exc.HTTPNoContent()

    @utils.mutating
    def add_cluster_host(self, req, cluster_id, host_id, body=None):
        """
@@ -59,21 +59,18 @@ CONF.import_opt('admin_tenant_name', _registry_client)
|
|||||||
CONF.import_opt('auth_url', _registry_client)
|
CONF.import_opt('auth_url', _registry_client)
|
||||||
CONF.import_opt('auth_strategy', _registry_client)
|
CONF.import_opt('auth_strategy', _registry_client)
|
||||||
CONF.import_opt('auth_region', _registry_client)
|
CONF.import_opt('auth_region', _registry_client)
|
||||||
CONF.import_opt('metadata_encryption_key', 'daisy.common.config')
|
|
||||||
|
|
||||||
_CLIENT_CREDS = None
|
_CLIENT_CREDS = None
|
||||||
_CLIENT_HOST = None
|
_CLIENT_HOST = None
|
||||||
_CLIENT_PORT = None
|
_CLIENT_PORT = None
|
||||||
_CLIENT_KWARGS = {}
|
_CLIENT_KWARGS = {}
|
||||||
# AES key used to encrypt 'location' metadata
|
|
||||||
_METADATA_ENCRYPTION_KEY = None
|
|
||||||
|
|
||||||
|
|
||||||
def configure_registry_client():
|
def configure_registry_client():
|
||||||
"""
|
"""
|
||||||
Sets up a registry client for use in registry lookups
|
Sets up a registry client for use in registry lookups
|
||||||
"""
|
"""
|
||||||
global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT, _METADATA_ENCRYPTION_KEY
|
global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
|
||||||
try:
|
try:
|
||||||
host, port = CONF.registry_host, CONF.registry_port
|
host, port = CONF.registry_host, CONF.registry_port
|
||||||
except cfg.ConfigFileValueError:
|
except cfg.ConfigFileValueError:
|
||||||
@@ -87,7 +84,6 @@ def configure_registry_client():
|
|||||||
|
|
||||||
_CLIENT_HOST = host
|
_CLIENT_HOST = host
|
||||||
_CLIENT_PORT = port
|
_CLIENT_PORT = port
|
||||||
_METADATA_ENCRYPTION_KEY = CONF.metadata_encryption_key
|
|
||||||
_CLIENT_KWARGS = {
|
_CLIENT_KWARGS = {
|
||||||
'use_ssl': CONF.registry_client_protocol.lower() == 'https',
|
'use_ssl': CONF.registry_client_protocol.lower() == 'https',
|
||||||
'key_file': CONF.registry_client_key_file,
|
'key_file': CONF.registry_client_key_file,
|
||||||
@@ -122,7 +118,6 @@ def configure_registry_admin_creds():
|
|||||||
|
|
||||||
def get_registry_client(cxt):
|
def get_registry_client(cxt):
|
||||||
global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
|
global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
|
||||||
global _METADATA_ENCRYPTION_KEY
|
|
||||||
kwargs = _CLIENT_KWARGS.copy()
|
kwargs = _CLIENT_KWARGS.copy()
|
||||||
if CONF.use_user_token:
|
if CONF.use_user_token:
|
||||||
kwargs['auth_token'] = cxt.auth_token
|
kwargs['auth_token'] = cxt.auth_token
|
||||||
@@ -139,67 +134,7 @@ def get_registry_client(cxt):
         }
         kwargs['identity_headers'] = identity_headers
     return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT,
-                                 _METADATA_ENCRYPTION_KEY, **kwargs)
+                                 **kwargs)
 
 
-def get_images_list(context, **kwargs):
-    c = get_registry_client(context)
-    return c.get_images(**kwargs)
-
-
-def get_images_detail(context, **kwargs):
-    c = get_registry_client(context)
-    return c.get_images_detailed(**kwargs)
-
-
-def get_image_metadata(context, image_id):
-    c = get_registry_client(context)
-    return c.get_image(image_id)
-
-
-def add_image_metadata(context, image_meta):
-    LOG.debug("Adding image metadata...")
-    c = get_registry_client(context)
-    return c.add_image(image_meta)
-
-
-def update_image_metadata(context, image_id, image_meta,
-                          purge_props=False, from_state=None):
-    LOG.debug("Updating image metadata for image %s...", image_id)
-    c = get_registry_client(context)
-    return c.update_image(image_id, image_meta, purge_props=purge_props,
-                          from_state=from_state)
-
-
-def delete_image_metadata(context, image_id):
-    LOG.debug("Deleting image metadata for image %s...", image_id)
-    c = get_registry_client(context)
-    return c.delete_image(image_id)
-
-
-def get_image_members(context, image_id):
-    c = get_registry_client(context)
-    return c.get_image_members(image_id)
-
-
-def get_member_images(context, member_id):
-    c = get_registry_client(context)
-    return c.get_member_images(member_id)
-
-
-def replace_members(context, image_id, member_data):
-    c = get_registry_client(context)
-    return c.replace_members(image_id, member_data)
-
-
-def add_member(context, image_id, member_id, can_share=None):
-    c = get_registry_client(context)
-    return c.add_member(image_id, member_id, can_share=can_share)
-
-
-def delete_member(context, image_id, member_id):
-    c = get_registry_client(context)
-    return c.delete_member(image_id, member_id)
-
-
 def add_host_metadata(context, host_meta):
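All eleven deleted wrappers share one shape: build a per-request client from the caller's context, then forward the call. A table-driven sketch of that shim pattern (hypothetical, not code from this tree) makes the repetition explicit:

# Hypothetical: generate the forwarding shims instead of writing each one out.
_FORWARDS = {
    'get_images_list': 'get_images',
    'get_images_detail': 'get_images_detailed',
    'get_image_metadata': 'get_image',
}

def _make_shim(client_method):
    def shim(context, *args, **kwargs):
        c = get_registry_client(context)      # fresh client per request context
        return getattr(c, client_method)(*args, **kwargs)
    return shim

for _name, _target in _FORWARDS.items():
    globals()[_name] = _make_shim(_target)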
@@ -25,7 +25,6 @@ from oslo_utils import excutils
 from daisy.common import client
 from daisy.common import crypt
 from daisy import i18n
-from daisy.registry.api.v1 import images
 from daisy.registry.api.v1 import hosts
 from daisy.registry.api.v1 import config_files
 from daisy.registry.api.v1 import config_sets
@@ -44,12 +43,10 @@ class RegistryClient(client.BaseClient):
 
     DEFAULT_PORT = 19191
 
-    def __init__(self, host=None, port=None, metadata_encryption_key=None,
+    def __init__(self, host=None, port=None,
                  identity_headers=None, **kwargs):
         """
-        :param metadata_encryption_key: Key used to encrypt 'location' metadata
         """
-        self.metadata_encryption_key = metadata_encryption_key
         # NOTE (dprince): by default base client overwrites host and port
         # settings when using keystone. configure_via_auth=False disables
         # this behaviour to ensure we still send requests to the Registry API
@@ -57,63 +54,6 @@ class RegistryClient(client.BaseClient):
         client.BaseClient.__init__(self, host, port, configure_via_auth=False,
                                    **kwargs)
 
-    def decrypt_metadata(self, image_metadata):
-        if self.metadata_encryption_key:
-            if image_metadata.get('location'):
-                location = crypt.urlsafe_decrypt(self.metadata_encryption_key,
-                                                 image_metadata['location'])
-                image_metadata['location'] = location
-            if image_metadata.get('location_data'):
-                ld = []
-                for loc in image_metadata['location_data']:
-                    url = crypt.urlsafe_decrypt(self.metadata_encryption_key,
-                                                loc['url'])
-                    ld.append({'id': loc['id'], 'url': url,
-                               'metadata': loc['metadata'],
-                               'status': loc['status']})
-                image_metadata['location_data'] = ld
-        return image_metadata
-
-    def encrypt_metadata(self, image_metadata):
-        if self.metadata_encryption_key:
-            location_url = image_metadata.get('location')
-            if location_url:
-                location = crypt.urlsafe_encrypt(self.metadata_encryption_key,
-                                                 location_url,
-                                                 64)
-                image_metadata['location'] = location
-            if image_metadata.get('location_data'):
-                ld = []
-                for loc in image_metadata['location_data']:
-                    if loc['url'] == location_url:
-                        url = location
-                    else:
-                        url = crypt.urlsafe_encrypt(
-                            self.metadata_encryption_key, loc['url'], 64)
-                    ld.append({'url': url, 'metadata': loc['metadata'],
-                               'status': loc['status'],
-                               # NOTE(zhiyan): New location has no ID field.
-                               'id': loc.get('id')})
-                image_metadata['location_data'] = ld
-        return image_metadata
-
-    def get_images(self, **kwargs):
-        """
-        Returns a list of image id/name mappings from Registry
-
-        :param filters: dict of keys & expected values to filter results
-        :param marker: image id after which to start page
-        :param limit: max number of images to return
-        :param sort_key: results will be ordered by this image attribute
-        :param sort_dir: direction in which to order results (asc, desc)
-        """
-        params = self._extract_params(kwargs, images.SUPPORTED_PARAMS)
-        res = self.do_request("GET", "/images", params=params)
-        image_list = jsonutils.loads(res.read())['images']
-        for image in image_list:
-            image = self.decrypt_metadata(image)
-        return image_list
-
     def do_request(self, method, action, **kwargs):
         try:
             kwargs['headers'] = kwargs.get('headers', {})
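The deleted encrypt_metadata/decrypt_metadata pair is a straight round trip through daisy.common.crypt, keyed by the metadata_encryption_key option that the earlier hunks drop from the configuration. A sketch of that round trip, assuming crypt keeps glance's urlsafe_encrypt(key, plaintext, blocksize) / urlsafe_decrypt(key, ciphertext) signatures as the call sites above suggest:

from daisy.common import crypt

key = '0123456789abcdef'          # example 16-byte AES key (16/24/32 allowed)
url = 'swift+https://example.net/v1/images/abc'   # hypothetical location URL

ciphertext = crypt.urlsafe_encrypt(key, url, 64)  # 64 matches the call sites
assert crypt.urlsafe_decrypt(key, ciphertext) == url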
@@ -138,133 +78,6 @@ class RegistryClient(client.BaseClient):
                                'exc_name': exc_name})
         return res
 
-    def get_images_detailed(self, **kwargs):
-        """
-        Returns a list of detailed image data mappings from Registry
-
-        :param filters: dict of keys & expected values to filter results
-        :param marker: image id after which to start page
-        :param limit: max number of images to return
-        :param sort_key: results will be ordered by this image attribute
-        :param sort_dir: direction in which to order results (asc, desc)
-        """
-        params = self._extract_params(kwargs, images.SUPPORTED_PARAMS)
-        res = self.do_request("GET", "/images/detail", params=params)
-        image_list = jsonutils.loads(res.read())['images']
-        for image in image_list:
-            image = self.decrypt_metadata(image)
-        return image_list
-
-    def get_image(self, image_id):
-        """Returns a mapping of image metadata from Registry."""
-        res = self.do_request("GET", "/images/%s" % image_id)
-        data = jsonutils.loads(res.read())['image']
-        return self.decrypt_metadata(data)
-
-    def add_image(self, image_metadata):
-        """
-        Tells registry about an image's metadata
-        """
-        headers = {
-            'Content-Type': 'application/json',
-        }
-
-        if 'image' not in image_metadata:
-            image_metadata = dict(image=image_metadata)
-
-        encrypted_metadata = self.encrypt_metadata(image_metadata['image'])
-        image_metadata['image'] = encrypted_metadata
-        body = jsonutils.dumps(image_metadata)
-
-        res = self.do_request("POST", "/images", body=body, headers=headers)
-        # Registry returns a JSONified dict(image=image_info)
-        data = jsonutils.loads(res.read())
-        image = data['image']
-        return self.decrypt_metadata(image)
-
-    def update_image(self, image_id, image_metadata, purge_props=False,
-                     from_state=None):
-        """
-        Updates Registry's information about an image
-        """
-        if 'image' not in image_metadata:
-            image_metadata = dict(image=image_metadata)
-
-        encrypted_metadata = self.encrypt_metadata(image_metadata['image'])
-        image_metadata['image'] = encrypted_metadata
-        image_metadata['from_state'] = from_state
-        body = jsonutils.dumps(image_metadata)
-
-        headers = {
-            'Content-Type': 'application/json',
-        }
-
-        if purge_props:
-            headers["X-Glance-Registry-Purge-Props"] = "true"
-
-        res = self.do_request("PUT", "/images/%s" % image_id, body=body,
-                              headers=headers)
-        data = jsonutils.loads(res.read())
-        image = data['image']
-        return self.decrypt_metadata(image)
-
-    def delete_image(self, image_id):
-        """
-        Deletes Registry's information about an image
-        """
-        res = self.do_request("DELETE", "/images/%s" % image_id)
-        data = jsonutils.loads(res.read())
-        image = data['image']
-        return image
-
-    def get_image_members(self, image_id):
-        """Return a list of membership associations from Registry."""
-        res = self.do_request("GET", "/images/%s/members" % image_id)
-        data = jsonutils.loads(res.read())['members']
-        return data
-
-    def get_member_images(self, member_id):
-        """Return a list of membership associations from Registry."""
-        res = self.do_request("GET", "/shared-images/%s" % member_id)
-        data = jsonutils.loads(res.read())['shared_images']
-        return data
-
-    def replace_members(self, image_id, member_data):
-        """Replace registry's information about image membership."""
-        if isinstance(member_data, (list, tuple)):
-            member_data = dict(memberships=list(member_data))
-        elif (isinstance(member_data, dict) and
-              'memberships' not in member_data):
-            member_data = dict(memberships=[member_data])
-
-        body = jsonutils.dumps(member_data)
-
-        headers = {'Content-Type': 'application/json', }
-
-        res = self.do_request("PUT", "/images/%s/members" % image_id,
-                              body=body, headers=headers)
-        return self.get_status_code(res) == 204
-
-    def add_member(self, image_id, member_id, can_share=None):
-        """Add to registry's information about image membership."""
-        body = None
-        headers = {}
-        # Build up a body if can_share is specified
-        if can_share is not None:
-            body = jsonutils.dumps(dict(member=dict(can_share=can_share)))
-            headers['Content-Type'] = 'application/json'
-
-        url = "/images/%s/members/%s" % (image_id, member_id)
-        res = self.do_request("PUT", url, body=body,
-                              headers=headers)
-        return self.get_status_code(res) == 204
-
-    def delete_member(self, image_id, member_id):
-        """Delete registry's information about image membership."""
-        res = self.do_request("DELETE", "/images/%s/members/%s" %
-                              (image_id, member_id))
-        return self.get_status_code(res) == 204
-
     def add_host(self, host_metadata):
         """
         Tells registry about an host's metadata
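Every deleted image method above is the same three-step pipeline: do_request, unwrap the JSON envelope, decrypt_metadata. Condensed into one hypothetical helper for clarity (jsonutils is assumed to come from oslo_serialization, as in glance):

from oslo_serialization import jsonutils

def _fetch_image(registry_client, image_id):
    # GET /images/<id>, unwrap {'image': ...}, then decrypt 'location' fields
    res = registry_client.do_request("GET", "/images/%s" % image_id)
    data = jsonutils.loads(res.read())['image']
    return registry_client.decrypt_metadata(data)   # no-op when no key is set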
@@ -1,111 +0,0 @@
-# Copyright 2013 Red Hat, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Registry's Client V2
-"""
-
-import os
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from daisy.common import exception
-from daisy import i18n
-from daisy.registry.client.v2 import client
-
-LOG = logging.getLogger(__name__)
-_ = i18n._
-
-CONF = cfg.CONF
-_registry_client = 'daisy.registry.client'
-CONF.import_opt('registry_client_protocol', _registry_client)
-CONF.import_opt('registry_client_key_file', _registry_client)
-CONF.import_opt('registry_client_cert_file', _registry_client)
-CONF.import_opt('registry_client_ca_file', _registry_client)
-CONF.import_opt('registry_client_insecure', _registry_client)
-CONF.import_opt('registry_client_timeout', _registry_client)
-CONF.import_opt('use_user_token', _registry_client)
-CONF.import_opt('admin_user', _registry_client)
-CONF.import_opt('admin_password', _registry_client)
-CONF.import_opt('admin_tenant_name', _registry_client)
-CONF.import_opt('auth_url', _registry_client)
-CONF.import_opt('auth_strategy', _registry_client)
-CONF.import_opt('auth_region', _registry_client)
-
-_CLIENT_CREDS = None
-_CLIENT_HOST = None
-_CLIENT_PORT = None
-_CLIENT_KWARGS = {}
-
-
-def configure_registry_client():
-    """
-    Sets up a registry client for use in registry lookups
-    """
-    global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
-    try:
-        host, port = CONF.registry_host, CONF.registry_port
-    except cfg.ConfigFileValueError:
-        msg = _("Configuration option was not valid")
-        LOG.error(msg)
-        raise exception.BadRegistryConnectionConfiguration(msg)
-    except IndexError:
-        msg = _("Could not find required configuration option")
-        LOG.error(msg)
-        raise exception.BadRegistryConnectionConfiguration(msg)
-
-    _CLIENT_HOST = host
-    _CLIENT_PORT = port
-    _CLIENT_KWARGS = {
-        'use_ssl': CONF.registry_client_protocol.lower() == 'https',
-        'key_file': CONF.registry_client_key_file,
-        'cert_file': CONF.registry_client_cert_file,
-        'ca_file': CONF.registry_client_ca_file,
-        'insecure': CONF.registry_client_insecure,
-        'timeout': CONF.registry_client_timeout,
-    }
-
-    if not CONF.use_user_token:
-        configure_registry_admin_creds()
-
-
-def configure_registry_admin_creds():
-    global _CLIENT_CREDS
-
-    if CONF.auth_url or os.getenv('OS_AUTH_URL'):
-        strategy = 'keystone'
-    else:
-        strategy = CONF.auth_strategy
-
-    _CLIENT_CREDS = {
-        'user': CONF.admin_user,
-        'password': CONF.admin_password,
-        'username': CONF.admin_user,
-        'tenant': CONF.admin_tenant_name,
-        'auth_url': os.getenv('OS_AUTH_URL') or CONF.auth_url,
-        'strategy': strategy,
-        'region': CONF.auth_region,
-    }
-
-
-def get_registry_client(cxt):
-    global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
-    kwargs = _CLIENT_KWARGS.copy()
-    if CONF.use_user_token:
-        kwargs['auth_token'] = cxt.auth_token
-    if _CLIENT_CREDS:
-        kwargs['creds'] = _CLIENT_CREDS
-    return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT, **kwargs)
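The whole v2 api module deleted above is the configure-once / build-per-request singleton pattern: parsed settings live in module globals, and each call stamps per-request credentials onto a copy. The same shape in a self-contained miniature (stub client and names are illustrative, not from this tree):

class _StubClient(object):         # stands in for client.RegistryClient
    def __init__(self, host, port, **kwargs):
        self.host, self.port, self.kwargs = host, port, kwargs

_HOST = _PORT = None
_KWARGS = {}

def configure(host, port, **kwargs):
    global _HOST, _PORT, _KWARGS
    _HOST, _PORT, _KWARGS = host, port, kwargs

def get_client(auth_token=None):
    kwargs = dict(_KWARGS)         # copy so the cached settings stay pristine
    if auth_token:
        kwargs['auth_token'] = auth_token
    return _StubClient(_HOST, _PORT, **kwargs)

configure('127.0.0.1', 9191, timeout=600)
assert get_client('tok').kwargs['auth_token'] == 'tok'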
@@ -1,31 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Simple client class to speak with any RESTful service that implements
-the Glance Registry API
-"""
-
-from oslo_log import log as logging
-
-from daisy.common import rpc
-
-LOG = logging.getLogger(__name__)
-
-
-class RegistryClient(rpc.RPCClient):
-    """Registry's V2 Client."""
-
-    DEFAULT_PORT = 9191
@@ -1,226 +0,0 @@
-# Copyright 2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import jsonschema
-import six
-
-from daisy.common import exception
-from daisy.common import utils
-from daisy import i18n
-
-_ = i18n._
-
-
-class Schema(object):
-
-    def __init__(self, name, properties=None, links=None, required=None,
-                 definitions=None):
-        self.name = name
-        if properties is None:
-            properties = {}
-        self.properties = properties
-        self.links = links
-        self.required = required
-        self.definitions = definitions
-
-    def validate(self, obj):
-        try:
-            jsonschema.validate(obj, self.raw())
-        except jsonschema.ValidationError as e:
-            raise exception.InvalidObject(schema=self.name,
-                                          reason=utils.exception_to_str(e))
-
-    def filter(self, obj):
-        filtered = {}
-        for key, value in six.iteritems(obj):
-            if self._filter_func(self.properties, key):
-                filtered[key] = value
-        return filtered
-
-    @staticmethod
-    def _filter_func(properties, key):
-        return key in properties
-
-    def merge_properties(self, properties):
-        # Ensure custom props aren't attempting to override base props
-        original_keys = set(self.properties.keys())
-        new_keys = set(properties.keys())
-        intersecting_keys = original_keys.intersection(new_keys)
-        conflicting_keys = [k for k in intersecting_keys
-                            if self.properties[k] != properties[k]]
-        if conflicting_keys:
-            props = ', '.join(conflicting_keys)
-            reason = _("custom properties (%(props)s) conflict "
-                       "with base properties")
-            raise exception.SchemaLoadError(reason=reason % {'props': props})
-
-        self.properties.update(properties)
-
-    def raw(self):
-        raw = {
-            'name': self.name,
-            'properties': self.properties,
-            'additionalProperties': False,
-        }
-        if self.definitions:
-            raw['definitions'] = self.definitions
-        if self.required:
-            raw['required'] = self.required
-        if self.links:
-            raw['links'] = self.links
-        return raw
-
-    def minimal(self):
-        minimal = {
-            'name': self.name,
-            'properties': self.properties
-        }
-        if self.definitions:
-            minimal['definitions'] = self.definitions
-        if self.required:
-            minimal['required'] = self.required
-        return minimal
-
-
-class PermissiveSchema(Schema):
-    @staticmethod
-    def _filter_func(properties, key):
-        return True
-
-    def raw(self):
-        raw = super(PermissiveSchema, self).raw()
-        raw['additionalProperties'] = {'type': 'string'}
-        return raw
-
-    def minimal(self):
-        minimal = super(PermissiveSchema, self).raw()
-        return minimal
-
-
-class CollectionSchema(object):
-
-    def __init__(self, name, item_schema):
-        self.name = name
-        self.item_schema = item_schema
-
-    def raw(self):
-        definitions = None
-        if self.item_schema.definitions:
-            definitions = self.item_schema.definitions
-            self.item_schema.definitions = None
-        raw = {
-            'name': self.name,
-            'properties': {
-                self.name: {
-                    'type': 'array',
-                    'items': self.item_schema.raw(),
-                },
-                'first': {'type': 'string'},
-                'next': {'type': 'string'},
-                'schema': {'type': 'string'},
-            },
-            'links': [
-                {'rel': 'first', 'href': '{first}'},
-                {'rel': 'next', 'href': '{next}'},
-                {'rel': 'describedby', 'href': '{schema}'},
-            ],
-        }
-        if definitions:
-            raw['definitions'] = definitions
-            self.item_schema.definitions = definitions
-
-        return raw
-
-    def minimal(self):
-        definitions = None
-        if self.item_schema.definitions:
-            definitions = self.item_schema.definitions
-            self.item_schema.definitions = None
-        minimal = {
-            'name': self.name,
-            'properties': {
-                self.name: {
-                    'type': 'array',
-                    'items': self.item_schema.minimal(),
-                },
-                'schema': {'type': 'string'},
-            },
-            'links': [
-                {'rel': 'describedby', 'href': '{schema}'},
-            ],
-        }
-        if definitions:
-            minimal['definitions'] = definitions
-            self.item_schema.definitions = definitions
-
-        return minimal
-
-
-class DictCollectionSchema(Schema):
-    def __init__(self, name, item_schema):
-        self.name = name
-        self.item_schema = item_schema
-
-    def raw(self):
-        definitions = None
-        if self.item_schema.definitions:
-            definitions = self.item_schema.definitions
-            self.item_schema.definitions = None
-        raw = {
-            'name': self.name,
-            'properties': {
-                self.name: {
-                    'type': 'object',
-                    'additionalProperties': self.item_schema.raw(),
-                },
-                'first': {'type': 'string'},
-                'next': {'type': 'string'},
-                'schema': {'type': 'string'},
-            },
-            'links': [
-                {'rel': 'first', 'href': '{first}'},
-                {'rel': 'next', 'href': '{next}'},
-                {'rel': 'describedby', 'href': '{schema}'},
-            ],
-        }
-        if definitions:
-            raw['definitions'] = definitions
-            self.item_schema.definitions = definitions
-
-        return raw
-
-    def minimal(self):
-        definitions = None
-        if self.item_schema.definitions:
-            definitions = self.item_schema.definitions
-            self.item_schema.definitions = None
-        minimal = {
-            'name': self.name,
-            'properties': {
-                self.name: {
-                    'type': 'object',
-                    'additionalProperties': self.item_schema.minimal(),
-                },
-                'schema': {'type': 'string'},
-            },
-            'links': [
-                {'rel': 'describedby', 'href': '{schema}'},
-            ],
-        }
-        if definitions:
-            minimal['definitions'] = definitions
-            self.item_schema.definitions = definitions
-
-        return minimal
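The deleted Schema class is a thin wrapper over jsonschema: raw() emits a closed schema (additionalProperties: False), validate() re-raises validation failures as exception.InvalidObject, and filter() drops unknown keys. What the validation step reduces to, with plain jsonschema:

import jsonschema

image_schema = {
    'name': 'image',                        # jsonschema ignores unknown keys
    'properties': {'id': {'type': 'string'},
                   'name': {'type': 'string'}},
    'additionalProperties': False,          # closed, like Schema.raw()
}

jsonschema.validate({'id': 'abc', 'name': 'cirros'}, image_schema)  # passes
try:
    jsonschema.validate({'id': 'abc', 'size': 1}, image_schema)     # extra key
except jsonschema.ValidationError as e:
    print(e.message)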