Remove image cache image verification

There was a substantial amount of code in the image cache devoted to
checksumming images. However, these checksums were never used beyond
the generation of a log message during the image cache manager's
periodic task of the form:

  Corrupt base files: ...

This code was expensive to run, added significant complexity to the
image cache manager, and was not actually used by anything. This patch
removes it. The only functional change should be that the above log
message is no longer generated.

We can't remove it completely in this pass because the image cache
may be on shared storage with an older compute host which still
expects these checksum info files to exist. Consequently we retain
the code which deletes the image metadata file when deleting an entry
from the cache, and we don't delete these files automatically when we
find them during the periodic scan. These vestiges can be removed in
Ocata.
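
For illustration, the retained behaviour amounts to the following
minimal sketch (the remove_cache_entry helper is hypothetical;
get_info_filename is the existing helper in
nova.virt.libvirt.imagecache, passed in here so the snippet stands
alone):

  import os

  def remove_cache_entry(base_file, get_info_filename):
      """Remove a cached base file plus any legacy metadata file."""
      os.remove(base_file)
      # Newton no longer writes info files, but a pre-Newton compute
      # sharing this cache may still create them. They are deleted
      # here, alongside the cache entry, and nowhere else; the
      # periodic scan merely ignores them.
      signature = get_info_filename(base_file)  # e.g. <base_file>.info
      if os.path.exists(signature):
          os.remove(signature)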

Additional context can be found here:
http://lists.openstack.org/pipermail/openstack-dev/2016-May/095751.html

Change-Id: I3d84117809d5058cb2c779754523913347d5374b
Author: Matthew Booth
Date: 2016-05-25 12:06:33 +01:00
Parent: f124b85fa4
Commit: 2c389ccc8c
4 changed files with 86 additions and 467 deletions


@@ -249,17 +249,30 @@ libvirt_imagecache_opts = [
default='$instances_path/$image_cache_subdirectory_name/'
'%(image)s.info',
help='Allows image information files to be stored in '
'non-standard locations'),
'non-standard locations',
deprecated_for_removal=True,
deprecated_reason='Image info files are no longer used by the '
'image cache'),
cfg.IntOpt('remove_unused_resized_minimum_age_seconds',
default=3600,
help='Unused resized base images younger than this will not be '
'removed'),
cfg.BoolOpt('checksum_base_images',
default=False,
help='Write a checksum for files in _base to disk'),
help='Write a checksum for files in _base to disk',
deprecated_for_removal=True,
deprecated_reason='The image cache no longer periodically '
'calculates checksums of stored images. '
'Data integrity can be checked at the block '
'or filesystem level.'),
cfg.IntOpt('checksum_interval_seconds',
default=3600,
help='How frequently to checksum base images'),
help='How frequently to checksum base images',
deprecated_for_removal=True,
deprecated_reason='The image cache no longer periodically '
'calculates checksums of stored images. '
'Data integrity can be checked at the block '
'or filesystem level.'),
]
libvirt_lvm_opts = [


@@ -15,7 +15,6 @@
import contextlib
import hashlib
import os
import time
@@ -24,7 +23,6 @@ from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_log import formatters
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from six.moves import cStringIO
@@ -63,11 +61,6 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
'instance-00000003',
'banana-42-hamster'])
def test_read_stored_checksum_missing(self):
self.stub_out('os.path.exists', lambda x: False)
csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
self.assertIsNone(csum)
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(time, 'time', return_value=2000000)
@mock.patch.object(os.path, 'getmtime', return_value=1000000)
@@ -84,45 +77,6 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertFalse(exists)
self.assertEqual(0, age)
def test_read_stored_checksum(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n'
fname = os.path.join(tmpdir, 'aaa')
info_fname = imagecache.get_info_filename(fname)
f = open(info_fname, 'w')
f.write(csum_input)
f.close()
csum_output = imagecache.read_stored_checksum(fname,
timestamped=False)
self.assertEqual(csum_input.rstrip(),
'{"sha1": "%s"}' % csum_output)
def test_read_stored_checksum_legacy_essex(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
old_fname = fname + '.sha1'
f = open(old_fname, 'w')
f.write('fdghkfhkgjjksfdgjksjkghsdf')
f.close()
csum_output = imagecache.read_stored_checksum(fname,
timestamped=False)
self.assertEqual(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')
self.assertFalse(os.path.exists(old_fname))
info_fname = imagecache.get_info_filename(fname)
self.assertTrue(os.path.exists(info_fname))
def test_list_base_images(self):
listing = ['00000001',
'ephemeral_0_20_None',
@@ -333,7 +287,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
(base_file3, False, True)])
@contextlib.contextmanager
def _make_base_file(self, checksum=True, lock=True):
def _make_base_file(self, lock=True, info=False):
"""Make a base file for testing."""
with utils.tempdir() as tmpdir:
@@ -357,14 +311,23 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
base_file = open(fname, 'r')
if checksum:
imagecache.write_stored_checksum(fname)
# TODO(mdbooth): Info files are no longer created by Newton,
# but we must test that we continue to handle them correctly as
# they may still be around from before the upgrade, and they may
# be created by pre-Newton computes if we're on shared storage.
# Once we can be sure that all computes are running at least
# Newton (i.e. in Ocata), we can be sure that nothing is
# creating info files any more, and we can delete the tests for
# them.
if info:
# We're only checking for deletion, so contents are irrelevant
open(imagecache.get_info_filename(fname), 'w').close()
base_file.close()
yield fname
def test_remove_base_file(self):
with self._make_base_file() as fname:
with self._make_base_file(info=True) as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
info_fname = imagecache.get_info_filename(fname)
@@ -383,11 +346,14 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
image_cache_manager._remove_base_file(fname)
self.assertFalse(os.path.exists(fname))
self.assertFalse(os.path.exists(info_fname))
self.assertFalse(os.path.exists(lock_file))
# TODO(mdbooth): Remove test for deletion of info file in Ocata
# (see comment in _make_base_file)
self.assertFalse(os.path.exists(info_fname))
def test_remove_base_file_original(self):
with self._make_base_file() as fname:
with self._make_base_file(info=True) as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.originals = [fname]
image_cache_manager._remove_base_file(fname)
@@ -409,6 +375,9 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
image_cache_manager._remove_base_file(fname)
self.assertFalse(os.path.exists(fname))
# TODO(mdbooth): Remove test for deletion of info file in Ocata
# (see comment in _make_base_file)
self.assertFalse(os.path.exists(info_fname))
def test_remove_base_file_dne(self):
@@ -458,7 +427,6 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files,
[fname])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
@mock.patch.object(libvirt_utils, 'update_mtime')
def test_handle_base_image_used(self, mock_mtime):
@@ -473,7 +441,6 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
@mock.patch.object(libvirt_utils, 'update_mtime')
def test_handle_base_image_used_remotely(self, mock_mtime):
@@ -488,7 +455,6 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_absent(self):
img = '123'
@@ -500,7 +466,6 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
self.assertNotEqual(stream.getvalue().find('an absent base file'),
-1)
@@ -522,32 +487,6 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
@mock.patch.object(libvirt_utils, 'update_mtime')
def test_handle_base_image_checksum_fails(self, mock_mtime):
self.flags(checksum_base_images=True, group='libvirt')
img = '123'
with self._make_base_file() as fname:
with open(fname, 'w') as f:
f.write('banana')
d = {'sha1': '21323454'}
with open('%s.info' % fname, 'w') as f:
f.write(jsonutils.dumps(d))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files,
[fname])
@mock.patch.object(libvirt_utils, 'update_mtime')
@mock.patch.object(lockutils, 'external_lock')
@@ -670,10 +609,6 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: get_disk_backing_file(x))
# Fake out verifying checksums, as that is tested elsewhere
self.stubs.Set(image_cache_manager, '_verify_checksum',
lambda x, y: True)
# Fake getmtime as well
orig_getmtime = os.path.getmtime
@@ -723,9 +658,6 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
fq_path('%s_10737418240' % hashed_1)]:
self.assertIn(rem, image_cache_manager.removable_base_files)
# Ensure there are no "corrupt" images as well
self.assertEqual(len(image_cache_manager.corrupt_base_files), 0)
def test_verify_base_images_no_base(self):
self.flags(instances_path='/tmp/no/such/dir/name/please')
image_cache_manager = imagecache.ImageCacheManager()
@@ -748,66 +680,6 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertFalse(is_valid_info_file(base_filename + '.sha1'))
self.assertTrue(is_valid_info_file(base_filename + '.info'))
def test_configured_checksum_path(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
# Ensure there is a base directory
os.mkdir(os.path.join(tmpdir, '_base'))
# Fake the database call which lists running instances
instances = [{'image_ref': '1',
'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
'task_state': ''}]
all_instances = []
for instance in instances:
all_instances.append(fake_instance.fake_instance_obj(
None, **instance))
def touch(filename):
f = open(filename, 'w')
f.write('Touched')
f.close()
old = time.time() - (25 * 3600)
hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
base_filename = os.path.join(tmpdir, hashed)
touch(base_filename)
touch(base_filename + '.info')
os.utime(base_filename + '.info', (old, old))
touch(base_filename + '.info')
os.utime(base_filename + '.info', (old, old))
self.mox.StubOutWithMock(
objects.block_device.BlockDeviceMappingList,
'bdms_by_instance_uuid')
ctxt = context.get_admin_context()
objects.block_device.BlockDeviceMappingList.bdms_by_instance_uuid(
ctxt, ['123', '456']).AndReturn({})
self.mox.ReplayAll()
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.update(ctxt,
all_instances)
self.assertTrue(os.path.exists(base_filename))
self.assertTrue(os.path.exists(base_filename + '.info'))
def test_run_image_cache_manager_pass(self):
was = {'called': False}
@@ -897,111 +769,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
lock_path = os.path.join(CONF.instances_path, 'locks')
lock_file = os.path.split(base_file)[-1]
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_old_enough_file(
base_file, 60, remove_sig=False, remove_lock=False)
image_cache_manager._remove_old_enough_file(base_file, 60,
remove_lock=False)
mock_synchronized.assert_called_once_with(lock_file, external=True,
lock_path=lock_path)
class VerifyChecksumTestCase(test.NoDBTestCase):
def setUp(self):
super(VerifyChecksumTestCase, self).setUp()
self.img = {'container_format': 'ami', 'id': '42'}
self.flags(checksum_base_images=True, group='libvirt')
def _make_checksum(self, tmpdir):
testdata = ('OpenStack Software delivers a massively scalable cloud '
'operating system.')
fname = os.path.join(tmpdir, 'aaa')
info_fname = imagecache.get_info_filename(fname)
with open(fname, 'w') as f:
f.write(testdata)
return fname, info_fname, testdata
def _write_file(self, info_fname, info_attr, testdata):
f = open(info_fname, 'w')
if info_attr == "csum valid":
csum = hashlib.sha1()
csum.update(testdata)
f.write('{"sha1": "%s"}\n' % csum.hexdigest())
elif info_attr == "csum invalid, not json":
f.write('banana')
else:
f.write('{"sha1": "banana"}')
f.close()
def _check_body(self, tmpdir, info_attr):
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname, info_fname, testdata = self._make_checksum(tmpdir)
self._write_file(info_fname, info_attr, testdata)
image_cache_manager = imagecache.ImageCacheManager()
return image_cache_manager, fname
def test_verify_checksum(self):
with utils.tempdir() as tmpdir:
image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertTrue(res)
def test_verify_checksum_disabled(self):
self.flags(checksum_base_images=False, group='libvirt')
with utils.tempdir() as tmpdir:
image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertIsNone(res)
def test_verify_checksum_invalid_json(self):
with intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, not json"))
res = image_cache_manager._verify_checksum(
self.img, fname, create_if_missing=False)
self.assertFalse(res)
log = stream.getvalue()
# NOTE(mikal): this is a skip not a fail because the file is
# present, but is not in valid JSON format and therefore is
# skipped.
self.assertNotEqual(log.find('image verification skipped'), -1)
def test_verify_checksum_invalid_repaired(self):
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, not json"))
res = image_cache_manager._verify_checksum(
self.img, fname, create_if_missing=True)
self.assertIsNone(res)
def test_verify_checksum_invalid(self):
with intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, valid json"))
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertFalse(res)
log = stream.getvalue()
self.assertNotEqual(log.find('image verification failed'), -1)
def test_verify_checksum_file_missing(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname, info_fname, testdata = self._make_checksum(tmpdir)
image_cache_manager = imagecache.ImageCacheManager()
res = image_cache_manager._verify_checksum('aaa', fname)
self.assertIsNone(res)
# Checksum requests for a file with no checksum now have the
# side effect of creating the checksum
self.assertTrue(os.path.exists(info_fname))


@@ -28,8 +28,6 @@ import time
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import fileutils
import nova.conf
from nova.i18n import _LE
@@ -82,116 +80,6 @@ def is_valid_info_file(path):
return False
def _read_possible_json(serialized, info_file):
try:
d = jsonutils.loads(serialized)
except ValueError as e:
LOG.error(_LE('Error reading image info file %(filename)s: '
'%(error)s'),
{'filename': info_file,
'error': e})
d = {}
return d
def read_stored_info(target, field=None, timestamped=False):
"""Read information about an image.
Returns an empty dictionary if there is no info, just the field value if
a field is requested, or the entire dictionary otherwise.
"""
info_file = get_info_filename(target)
if not os.path.exists(info_file):
# NOTE(mikal): Special case to handle essex checksums being converted.
# There is an assumption here that target is a base image filename.
old_filename = target + '.sha1'
if field == 'sha1' and os.path.exists(old_filename):
with open(old_filename) as hash_file:
hash_value = hash_file.read()
write_stored_info(target, field=field, value=hash_value)
os.remove(old_filename)
d = {field: hash_value}
else:
d = {}
else:
lock_name = 'info-%s' % os.path.split(target)[-1]
lock_path = os.path.join(CONF.instances_path, 'locks')
@utils.synchronized(lock_name, external=True, lock_path=lock_path)
def read_file(info_file):
LOG.debug('Reading image info file: %s', info_file)
with open(info_file, 'r') as f:
return f.read().rstrip()
serialized = read_file(info_file)
d = _read_possible_json(serialized, info_file)
if field:
if timestamped:
return (d.get(field, None), d.get('%s-timestamp' % field, None))
else:
return d.get(field, None)
return d
def write_stored_info(target, field=None, value=None):
"""Write information about an image."""
if not field:
return
info_file = get_info_filename(target)
LOG.info(_LI('Writing stored info to %s'), info_file)
fileutils.ensure_tree(os.path.dirname(info_file))
lock_name = 'info-%s' % os.path.split(target)[-1]
lock_path = os.path.join(CONF.instances_path, 'locks')
@utils.synchronized(lock_name, external=True, lock_path=lock_path)
def write_file(info_file, field, value):
d = {}
if os.path.exists(info_file):
with open(info_file, 'r') as f:
d = _read_possible_json(f.read(), info_file)
d[field] = value
d['%s-timestamp' % field] = time.time()
with open(info_file, 'w') as f:
f.write(jsonutils.dumps(d))
write_file(info_file, field, value)
def _hash_file(filename):
"""Generate a hash for the contents of a file."""
checksum = hashlib.sha1()
with open(filename) as f:
for chunk in iter(lambda: f.read(32768), b''):
checksum.update(chunk)
return checksum.hexdigest()
def read_stored_checksum(target, timestamped=True):
"""Read the checksum.
Returns the checksum (as hex) or None.
"""
return read_stored_info(target, field='sha1', timestamped=timestamped)
def write_stored_checksum(target):
"""Write a checksum to disk for a file in _base."""
write_stored_info(target, field='sha1', value=_hash_file(target))
class ImageCacheManager(imagecache.ImageCacheManager):
def __init__(self):
super(ImageCacheManager, self).__init__()
@@ -209,7 +97,6 @@ class ImageCacheManager(imagecache.ImageCacheManager):
self.used_swap_images = set()
self.active_base_files = []
self.corrupt_base_files = []
self.originals = []
self.removable_base_files = []
self.unexplained_images = []
@@ -240,13 +127,34 @@ class ImageCacheManager(imagecache.ImageCacheManager):
digest_size = hashlib.sha1().digestsize * 2
for ent in os.listdir(base_dir):
if len(ent) == digest_size:
path = os.path.join(base_dir, ent)
if is_valid_info_file(path):
# TODO(mdbooth): In Newton we ignore these files, because if
# we're on shared storage they may be in use by a pre-Newton
# compute host. However, we have already removed all uses of
# these files in Newton, so once we can be sure that all
# compute hosts are running at least Newton (i.e. in Ocata),
# we can be sure that nothing is using info files any more.
# Therefore in Ocata, we should update this to simply delete
# these files here, i.e.:
# os.unlink(path)
#
# This will obsolete the code to cleanup these files in
# _remove_old_enough_file, so when updating this code to
# delete immediately, the cleanup code in
# _remove_old_enough_file can be removed.
#
# This cleanup code will delete all info files the first
# time it runs in Ocata, which means we can delete this
# block entirely in P.
pass
elif len(ent) == digest_size:
self._store_image(base_dir, ent, original=True)
elif (len(ent) > digest_size + 2 and
ent[digest_size] == '_' and
not is_valid_info_file(os.path.join(base_dir, ent))):
elif len(ent) > digest_size + 2 and ent[digest_size] == '_':
self._store_image(base_dir, ent, original=False)
else:
self._store_swap_image(ent)
@@ -323,72 +231,6 @@ class ImageCacheManager(imagecache.ImageCacheManager):
if m:
yield img, False, True
def _verify_checksum(self, img_id, base_file, create_if_missing=True):
"""Compare the checksum stored on disk with the current file.
Note that if the checksum fails to verify this is logged, but no actual
action occurs. This is something sysadmins should monitor for and
handle manually when it occurs.
"""
if not CONF.libvirt.checksum_base_images:
return None
lock_name = 'hash-%s' % os.path.split(base_file)[-1]
# Protect against other nova-computes performing checksums at the same
# time if we are using shared storage
@utils.synchronized(lock_name, external=True, lock_path=self.lock_path)
def inner_verify_checksum():
(stored_checksum, stored_timestamp) = read_stored_checksum(
base_file, timestamped=True)
if stored_checksum:
# NOTE(mikal): Checksums are timestamped. If we have recently
# checksummed (possibly on another compute node if we are using
# shared storage), then we don't need to checksum again.
if (stored_timestamp and
time.time() - stored_timestamp <
CONF.libvirt.checksum_interval_seconds):
return True
# NOTE(mikal): If there is no timestamp, then the checksum was
# performed by a previous version of the code.
if not stored_timestamp:
write_stored_info(base_file, field='sha1',
value=stored_checksum)
current_checksum = _hash_file(base_file)
if current_checksum != stored_checksum:
LOG.error(_LE('image %(id)s at (%(base_file)s): image '
'verification failed'),
{'id': img_id,
'base_file': base_file})
return False
else:
return True
else:
LOG.info(_LI('image %(id)s at (%(base_file)s): image '
'verification skipped, no hash stored'),
{'id': img_id,
'base_file': base_file})
# NOTE(mikal): If the checksum file is missing, then we should
# create one. We don't create checksums when we download images
# from glance because that would delay VM startup.
if CONF.libvirt.checksum_base_images and create_if_missing:
LOG.info(_LI('%(id)s (%(base_file)s): generating '
'checksum'),
{'id': img_id,
'base_file': base_file})
write_stored_checksum(base_file)
return None
return inner_verify_checksum()
@staticmethod
def _get_age_of_file(base_file):
if not os.path.exists(base_file):
@@ -400,8 +242,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
return (True, age)
def _remove_old_enough_file(self, base_file, maxage, remove_sig=True,
remove_lock=True):
def _remove_old_enough_file(self, base_file, maxage, remove_lock=True):
"""Remove a single swap or base file if it is old enough."""
exists, age = self._get_age_of_file(base_file)
if not exists:
@@ -422,10 +263,20 @@ class ImageCacheManager(imagecache.ImageCacheManager):
LOG.info(_LI('Removing base or swap file: %s'), base_file)
try:
os.remove(base_file)
if remove_sig:
signature = get_info_filename(base_file)
if os.path.exists(signature):
os.remove(signature)
# TODO(mdbooth): We have removed all uses of info files in
# Newton and we no longer create them, but they may still
# exist from before we upgraded, and they may still be
# created by older compute hosts if we're on shared storage.
# While there may still be pre-Newton computes writing here,
# the only safe place to delete info files is here,
# when deleting the cache entry. Once we can be sure that
# all computes are running at least Newton (i.e. in Ocata),
# we can delete these files unconditionally during the
# periodic task, which will make this code obsolete.
signature = get_info_filename(base_file)
if os.path.exists(signature):
os.remove(signature)
except OSError as e:
LOG.error(_LE('Failed to remove %(base_file)s, '
'error was %(error)s'),
@@ -456,8 +307,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
"""Remove a single swap base file if it is old enough."""
maxage = CONF.remove_unused_original_minimum_age_seconds
self._remove_old_enough_file(base_file, maxage, remove_sig=False,
remove_lock=False)
self._remove_old_enough_file(base_file, maxage, remove_lock=False)
def _remove_base_file(self, base_file):
"""Remove a single base file if it is old enough."""
@@ -470,7 +320,6 @@ class ImageCacheManager(imagecache.ImageCacheManager):
def _handle_base_image(self, img_id, base_file):
"""Handle the checks for a single base image."""
image_bad = False
image_in_use = False
LOG.info(_LI('image %(id)s at (%(base_file)s): checking'),
@@ -480,17 +329,6 @@ class ImageCacheManager(imagecache.ImageCacheManager):
if base_file in self.unexplained_images:
self.unexplained_images.remove(base_file)
if (base_file and os.path.exists(base_file)
and os.path.isfile(base_file)):
# _verify_checksum returns True if the checksum is ok, and None if
# there is no checksum file
checksum_result = self._verify_checksum(img_id, base_file)
if checksum_result is not None:
image_bad = not checksum_result
# Give other threads a chance to run
time.sleep(0)
if img_id in self.used_images:
local, remote, instances = self.used_images[img_id]
@@ -515,9 +353,6 @@ class ImageCacheManager(imagecache.ImageCacheManager):
'base_file': base_file,
'instance_list': ' '.join(instances)})
if image_bad:
self.corrupt_base_files.append(base_file)
if base_file:
if not image_in_use:
LOG.debug('image %(id)s at (%(base_file)s): image is not in '
@@ -579,9 +414,6 @@ class ImageCacheManager(imagecache.ImageCacheManager):
if self.active_base_files:
LOG.info(_LI('Active base files: %s'),
' '.join(self.active_base_files))
if self.corrupt_base_files:
LOG.info(_LI('Corrupt base files: %s'),
' '.join(self.corrupt_base_files))
if self.removable_base_files:
LOG.info(_LI('Removable base files: %s'),


@@ -0,0 +1,6 @@
---
deprecations:
- The ``image_info_filename_pattern``, ``checksum_base_images``, and
``checksum_interval_seconds`` options have been deprecated in the
``[libvirt]`` config section. They are no longer used. Any value given will
be ignored.
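
The deprecation reasons above point operators at block- or
filesystem-level integrity checking instead. As an illustrative sketch
only (not part of this patch, and assuming the default
$instances_path/_base cache location), cached images could be
checksummed out of band like so:

  import hashlib
  import os

  BASE_DIR = '/var/lib/nova/instances/_base'  # assumed default layout

  def sha1_of(path, chunk_size=32768):
      """Stream a file through SHA1, much as the removed _hash_file did."""
      digest = hashlib.sha1()
      with open(path, 'rb') as f:
          for chunk in iter(lambda: f.read(chunk_size), b''):
              digest.update(chunk)
      return digest.hexdigest()

  for name in sorted(os.listdir(BASE_DIR)):
      path = os.path.join(BASE_DIR, name)
      if os.path.isfile(path):
          print('%s  %s' % (sha1_of(path), name))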