728b4ba140
Currently, our integrity checking for objects is pretty weak when it comes to object metadata. If the extended attributes on a .data or .meta file get corrupted in such a way that we can still unpickle it, we don't have anything that detects that. This could be especially bad with encrypted etags; if the encrypted etag (X-Object-Sysmeta-Crypto-Etag or whatever it is) gets some bits flipped, then we'll cheerfully decrypt the cipherjunk into plainjunk, then send it to the client. Net effect is that the client sees a GET response with an ETag that doesn't match the MD5 of the object *and* Swift has no way of detecting and quarantining this object. Note that, with an unencrypted object, if the ETag metadatum gets mangled, then the object will be quarantined by the object server or auditor, whichever notices first. As part of this commit, I also ripped out some mocking of getxattr/setxattr in tests. It appears to be there to allow unit tests to run on systems where /tmp doesn't support xattrs. However, since the mock is keyed off of inode number and inode numbers get re-used, there's lots of leakage between different test runs. On a real FS, unlinking a file and then creating a new one of the same name will also reset the xattrs; this isn't the case with the mock. The mock was pretty old; Ubuntu 12.04 and up all support xattrs in /tmp, and recent Red Hat / CentOS releases do too. The xattr mock was added in 2011; maybe it was to support Ubuntu Lucid Lynx? Bonus: now you can pause a test with the debugger, inspect its files in /tmp, and actually see the xattrs along with the data. Since this patch now uses a real filesystem for testing filesystem operations, tests are skipped if the underlying filesystem does not support setting xattrs (eg tmpfs or more than 4k of xattrs on ext4). References to "/tmp" have been replaced with calls to tempfile.gettempdir(). This will allow setting the TMPDIR envvar in test setup and getting an XFS filesystem instead of ext4 or tmpfs. 
THIS PATCH SIGNIFICANTLY CHANGES TESTING ENVIRONMENTS With this patch, every test environment will require TMPDIR to be using a filesystem that supports at least 4k of extended attributes. Neither ext4 nor tmpfs support this. XFS is recommended. So why all the SkipTests? Why not simply raise an error? We still need the tests to run on the base image for OpenStack's CI system. Since we were previously mocking out xattr, there wasn't a problem, but we also weren't actually testing anything. This patch adds functionality to validate xattr data, so we need to drop the mock. `test.unit.skip_if_no_xattrs()` is also imported into `test.functional` so that functional tests can import it from the functional test namespace. The related OpenStack CI infrastructure changes are made in https://review.openstack.org/#/c/394600/. Co-Authored-By: John Dickinson <me@not.mn> Change-Id: I98a37c0d451f4960b7a12f648e4405c6c6716808
174 lines
6.6 KiB
Python
174 lines
6.6 KiB
Python
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
# implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
|
|
import binascii
|
|
import os
|
|
import shutil
|
|
import struct
|
|
import tempfile
|
|
import unittest
|
|
|
|
from swift.cli import relinker
|
|
from swift.common import exceptions, ring, utils
|
|
from swift.common import storage_policy
|
|
from swift.common.storage_policy import (
|
|
StoragePolicy, StoragePolicyCollection, POLICIES)
|
|
|
|
from swift.obj.diskfile import write_metadata
|
|
|
|
from test.unit import FakeLogger, skip_if_no_xattrs
|
|
|
|
|
|
class TestRelinker(unittest.TestCase):
    """Tests for ``swift.cli.relinker``.

    Each test builds a small ring and a real on-disk object layout under a
    temp directory, then exercises the relink and cleanup phases of a
    partition power increase.
    """

    def setUp(self):
        # Diskfile metadata is stored in extended attributes; skip when the
        # temp filesystem (e.g. tmpfs) cannot hold them.
        skip_if_no_xattrs()
        self.logger = FakeLogger()
        self.testdir = tempfile.mkdtemp()
        self.devices = os.path.join(self.testdir, 'node')
        # Start from a pristine directory tree.
        shutil.rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)
        os.mkdir(self.devices)

        self.rb = ring.RingBuilder(8, 6.0, 1)

        for i in range(6):
            ip = "127.0.0.%s" % i
            self.rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
                             'ip': ip, 'port': 10000, 'device': 'sda1'})
        self.rb.rebalance(seed=1)

        self.existing_device = 'sda1'
        os.mkdir(os.path.join(self.devices, self.existing_device))
        self.objects = os.path.join(self.devices, self.existing_device,
                                    'objects')
        os.mkdir(self.objects)
        # Current partition uses the top 8 bits of the path hash; after the
        # partition power increase the object maps to the top 9 bits.
        self._hash = utils.hash_path('a/c/o')
        digest = binascii.unhexlify(self._hash)
        part = struct.unpack_from('>I', digest)[0] >> 24
        self.next_part = struct.unpack_from('>I', digest)[0] >> 23
        self.objdir = os.path.join(
            self.objects, str(part), self._hash[-3:], self._hash)
        os.makedirs(self.objdir)
        self.object_fname = "1278553064.00000.data"
        self.objname = os.path.join(self.objdir, self.object_fname)
        with open(self.objname, "wb") as dummy:
            # Binary-mode file: write bytes, not str (required on py3).
            dummy.write(b"Hello World!")
            write_metadata(dummy, {'name': '/a/c/o', 'Content-Length': '12'})

        test_policies = [StoragePolicy(0, 'platin', True)]
        storage_policy._POLICIES = StoragePolicyCollection(test_policies)

        # Where the object is expected to appear after relinking.
        self.expected_dir = os.path.join(
            self.objects, str(self.next_part), self._hash[-3:], self._hash)
        self.expected_file = os.path.join(self.expected_dir, self.object_fname)

    def _save_ring(self):
        # Persist the ring for every policy so the relinker can load it.
        rd = self.rb.get_ring()
        for policy in POLICIES:
            rd.save(os.path.join(
                self.testdir, '%s.ring.gz' % policy.ring_name))
            # Enforce ring reloading in relinker
            policy.object_ring = None

    def tearDown(self):
        shutil.rmtree(self.testdir, ignore_errors=1)
        storage_policy.reload_storage_policies()

    def test_relink(self):
        # relink() should hardlink the object into its future partition dir.
        self.rb.prepare_increase_partition_power()
        self._save_ring()
        relinker.relink(self.testdir, self.devices, True)

        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))

        # Old and new names must be hardlinks to the same inode.
        stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
        stat_new = os.stat(self.expected_file)
        self.assertEqual(stat_old.st_ino, stat_new.st_ino)

    def _common_test_cleanup(self, relink=True):
        # Create a ring that has prev_part_power set
        self.rb.prepare_increase_partition_power()
        self.rb.increase_partition_power()
        self._save_ring()

        os.makedirs(self.expected_dir)

        if relink:
            # Create a hardlink to the original object name. This is expected
            # after a normal relinker run
            os.link(os.path.join(self.objdir, self.object_fname),
                    self.expected_file)

    def test_cleanup(self):
        self._common_test_cleanup()
        self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True))

        # Old objectname should be removed, new should still exist
        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))
        self.assertFalse(os.path.isfile(
            os.path.join(self.objdir, self.object_fname)))

    def test_cleanup_not_yet_relinked(self):
        # Without a prior relink, cleanup must fail and must not remove the
        # original object.
        self._common_test_cleanup(relink=False)
        self.assertEqual(1, relinker.cleanup(self.testdir, self.devices, True))

        self.assertTrue(os.path.isfile(
            os.path.join(self.objdir, self.object_fname)))

    def test_cleanup_deleted(self):
        self._common_test_cleanup()

        # Pretend the object got deleted in between and there is a tombstone
        fname_ts = self.expected_file[:-4] + "ts"
        os.rename(self.expected_file, fname_ts)

        self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True))

    def test_cleanup_doesnotexist(self):
        self._common_test_cleanup()

        # Pretend the file in the new place got deleted in between
        os.remove(self.expected_file)

        self.assertEqual(
            1, relinker.cleanup(self.testdir, self.devices, True, self.logger))
        self.assertEqual(self.logger.get_lines_for_level('warning'),
                         ['Error cleaning up %s: %s' % (self.objname,
                          repr(exceptions.DiskFileNotExist()))])

    def test_cleanup_non_durable_fragment(self):
        self._common_test_cleanup()

        # Actually all fragments are non-durable and raise a DiskFileNotExist
        # in EC in this test. However, if the counterpart exists in the new
        # location, this is ok - it will be fixed by the reconstructor later on
        storage_policy._POLICIES[0].policy_type = 'erasure_coding'

        self.assertEqual(
            0, relinker.cleanup(self.testdir, self.devices, True, self.logger))
        self.assertEqual(self.logger.get_lines_for_level('warning'), [])

    def test_cleanup_quarantined(self):
        self._common_test_cleanup()
        # Pretend the object in the new place got corrupted
        with open(self.expected_file, "wb") as obj:
            # Binary-mode file: write bytes, not str (required on py3).
            obj.write(b'trash')

        self.assertEqual(
            1, relinker.cleanup(self.testdir, self.devices, True, self.logger))

        self.assertIn('failed audit and was quarantined',
                      self.logger.get_lines_for_level('warning')[0])