Remove unused code for passing shard ranges via the pending file

Remove leftover code, reverting to the master version, from when shard
ranges were written to the pending file.

Change-Id: I3e48850a0afbab0725859b4bd0c5b70bd78a16ff
Alistair Coles 2018-04-26 17:48:57 +01:00
parent e23562bd6d
commit 5eb65e4b89
4 changed files with 41 additions and 58 deletions
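
For context, the "pending file" named in the commit message is the broker's
write-ahead batching file: DatabaseBroker.put_record appends each record as a
colon-delimited, base64-encoded pickle, and _commit_puts later replays every
entry through _commit_puts_load. A simplified, py2-era sketch of that
mechanism (stand-in functions, not the real broker API):

    import pickle

    PICKLE_PROTOCOL = 2

    def append_pending(pending_file, record_tuple):
        # Colons never occur in base64 output, so they delimit entries.
        with open(pending_file, 'a+b') as fp:
            fp.write(':')
            fp.write(pickle.dumps(record_tuple,
                                  protocol=PICKLE_PROTOCOL).encode('base64'))

    def replay_pending(pending_file, load_entry):
        # Mirrors _commit_puts: split on ':' and hand each entry to a
        # loader such as _commit_puts_load below.
        item_list = []
        with open(pending_file, 'rb') as fp:
            for entry in fp.read().split(':'):
                if entry:
                    load_entry(item_list, entry)
        return item_list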

@@ -624,28 +624,26 @@ class ContainerBroker(DatabaseBroker):
     def _commit_puts_load(self, item_list, entry):
         """See :func:`swift.common.db.DatabaseBroker._commit_puts_load`"""
-        data = list(pickle.loads(entry.decode('base64')))
-        record_type = data.pop(9) if len(data) > 9 else RECORD_TYPE_OBJECT
-        if record_type == RECORD_TYPE_SHARD_NODE:
-            item = dict(zip(SHARD_RANGE_KEYS, data))
-            item['record_type'] = record_type
+        data = pickle.loads(entry.decode('base64'))
+        (name, timestamp, size, content_type, etag, deleted) = data[:6]
+        if len(data) > 6:
+            storage_policy_index = data[6]
         else:
-            (name, timestamp, size, content_type, etag, deleted) = data[:6]
-            storage_policy_index = data[6] if len(data) > 6 else 0
-            content_type_timestamp = data[7] if len(data) > 7 else None
-            meta_timestamp = data[8] if len(data) > 8 else None
-            item = {
-                'name': name,
-                'created_at': timestamp,
-                'size': size,
-                'content_type': content_type,
-                'etag': etag,
-                'deleted': deleted,
-                'storage_policy_index': storage_policy_index,
-                'ctype_timestamp': content_type_timestamp,
-                'meta_timestamp': meta_timestamp,
-                'record_type': record_type}
-        item_list.append(item)
+            storage_policy_index = 0
+        content_type_timestamp = meta_timestamp = None
+        if len(data) > 7:
+            content_type_timestamp = data[7]
+        if len(data) > 8:
+            meta_timestamp = data[8]
+        item_list.append({'name': name,
+                          'created_at': timestamp,
+                          'size': size,
+                          'content_type': content_type,
+                          'etag': etag,
+                          'deleted': deleted,
+                          'storage_policy_index': storage_policy_index,
+                          'ctype_timestamp': content_type_timestamp,
+                          'meta_timestamp': meta_timestamp})

     def _empty(self):
         self._commit_puts_stale_ok()
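
Older writers may have pickled anywhere from 6 to 9 elements, so the restored
loader has to tolerate short tuples; new fields could only ever be appended,
which is why the shard-range scheme (which had to pad so that record_type
always landed at index 9) was brittle. A standalone, version-neutral sketch of
the restored length-tolerant logic:

    # Condensed form of the loader restored above: every index keeps its
    # original meaning forever, and trailing fields default when absent.
    def load_object_tuple(data):
        (name, timestamp, size, content_type, etag, deleted) = data[:6]
        return {'name': name, 'created_at': timestamp, 'size': size,
                'content_type': content_type, 'etag': etag,
                'deleted': deleted,
                'storage_policy_index': data[6] if len(data) > 6 else 0,
                'ctype_timestamp': data[7] if len(data) > 7 else None,
                'meta_timestamp': data[8] if len(data) > 8 else None}

    # A pre-storage-policy 6-tuple and a current 9-tuple load identically:
    old = ('o', '1234567890.12345', 0, 'text/plain', 'etag', 0)
    assert load_object_tuple(old) == load_object_tuple(old + (0, None, None))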
@@ -690,22 +688,11 @@ class ContainerBroker(DatabaseBroker):
                         deleted=1, storage_policy_index=storage_policy_index)

     def make_tuple_for_pickle(self, record):
-        record_type = record['record_type']
-        if record_type == RECORD_TYPE_SHARD_NODE:
-            # TODO: this is so brittle, could we use dicts for shard ranges and
-            # try/except when reading back in _commit_puts_load?
-            values = [record[key] for key in SHARD_RANGE_KEYS]
-            while len(values) < 9:
-                # pad as required since record_type *MUST* be at index 9
-                values.insert(8, 0)
-            values.insert(9, record_type)
-            return tuple(values)
         return (record['name'], record['created_at'], record['size'],
                 record['content_type'], record['etag'], record['deleted'],
                 record['storage_policy_index'],
                 record['ctype_timestamp'],
-                record['meta_timestamp'],
-                record['record_type'])
+                record['meta_timestamp'])

     def put_object(self, name, timestamp, size, content_type, etag, deleted=0,
                    storage_policy_index=0, ctype_timestamp=None,
@@ -730,8 +717,7 @@ class ContainerBroker(DatabaseBroker):
             'deleted': deleted,
             'storage_policy_index': storage_policy_index,
             'ctype_timestamp': ctype_timestamp,
-            'meta_timestamp': meta_timestamp,
-            'record_type': RECORD_TYPE_OBJECT}
+            'meta_timestamp': meta_timestamp}
         self.put_record(record)

     def remove_objects(self, lower, upper, storage_policy_index, max_row=None):
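
Together the two methods changed above form the pending-file round trip:
make_tuple_for_pickle flattens a record dict to a positional 9-tuple (with no
record_type element any more) and _commit_puts_load rebuilds the dict. A
minimal sanity check, assuming the py2-era API shown in this diff and an
in-memory broker as the tests below use:

    import pickle
    from swift.container.backend import ContainerBroker

    broker = ContainerBroker(':memory:', account='a', container='c')
    record = {'name': 'obj', 'created_at': '1234567890.12345', 'size': 42,
              'content_type': 'text/plain', 'etag': 'hash_test',
              'deleted': 0, 'storage_policy_index': 0,
              'ctype_timestamp': None, 'meta_timestamp': None}
    # Serialize exactly as put_record would for the pending file...
    entry = pickle.dumps(broker.make_tuple_for_pickle(record),
                         protocol=2).encode('base64')
    # ...and load it back; the original dict is reconstructed.
    items = []
    broker._commit_puts_load(items, entry)
    assert items == [record]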

@@ -56,7 +56,7 @@ class TestCliFindShardRanges(unittest.TestCase):
         broker.container = 'c'
         broker.initialize()
         ts = utils.Timestamp.now()
-        broker.merge_objects([
+        broker.merge_items([
             {'name': 'obj%02d' % i, 'created_at': ts.internal, 'size': 0,
              'content_type': 'application/octet-stream', 'etag': 'not-really',
              'deleted': 0, 'storage_policy_index': 0,

@@ -124,13 +124,13 @@ class TestContainerBroker(unittest.TestCase):
             obj = {'name': 'o', 'created_at': next(ts_iter).internal,
                    'size': 0, 'content_type': 'text/plain', 'etag': EMPTY_ETAG,
                    'deleted': 0}
-            broker_with_object.merge_objects([dict(obj)])
+            broker_with_object.merge_items([dict(obj)])
             self.assertFalse(broker_to_test.is_deleted())
             info, deleted = broker_to_test.get_info_is_deleted()
             self.assertFalse(deleted)
             self.assertEqual(1, info['object_count'])
             obj.update({'created_at': next(ts_iter).internal, 'deleted': 1})
-            broker_with_object.merge_objects([dict(obj)])
+            broker_with_object.merge_items([dict(obj)])
             self.assertTrue(broker_to_test.is_deleted())
             info, deleted = broker_to_test.get_info_is_deleted()
             self.assertTrue(deleted)
@@ -140,13 +140,13 @@ class TestContainerBroker(unittest.TestCase):
             obj = {'name': 'o', 'created_at': next(ts_iter).internal,
                    'size': 0, 'content_type': 'text/plain', 'etag': EMPTY_ETAG,
                    'deleted': 0}
-            broker.merge_objects([dict(obj)])
+            broker.merge_items([dict(obj)])
             self.assertTrue(broker.is_deleted())
             info, deleted = broker.get_info_is_deleted()
             self.assertTrue(deleted)
             self.assertEqual(0, info['object_count'])
             obj.update({'created_at': next(ts_iter).internal, 'deleted': 1})
-            broker.merge_objects([dict(obj)])
+            broker.merge_items([dict(obj)])
             self.assertTrue(broker.is_deleted())
             info, deleted = broker.get_info_is_deleted()
             self.assertTrue(deleted)
@@ -227,11 +227,11 @@ class TestContainerBroker(unittest.TestCase):
             obj = {'name': 'o', 'created_at': next(ts_iter).internal,
                    'size': 0, 'content_type': 'text/plain', 'etag': EMPTY_ETAG,
                    'deleted': 0}
-            broker_with_object.merge_objects([dict(obj)])
+            broker_with_object.merge_items([dict(obj)])
             self.assertFalse(broker_to_test.empty())
             # and delete it
             obj.update({'created_at': next(ts_iter).internal, 'deleted': 1})
-            broker_with_object.merge_objects([dict(obj)])
+            broker_with_object.merge_items([dict(obj)])
             self.assertTrue(broker_to_test.empty())

         self.assertTrue(broker.empty())
@@ -900,24 +900,23 @@ class TestContainerBroker(unittest.TestCase):
                   'deleted': '1',
                   'storage_policy_index': '2',
                   'ctype_timestamp': None,
-                  'meta_timestamp': None,
-                  'record_type': 'object'}
+                  'meta_timestamp': None}
         broker = ContainerBroker(':memory:', account='a', container='c')
         expect = ('obj', '1234567890.12345', 42, 'text/plain', 'hash_test',
-                  '1', '2', None, None, 'object')
+                  '1', '2', None, None)
         result = broker.make_tuple_for_pickle(record)
         self.assertEqual(expect, result)

         record['ctype_timestamp'] = '2233445566.00000'
         expect = ('obj', '1234567890.12345', 42, 'text/plain', 'hash_test',
-                  '1', '2', '2233445566.00000', None, 'object')
+                  '1', '2', '2233445566.00000', None)
         result = broker.make_tuple_for_pickle(record)
         self.assertEqual(expect, result)

         record['meta_timestamp'] = '5566778899.00000'
         expect = ('obj', '1234567890.12345', 42, 'text/plain', 'hash_test',
-                  '1', '2', '2233445566.00000', '5566778899.00000', 'object')
+                  '1', '2', '2233445566.00000', '5566778899.00000')
         result = broker.make_tuple_for_pickle(record)
         self.assertEqual(expect, result)
@@ -936,8 +935,7 @@ class TestContainerBroker(unittest.TestCase):
                   'deleted': '1',
                   'storage_policy_index': '2',
                   'ctype_timestamp': None,
-                  'meta_timestamp': None,
-                  'record_type': 'object'}
+                  'meta_timestamp': None}

         # sanity check
         self.assertFalse(os.path.isfile(broker.pending_file))
@@ -982,8 +980,7 @@ class TestContainerBroker(unittest.TestCase):
                   'deleted': '1',
                   'storage_policy_index': '2',
                   'ctype_timestamp': '1234567890.44444',
-                  'meta_timestamp': '1234567890.99999',
-                  'record_type': 'object'}
+                  'meta_timestamp': '1234567890.99999'}

         # sanity check
         self.assertFalse(os.path.isfile(broker.pending_file))
@@ -3618,8 +3615,8 @@ class TestContainerBroker(unittest.TestCase):
             'size': 1024 * i,
             'deleted': 0,
         } for i in range(1, 6)]
-        # merge_objects mutates items
-        broker.merge_objects([dict(obj) for obj in objects])
+        # merge_items mutates items
+        broker.merge_items([dict(obj) for obj in objects])
         original_info = broker.get_info()

         # Add some metadata
@@ -3980,9 +3977,9 @@ class TestContainerBroker(unittest.TestCase):
                 db_path, account=a, container=c)
             broker.initialize(next(ts_iter).internal, 0)
             broker.update_sharding_info({'Root': '%s/%s' % (root_a, root_c)})
-            broker.merge_objects([{'name': 'obj', 'size': 14, 'etag': 'blah',
-                                   'content_type': 'text/plain', 'deleted': 0,
-                                   'created_at': Timestamp.now().internal}])
+            broker.merge_items([{'name': 'obj', 'size': 14, 'etag': 'blah',
+                                 'content_type': 'text/plain', 'deleted': 0,
+                                 'created_at': Timestamp.now().internal}])
             self.assertEqual(1, broker.get_info()['object_count'])
             self.assertEqual(14, broker.get_info()['bytes_used'])
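
The test changes above and below are the same mechanical rename: the feature
branch's merge_objects wrapper is gone, so tests inject rows through the
broker's generic merge_items API instead. A minimal sketch of that idiom,
mirroring the record dicts used in these hunks (imports assumed):

    from swift.common.utils import Timestamp
    from swift.container.backend import ContainerBroker

    broker = ContainerBroker(':memory:', account='a', container='c')
    broker.initialize(Timestamp.now().internal, 0)
    broker.merge_items([{'name': 'obj',
                         'created_at': Timestamp.now().internal,
                         'size': 14, 'content_type': 'text/plain',
                         'etag': 'an etag', 'deleted': 0}])
    assert broker.get_info()['object_count'] == 1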

@@ -1362,7 +1362,7 @@ class TestSharder(BaseTestSharder):
         obj = {'name': 'obj', 'created_at': next(self.ts_iter).internal,
                'size': 14, 'content_type': 'text/plain', 'etag': 'an etag',
                'deleted': 0}
-        broker.get_brokers()[0].merge_objects([obj])
+        broker.get_brokers()[0].merge_items([obj])
         self.assertEqual(2, len(broker.db_files))  # sanity check

         def check_not_complete():
@@ -1407,7 +1407,7 @@ class TestSharder(BaseTestSharder):
         obj = {'name': 'obj', 'created_at': next(self.ts_iter).internal,
                'size': 14, 'content_type': 'text/plain', 'etag': 'an etag',
                'deleted': 1}
-        old_broker.merge_objects([obj])
+        old_broker.merge_items([obj])
         self.assertGreater(old_broker.get_max_row(), context.max_row)
         context.misplaced_done = True
         context.cleaving_done = True