Use 'timestamp' name consistently in ShardRange
The attribute name is 'timestamp', but 'created_at' was used in the constructor and in the serialized form. 'timestamp' is now used consistently, since the time value is not specific to a creation event.

Change-Id: I116cd493a64bb296047bdc832ecbcf2d478bcdcc
This commit is contained in:
parent: 79f4f65888
commit: 68c3512f5e
@ -314,8 +314,8 @@ def print_db_info_metadata(db_type, info, metadata, drop_prefixes=False):
|
||||
'%(bytes_used)d, State: %(state_text)s (%(state)d)'
|
||||
% srange)
|
||||
print(' Created at: %s (%s)'
|
||||
% (Timestamp(srange['created_at']).isoformat,
|
||||
srange['created_at']))
|
||||
% (Timestamp(srange['timestamp']).isoformat,
|
||||
srange['timestamp']))
|
||||
print(' Meta Timestamp: %s (%s)'
|
||||
% (Timestamp(srange['meta_timestamp']).isoformat,
|
||||
srange['meta_timestamp']))
|
||||
|
@ -4395,14 +4395,14 @@ class ShardRange(object):
|
||||
MIN = MinBound()
|
||||
MAX = MaxBound()
|
||||
|
||||
def __init__(self, name, created_at, lower=MIN, upper=MAX,
|
||||
def __init__(self, name, timestamp, lower=MIN, upper=MAX,
|
||||
object_count=0, bytes_used=0, meta_timestamp=None,
|
||||
deleted=0, state=None, state_timestamp=None, epoch=None):
|
||||
"""
|
||||
A ShardRange encapsulates state related to a container shard.
|
||||
|
||||
:param name: the name of the shard.
|
||||
:param created_at: the timestamp at which the shard was created.
|
||||
:param timestamp: the timestamp at which the shard was created.
|
||||
:param lower: the lower bound of object names contained in the shard;
|
||||
the lower bound *is not* included in the shard namespace.
|
||||
:param upper: the upper bound of object names contained in the shard;
|
||||
@ -4412,12 +4412,12 @@ class ShardRange(object):
|
||||
:param bytes_used: the number of bytes in the shard; defaults to zero.
|
||||
:param meta_timestamp: the timestamp at which the shard's object_count
|
||||
and bytes_used were last updated; defaults to the value of
|
||||
``created_at``.
|
||||
``timestamp``.
|
||||
:param deleted: if set the shard range is considered to be deleted.
|
||||
:param state: the state, must be one of ShardRange.STATES, defaults to
|
||||
CREATED
|
||||
:param state_timestamp: the timestamp at which the shard state was last
|
||||
updated; defaults to the value of ``created_at``.
|
||||
updated; defaults to the value of ``timestamp``.
|
||||
:param epoch: optional epoch timestamp
|
||||
"""
|
||||
self.account, self.container = self._validate_path(name)
|
||||
@ -4426,7 +4426,7 @@ class ShardRange(object):
|
||||
self._upper = ShardRange.MAX
|
||||
self.lower = lower
|
||||
self.upper = upper
|
||||
self.timestamp = created_at
|
||||
self.timestamp = timestamp
|
||||
self._meta_timestamp = self._state_timestamp = self._epoch = None
|
||||
self.meta_timestamp = meta_timestamp
|
||||
self.object_count = object_count
|
||||
@ -4778,7 +4778,7 @@ class ShardRange(object):
|
||||
|
||||
def __iter__(self):
|
||||
yield 'name', self.name
|
||||
yield 'created_at', self.timestamp.internal
|
||||
yield 'timestamp', self.timestamp.internal
|
||||
yield 'lower', str(self.lower)
|
||||
yield 'upper', str(self.upper)
|
||||
yield 'object_count', self.object_count
|
||||
@ -4816,7 +4816,7 @@ class ShardRange(object):
|
||||
:return: an instance of this class
|
||||
"""
|
||||
return cls(
|
||||
params['name'], params['created_at'], params['lower'],
|
||||
params['name'], params['timestamp'], params['lower'],
|
||||
params['upper'], params['object_count'], params['bytes_used'],
|
||||
params['meta_timestamp'], params['deleted'], params['state'],
|
||||
params['state_timestamp'], params['epoch'])
|
||||
|
@ -65,7 +65,7 @@ def db_state_text(state):
|
||||
|
||||
# attribute names in order used when transforming shard ranges from dicts to
|
||||
# tuples and vice-versa
|
||||
SHARD_RANGE_KEYS = ('name', 'created_at', 'lower', 'upper', 'object_count',
|
||||
SHARD_RANGE_KEYS = ('name', 'timestamp', 'lower', 'upper', 'object_count',
|
||||
'bytes_used', 'meta_timestamp', 'deleted', 'state',
|
||||
'state_timestamp', 'epoch')
|
||||
|
||||
@ -275,15 +275,15 @@ def merge_shards(shard_data, existing):
|
||||
# may have been updated with meta independently of the other.
|
||||
if not existing:
|
||||
return True
|
||||
if existing['created_at'] < shard_data['created_at']:
|
||||
if existing['timestamp'] < shard_data['timestamp']:
|
||||
# note that currently we do not roll forward any meta or state from
|
||||
# an item that was created at older time, newer created time trumps
|
||||
return True
|
||||
elif existing['created_at'] > shard_data['created_at']:
|
||||
elif existing['timestamp'] > shard_data['timestamp']:
|
||||
return False
|
||||
|
||||
new_content = False
|
||||
# created_at must be the same, so preserve existing range bounds
|
||||
# timestamp must be the same, so preserve existing range bounds
|
||||
for k in ('lower', 'upper'):
|
||||
shard_data[k] = existing[k]
|
||||
|
||||
@ -579,7 +579,7 @@ class ContainerBroker(DatabaseBroker):
|
||||
upper TEXT,
|
||||
object_count INTEGER DEFAULT 0,
|
||||
bytes_used INTEGER DEFAULT 0,
|
||||
created_at TEXT,
|
||||
timestamp TEXT,
|
||||
meta_timestamp TEXT,
|
||||
state INTEGER,
|
||||
state_timestamp TEXT,
|
||||
@ -1348,8 +1348,7 @@ class ContainerBroker(DatabaseBroker):
|
||||
(', '.join(SHARD_RANGE_KEYS),
|
||||
','.join('?' * len(chunk))), chunk))
|
||||
|
||||
# Sort item_list into things that need adding and deleting, based
|
||||
# on results of created_at query.
|
||||
# Sort item_list into things that need adding and deleting
|
||||
to_delete = {}
|
||||
to_add = {}
|
||||
for item in item_list:
|
||||
|
@ -196,7 +196,7 @@ Sharding Metadata:
|
||||
|
||||
shard_ranges = [utils.ShardRange(
|
||||
name='.sharded_a/shard_range_%s' % i,
|
||||
created_at=utils.Timestamp(i), lower='%da' % i,
|
||||
timestamp=utils.Timestamp(i), lower='%da' % i,
|
||||
upper='%dz' % i, object_count=i, bytes_used=i,
|
||||
meta_timestamp=utils.Timestamp(i)) for i in range(1, 4)]
|
||||
shard_ranges[0].state = utils.ShardRange.CLEAVED
|
||||
|
@ -6791,32 +6791,32 @@ class TestShardRange(unittest.TestCase):
|
||||
ts_2 = next(self.ts_iter)
|
||||
ts_3 = next(self.ts_iter)
|
||||
ts_4 = next(self.ts_iter)
|
||||
empty_run = dict(name=None, created_at=None, lower=None,
|
||||
empty_run = dict(name=None, timestamp=None, lower=None,
|
||||
upper=None, object_count=0, bytes_used=0,
|
||||
meta_timestamp=None, deleted=0,
|
||||
state=0, state_timestamp=None, epoch=None)
|
||||
# name, timestamp must be given
|
||||
assert_initialisation_fails(empty_run.copy())
|
||||
assert_initialisation_fails(dict(empty_run, name='a/c'), TypeError)
|
||||
assert_initialisation_fails(dict(empty_run, created_at=ts_1))
|
||||
assert_initialisation_fails(dict(empty_run, timestamp=ts_1))
|
||||
# name must be form a/c
|
||||
assert_initialisation_fails(dict(empty_run, name='c', created_at=ts_1))
|
||||
assert_initialisation_fails(dict(empty_run, name='', created_at=ts_1))
|
||||
assert_initialisation_fails(dict(empty_run, name='c', timestamp=ts_1))
|
||||
assert_initialisation_fails(dict(empty_run, name='', timestamp=ts_1))
|
||||
assert_initialisation_fails(dict(empty_run, name='/a/c',
|
||||
created_at=ts_1))
|
||||
timestamp=ts_1))
|
||||
assert_initialisation_fails(dict(empty_run, name='/c',
|
||||
created_at=ts_1))
|
||||
timestamp=ts_1))
|
||||
# lower, upper can be None
|
||||
expect = dict(name='a/c', created_at=ts_1.internal, lower='',
|
||||
expect = dict(name='a/c', timestamp=ts_1.internal, lower='',
|
||||
upper='', object_count=0, bytes_used=0,
|
||||
meta_timestamp=ts_1.internal, deleted=0,
|
||||
state=0, state_timestamp=ts_1.internal,
|
||||
epoch=None)
|
||||
assert_initialisation_ok(dict(empty_run, name='a/c', created_at=ts_1),
|
||||
assert_initialisation_ok(dict(empty_run, name='a/c', timestamp=ts_1),
|
||||
expect)
|
||||
assert_initialisation_ok(dict(name='a/c', created_at=ts_1), expect)
|
||||
assert_initialisation_ok(dict(name='a/c', timestamp=ts_1), expect)
|
||||
|
||||
good_run = dict(name='a/c', created_at=ts_1, lower='l',
|
||||
good_run = dict(name='a/c', timestamp=ts_1, lower='l',
|
||||
upper='u', object_count=2, bytes_used=10,
|
||||
meta_timestamp=ts_2, deleted=0,
|
||||
state=1, state_timestamp=ts_3.internal,
|
||||
@ -6842,7 +6842,7 @@ class TestShardRange(unittest.TestCase):
|
||||
assert_initialisation_ok(good_deleted,
|
||||
dict(expect, deleted=1))
|
||||
|
||||
assert_initialisation_fails(dict(good_run, created_at='water balloon'))
|
||||
assert_initialisation_fails(dict(good_run, timestamp='water balloon'))
|
||||
|
||||
assert_initialisation_fails(
|
||||
dict(good_run, meta_timestamp='water balloon'))
|
||||
@ -6871,7 +6871,7 @@ class TestShardRange(unittest.TestCase):
|
||||
state=1, state_timestamp=ts_3, epoch=ts_4)
|
||||
sr_dict = dict(sr)
|
||||
expected = {
|
||||
'name': 'a/test', 'created_at': ts_1.internal, 'lower': lower,
|
||||
'name': 'a/test', 'timestamp': ts_1.internal, 'lower': lower,
|
||||
'upper': upper, 'object_count': 10, 'bytes_used': 100,
|
||||
'meta_timestamp': ts_2.internal, 'deleted': 0,
|
||||
'state': 1, 'state_timestamp': ts_3.internal, 'epoch': ts_4}
|
||||
@ -6892,7 +6892,7 @@ class TestShardRange(unittest.TestCase):
|
||||
with self.assertRaises(KeyError):
|
||||
utils.ShardRange.from_dict(bad_dict)
|
||||
# But __init__ still (generally) works!
|
||||
if key not in ('name', 'created_at'):
|
||||
if key not in ('name', 'timestamp'):
|
||||
utils.ShardRange(**bad_dict)
|
||||
else:
|
||||
with self.assertRaises(TypeError):
|
||||
@ -7094,10 +7094,10 @@ class TestShardRange(unittest.TestCase):
|
||||
self.assertTrue(sr.deleted)
|
||||
old_sr_dict = dict(old_sr)
|
||||
old_sr_dict.pop('deleted')
|
||||
old_sr_dict.pop('created_at')
|
||||
old_sr_dict.pop('timestamp')
|
||||
sr_dict = dict(sr)
|
||||
sr_dict.pop('deleted')
|
||||
sr_dict.pop('created_at')
|
||||
sr_dict.pop('timestamp')
|
||||
self.assertEqual(old_sr_dict, sr_dict)
|
||||
|
||||
# no change
|
||||
@ -7444,13 +7444,13 @@ class TestShardRange(unittest.TestCase):
|
||||
|
||||
new_timestamp = next(self.ts_iter)
|
||||
new = sr.copy(timestamp=new_timestamp)
|
||||
self.assertEqual(dict(sr, created_at=new_timestamp.internal,
|
||||
self.assertEqual(dict(sr, timestamp=new_timestamp.internal,
|
||||
meta_timestamp=new_timestamp.internal,
|
||||
state_timestamp=new_timestamp.internal),
|
||||
dict(new))
|
||||
|
||||
new = sr.copy(timestamp=new_timestamp, object_count=99)
|
||||
self.assertEqual(dict(sr, created_at=new_timestamp.internal,
|
||||
self.assertEqual(dict(sr, timestamp=new_timestamp.internal,
|
||||
meta_timestamp=new_timestamp.internal,
|
||||
state_timestamp=new_timestamp.internal,
|
||||
object_count=99),
|
||||
|
@ -645,7 +645,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
"SELECT name FROM shard_ranges").fetchone()[0],
|
||||
'"a/{<shardrange \'&\' name>}"')
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT created_at FROM shard_ranges").fetchone()[0],
|
||||
"SELECT timestamp FROM shard_ranges").fetchone()[0],
|
||||
timestamp)
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT meta_timestamp FROM shard_ranges").fetchone()[0],
|
||||
@ -670,7 +670,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
"SELECT name FROM shard_ranges").fetchone()[0],
|
||||
'"a/{<shardrange \'&\' name>}"')
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT created_at FROM shard_ranges").fetchone()[0],
|
||||
"SELECT timestamp FROM shard_ranges").fetchone()[0],
|
||||
timestamp)
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT meta_timestamp FROM shard_ranges").fetchone()[0],
|
||||
@ -697,7 +697,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
"SELECT name FROM shard_ranges").fetchone()[0],
|
||||
'"a/{<shardrange \'&\' name>}"')
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT created_at FROM shard_ranges").fetchone()[0],
|
||||
"SELECT timestamp FROM shard_ranges").fetchone()[0],
|
||||
timestamp)
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT meta_timestamp FROM shard_ranges").fetchone()[0],
|
||||
@ -722,7 +722,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
"SELECT name FROM shard_ranges").fetchone()[0],
|
||||
'"a/{<shardrange \'&\' name>}"')
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT created_at FROM shard_ranges").fetchone()[0],
|
||||
"SELECT timestamp FROM shard_ranges").fetchone()[0],
|
||||
timestamp) # Not old_put_timestamp!
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT meta_timestamp FROM shard_ranges").fetchone()[0],
|
||||
@ -748,7 +748,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
"SELECT name FROM shard_ranges").fetchone()[0],
|
||||
'"a/{<shardrange \'&\' name>}"')
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT created_at FROM shard_ranges").fetchone()[0],
|
||||
"SELECT timestamp FROM shard_ranges").fetchone()[0],
|
||||
timestamp) # Not old_delete_timestamp!
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT meta_timestamp FROM shard_ranges").fetchone()[0],
|
||||
@ -775,7 +775,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
"SELECT name FROM shard_ranges").fetchone()[0],
|
||||
'"a/{<shardrange \'&\' name>}"')
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT created_at FROM shard_ranges").fetchone()[0],
|
||||
"SELECT timestamp FROM shard_ranges").fetchone()[0],
|
||||
timestamp)
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT deleted FROM shard_ranges").fetchone()[0], 1)
|
||||
@ -792,7 +792,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
"SELECT name FROM shard_ranges").fetchone()[0],
|
||||
'"a/{<shardrange \'&\' name>}"')
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT created_at FROM shard_ranges").fetchone()[0],
|
||||
"SELECT timestamp FROM shard_ranges").fetchone()[0],
|
||||
timestamp)
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT meta_timestamp FROM shard_ranges").fetchone()[0],
|
||||
@ -822,7 +822,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
"SELECT name FROM shard_ranges").fetchone()[0],
|
||||
'"a/{<shardrange \'&\' name>}"')
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT created_at FROM shard_ranges").fetchone()[0],
|
||||
"SELECT timestamp FROM shard_ranges").fetchone()[0],
|
||||
timestamp)
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT meta_timestamp FROM shard_ranges").fetchone()[0],
|
||||
@ -849,7 +849,7 @@ class TestContainerBroker(unittest.TestCase):
|
||||
"SELECT name FROM shard_ranges").fetchone()[0],
|
||||
'"a/{<shardrange \'&\' name>}"')
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT created_at FROM shard_ranges").fetchone()[0],
|
||||
"SELECT timestamp FROM shard_ranges").fetchone()[0],
|
||||
timestamp)
|
||||
self.assertEqual(conn.execute(
|
||||
"SELECT meta_timestamp FROM shard_ranges").fetchone()[0],
|
||||
@ -3559,14 +3559,14 @@ class TestContainerBroker(unittest.TestCase):
|
||||
# Add some ShardRanges
|
||||
shard_ranges = [ShardRange(
|
||||
name='.sharded_a/shard_range_%s' % i,
|
||||
created_at=next(ts_iter), lower='obj_%d' % i,
|
||||
timestamp=next(ts_iter), lower='obj_%d' % i,
|
||||
upper='obj_%d' % (i + 2),
|
||||
object_count=len(objects[i:i + 2]),
|
||||
bytes_used=sum(obj['size'] for obj in objects[i:i + 2]),
|
||||
meta_timestamp=next(ts_iter)) for i in range(0, 6, 2)]
|
||||
deleted_range = ShardRange('.sharded_a/shard_range_z', next(ts_iter),
|
||||
'z', '', state=ShardRange.ACTIVE, deleted=1)
|
||||
own_sr = ShardRange(name='a/c', created_at=next(ts_iter),
|
||||
own_sr = ShardRange(name='a/c', timestamp=next(ts_iter),
|
||||
state=ShardRange.ACTIVE)
|
||||
broker.merge_shard_ranges([own_sr] + shard_ranges + [deleted_range])
|
||||
ts_epoch = next(ts_iter)
|
||||
|
@ -2282,7 +2282,7 @@ class TestContainerController(unittest.TestCase):
|
||||
check_bad_body('not json')
|
||||
check_bad_body('')
|
||||
bad_shard_range = dict(ShardRange('a/c', next(ts_iter)))
|
||||
bad_shard_range.pop('created_at')
|
||||
bad_shard_range.pop('timestamp')
|
||||
check_bad_body(json.dumps([bad_shard_range]))
|
||||
|
||||
def check_not_shard_record_type(headers):
|
||||
|
Loading…
Reference in New Issue
Block a user