# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for swift.common.db"""

import os
import sys
import unittest
from tempfile import mkdtemp
from shutil import rmtree, copy
from uuid import uuid4
import six.moves.cPickle as pickle

import base64
import json
import sqlite3
import itertools
import time
import random
from mock import patch, MagicMock

from eventlet.timeout import Timeout
from six.moves import range

import six

import swift.common.db
from swift.common.constraints import \
    MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE
from swift.common.db import chexor, dict_factory, get_db_connection, \
    DatabaseBroker, DatabaseConnectionError, DatabaseAlreadyExists, \
    GreenDBConnection, PICKLE_PROTOCOL, zero_like
from swift.common.utils import normalize_timestamp, mkdirs, Timestamp
from swift.common.exceptions import LockTimeout
from swift.common.swob import HTTPException

from test.unit import with_tempdir, make_timestamp_iter


class TestHelperFunctions(unittest.TestCase):

    def test_zero_like(self):
        expectations = {
            # value => expected
            None: True,
            True: False,
            '': True,
            'asdf': False,
            0: True,
            1: False,
            '0': True,
            '1': False,
        }
        errors = []
        for value, expected in expectations.items():
            rv = zero_like(value)
            if rv != expected:
                errors.append('zero_like(%r) => %r expected %r' % (
                    value, rv, expected))
        if errors:
            self.fail('Some unexpected return values:\n' + '\n'.join(errors))


class TestDatabaseConnectionError(unittest.TestCase):

    def test_str(self):
        err = \
            DatabaseConnectionError(':memory:', 'No valid database connection')
        self.assertIn(':memory:', str(err))
        self.assertIn('No valid database connection', str(err))
        err = DatabaseConnectionError(':memory:',
                                      'No valid database connection',
                                      timeout=1357)
        self.assertIn(':memory:', str(err))
        self.assertIn('No valid database connection', str(err))
        self.assertIn('1357', str(err))


class TestDictFactory(unittest.TestCase):

    def test_normal_case(self):
        conn = sqlite3.connect(':memory:')
        conn.execute('CREATE TABLE test (one TEXT, two INTEGER)')
        conn.execute('INSERT INTO test (one, two) VALUES ("abc", 123)')
        conn.execute('INSERT INTO test (one, two) VALUES ("def", 456)')
        conn.commit()
        curs = conn.execute('SELECT one, two FROM test')
        self.assertEqual(dict_factory(curs, next(curs)),
                         {'one': 'abc', 'two': 123})
        self.assertEqual(dict_factory(curs, next(curs)),
                         {'one': 'def', 'two': 456})
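
    def test_as_row_factory(self):
        # Illustrative sketch: dict_factory takes the (cursor, row) arguments
        # that sqlite3 passes to a row_factory, so it can also be installed
        # on a connection to get dicts straight from fetchone()/fetchall().
        # The test name and this usage are assumptions, not taken from the
        # original suite.
        conn = sqlite3.connect(':memory:')
        conn.row_factory = dict_factory
        conn.execute('CREATE TABLE test (one TEXT, two INTEGER)')
        conn.execute('INSERT INTO test (one, two) VALUES ("abc", 123)')
        conn.commit()
        self.assertEqual(conn.execute('SELECT one, two FROM test').fetchone(),
                         {'one': 'abc', 'two': 123})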


class TestChexor(unittest.TestCase):

    def test_normal_case(self):
        self.assertEqual(
            chexor('d41d8cd98f00b204e9800998ecf8427e',
                   'new name', normalize_timestamp(1)),
            '4f2ea31ac14d4273fe32ba08062b21de')

    def test_invalid_old_hash(self):
        self.assertRaises(ValueError, chexor, 'oldhash', 'name',
                          normalize_timestamp(1))

    def test_no_name(self):
        self.assertRaises(Exception, chexor,
                          'd41d8cd98f00b204e9800998ecf8427e', None,
                          normalize_timestamp(1))

    def test_chexor(self):
        ts = (normalize_timestamp(ts) for ts in
              itertools.count(int(time.time())))

        objects = [
            ('frank', next(ts)),
            ('bob', next(ts)),
            ('tom', next(ts)),
            ('frank', next(ts)),
            ('tom', next(ts)),
            ('bob', next(ts)),
        ]
        hash_ = '0'
        random.shuffle(objects)
        for obj in objects:
            hash_ = chexor(hash_, *obj)

        other_hash = '0'
        random.shuffle(objects)
        for obj in objects:
            other_hash = chexor(other_hash, *obj)

        self.assertEqual(hash_, other_hash)
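
    def test_chexor_is_self_inverse(self):
        # Illustrative sketch, assuming chexor folds each (name, timestamp)
        # pair into the running hash with XOR, as its name and the
        # order-independence shown above suggest: applying the same pair
        # twice should give back the starting 32-hex-digit hash. The test
        # name and this property are assumptions, not taken from the
        # original suite.
        start = 'd41d8cd98f00b204e9800998ecf8427e'
        name, timestamp = 'frank', normalize_timestamp(1)
        once = chexor(start, name, timestamp)
        self.assertNotEqual(start, once)
        self.assertEqual(start, chexor(once, name, timestamp))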


class TestGreenDBConnection(unittest.TestCase):

    def test_execute_when_locked(self):
        # This test is dependent on the code under test calling execute as
        # sqlite3.Cursor.execute in a subclass.
        class InterceptCursor(sqlite3.Cursor):
            pass
        db_error = sqlite3.OperationalError('database is locked')
        InterceptCursor.execute = MagicMock(side_effect=db_error)
        with patch('sqlite3.Cursor', new=InterceptCursor):
            conn = sqlite3.connect(':memory:', check_same_thread=False,
                                   factory=GreenDBConnection, timeout=0.1)
            self.assertRaises(Timeout, conn.execute, 'select 1')
            self.assertTrue(InterceptCursor.execute.called)
            self.assertEqual(InterceptCursor.execute.call_args_list,
                             list((InterceptCursor.execute.call_args,) *
                                  InterceptCursor.execute.call_count))

    def test_commit_when_locked(self):
        # This test is dependent on the code under test calling commit as
        # sqlite3.Connection.commit in a subclass.
        class InterceptConnection(sqlite3.Connection):
            pass
        db_error = sqlite3.OperationalError('database is locked')
        InterceptConnection.commit = MagicMock(side_effect=db_error)
        with patch('sqlite3.Connection', new=InterceptConnection):
            conn = sqlite3.connect(':memory:', check_same_thread=False,
                                   factory=GreenDBConnection, timeout=0.1)
            self.assertRaises(Timeout, conn.commit)
            self.assertTrue(InterceptConnection.commit.called)
            self.assertEqual(InterceptConnection.commit.call_args_list,
                             list((InterceptConnection.commit.call_args,) *
                                  InterceptConnection.commit.call_count))
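
    def test_execute_and_commit_when_not_locked(self):
        # Illustrative sketch of the happy path: when the database is not
        # locked, a GreenDBConnection is assumed to execute and commit like
        # a plain sqlite3 connection, just wrapped in its timeout. The test
        # name and the exact assertions are assumptions, not taken from the
        # original suite.
        conn = sqlite3.connect(':memory:', check_same_thread=False,
                               factory=GreenDBConnection, timeout=0.1)
        conn.execute('CREATE TABLE test (one TEXT)')
        conn.execute('INSERT INTO test (one) VALUES ("1")')
        conn.commit()
        self.assertEqual(conn.execute('SELECT one FROM test').fetchall(),
                         [('1',)])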


class TestGetDBConnection(unittest.TestCase):

    def test_normal_case(self):
        conn = get_db_connection(':memory:')
        self.assertTrue(hasattr(conn, 'execute'))

    def test_invalid_path(self):
        self.assertRaises(DatabaseConnectionError, get_db_connection,
                          'invalid database path / name')

    def test_locked_db(self):
        # This test is dependent on the code under test calling execute as
        # sqlite3.Cursor.execute in a subclass.
        class InterceptCursor(sqlite3.Cursor):
            pass

        db_error = sqlite3.OperationalError('database is locked')
        mock_db_cmd = MagicMock(side_effect=db_error)
        InterceptCursor.execute = mock_db_cmd

        with patch('sqlite3.Cursor', new=InterceptCursor):
            self.assertRaises(Timeout, get_db_connection, ':memory:',
                              timeout=0.1)
            self.assertTrue(mock_db_cmd.called)
            self.assertEqual(mock_db_cmd.call_args_list,
                             list((mock_db_cmd.call_args,) *
                                  mock_db_cmd.call_count))


class ExampleBroker(DatabaseBroker):
    """
    Concrete enough implementation of a DatabaseBroker.
    """

    db_type = 'test'
    db_contains_type = 'test'
    db_reclaim_timestamp = 'created_at'

    def _initialize(self, conn, put_timestamp, **kwargs):
        if not self.account:
            raise ValueError(
                'Attempting to create a new database with no account set')
        conn.executescript('''
            CREATE TABLE test_stat (
                account TEXT,
                test_count INTEGER DEFAULT 0,
                created_at TEXT,
                put_timestamp TEXT DEFAULT '0',
                delete_timestamp TEXT DEFAULT '0',
                hash TEXT default '00000000000000000000000000000000',
                id TEXT,
                status TEXT DEFAULT '',
                status_changed_at TEXT DEFAULT '0',
                metadata TEXT DEFAULT ''
            );
            CREATE TABLE test (
                ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT,
                created_at TEXT,
                deleted INTEGER DEFAULT 0
            );
            CREATE TRIGGER test_insert AFTER INSERT ON test
            BEGIN
                UPDATE test_stat
                SET test_count = test_count + (1 - new.deleted);
            END;
            CREATE TRIGGER test_delete AFTER DELETE ON test
            BEGIN
                UPDATE test_stat
                SET test_count = test_count - (1 - old.deleted);
            END;
        ''')
        conn.execute("""
            INSERT INTO test_stat (
                account, created_at, id, put_timestamp, status_changed_at,
                status)
            VALUES (?, ?, ?, ?, ?, ?);
        """, (self.account, Timestamp.now().internal, str(uuid4()),
              put_timestamp, put_timestamp, ''))

    def merge_items(self, item_list):
        with self.get() as conn:
            for rec in item_list:
                conn.execute(
                    'DELETE FROM test WHERE name = ? and created_at < ?', (
                        rec['name'], rec['created_at']))
                if not conn.execute(
                        'SELECT 1 FROM test WHERE name = ?',
                        (rec['name'],)).fetchall():
                    conn.execute('''
                    INSERT INTO test (name, created_at, deleted)
                    VALUES (?, ?, ?)''', (
                        rec['name'], rec['created_at'], rec['deleted']))
            conn.commit()

    def _commit_puts_load(self, item_list, entry):
        (name, timestamp, deleted) = entry
        item_list.append({
            'name': name,
            'created_at': timestamp,
            'deleted': deleted,
        })

    def _load_item(self, name, timestamp, deleted):
        if self.db_file == ':memory:':
            record = {
                'name': name,
                'created_at': timestamp,
                'deleted': deleted,
            }
            self.merge_items([record])
            return
        with open(self.pending_file, 'a+b') as fp:
            fp.write(b':')
            fp.write(base64.b64encode(pickle.dumps(
                (name, timestamp, deleted),
                protocol=PICKLE_PROTOCOL)))
            fp.flush()

    def put_test(self, name, timestamp):
        self._load_item(name, timestamp, 0)

    def delete_test(self, name, timestamp):
        self._load_item(name, timestamp, 1)

    def _delete_db(self, conn, timestamp):
        conn.execute("""
            UPDATE test_stat
            SET delete_timestamp = ?,
                status = 'DELETED',
                status_changed_at = ?
            WHERE delete_timestamp < ? """, (timestamp, timestamp, timestamp))

    def _is_deleted(self, conn):
        info = conn.execute('SELECT * FROM test_stat').fetchone()
        return (info['test_count'] in (None, '', 0, '0')) and \
            (Timestamp(info['delete_timestamp']) >
             Timestamp(info['put_timestamp']))


class TestExampleBroker(unittest.TestCase):
    """
    Tests that use the mostly-concrete ExampleBroker to exercise some
    of the abstract methods on DatabaseBroker.
    """

    broker_class = ExampleBroker
    policy = 0

    def setUp(self):
        self.ts = make_timestamp_iter()

    def test_delete_db(self):
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(next(self.ts).internal)
        broker.delete_db(next(self.ts).internal)
        self.assertTrue(broker.is_deleted())

    def test_merge_timestamps_simple_delete(self):
        put_timestamp = next(self.ts).internal
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp)
        created_at = broker.get_info()['created_at']
        broker.merge_timestamps(created_at, put_timestamp, '0')
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], '0')
        self.assertEqual(info['status_changed_at'], put_timestamp)
        # delete
        delete_timestamp = next(self.ts).internal
        broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
        self.assertTrue(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        self.assertTrue(info['status_changed_at'] > Timestamp(put_timestamp))

    def put_item(self, broker, timestamp):
        broker.put_test('test', timestamp)

    def delete_item(self, broker, timestamp):
        broker.delete_test('test', timestamp)

    def test_merge_timestamps_delete_with_objects(self):
        put_timestamp = next(self.ts).internal
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
        created_at = broker.get_info()['created_at']
        broker.merge_timestamps(created_at, put_timestamp, '0')
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], '0')
        self.assertEqual(info['status_changed_at'], put_timestamp)
        # add object
        self.put_item(broker, next(self.ts).internal)
        self.assertEqual(broker.get_info()[
            '%s_count' % broker.db_contains_type], 1)
        # delete
        delete_timestamp = next(self.ts).internal
        broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
        self.assertFalse(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        # status is unchanged
        self.assertEqual(info['status_changed_at'], put_timestamp)
        # count is causing status to hold on
        self.delete_item(broker, next(self.ts).internal)
        self.assertEqual(broker.get_info()[
            '%s_count' % broker.db_contains_type], 0)
        self.assertTrue(broker.is_deleted())

    def test_merge_timestamps_simple_recreate(self):
        put_timestamp = next(self.ts).internal
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
        virgin_status_changed_at = broker.get_info()['status_changed_at']
        created_at = broker.get_info()['created_at']
        delete_timestamp = next(self.ts).internal
        broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
        self.assertTrue(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        orig_status_changed_at = info['status_changed_at']
        self.assertTrue(orig_status_changed_at >
                        Timestamp(virgin_status_changed_at))
        # recreate
        recreate_timestamp = next(self.ts).internal
        status_changed_at = time.time()
        with patch('swift.common.db.time.time', new=lambda: status_changed_at):
            broker.merge_timestamps(created_at, recreate_timestamp, '0')
        self.assertFalse(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], recreate_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        self.assertTrue(info['status_changed_at'], status_changed_at)

    def test_merge_timestamps_recreate_with_objects(self):
        put_timestamp = next(self.ts).internal
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
        created_at = broker.get_info()['created_at']
        # delete
        delete_timestamp = next(self.ts).internal
        broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
        self.assertTrue(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        orig_status_changed_at = info['status_changed_at']
        self.assertTrue(Timestamp(orig_status_changed_at) >=
                        Timestamp(put_timestamp))
        # add object
        self.put_item(broker, next(self.ts).internal)
        count_key = '%s_count' % broker.db_contains_type
        self.assertEqual(broker.get_info()[count_key], 1)
        self.assertFalse(broker.is_deleted())
        # recreate
        recreate_timestamp = next(self.ts).internal
        broker.merge_timestamps(created_at, recreate_timestamp, '0')
        self.assertFalse(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], recreate_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        self.assertEqual(info['status_changed_at'], orig_status_changed_at)
        # count is not causing status to hold on
        self.delete_item(broker, next(self.ts).internal)
        self.assertFalse(broker.is_deleted())

    def test_merge_timestamps_update_put_no_status_change(self):
        put_timestamp = next(self.ts).internal
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
        info = broker.get_info()
        orig_status_changed_at = info['status_changed_at']
        created_at = info['created_at']
        new_put_timestamp = next(self.ts).internal
        broker.merge_timestamps(created_at, new_put_timestamp, '0')
        info = broker.get_info()
        self.assertEqual(new_put_timestamp, info['put_timestamp'])
        self.assertEqual(orig_status_changed_at, info['status_changed_at'])

    def test_merge_timestamps_update_delete_no_status_change(self):
        put_timestamp = next(self.ts).internal
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
        created_at = broker.get_info()['created_at']
        broker.merge_timestamps(created_at, put_timestamp,
                                next(self.ts).internal)
        orig_status_changed_at = broker.get_info()['status_changed_at']
        new_delete_timestamp = next(self.ts).internal
        broker.merge_timestamps(created_at, put_timestamp,
                                new_delete_timestamp)
        info = broker.get_info()
        self.assertEqual(new_delete_timestamp, info['delete_timestamp'])
        self.assertEqual(orig_status_changed_at, info['status_changed_at'])

    def test_get_max_row(self):
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(next(self.ts).internal,
                          storage_policy_index=int(self.policy))
        self.assertEqual(-1, broker.get_max_row())
        self.put_item(broker, next(self.ts).internal)
        self.assertEqual(1, broker.get_max_row())
        self.delete_item(broker, next(self.ts).internal)
        self.assertEqual(2, broker.get_max_row())
        self.put_item(broker, next(self.ts).internal)
        self.assertEqual(3, broker.get_max_row())

    def test_get_info(self):
        broker = self.broker_class(':memory:', account='test', container='c')
        created_at = time.time()
        with patch('swift.common.db.time.time', new=lambda: created_at):
            broker.initialize(Timestamp(1).internal,
                              storage_policy_index=int(self.policy))
        info = broker.get_info()
        count_key = '%s_count' % broker.db_contains_type
        expected = {
            count_key: 0,
            'created_at': Timestamp(created_at).internal,
            'put_timestamp': Timestamp(1).internal,
            'status_changed_at': Timestamp(1).internal,
            'delete_timestamp': '0',
        }
        for k, v in expected.items():
            self.assertEqual(info[k], v,
                             'mismatch for %s, %s != %s' % (
                                 k, info[k], v))

    def test_get_raw_metadata(self):
        broker = self.broker_class(':memory:', account='test', container='c')
        broker.initialize(Timestamp(0).internal,
                          storage_policy_index=int(self.policy))
        self.assertEqual(broker.metadata, {})
        self.assertEqual(broker.get_raw_metadata(), '')
        # This is not obvious. The actual JSON in the database is the same:
        # '{"test\\u062a": ["value\\u062a", "0000000001.00000"]}'
        # The only difference is what reading it produces on py2 and py3.
        # We use native strings for metadata (see native_str_keys_and_values),
        # so types are different.
        if six.PY2:
            key = u'test\u062a'.encode('utf-8')
            value = u'value\u062a'.encode('utf-8')
        else:
            key = u'test\u062a'
            value = u'value\u062a'
        metadata = {
            key: [value, Timestamp(1).internal]
        }
        broker.update_metadata(metadata)
        self.assertEqual(broker.metadata, metadata)
        self.assertEqual(broker.get_raw_metadata(),
                         json.dumps(metadata))
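
    def test_metadata_json_escapes_non_ascii(self):
        # Illustrative sketch of the point made in the comments in
        # test_get_raw_metadata: with json's default ensure_ascii behaviour,
        # the serialized metadata escapes the non-ASCII key and value the
        # same way on py2 and py3, so only deserialization differs. The test
        # name is an assumption, not taken from the original suite.
        raw = json.dumps({u'test\u062a': [u'value\u062a',
                                          Timestamp(1).internal]})
        self.assertIn('\\u062a', raw)
        self.assertNotIn(u'\u062a', raw)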

    def test_put_timestamp(self):
        broker = self.broker_class(':memory:', account='a', container='c')
        orig_put_timestamp = next(self.ts).internal
        broker.initialize(orig_put_timestamp,
                          storage_policy_index=int(self.policy))
        self.assertEqual(broker.get_info()['put_timestamp'],
                         orig_put_timestamp)
        # put_timestamp equal - no change
        broker.update_put_timestamp(orig_put_timestamp)
        self.assertEqual(broker.get_info()['put_timestamp'],
                         orig_put_timestamp)
        # put_timestamp newer - gets newer
        newer_put_timestamp = next(self.ts).internal
        broker.update_put_timestamp(newer_put_timestamp)
        self.assertEqual(broker.get_info()['put_timestamp'],
                         newer_put_timestamp)
        # put_timestamp older - no change
        broker.update_put_timestamp(orig_put_timestamp)
        self.assertEqual(broker.get_info()['put_timestamp'],
                         newer_put_timestamp)

    def test_status_changed_at(self):
        broker = self.broker_class(':memory:', account='test', container='c')
        put_timestamp = next(self.ts).internal
        created_at = time.time()
        with patch('swift.common.db.time.time', new=lambda: created_at):
            broker.initialize(put_timestamp,
                              storage_policy_index=int(self.policy))
        self.assertEqual(broker.get_info()['status_changed_at'],
                         put_timestamp)
        self.assertEqual(broker.get_info()['created_at'],
                         Timestamp(created_at).internal)
        status_changed_at = next(self.ts).internal
        broker.update_status_changed_at(status_changed_at)
        self.assertEqual(broker.get_info()['status_changed_at'],
                         status_changed_at)
        # save the old and get a new status_changed_at
        old_status_changed_at, status_changed_at = \
            status_changed_at, next(self.ts).internal
        broker.update_status_changed_at(status_changed_at)
        self.assertEqual(broker.get_info()['status_changed_at'],
                         status_changed_at)
        # status changed at won't go backwards...
        broker.update_status_changed_at(old_status_changed_at)
        self.assertEqual(broker.get_info()['status_changed_at'],
                         status_changed_at)

    def test_get_syncs(self):
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(Timestamp.now().internal,
                          storage_policy_index=int(self.policy))
        self.assertEqual([], broker.get_syncs())
        broker.merge_syncs([{'sync_point': 1, 'remote_id': 'remote1'}])
        self.assertEqual([{'sync_point': 1, 'remote_id': 'remote1'}],
                         broker.get_syncs())
        self.assertEqual([], broker.get_syncs(incoming=False))
        broker.merge_syncs([{'sync_point': 2, 'remote_id': 'remote2'}],
                           incoming=False)
        self.assertEqual([{'sync_point': 2, 'remote_id': 'remote2'}],
                         broker.get_syncs(incoming=False))

    @with_tempdir
    def test_commit_pending(self, tempdir):
        broker = self.broker_class(os.path.join(tempdir, 'test.db'),
                                   account='a', container='c')
        broker.initialize(next(self.ts).internal,
                          storage_policy_index=int(self.policy))
        self.put_item(broker, next(self.ts).internal)
        qry = 'select * from %s_stat' % broker.db_type
        with broker.get() as conn:
            rows = [dict(x) for x in conn.execute(qry)]
        info = rows[0]
        count_key = '%s_count' % broker.db_contains_type
        self.assertEqual(0, info[count_key])
        broker.get_info()
        self.assertEqual(1, broker.get_info()[count_key])

    @with_tempdir
    def test_maybe_get(self, tempdir):
        broker = self.broker_class(os.path.join(tempdir, 'test.db'),
                                   account='a', container='c')
        broker.initialize(next(self.ts).internal,
                          storage_policy_index=int(self.policy))
        qry = 'select account from %s_stat' % broker.db_type
        with broker.maybe_get(None) as conn:
            rows = [dict(x) for x in conn.execute(qry)]
            self.assertEqual([{'account': 'a'}], rows)
            self.assertEqual(conn, broker.conn)

        with broker.get() as other_conn:
            self.assertEqual(broker.conn, None)
            with broker.maybe_get(other_conn) as identity_conn:
                self.assertIs(other_conn, identity_conn)
                self.assertEqual(broker.conn, None)
            self.assertEqual(broker.conn, None)
        self.assertEqual(broker.conn, conn)


class TestDatabaseBroker(unittest.TestCase):

    def setUp(self):
        self.testdir = mkdtemp()

    def tearDown(self):
        rmtree(self.testdir, ignore_errors=1)

    def test_DB_PREALLOCATION_setting(self):
        u = uuid4().hex
        b = DatabaseBroker(u)
        swift.common.db.DB_PREALLOCATION = False
        b._preallocate()
        swift.common.db.DB_PREALLOCATION = True
        self.assertRaises(OSError, b._preallocate)

    def test_memory_db_init(self):
        broker = DatabaseBroker(':memory:')
        self.assertEqual(broker.db_file, ':memory:')
        self.assertRaises(AttributeError, broker.initialize,
                          normalize_timestamp('0'))

    def test_disk_db_init(self):
        db_file = os.path.join(self.testdir, '1.db')
        broker = DatabaseBroker(db_file)
        self.assertEqual(broker.db_file, db_file)
        self.assertIsNone(broker.conn)

    def test_disk_preallocate(self):
        test_size = [-1]

        def fallocate_stub(fd, size):
            test_size[0] = size

        with patch('swift.common.db.fallocate', fallocate_stub):
            db_file = os.path.join(self.testdir, 'pre.db')
            # Write 1 byte and hope that the fs will allocate less than 1 MB.
            f = open(db_file, "w")
            f.write('@')
            f.close()
            b = DatabaseBroker(db_file)
            b._preallocate()
            # We only wrote 1 byte, so we should end with the 1st step or 1 MB.
            self.assertEqual(test_size[0], 1024 * 1024)

    def test_initialize(self):
        self.assertRaises(AttributeError,
                          DatabaseBroker(':memory:').initialize,
                          normalize_timestamp('1'))
        stub_dict = {}

        def stub(*args, **kwargs):
            stub_dict.clear()
            stub_dict['args'] = args
            stub_dict.update(kwargs)
        broker = DatabaseBroker(':memory:')
        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        self.assertTrue(hasattr(stub_dict['args'][0], 'execute'))
        self.assertEqual(stub_dict['args'][1], '0000000001.00000')
        with broker.get() as conn:
            conn.execute('SELECT * FROM outgoing_sync')
            conn.execute('SELECT * FROM incoming_sync')
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        self.assertTrue(hasattr(stub_dict['args'][0], 'execute'))
        self.assertEqual(stub_dict['args'][1], '0000000001.00000')
        with broker.get() as conn:
            conn.execute('SELECT * FROM outgoing_sync')
            conn.execute('SELECT * FROM incoming_sync')
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        broker._initialize = stub
        self.assertRaises(DatabaseAlreadyExists,
                          broker.initialize, normalize_timestamp('1'))

    def test_delete_db(self):
        def init_stub(conn, put_timestamp, **kwargs):
            conn.execute('CREATE TABLE test (one TEXT)')
            conn.execute('''CREATE TABLE test_stat (
                id TEXT, put_timestamp TEXT, delete_timestamp TEXT,
                status TEXT, status_changed_at TEXT, metadata TEXT)''')
            meta = {'foo': ('bar', normalize_timestamp('0'))}
            conn.execute(
                '''INSERT INTO test_stat (
                    id, put_timestamp, delete_timestamp, status,
                    status_changed_at, metadata) VALUES (?, ?, ?, ?, ?, ?)''',
                (str(uuid4()), put_timestamp, '0', '', '0', json.dumps(meta)))
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.commit()

        broker = DatabaseBroker(':memory:')
        broker.db_type = 'test'
        broker._initialize = init_stub
        # Initializes a good broker for us
        broker.initialize(normalize_timestamp('1'))
        info = broker.get_info()
        self.assertEqual('0', info['delete_timestamp'])
        self.assertEqual('', info['status'])
        self.assertIsNotNone(broker.conn)
        broker.delete_db(normalize_timestamp('2'))
        info = broker.get_info()
        self.assertEqual(normalize_timestamp('2'), info['delete_timestamp'])
        self.assertEqual('DELETED', info['status'])

        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        broker.db_type = 'test'
        broker._initialize = init_stub
        broker.initialize(normalize_timestamp('1'))
        info = broker.get_info()
        self.assertEqual('0', info['delete_timestamp'])
        self.assertEqual('', info['status'])
        broker.delete_db(normalize_timestamp('2'))
        info = broker.get_info()
        self.assertEqual(normalize_timestamp('2'), info['delete_timestamp'])
        self.assertEqual('DELETED', info['status'])

        # ensure that metadata was cleared
        m2 = broker.metadata
        self.assertEqual(m2, {'foo': ['', normalize_timestamp('2')]})

    def test_get(self):
        broker = DatabaseBroker(':memory:')
        with self.assertRaises(DatabaseConnectionError) as raised, \
                broker.get() as conn:
            conn.execute('SELECT 1')
        self.assertEqual(
            str(raised.exception),
            "DB connection error (:memory:, 0):\nDB doesn't exist")

        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        with self.assertRaises(DatabaseConnectionError) as raised, \
                broker.get() as conn:
            conn.execute('SELECT 1')
        self.assertEqual(
            str(raised.exception),
            "DB connection error (%s, 0):\nDB doesn't exist" % broker.db_file)

        def stub(*args, **kwargs):
            pass
        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        with broker.get() as conn:
            conn.execute('CREATE TABLE test (one TEXT)')
        try:
            with broker.get() as conn:
                conn.execute('INSERT INTO test (one) VALUES ("1")')
                raise Exception('test')
                conn.commit()
        except Exception:
            pass
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        with broker.get() as conn:
            self.assertEqual(
                [r[0] for r in conn.execute('SELECT * FROM test')], [])
        with broker.get() as conn:
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.commit()
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        with broker.get() as conn:
            self.assertEqual(
                [r[0] for r in conn.execute('SELECT * FROM test')], ['1'])

        dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
        mkdirs(dbpath)
        qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'tests', 'db')
        with patch('swift.common.db.renamer', lambda a, b,
                   fsync: b):
            # Test malformed database
            copy(os.path.join(os.path.dirname(__file__),
                              'malformed_example.db'),
                 os.path.join(dbpath, '1.db'))
            broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
            broker.db_type = 'test'
            with self.assertRaises(sqlite3.DatabaseError) as raised, \
                    broker.get() as conn:
                conn.execute('SELECT * FROM test')
            self.assertEqual(
                str(raised.exception),
                'Quarantined %s to %s due to malformed database' %
                (dbpath, qpath))
            # Test malformed schema database
            copy(os.path.join(os.path.dirname(__file__),
                              'malformed_schema_example.db'),
                 os.path.join(dbpath, '1.db'))
            broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
            broker.db_type = 'test'
            with self.assertRaises(sqlite3.DatabaseError) as raised, \
                    broker.get() as conn:
                conn.execute('SELECT * FROM test')
            self.assertEqual(
                str(raised.exception),
                'Quarantined %s to %s due to malformed database' %
                (dbpath, qpath))
            # Test corrupted database
            copy(os.path.join(os.path.dirname(__file__),
                              'corrupted_example.db'),
                 os.path.join(dbpath, '1.db'))
            broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
            broker.db_type = 'test'
            with self.assertRaises(sqlite3.DatabaseError) as raised, \
                    broker.get() as conn:
                conn.execute('SELECT * FROM test')
            self.assertEqual(
                str(raised.exception),
                'Quarantined %s to %s due to corrupted database' %
                (dbpath, qpath))
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2018-02-06 13:08:42 +01:00
|
|
|
    def test_get_raw_metadata_missing_container_info(self):
        # Test missing container_info/container_stat row
        dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
        mkdirs(dbpath)
        qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'containers',
                             'db')
        copy(os.path.join(os.path.dirname(__file__),
                          'missing_container_info.db'),
             os.path.join(dbpath, '1.db'))

        broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
        broker.db_type = 'container'

        with self.assertRaises(sqlite3.DatabaseError) as raised:
            broker.get_raw_metadata()
        self.assertEqual(
            str(raised.exception),
            'Quarantined %s to %s due to missing row in container_stat table' %
            (dbpath, qpath))

    def test_lock(self):
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'), timeout=.1)
        with self.assertRaises(DatabaseConnectionError) as raised, \
                broker.lock():
            pass
        self.assertEqual(
            str(raised.exception),
            "DB connection error (%s, 0):\nDB doesn't exist" % broker.db_file)

        def stub(*args, **kwargs):
            pass
        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        with broker.lock():
            pass
        with broker.lock():
            pass
        broker2 = DatabaseBroker(os.path.join(self.testdir, '1.db'),
                                 timeout=.1)
        broker2._initialize = stub
        with broker.lock():
            with self.assertRaises(LockTimeout) as raised, \
                    broker2.lock():
                pass
            self.assertEqual(str(raised.exception),
                             '0.1 seconds: %s' % broker.db_file)

        try:
            with broker.lock():
                raise Exception('test')
        except Exception:
            pass
        with broker.lock():
            pass

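    # newid() is expected to replace the id stored in the *_stat table and to
    # leave an incoming_sync row for the remote id (sync_point -1 while the
    # item table is still empty).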
    def test_newid(self):
        broker = DatabaseBroker(':memory:')
        broker.db_type = 'test'
        broker.db_contains_type = 'test'
        uuid1 = str(uuid4())

        def _initialize(conn, timestamp, **kwargs):
            conn.execute('CREATE TABLE test (one TEXT)')
            conn.execute('CREATE TABLE test_stat (id TEXT)')
            conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
            conn.commit()
        broker._initialize = _initialize
        broker.initialize(normalize_timestamp('1'))
        uuid2 = str(uuid4())
        broker.newid(uuid2)
        with broker.get() as conn:
            uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
            self.assertEqual(len(uuids), 1)
            self.assertNotEqual(uuids[0], uuid1)
            uuid1 = uuids[0]
            points = [(r[0], r[1]) for r in conn.execute(
                'SELECT sync_point, '
                'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
            self.assertEqual(len(points), 1)
            self.assertEqual(points[0][0], -1)
            self.assertEqual(points[0][1], uuid2)
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.commit()
        uuid3 = str(uuid4())
        broker.newid(uuid3)
        with broker.get() as conn:
            uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
            self.assertEqual(len(uuids), 1)
            self.assertNotEqual(uuids[0], uuid1)
            uuid1 = uuids[0]
            points = [(r[0], r[1]) for r in conn.execute(
                'SELECT sync_point, '
                'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid3,))]
            self.assertEqual(len(points), 1)
            self.assertEqual(points[0][1], uuid3)
        broker.newid(uuid2)
        with broker.get() as conn:
            uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
            self.assertEqual(len(uuids), 1)
            self.assertNotEqual(uuids[0], uuid1)
            points = [(r[0], r[1]) for r in conn.execute(
                'SELECT sync_point, '
                'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
            self.assertEqual(len(points), 1)
            self.assertEqual(points[0][1], uuid2)

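    # get_items_since(row_id, limit) is expected to return at most `limit`
    # rows whose ROWID is greater than `row_id`, in row order.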
    def test_get_items_since(self):
        broker = DatabaseBroker(':memory:')
        broker.db_type = 'test'
        broker.db_contains_type = 'test'

        def _initialize(conn, timestamp, **kwargs):
            conn.execute('CREATE TABLE test (one TEXT)')
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.execute('INSERT INTO test (one) VALUES ("2")')
            conn.execute('INSERT INTO test (one) VALUES ("3")')
            conn.commit()
        broker._initialize = _initialize
        broker.initialize(normalize_timestamp('1'))
        self.assertEqual(broker.get_items_since(-1, 10),
                         [{'one': '1'}, {'one': '2'}, {'one': '3'}])
        self.assertEqual(broker.get_items_since(-1, 2),
                         [{'one': '1'}, {'one': '2'}])
        self.assertEqual(broker.get_items_since(1, 2),
                         [{'one': '2'}, {'one': '3'}])
        self.assertEqual(broker.get_items_since(3, 2), [])
        self.assertEqual(broker.get_items_since(999, 2), [])

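    # get_sync() is expected to return -1 for an unknown remote id and
    # otherwise the last recorded sync point; incoming and outgoing points
    # are tracked independently (incoming=False reads the outgoing side).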
    def test_get_sync(self):
        broker = DatabaseBroker(':memory:')
        broker.db_type = 'test'
        broker.db_contains_type = 'test'
        uuid1 = str(uuid4())

        def _initialize(conn, timestamp, **kwargs):
            conn.execute('CREATE TABLE test (one TEXT)')
            conn.execute('CREATE TABLE test_stat (id TEXT)')
            conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.commit()
            pass
        broker._initialize = _initialize
        broker.initialize(normalize_timestamp('1'))
        uuid2 = str(uuid4())
        self.assertEqual(broker.get_sync(uuid2), -1)
        broker.newid(uuid2)
        self.assertEqual(broker.get_sync(uuid2), 1)
        uuid3 = str(uuid4())
        self.assertEqual(broker.get_sync(uuid3), -1)
        with broker.get() as conn:
            conn.execute('INSERT INTO test (one) VALUES ("2")')
            conn.commit()
        broker.newid(uuid3)
        self.assertEqual(broker.get_sync(uuid2), 1)
        self.assertEqual(broker.get_sync(uuid3), 2)
        self.assertEqual(broker.get_sync(uuid2, incoming=False), -1)
        self.assertEqual(broker.get_sync(uuid3, incoming=False), -1)
        broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}],
                           incoming=False)
        self.assertEqual(broker.get_sync(uuid2), 1)
        self.assertEqual(broker.get_sync(uuid3), 2)
        self.assertEqual(broker.get_sync(uuid2, incoming=False), 1)
        self.assertEqual(broker.get_sync(uuid3, incoming=False), -1)
        broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}],
                           incoming=False)
        self.assertEqual(broker.get_sync(uuid2, incoming=False), 1)
        self.assertEqual(broker.get_sync(uuid3, incoming=False), 2)

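    # merge_syncs() is expected to record a sync point per remote id, keeping
    # the highest point seen and never lowering an existing one.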
    def test_merge_syncs(self):
        broker = DatabaseBroker(':memory:')

        def stub(*args, **kwargs):
            pass
        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        uuid2 = str(uuid4())
        broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}])
        self.assertEqual(broker.get_sync(uuid2), 1)
        uuid3 = str(uuid4())
        broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}])
        self.assertEqual(broker.get_sync(uuid2), 1)
        self.assertEqual(broker.get_sync(uuid3), 2)
        self.assertEqual(broker.get_sync(uuid2, incoming=False), -1)
        self.assertEqual(broker.get_sync(uuid3, incoming=False), -1)
        broker.merge_syncs([{'sync_point': 3, 'remote_id': uuid2},
                            {'sync_point': 4, 'remote_id': uuid3}],
                           incoming=False)
        self.assertEqual(broker.get_sync(uuid2, incoming=False), 3)
        self.assertEqual(broker.get_sync(uuid3, incoming=False), 4)
        self.assertEqual(broker.get_sync(uuid2), 1)
        self.assertEqual(broker.get_sync(uuid3), 2)
        broker.merge_syncs([{'sync_point': 5, 'remote_id': uuid2}])
        self.assertEqual(broker.get_sync(uuid2), 5)
        # max sync point sticks
        broker.merge_syncs([{'sync_point': 5, 'remote_id': uuid2}])
        self.assertEqual(broker.get_sync(uuid2), 5)
        self.assertEqual(broker.get_sync(uuid3), 2)
        broker.merge_syncs([{'sync_point': 4, 'remote_id': uuid2}])
        self.assertEqual(broker.get_sync(uuid2), 5)
        self.assertEqual(broker.get_sync(uuid3), 2)
        broker.merge_syncs([{'sync_point': -1, 'remote_id': uuid2},
                            {'sync_point': 3, 'remote_id': uuid3}])
        self.assertEqual(broker.get_sync(uuid2), 5)
        self.assertEqual(broker.get_sync(uuid3), 3)
        self.assertEqual(broker.get_sync(uuid2, incoming=False), 3)
        self.assertEqual(broker.get_sync(uuid3, incoming=False), 4)

    def test_get_replication_info(self):
        self.get_replication_info_tester(metadata=False)

    def test_get_replication_info_with_metadata(self):
        self.get_replication_info_tester(metadata=True)

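    # Helper for the two tests above: builds a broker around a minimal 'test'
    # schema (roughly mirroring the account/container stat tables) so that
    # get_replication_info() can be exercised with and without metadata.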
    def get_replication_info_tester(self, metadata=False):
        broker = DatabaseBroker(':memory:', account='a')
        broker.db_type = 'test'
        broker.db_contains_type = 'test'
        broker_creation = normalize_timestamp(1)
        broker_uuid = str(uuid4())
        broker_metadata = metadata and json.dumps(
            {'Test': ('Value', normalize_timestamp(1))}) or ''

        def _initialize(conn, put_timestamp, **kwargs):
            if put_timestamp is None:
                put_timestamp = normalize_timestamp(0)
            conn.executescript('''
                CREATE TABLE test (
                    ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
                    name TEXT UNIQUE,
                    created_at TEXT
                );
                CREATE TRIGGER test_insert AFTER INSERT ON test
                BEGIN
                    UPDATE test_stat
                    SET test_count = test_count + 1,
                        hash = chexor(hash, new.name, new.created_at);
                END;
                CREATE TRIGGER test_update BEFORE UPDATE ON test
                BEGIN
                    SELECT RAISE(FAIL,
                                 'UPDATE not allowed; DELETE and INSERT');
                END;
                CREATE TRIGGER test_delete AFTER DELETE ON test
                BEGIN
                    UPDATE test_stat
                    SET test_count = test_count - 1,
                        hash = chexor(hash, old.name, old.created_at);
                END;
                CREATE TABLE test_stat (
                    account TEXT,
                    created_at TEXT,
                    put_timestamp TEXT DEFAULT '0',
                    delete_timestamp TEXT DEFAULT '0',
                    status_changed_at TEXT DEFAULT '0',
                    test_count INTEGER,
                    hash TEXT default '00000000000000000000000000000000',
                    id TEXT
                    %s
                );
                INSERT INTO test_stat (test_count) VALUES (0);
            ''' % (metadata and ", metadata TEXT DEFAULT ''" or ""))
            conn.execute('''
                UPDATE test_stat
                SET account = ?, created_at = ?, id = ?, put_timestamp = ?,
                    status_changed_at = ?
            ''', (broker.account, broker_creation, broker_uuid, put_timestamp,
                  put_timestamp))
            if metadata:
                conn.execute('UPDATE test_stat SET metadata = ?',
                             (broker_metadata,))
            conn.commit()
        broker._initialize = _initialize
        put_timestamp = normalize_timestamp(2)
        broker.initialize(put_timestamp)
        info = broker.get_replication_info()
        self.assertEqual(info, {
            'account': broker.account, 'count': 0,
            'hash': '00000000000000000000000000000000',
            'created_at': broker_creation, 'put_timestamp': put_timestamp,
            'delete_timestamp': '0', 'status_changed_at': put_timestamp,
            'max_row': -1, 'id': broker_uuid, 'metadata': broker_metadata})
        insert_timestamp = normalize_timestamp(3)
        with broker.get() as conn:
            conn.execute('''
                INSERT INTO test (name, created_at) VALUES ('test', ?)
            ''', (insert_timestamp,))
            conn.commit()
        info = broker.get_replication_info()
        self.assertEqual(info, {
            'account': broker.account, 'count': 1,
            'hash': 'bdc4c93f574b0d8c2911a27ce9dd38ba',
            'created_at': broker_creation, 'put_timestamp': put_timestamp,
            'delete_timestamp': '0', 'status_changed_at': put_timestamp,
            'max_row': 1, 'id': broker_uuid, 'metadata': broker_metadata})
        with broker.get() as conn:
            conn.execute('DELETE FROM test')
            conn.commit()
        info = broker.get_replication_info()
        self.assertEqual(info, {
            'account': broker.account, 'count': 0,
            'hash': '00000000000000000000000000000000',
            'created_at': broker_creation, 'put_timestamp': put_timestamp,
            'delete_timestamp': '0', 'status_changed_at': put_timestamp,
            'max_row': 1, 'id': broker_uuid, 'metadata': broker_metadata})
        return broker

    # only testing _reclaim_metadata here
    @patch.object(DatabaseBroker, '_reclaim', return_value='')
    def test_metadata(self, mock_reclaim):
        # Initializes a good broker for us
        broker = self.get_replication_info_tester(metadata=True)
        # Add our first item
        first_timestamp = normalize_timestamp(1)
        first_value = '1'
        broker.update_metadata({'First': [first_value, first_timestamp]})
        self.assertIn('First', broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        # Add our second item
        second_timestamp = normalize_timestamp(2)
        second_value = '2'
        broker.update_metadata({'Second': [second_value, second_timestamp]})
        self.assertIn('First', broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        self.assertIn('Second', broker.metadata)
        self.assertEqual(broker.metadata['Second'],
                         [second_value, second_timestamp])
        # Update our first item
        first_timestamp = normalize_timestamp(3)
        first_value = '1b'
        broker.update_metadata({'First': [first_value, first_timestamp]})
        self.assertIn('First', broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        self.assertIn('Second', broker.metadata)
        self.assertEqual(broker.metadata['Second'],
                         [second_value, second_timestamp])
        # Delete our second item (by setting to empty string)
        second_timestamp = normalize_timestamp(4)
        second_value = ''
        broker.update_metadata({'Second': [second_value, second_timestamp]})
        self.assertIn('First', broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        self.assertIn('Second', broker.metadata)
        self.assertEqual(broker.metadata['Second'],
                         [second_value, second_timestamp])
        # Reclaim at point before second item was deleted
        broker.reclaim(normalize_timestamp(3), normalize_timestamp(3))
        self.assertIn('First', broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        self.assertIn('Second', broker.metadata)
        self.assertEqual(broker.metadata['Second'],
                         [second_value, second_timestamp])
        # Reclaim at point second item was deleted
        broker.reclaim(normalize_timestamp(4), normalize_timestamp(4))
        self.assertIn('First', broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        self.assertIn('Second', broker.metadata)
        self.assertEqual(broker.metadata['Second'],
                         [second_value, second_timestamp])
        # Reclaim after point second item was deleted
        broker.reclaim(normalize_timestamp(5), normalize_timestamp(5))
        self.assertIn('First', broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        self.assertNotIn('Second', broker.metadata)
        # Delete first item (by setting to empty string)
        first_timestamp = normalize_timestamp(6)
        broker.update_metadata({'First': ['', first_timestamp]})
        self.assertIn('First', broker.metadata)
        # Check that sync_timestamp doesn't cause item to be reclaimed
        broker.reclaim(normalize_timestamp(5), normalize_timestamp(99))
        self.assertIn('First', broker.metadata)

    def test_update_metadata_missing_container_info(self):
        # Test missing container_info/container_stat row
        dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
        mkdirs(dbpath)
        qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'containers',
                             'db')
        copy(os.path.join(os.path.dirname(__file__),
                          'missing_container_info.db'),
             os.path.join(dbpath, '1.db'))

        broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
        broker.db_type = 'container'

        with self.assertRaises(sqlite3.DatabaseError) as raised:
            broker.update_metadata({'First': ['1', normalize_timestamp(1)]})
        self.assertEqual(
            str(raised.exception),
            'Quarantined %s to %s due to missing row in container_stat table' %
            (dbpath, qpath))

    def test_reclaim_missing_container_info(self):
        # Test missing container_info/container_stat row
        dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
        mkdirs(dbpath)
        qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'containers',
                             'db')
        copy(os.path.join(os.path.dirname(__file__),
                          'missing_container_info.db'),
             os.path.join(dbpath, '1.db'))

        broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
        broker.db_type = 'container'

        with self.assertRaises(sqlite3.DatabaseError) as raised, \
                broker.get() as conn:
            broker._reclaim_metadata(conn, 0)
        self.assertEqual(
            str(raised.exception),
            'Quarantined %s to %s due to missing row in container_stat table' %
            (dbpath, qpath))

    @patch.object(DatabaseBroker, 'validate_metadata')
    def test_validate_metadata_is_called_from_update_metadata(self, mock):
        broker = self.get_replication_info_tester(metadata=True)
        first_timestamp = normalize_timestamp(1)
        first_value = '1'
        metadata = {'First': [first_value, first_timestamp]}
        broker.update_metadata(metadata, validate_metadata=True)
        self.assertTrue(mock.called)

    @patch.object(DatabaseBroker, 'validate_metadata')
    def test_validate_metadata_is_not_called_from_update_metadata(self, mock):
        broker = self.get_replication_info_tester(metadata=True)
        first_timestamp = normalize_timestamp(1)
        first_value = '1'
        metadata = {'First': [first_value, first_timestamp]}
        broker.update_metadata(metadata)
        self.assertFalse(mock.called)

    def test_metadata_with_max_count(self):
        metadata = {}
        for c in range(MAX_META_COUNT):
            key = 'X-Account-Meta-F{0}'.format(c)
            metadata[key] = ('B', normalize_timestamp(1))
        key = 'X-Account-Meta-Foo'
        metadata[key] = ('', normalize_timestamp(1))
        self.assertIsNone(DatabaseBroker.validate_metadata(metadata))

    def test_metadata_raises_exception_on_non_utf8(self):
        def try_validate(metadata):
            with self.assertRaises(HTTPException) as raised:
                DatabaseBroker.validate_metadata(metadata)
            self.assertEqual(str(raised.exception), '400 Bad Request')
        ts = normalize_timestamp(1)
        try_validate({'X-Account-Meta-Foo': (b'\xff', ts)})
        try_validate({b'X-Container-Meta-\xff': ('bar', ts)})

    def test_metadata_raises_exception_over_max_count(self):
        metadata = {}
        for c in range(MAX_META_COUNT + 1):
            key = 'X-Account-Meta-F{0}'.format(c)
            metadata[key] = ('B', normalize_timestamp(1))
        message = ''
        try:
            DatabaseBroker.validate_metadata(metadata)
        except HTTPException as e:
            message = str(e)
        self.assertEqual(message, '400 Bad Request')

    def test_metadata_with_max_overall_size(self):
        metadata = {}
        metadata_value = 'v' * MAX_META_VALUE_LENGTH
        size = 0
        x = 0
        while size < (MAX_META_OVERALL_SIZE - 4
                      - MAX_META_VALUE_LENGTH):
            size += 4 + MAX_META_VALUE_LENGTH
            metadata['X-Account-Meta-%04d' % x] = (metadata_value,
                                                   normalize_timestamp(1))
            x += 1
        if MAX_META_OVERALL_SIZE - size > 1:
            metadata['X-Account-Meta-k'] = (
                'v' * (MAX_META_OVERALL_SIZE - size - 1),
                normalize_timestamp(1))
        self.assertIsNone(DatabaseBroker.validate_metadata(metadata))

    def test_metadata_raises_exception_over_max_overall_size(self):
        metadata = {}
        metadata_value = 'k' * MAX_META_VALUE_LENGTH
        size = 0
        x = 0
        while size < (MAX_META_OVERALL_SIZE - 4
                      - MAX_META_VALUE_LENGTH):
            size += 4 + MAX_META_VALUE_LENGTH
            metadata['X-Account-Meta-%04d' % x] = (metadata_value,
                                                   normalize_timestamp(1))
            x += 1
        if MAX_META_OVERALL_SIZE - size > 1:
            metadata['X-Account-Meta-k'] = (
                'v' * (MAX_META_OVERALL_SIZE - size - 1),
                normalize_timestamp(1))
        metadata['X-Account-Meta-k2'] = ('v', normalize_timestamp(1))
        message = ''
        try:
            DatabaseBroker.validate_metadata(metadata)
        except HTTPException as e:
            message = str(e)
        self.assertEqual(message, '400 Bad Request')

    def test_possibly_quarantine_db_errors(self):
        dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
        qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'tests', 'db')
        # Data is a list of Exceptions to be raised and expected values in the
        # log
        data = [
            (sqlite3.DatabaseError('database disk image is malformed'),
             'malformed'),
            (sqlite3.DatabaseError('malformed database schema'), 'malformed'),
            (sqlite3.DatabaseError('file is encrypted or is not a database'),
             'corrupted'),
            (sqlite3.OperationalError('disk I/O error'),
             'disk error while accessing')]

        for i, (ex, hint) in enumerate(data):
            mkdirs(dbpath)
            broker = DatabaseBroker(os.path.join(dbpath, '%d.db' % (i)))
            broker.db_type = 'test'
            try:
                raise ex
            except sqlite3.DatabaseError:
                with self.assertRaises(sqlite3.DatabaseError) as raised:
                    broker.possibly_quarantine(*sys.exc_info())
            self.assertEqual(
                str(raised.exception),
                'Quarantined %s to %s due to %s database' %
                (dbpath, qpath, hint))

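    # _skip_commit_puts() is expected to be true for in-memory brokers, for a
    # missing or uninitialized DB file, for a missing pending file, or when
    # skip_commits is set; only an on-disk broker with a pending file commits.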
    def test_skip_commits(self):
        broker = DatabaseBroker(':memory:')
        self.assertTrue(broker._skip_commit_puts())
        broker._initialize = MagicMock()
        broker.initialize(Timestamp.now())
        self.assertTrue(broker._skip_commit_puts())

        # not initialized
        db_file = os.path.join(self.testdir, '1.db')
        broker = DatabaseBroker(db_file)
        self.assertFalse(os.path.exists(broker.db_file))  # sanity check
        self.assertTrue(broker._skip_commit_puts())

        # no pending file
        broker._initialize = MagicMock()
        broker.initialize(Timestamp.now())
        self.assertTrue(os.path.exists(broker.db_file))  # sanity check
        self.assertFalse(os.path.exists(broker.pending_file))  # sanity check
        self.assertTrue(broker._skip_commit_puts())

        # pending file exists
        with open(broker.pending_file, 'wb'):
            pass
        self.assertTrue(os.path.exists(broker.pending_file))  # sanity check
        self.assertFalse(broker._skip_commit_puts())

        # skip_commits is True
        broker.skip_commits = True
        self.assertTrue(broker._skip_commit_puts())

        # re-init
        broker = DatabaseBroker(db_file)
        self.assertFalse(broker._skip_commit_puts())

        # constructor can override
        broker = DatabaseBroker(db_file, skip_commits=True)
        self.assertTrue(broker._skip_commit_puts())

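    # The pending file holds records as ':' + base64(pickle) chunks;
    # _commit_puts() should drain it through merge_items() and truncate it,
    # unless commits are not accepted (skip_commits).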
    def test_commit_puts(self):
        db_file = os.path.join(self.testdir, '1.db')
        broker = DatabaseBroker(db_file)
        broker._initialize = MagicMock()
        broker.initialize(Timestamp.now())
        with open(broker.pending_file, 'wb'):
            pass

        # merge given list
        with patch.object(broker, 'merge_items') as mock_merge_items:
            broker._commit_puts(['test'])
        mock_merge_items.assert_called_once_with(['test'])

        # load file and merge
        with open(broker.pending_file, 'wb') as fd:
            for v in (1, 2, 99):
                fd.write(b':' + base64.b64encode(pickle.dumps(
                    v, protocol=PICKLE_PROTOCOL)))
        with patch.object(broker, 'merge_items') as mock_merge_items:
            broker._commit_puts_load = lambda l, e: l.append(e)
            broker._commit_puts()
        mock_merge_items.assert_called_once_with([1, 2, 99])
        self.assertEqual(0, os.path.getsize(broker.pending_file))

        # load file and merge with given list
        with open(broker.pending_file, 'wb') as fd:
            fd.write(b':' + base64.b64encode(pickle.dumps(
                b'bad', protocol=PICKLE_PROTOCOL)))
        with patch.object(broker, 'merge_items') as mock_merge_items:
            broker._commit_puts_load = lambda l, e: l.append(e)
            broker._commit_puts([b'not'])
        mock_merge_items.assert_called_once_with([b'not', b'bad'])
        self.assertEqual(0, os.path.getsize(broker.pending_file))

        # load a pending entry that's caused trouble in py2/py3 upgrade tests
        # can't quite figure out how it got generated, though, so hard-code it
        with open(broker.pending_file, 'wb') as fd:
            fd.write(b':gAIoVS3olIngpILrjIvrjIvpkIngpIHlmIjlmIbjnIbgp'
                     b'IPjnITimIPvhI/rjI3tiI5xAVUQMTU1OTI0MTg0Ni40NjY'
                     b'wMXECVQEwVQEwVQEwSwBVATB0Lg==')
        with patch.object(broker, 'merge_items') as mock_merge_items:
            broker._commit_puts_load = lambda l, e: l.append(e)
            broker._commit_puts([])
        expected_name = (u'\u8509\u0902\ub30b\ub30b\u9409\u0901\u5608\u5606'
                         u'\u3706\u0903\u3704\u2603\uf10f\ub30d\ud20e')
        if six.PY2:
            expected_name = expected_name.encode('utf8')
        mock_merge_items.assert_called_once_with([
            (expected_name, '1559241846.46601', '0', '0', '0', 0, '0')])
        self.assertEqual(0, os.path.getsize(broker.pending_file))

        # skip_commits True - no merge
        db_file = os.path.join(self.testdir, '2.db')
        broker = DatabaseBroker(db_file, skip_commits=True)
        broker._initialize = MagicMock()
        broker.initialize(Timestamp.now())
        with open(broker.pending_file, 'wb') as fd:
            fd.write(b':ignored')
        with patch.object(broker, 'merge_items') as mock_merge_items:
            with self.assertRaises(DatabaseConnectionError) as cm:
                broker._commit_puts([b'hmmm'])
        mock_merge_items.assert_not_called()
        self.assertIn('commits not accepted', str(cm.exception))
        with open(broker.pending_file, 'rb') as fd:
            self.assertEqual(b':ignored', fd.read())

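    # put_record() is expected to append to the pending file without merging
    # until the file grows past PENDING_CAP, at which point the record goes
    # straight to _commit_puts(); brokers with skip_commits set should refuse
    # the record.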
    def test_put_record(self):
        db_file = os.path.join(self.testdir, '1.db')
        broker = DatabaseBroker(db_file)
        broker._initialize = MagicMock()
        broker.initialize(Timestamp.now())

        # pending file created and record written
        broker.make_tuple_for_pickle = lambda x: x.upper()
        with patch.object(broker, '_commit_puts') as mock_commit_puts:
            broker.put_record('pinky')
        mock_commit_puts.assert_not_called()
        with open(broker.pending_file, 'rb') as fd:
            pending = fd.read()
        items = pending.split(b':')
        self.assertEqual(['PINKY'],
                         [pickle.loads(base64.b64decode(i))
                          for i in items[1:]])

        # record appended
        with patch.object(broker, '_commit_puts') as mock_commit_puts:
            broker.put_record('perky')
        mock_commit_puts.assert_not_called()
        with open(broker.pending_file, 'rb') as fd:
            pending = fd.read()
        items = pending.split(b':')
        self.assertEqual(['PINKY', 'PERKY'],
                         [pickle.loads(base64.b64decode(i))
                          for i in items[1:]])

        # pending file above cap
        cap = swift.common.db.PENDING_CAP
        while os.path.getsize(broker.pending_file) < cap:
            with open(broker.pending_file, 'ab') as fd:
                fd.write(b'x' * 100000)
        with patch.object(broker, '_commit_puts') as mock_commit_puts:
            broker.put_record('direct')
        mock_commit_puts.assert_called_once_with(['direct'])

        # records shouldn't be put to brokers with skip_commits True because
        # they cannot be accepted if the pending file is full
        broker.skip_commits = True
        with open(broker.pending_file, 'wb'):
            # empty the pending file
            pass
        with patch.object(broker, '_commit_puts') as mock_commit_puts:
            with self.assertRaises(DatabaseConnectionError) as cm:
                broker.put_record('unwelcome')
        self.assertIn('commits not accepted', str(cm.exception))
        mock_commit_puts.assert_not_called()
        with open(broker.pending_file, 'rb') as fd:
            pending = fd.read()
        self.assertFalse(pending)


if __name__ == '__main__':
    unittest.main()