2012-03-19 13:45:34 -05:00
|
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
|
2010-07-12 17:03:45 -05:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
|
|
# implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
import unittest
|
|
|
|
from contextlib import contextmanager
|
|
|
|
import os
|
|
|
|
import logging
|
2011-04-18 15:00:59 -07:00
|
|
|
import errno
|
2013-04-04 18:45:24 +00:00
|
|
|
import math
|
|
|
|
from mock import patch
|
Db reclamation should remove empty suffix dirs
When a db is reclaimed it removes the hash dir the db files are in,
but it does not try to remove the parent suffix dir though it might
be empty now. This eventually leads to a bunch of empty suffix dirs
lying around. This patch fixes that by attempting to remove the
parent suffix dir after a hash dir reclamation.
Here's a quick script to see how bad a given drive might be:
import os, os.path, sys
if len(sys.argv) != 2:
sys.exit('%s <mount-point>' % sys.argv[0])
in_use = 0
empty = 0
containers = os.path.join(sys.argv[1], 'containers')
for p in os.listdir(containers):
partition = os.path.join(containers, p)
for s in os.listdir(partition):
suffix = os.path.join(partition, s)
if os.listdir(suffix):
in_use += 1
else:
empty += 1
print in_use, 'in use,', empty, 'empty,', '%.02f%%' % (
100.0 * empty / (in_use + empty)), 'empty'
And here's a quick script to clean up a drive:
NOTE THAT I HAVEN'T ACTUALLY RUN THIS ON A LIVE NODE YET!
import errno, os, os.path, sys
if len(sys.argv) != 2:
sys.exit('%s <mount-point>' % sys.argv[0])
containers = os.path.join(sys.argv[1], 'containers')
for p in os.listdir(containers):
partition = os.path.join(containers, p)
for s in os.listdir(partition):
suffix = os.path.join(partition, s)
try:
os.rmdir(suffix)
except OSError, err:
if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
print err
Change-Id: I2e6463a4cd40597fc236ebe3e73b4b31347f2309
2012-10-25 19:17:57 +00:00
|
|
|
from shutil import rmtree
|
2012-08-17 17:00:50 -07:00
|
|
|
from tempfile import mkdtemp, NamedTemporaryFile
|
2013-05-15 12:58:57 +04:00
|
|
|
import mock
|
|
|
|
import simplejson
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
from swift.common import db_replicator
|
2010-08-10 12:18:15 -07:00
|
|
|
from swift.common.utils import normalize_timestamp
|
2010-07-12 17:03:45 -05:00
|
|
|
from swift.container import server as container_server
|
2013-05-15 12:58:57 +04:00
|
|
|
from swift.common.exceptions import DriveNotMounted
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-08-17 17:00:50 -07:00
|
|
|
from test.unit import FakeLogger
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-06-03 23:50:05 +00:00
|
|
|
# Account/container names deliberately contain spaces so tests can verify
# that log messages URL-quote them (e.g. as 'a%20c%20t').
TEST_ACCOUNT_NAME = 'a c t'
TEST_CONTAINER_NAME = 'c o n'
|
|
|
|
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
def teardown_module():
    """Clean up module-level monkey patching.

    Tests in this module replace attributes on db_replicator (e.g.
    lock_parent_directory, ring); reloading restores the originals.
    """
    reload(db_replicator)
|
|
|
|
|
2012-08-17 17:00:50 -07:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
@contextmanager
def lock_parent_directory(filename):
    """No-op stand-in for db_replicator.lock_parent_directory.

    Performs no real locking; the context simply reports success.
    """
    yield True
|
|
|
|
|
2012-08-17 17:00:50 -07:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class FakeRing:
    """Ring double whose ring holds no devices at all."""

    class Ring:
        """Stand-in for swift.common.ring.Ring with an empty device list."""

        devs = []

        def __init__(self, path, reload_time=15, ring_name=None):
            # All constructor arguments are accepted and ignored.
            pass

        def get_part(self, account, container=None, obj=None):
            # Everything hashes to partition 0.
            return 0

        def get_part_nodes(self, part):
            # No devices means no primary nodes.
            return []

        def get_more_nodes(self, *args):
            # No handoff nodes either.
            return []
|
|
|
|
|
2012-08-17 17:00:50 -07:00
|
|
|
|
|
|
|
class FakeRingWithNodes:
    """Ring double with six identical-looking devices in zones 1-6."""

    class Ring:
        """Stand-in ring: devs 1-3 are primaries, devs 4-6 are handoffs."""

        # Six devices, one per zone, ip 1.1.1.<id>, all on port 6000/sdb.
        devs = [dict(id=i, weight=10.0, zone=i, ip='1.1.1.%d' % i,
                     port=6000, device='sdb', meta='')
                for i in range(1, 7)]

        def __init__(self, path, reload_time=15, ring_name=None):
            # All constructor arguments are accepted and ignored.
            pass

        def get_part(self, account, container=None, obj=None):
            # Everything hashes to partition 0.
            return 0

        def get_part_nodes(self, part):
            # First three devices act as the primary nodes.
            return self.devs[:3]

        def get_more_nodes(self, *args):
            # Remaining devices are handed out lazily as handoffs.
            return (d for d in self.devs[3:])
|
|
|
|
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class FakeProcess:
    """Test double for subprocess.Popen.

    Calling the instance records the Popen arguments and returns a fake
    process whose communicate() consumes the next item from *codes*:
    ints become the return code, exceptions are raised.
    """

    def __init__(self, *codes):
        # Sequence of return codes (ints) or exceptions, one per
        # communicate() call.
        self.codes = iter(codes)
        self.args = None
        self.kwargs = None

    def __call__(self, *args, **kwargs):
        """Record the Popen invocation and return a fake process object."""
        self.args = args
        self.kwargs = kwargs

        class Failure:
            def communicate(innerself):
                # next(iterator) works on Python 2.6+ and Python 3 alike;
                # the original .next() method call is Python-2-only and
                # also shadowed the builtin name 'next' locally.
                code = next(self.codes)
                if isinstance(code, int):
                    innerself.returncode = code
                    return code
                raise code
        return Failure()
|
|
|
|
|
2012-08-17 17:00:50 -07:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
@contextmanager
def _mock_process(*args):
    """Temporarily replace subprocess.Popen in db_replicator with FakeProcess.

    Yields the FakeProcess instance so tests can inspect the recorded
    Popen arguments.
    """
    orig_process = db_replicator.subprocess.Popen
    db_replicator.subprocess.Popen = FakeProcess(*args)
    try:
        yield db_replicator.subprocess.Popen
    finally:
        # Restore even if the with-body raises; the original version left
        # Popen monkey-patched for all subsequent tests on failure.
        db_replicator.subprocess.Popen = orig_process
|
|
|
|
|
2012-08-17 17:00:50 -07:00
|
|
|
|
2010-08-10 12:18:15 -07:00
|
|
|
class ReplHttp:
    """Fake replication HTTP connection.

    Records whether replicate() was invoked and returns a canned
    response carrying the configured body and status code.
    """

    # Class-level defaults; replicate() flips ``replicated`` per instance.
    replicated = False
    host = 'localhost'

    def __init__(self, response=None, set_status=200):
        # Canned body and HTTP status for the fake response.
        self.response = response
        self.set_status = set_status

    def replicate(self, *args):
        """Mark the connection as used and return a canned response."""
        self.replicated = True
        outer = self

        class Response:
            status = outer.set_status
            data = outer.response

            def read(inner):
                return outer.response
        return Response()
|
|
|
|
|
2012-08-17 17:00:50 -07:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class ChangingMtimesOs:
    """Callable substitute for os.path.getmtime returning 1, 2, 3, ...

    Each call yields a strictly larger mtime, simulating a file that is
    modified between checks.
    """

    def __init__(self):
        # Last mtime handed out; bumped before every return.
        self.mtime = 0

    def __call__(self, *args, **kwargs):
        # Arguments (the path) are irrelevant; only the counter matters.
        self.mtime += 1
        return self.mtime
|
|
|
|
|
2012-08-17 17:00:50 -07:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class FakeBroker:
    """Stub standing in for a container DatabaseBroker.

    Class attributes double as knobs the tests patch to steer behavior
    (get_repl_missing_table, stub_replication_info). Calls to merge_syncs
    and merge_items are recorded in self.args for later assertions.
    """

    db_file = __file__
    # When True, get_replication_info() raises to simulate a corrupt db.
    get_repl_missing_table = False
    # When set, returned verbatim by get_replication_info().
    stub_replication_info = None
    db_type = 'container'
    info = {'account': TEST_ACCOUNT_NAME, 'container': TEST_CONTAINER_NAME}

    def __init__(self, *args, **kwargs):
        self.locked = False
        return None

    @contextmanager
    def lock(self):
        """Pretend to take the broker lock, tracking state in self.locked."""
        self.locked = True
        try:
            yield True
        finally:
            # Always clear the flag, even when the with-body raises; the
            # original left self.locked stuck at True on an exception.
            self.locked = False

    def get_sync(self, *args, **kwargs):
        # Canned sync point.
        return 5

    def get_syncs(self):
        return []

    def get_items_since(self, point, *args):
        # One pending row when starting from the beginning; nothing after.
        if point == 0:
            return [{'ROWID': 1}]
        return []

    def merge_syncs(self, *args, **kwargs):
        # Record the call for later assertions.
        self.args = args

    def merge_items(self, *args):
        # Record the call for later assertions.
        self.args = args

    def get_replication_info(self):
        if self.get_repl_missing_table:
            raise Exception('no such table')
        if self.stub_replication_info:
            return self.stub_replication_info
        return {'delete_timestamp': 0, 'put_timestamp': 1, 'count': 0}

    def reclaim(self, item_timestamp, sync_timestamp):
        pass

    def get_info(self):
        return self.info
|
|
|
|
|
|
|
|
|
|
|
|
class FakeAccountBroker(FakeBroker):
    # Same stub behavior as FakeBroker, but presenting itself as an
    # account DB: no 'container' key in the info dict.
    db_type = 'account'
    info = {'account': TEST_ACCOUNT_NAME}
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
|
|
|
|
class TestReplicator(db_replicator.Replicator):
    # Concrete Replicator configured as a container replicator that uses
    # FakeBroker instead of a real database broker.
    server_type = 'container'
    ring_file = 'container.ring.gz'
    brokerclass = FakeBroker
    datadir = container_server.DATADIR
    default_port = 1000
|
|
|
|
|
2012-08-17 17:00:50 -07:00
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
class TestDBReplicator(unittest.TestCase):
|
2012-08-17 17:00:50 -07:00
|
|
|
    def setUp(self):
        """Install an empty fake ring and reset per-test bookkeeping."""
        db_replicator.ring = FakeRing()
        # Paths passed to stub_delete_db() accumulate here.
        self.delete_db_calls = []
        # Patchers started via _patch(); stopped in tearDown().
        self._patchers = []
|
|
|
|
|
|
|
|
def tearDown(self):
|
|
|
|
for patcher in self._patchers:
|
|
|
|
patcher.stop()
|
|
|
|
|
|
|
|
def _patch(self, patching_fn, *args, **kwargs):
|
|
|
|
patcher = patching_fn(*args, **kwargs)
|
|
|
|
patched_thing = patcher.start()
|
|
|
|
self._patchers.append(patcher)
|
|
|
|
return patched_thing
|
2012-08-17 17:00:50 -07:00
|
|
|
|
|
|
|
    def stub_delete_db(self, object_file):
        """Record the db path instead of actually deleting anything."""
        self.delete_db_calls.append(object_file)
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
    def test_repl_connection(self):
        """ReplConnection.replicate issues a REPLICATE request with a JSON
        body and returns None when the underlying request raises."""
        node = {'replication_ip': '127.0.0.1', 'replication_port': 80,
                'device': 'sdb1'}
        conn = db_replicator.ReplConnection(node, '1234567890', 'abcdefg',
                                            logging.getLogger())

        def req(method, path, body, headers):
            # Verify the request shape; the response itself is stubbed.
            self.assertEquals(method, 'REPLICATE')
            self.assertEquals(headers['Content-Type'], 'application/json')

        class Resp:
            def read(self):
                return 'data'
        resp = Resp()
        conn.request = req
        conn.getresponse = lambda *args: resp
        self.assertEquals(conn.replicate(1, 2, 3), resp)

        def other_req(method, path, body, headers):
            raise Exception('blah')
        conn.request = other_req
        # Any exception during the request is swallowed and None returned.
        self.assertEquals(conn.replicate(1, 2, 3), None)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
    def test_rsync_file(self):
        """_rsync_file maps the rsync exit code to a boolean result."""
        replicator = TestReplicator({})
        with _mock_process(-1):
            self.assertEquals(
                False,
                replicator._rsync_file('/some/file', 'remote:/some/file'))
        with _mock_process(0):
            self.assertEquals(
                True,
                replicator._rsync_file('/some/file', 'remote:/some/file'))
|
|
|
|
|
2013-04-04 18:45:24 +00:00
|
|
|
    def test_rsync_file_popen_args(self):
        """_rsync_file builds the expected rsync command line (with
        --whole-file by default)."""
        replicator = TestReplicator({})
        with _mock_process(0) as process:
            replicator._rsync_file('/some/file', 'remote:/some_file')
            exp_args = ([
                'rsync', '--quiet', '--no-motd',
                '--timeout=%s' % int(math.ceil(replicator.node_timeout)),
                '--contimeout=%s' % int(math.ceil(replicator.conn_timeout)),
                '--whole-file', '/some/file', 'remote:/some_file'],)
            self.assertEqual(exp_args, process.args)
|
|
|
|
|
|
|
|
    def test_rsync_file_popen_args_whole_file_false(self):
        """Passing whole_file=False omits rsync's --whole-file flag."""
        replicator = TestReplicator({})
        with _mock_process(0) as process:
            replicator._rsync_file('/some/file', 'remote:/some_file', False)
            exp_args = ([
                'rsync', '--quiet', '--no-motd',
                '--timeout=%s' % int(math.ceil(replicator.node_timeout)),
                '--contimeout=%s' % int(math.ceil(replicator.conn_timeout)),
                '/some/file', 'remote:/some_file'],)
            self.assertEqual(exp_args, process.args)
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
    def test_rsync_db(self):
        """_rsync_db runs cleanly when the underlying rsync succeeds."""
        replicator = TestReplicator({})
        replicator._rsync_file = lambda *args: True
        fake_device = {'replication_ip': '127.0.0.1', 'device': 'sda1'}
        replicator._rsync_db(FakeBroker(), fake_device, ReplHttp(), 'abcd')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2013-04-04 18:45:24 +00:00
|
|
|
    def test_rsync_db_rsync_file_call(self):
        """_rsync_db builds the remote rsync target through rsync_ip(),
        both in normal mode and in vm_test_mode (different module name)."""
        fake_device = {'ip': '127.0.0.1', 'port': '0',
                       'replication_ip': '127.0.0.1', 'replication_port': '0',
                       'device': 'sda1'}

        def mock_rsync_ip(ip):
            self.assertEquals(fake_device['ip'], ip)
            return 'rsync_ip(%s)' % ip

        class MyTestReplicator(TestReplicator):
            def __init__(self, db_file, remote_file):
                super(MyTestReplicator, self).__init__({})
                self.db_file = db_file
                self.remote_file = remote_file

            # NOTE: self_ is the replicator; self (closure) is the test case.
            def _rsync_file(self_, db_file, remote_file, whole_file=True):
                self.assertEqual(self_.db_file, db_file)
                self.assertEqual(self_.remote_file, remote_file)
                self_._rsync_file_called = True
                return False

        with patch('swift.common.db_replicator.rsync_ip', mock_rsync_ip):
            broker = FakeBroker()
            remote_file = 'rsync_ip(127.0.0.1)::container/sda1/tmp/abcd'
            replicator = MyTestReplicator(broker.db_file, remote_file)
            replicator._rsync_db(broker, fake_device, ReplHttp(), 'abcd')
            self.assert_(replicator._rsync_file_called)

        with patch('swift.common.db_replicator.rsync_ip', mock_rsync_ip):
            broker = FakeBroker()
            # vm_test_mode appends the replication port to the module name.
            remote_file = 'rsync_ip(127.0.0.1)::container0/sda1/tmp/abcd'
            replicator = MyTestReplicator(broker.db_file, remote_file)
            replicator.vm_test_mode = True
            replicator._rsync_db(broker, fake_device, ReplHttp(), 'abcd')
            self.assert_(replicator._rsync_file_called)
|
|
|
|
|
|
|
|
    def test_rsync_db_rsync_file_failure(self):
        """When the first _rsync_file() fails, _rsync_db gives up without
        retrying (no second, locked-broker sync pass)."""
        class MyTestReplicator(TestReplicator):
            def __init__(self):
                super(MyTestReplicator, self).__init__({})
                self._rsync_file_called = False

            def _rsync_file(self_, *args, **kwargs):
                self.assertEqual(
                    False, self_._rsync_file_called,
                    '_sync_file() should only be called once')
                self_._rsync_file_called = True
                return False

        with patch('os.path.exists', lambda *args: True):
            replicator = MyTestReplicator()
            fake_device = {'ip': '127.0.0.1', 'replication_ip': '127.0.0.1',
                           'device': 'sda1'}
            replicator._rsync_db(FakeBroker(), fake_device, ReplHttp(), 'abcd')
            self.assertEqual(True, replicator._rsync_file_called)
|
|
|
|
|
|
|
|
    def test_rsync_db_change_after_sync(self):
        """If the db changed after the first whole-file sync (journal file
        present, or newer mtime), _rsync_db does a second, incremental sync
        while holding the broker lock."""
        class MyTestReplicator(TestReplicator):
            def __init__(self, broker):
                super(MyTestReplicator, self).__init__({})
                self.broker = broker
                self._rsync_file_call_count = 0

            # NOTE: self_ is the replicator; self (closure) is the test case.
            def _rsync_file(self_, db_file, remote_file, whole_file=True):
                self_._rsync_file_call_count += 1
                if self_._rsync_file_call_count == 1:
                    # First pass: whole file, broker not yet locked.
                    self.assertEquals(True, whole_file)
                    self.assertEquals(False, self_.broker.locked)
                elif self_._rsync_file_call_count == 2:
                    # Second pass: incremental, under the broker lock.
                    self.assertEquals(False, whole_file)
                    self.assertEquals(True, self_.broker.locked)
                else:
                    raise RuntimeError('_rsync_file() called too many times')
                return True

        # with journal file
        with patch('os.path.exists', lambda *args: True):
            broker = FakeBroker()
            replicator = MyTestReplicator(broker)
            fake_device = {'ip': '127.0.0.1', 'replication_ip': '127.0.0.1',
                           'device': 'sda1'}
            replicator._rsync_db(broker, fake_device, ReplHttp(), 'abcd')
            self.assertEquals(2, replicator._rsync_file_call_count)

        # with new mtime
        with patch('os.path.exists', lambda *args: False):
            with patch('os.path.getmtime', ChangingMtimesOs()):
                broker = FakeBroker()
                replicator = MyTestReplicator(broker)
                fake_device = {'ip': '127.0.0.1',
                               'replication_ip': '127.0.0.1',
                               'device': 'sda1'}
                replicator._rsync_db(broker, fake_device, ReplHttp(), 'abcd')
                self.assertEquals(2, replicator._rsync_file_call_count)
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
    def test_in_sync(self):
        """_in_sync is true when hashes match or the remote is already at
        or beyond our max row; false when the hashes differ."""
        replicator = TestReplicator({})
        self.assertEquals(replicator._in_sync(
            {'id': 'a', 'point': 0, 'max_row': 0, 'hash': 'b'},
            {'id': 'a', 'point': -1, 'max_row': 0, 'hash': 'b'},
            FakeBroker(), -1), True)
        self.assertEquals(replicator._in_sync(
            {'id': 'a', 'point': -1, 'max_row': 0, 'hash': 'b'},
            {'id': 'a', 'point': -1, 'max_row': 10, 'hash': 'b'},
            FakeBroker(), -1), True)
        self.assertEquals(bool(replicator._in_sync(
            {'id': 'a', 'point': -1, 'max_row': 0, 'hash': 'c'},
            {'id': 'a', 'point': -1, 'max_row': 10, 'hash': 'd'},
            FakeBroker(), -1)), False)
|
|
|
|
|
2010-08-31 23:12:59 +00:00
|
|
|
    def test_run_once(self):
        """run_once() completes without error on an empty fake ring."""
        replicator = TestReplicator({})
        replicator.run_once()
|
2010-07-12 17:03:45 -05:00
|
|
|
|
|
|
|
    def test_usync(self):
        """_usync_db completes against a fake broker and connection."""
        fake_http = ReplHttp()
        replicator = TestReplicator({})
        replicator._usync_db(0, FakeBroker(), fake_http, '12345', '67890')
|
|
|
|
|
|
|
|
    def test_stats(self):
        """Exercise the stats reporting path for coverage."""
        # I'm not sure how to test that this logs the right thing,
        # but we can at least make sure it gets covered.
        replicator = TestReplicator({})
        replicator._zero_stats()
        replicator._report_stats()
|
|
|
|
    def test_replicate_object(self):
        """A db on its correct partition is not deleted by replication."""
        db_replicator.ring = FakeRingWithNodes()
        replicator = TestReplicator({})
        replicator.delete_db = self.stub_delete_db
        replicator._replicate_object('0', '/path/to/file', 'node_id')
        self.assertEquals([], self.delete_db_calls)
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2011-04-18 15:00:59 -07:00
|
|
|
    def test_replicate_object_quarantine(self):
        """A broker with a missing table is quarantined; a name collision
        in the quarantine dir gets a '-<suffix>' appended."""
        replicator = TestReplicator({})
        self._patch(patch.object, replicator.brokerclass, 'db_file',
                    '/a/b/c/d/e/hey')
        self._patch(patch.object, replicator.brokerclass,
                    'get_repl_missing_table', True)

        def mock_renamer(was, new, cause_colision=False):
            if cause_colision and '-' not in new:
                # Simulate an existing quarantine dir with the same name.
                raise OSError(errno.EEXIST, "File already exists")
            self.assertEquals('/a/b/c/d/e', was)
            if '-' in new:
                self.assert_(
                    new.startswith('/a/quarantined/containers/e-'))
            else:
                self.assertEquals('/a/quarantined/containers/e', new)

        def mock_renamer_error(was, new):
            return mock_renamer(was, new, cause_colision=True)
        with patch.object(db_replicator, 'renamer', mock_renamer):
            replicator._replicate_object('0', 'file', 'node_id')
        # try the double quarantine
        with patch.object(db_replicator, 'renamer', mock_renamer_error):
            replicator._replicate_object('0', 'file', 'node_id')
|
2010-07-12 17:03:45 -05:00
|
|
|
|
2012-08-17 17:00:50 -07:00
|
|
|
    def test_replicate_object_delete_because_deleted(self):
        """A db whose delete_timestamp exceeds its put_timestamp (and has
        no rows) is removed after replication."""
        replicator = TestReplicator({})
        try:
            replicator.delete_db = self.stub_delete_db
            replicator.brokerclass.stub_replication_info = {
                'delete_timestamp': 2, 'put_timestamp': 1, 'count': 0}
            replicator._replicate_object('0', '/path/to/file', 'node_id')
        finally:
            # Class-level stub must be cleared so other tests see defaults.
            replicator.brokerclass.stub_replication_info = None
        self.assertEquals(['/path/to/file'], self.delete_db_calls)
|
|
|
|
|
|
|
|
    def test_replicate_object_delete_because_not_shouldbehere(self):
        """A db on a node that is not in its partition's node set is
        replicated out and then removed locally."""
        replicator = TestReplicator({})
        replicator.delete_db = self.stub_delete_db
        # FakeRing has no nodes, so 'node_id' can never be a primary.
        replicator._replicate_object('0', '/path/to/file', 'node_id')
        self.assertEquals(['/path/to/file'], self.delete_db_calls)
|
|
|
|
|
2013-06-03 23:50:05 +00:00
|
|
|
    def test_replicate_account_out_of_place(self):
        """An account db found on the wrong partition is replicated out,
        removed, and an error naming the quoted account path is logged."""
        replicator = TestReplicator({})
        replicator.ring = FakeRingWithNodes().Ring('path')
        replicator.brokerclass = FakeAccountBroker
        replicator._repl_to_node = lambda *args: True
        replicator.delete_db = self.stub_delete_db
        replicator.logger = FakeLogger()
        # Correct node_id, wrong part
        part = replicator.ring.get_part(TEST_ACCOUNT_NAME) + 1
        node_id = replicator.ring.get_part_nodes(part)[0]['id']
        replicator._replicate_object(str(part), '/path/to/file', node_id)
        self.assertEqual(['/path/to/file'], self.delete_db_calls)
        self.assertEqual(
            replicator.logger.log_dict['error'],
            [(('Found /path/to/file for /a%20c%20t when it should be on '
               'partition 0; will replicate out and remove.',), {})])
|
|
|
|
|
|
|
|
    def test_replicate_container_out_of_place(self):
        """A container db found on the wrong partition is replicated out,
        removed, and an error naming the quoted a/c path is logged."""
        replicator = TestReplicator({})
        replicator.ring = FakeRingWithNodes().Ring('path')
        replicator._repl_to_node = lambda *args: True
        replicator.delete_db = self.stub_delete_db
        replicator.logger = FakeLogger()
        # Correct node_id, wrong part
        part = replicator.ring.get_part(
            TEST_ACCOUNT_NAME, TEST_CONTAINER_NAME) + 1
        node_id = replicator.ring.get_part_nodes(part)[0]['id']
        replicator._replicate_object(str(part), '/path/to/file', node_id)
        self.assertEqual(['/path/to/file'], self.delete_db_calls)
        self.assertEqual(
            replicator.logger.log_dict['error'],
            [(('Found /path/to/file for /a%20c%20t/c%20o%20n when it should '
               'be on partition 0; will replicate out and remove.',), {})])
|
|
|
|
|
2013-05-15 12:58:57 +04:00
|
|
|
|
2012-08-17 17:00:50 -07:00
|
|
|
    def test_delete_db(self):
        """delete_db removes the db's hash dir, and removes the parent
        suffix dir too once it becomes empty; stats/metrics are bumped."""
        db_replicator.lock_parent_directory = lock_parent_directory
        replicator = TestReplicator({})
        replicator._zero_stats()
        replicator.extract_device = lambda _: 'some_device'
        replicator.logger = FakeLogger()

        temp_dir = mkdtemp()
        try:
            # Layout: <tmp>/16e/{166e...e16e, 266e...e16e}/<db file>
            temp_suf_dir = os.path.join(temp_dir, '16e')
            os.mkdir(temp_suf_dir)
            temp_hash_dir = os.path.join(temp_suf_dir,
                                         '166e33924a08ede4204871468c11e16e')
            os.mkdir(temp_hash_dir)
            temp_file = NamedTemporaryFile(dir=temp_hash_dir, delete=False)
            temp_hash_dir2 = os.path.join(temp_suf_dir,
                                          '266e33924a08ede4204871468c11e16e')
            os.mkdir(temp_hash_dir2)
            temp_file2 = NamedTemporaryFile(dir=temp_hash_dir2, delete=False)

            # sanity-checks
            self.assertTrue(os.path.exists(temp_dir))
            self.assertTrue(os.path.exists(temp_suf_dir))
            self.assertTrue(os.path.exists(temp_hash_dir))
            self.assertTrue(os.path.exists(temp_file.name))
            self.assertTrue(os.path.exists(temp_hash_dir2))
            self.assertTrue(os.path.exists(temp_file2.name))
            self.assertEqual(0, replicator.stats['remove'])

            replicator.delete_db(temp_file.name)

            # First delete: hash dir gone, but suffix dir still has the
            # sibling hash dir so it must remain.
            self.assertTrue(os.path.exists(temp_dir))
            self.assertTrue(os.path.exists(temp_suf_dir))
            self.assertFalse(os.path.exists(temp_hash_dir))
            self.assertFalse(os.path.exists(temp_file.name))
            self.assertTrue(os.path.exists(temp_hash_dir2))
            self.assertTrue(os.path.exists(temp_file2.name))
            self.assertEqual([(('removes.some_device',), {})],
                             replicator.logger.log_dict['increment'])
            self.assertEqual(1, replicator.stats['remove'])

            replicator.delete_db(temp_file2.name)

            # Second delete: suffix dir is now empty and is removed too.
            self.assertTrue(os.path.exists(temp_dir))
            self.assertFalse(os.path.exists(temp_suf_dir))
            self.assertFalse(os.path.exists(temp_hash_dir))
            self.assertFalse(os.path.exists(temp_file.name))
            self.assertFalse(os.path.exists(temp_hash_dir2))
            self.assertFalse(os.path.exists(temp_file2.name))
            self.assertEqual([(('removes.some_device',), {})] * 2,
                             replicator.logger.log_dict['increment'])
            self.assertEqual(2, replicator.stats['remove'])
        finally:
            rmtree(temp_dir)
|
2012-08-17 17:00:50 -07:00
|
|
|
|
|
|
|
    def test_extract_device(self):
        """extract_device returns the path component after the devices
        root."""
        replicator = TestReplicator({'devices': '/some/root'})
        self.assertEqual('some_device', replicator.extract_device(
            '/some/root/some_device/deeper/and/deeper'))
|
|
|
|
|
2010-07-12 17:03:45 -05:00
|
|
|
# def test_dispatch(self):
|
|
|
|
# rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
|
|
|
# no_op = lambda *args, **kwargs: True
|
|
|
|
# self.assertEquals(rpc.dispatch(('drv', 'part', 'hash'), ('op',)
|
|
|
|
# ).status_int, 400)
|
|
|
|
# rpc.mount_check = True
|
|
|
|
# self.assertEquals(rpc.dispatch(('drv', 'part', 'hash'), ['op',]
|
|
|
|
# ).status_int, 507)
|
|
|
|
# rpc.mount_check = False
|
2012-08-31 11:24:46 +08:00
|
|
|
# rpc.rsync_then_merge = lambda drive, db_file,
|
|
|
|
# args: self.assertEquals(args, ['test1'])
|
|
|
|
# rpc.complete_rsync = lambda drive, db_file,
|
|
|
|
# args: self.assertEquals(args, ['test2'])
|
2010-07-12 17:03:45 -05:00
|
|
|
# rpc.dispatch(('drv', 'part', 'hash'), ['rsync_then_merge','test1'])
|
|
|
|
# rpc.dispatch(('drv', 'part', 'hash'), ['complete_rsync','test2'])
|
|
|
|
# rpc.dispatch(('drv', 'part', 'hash'), ['other_op',])
|
|
|
|
|
|
|
|
    def test_rsync_then_merge(self):
        """rsync_then_merge completes against the fake broker."""
        rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
        rpc.rsync_then_merge('sda1', '/srv/swift/blah', ('a', 'b'))
|
|
|
|
|
|
|
|
    def test_merge_items(self):
        """merge_items forwards its args verbatim to the broker."""
        rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
        fake_broker = FakeBroker()
        args = ('a', 'b')
        rpc.merge_items(fake_broker, args)
        self.assertEquals(fake_broker.args, args)
|
|
|
|
|
|
|
|
    def test_merge_syncs(self):
        """merge_syncs forwards only the sync list to the broker."""
        rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
        fake_broker = FakeBroker()
        args = ('a', 'b')
        rpc.merge_syncs(fake_broker, args)
        # The broker only receives the first element (the syncs).
        self.assertEquals(fake_broker.args, (args[0],))
|
|
|
|
|
2012-11-14 02:53:14 +00:00
|
|
|
def test_roundrobin_datadirs(self):
|
|
|
|
listdir_calls = []
|
|
|
|
isdir_calls = []
|
|
|
|
exists_calls = []
|
|
|
|
shuffle_calls = []
|
|
|
|
|
|
|
|
def _listdir(path):
|
|
|
|
listdir_calls.append(path)
|
|
|
|
if not path.startswith('/srv/node/sda/containers') and \
|
|
|
|
not path.startswith('/srv/node/sdb/containers'):
|
|
|
|
return []
|
|
|
|
path = path[len('/srv/node/sdx/containers'):]
|
|
|
|
if path == '':
|
|
|
|
return ['123', '456', '789'] # 456 will pretend to be a file
|
|
|
|
elif path == '/123':
|
|
|
|
return ['abc', 'def.db'] # def.db will pretend to be a file
|
|
|
|
elif path == '/123/abc':
|
|
|
|
# 11111111111111111111111111111abc will pretend to be a file
|
|
|
|
return ['00000000000000000000000000000abc',
|
|
|
|
'11111111111111111111111111111abc']
|
|
|
|
elif path == '/123/abc/00000000000000000000000000000abc':
|
|
|
|
return ['00000000000000000000000000000abc.db',
|
|
|
|
# This other.db isn't in the right place, so should be
|
|
|
|
# ignored later.
|
|
|
|
'000000000000000000000000000other.db',
|
|
|
|
'weird1'] # weird1 will pretend to be a dir, if asked
|
|
|
|
elif path == '/789':
|
|
|
|
return ['ghi', 'jkl'] # jkl will pretend to be a file
|
|
|
|
elif path == '/789/ghi':
|
|
|
|
# 33333333333333333333333333333ghi will pretend to be a file
|
|
|
|
return ['22222222222222222222222222222ghi',
|
|
|
|
'33333333333333333333333333333ghi']
|
|
|
|
elif path == '/789/ghi/22222222222222222222222222222ghi':
|
|
|
|
return ['22222222222222222222222222222ghi.db',
|
|
|
|
'weird2'] # weird2 will pretend to be a dir, if asked
|
|
|
|
return []
|
|
|
|
|
|
|
|
def _isdir(path):
|
|
|
|
isdir_calls.append(path)
|
|
|
|
if not path.startswith('/srv/node/sda/containers') and \
|
|
|
|
not path.startswith('/srv/node/sdb/containers'):
|
|
|
|
return False
|
|
|
|
path = path[len('/srv/node/sdx/containers'):]
|
|
|
|
if path in ('/123', '/123/abc',
|
|
|
|
'/123/abc/00000000000000000000000000000abc',
|
|
|
|
'/123/abc/00000000000000000000000000000abc/weird1',
|
|
|
|
'/789', '/789/ghi',
|
|
|
|
'/789/ghi/22222222222222222222222222222ghi',
|
|
|
|
'/789/ghi/22222222222222222222222222222ghi/weird2'):
|
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
|
|
def _exists(arg):
|
|
|
|
exists_calls.append(arg)
|
|
|
|
return True
|
|
|
|
|
|
|
|
def _shuffle(arg):
|
|
|
|
shuffle_calls.append(arg)
|
|
|
|
|
|
|
|
orig_listdir = db_replicator.os.listdir
|
|
|
|
orig_isdir = db_replicator.os.path.isdir
|
|
|
|
orig_exists = db_replicator.os.path.exists
|
|
|
|
orig_shuffle = db_replicator.random.shuffle
|
|
|
|
try:
|
|
|
|
db_replicator.os.listdir = _listdir
|
|
|
|
db_replicator.os.path.isdir = _isdir
|
|
|
|
db_replicator.os.path.exists = _exists
|
|
|
|
db_replicator.random.shuffle = _shuffle
|
|
|
|
datadirs = [('/srv/node/sda/containers', 1),
|
|
|
|
('/srv/node/sdb/containers', 2)]
|
|
|
|
results = list(db_replicator.roundrobin_datadirs(datadirs))
|
|
|
|
# The results show that the .db files are returned, the devices
|
|
|
|
# interleaved.
|
|
|
|
self.assertEquals(results, [
|
|
|
|
('123', '/srv/node/sda/containers/123/abc/'
|
|
|
|
'00000000000000000000000000000abc/'
|
|
|
|
'00000000000000000000000000000abc.db', 1),
|
|
|
|
('123', '/srv/node/sdb/containers/123/abc/'
|
|
|
|
'00000000000000000000000000000abc/'
|
|
|
|
'00000000000000000000000000000abc.db', 2),
|
|
|
|
('789', '/srv/node/sda/containers/789/ghi/'
|
|
|
|
'22222222222222222222222222222ghi/'
|
|
|
|
'22222222222222222222222222222ghi.db', 1),
|
|
|
|
('789', '/srv/node/sdb/containers/789/ghi/'
|
|
|
|
'22222222222222222222222222222ghi/'
|
|
|
|
'22222222222222222222222222222ghi.db', 2)])
|
|
|
|
# The listdir calls show that we only listdir the dirs
|
|
|
|
self.assertEquals(listdir_calls, [
|
|
|
|
'/srv/node/sda/containers',
|
|
|
|
'/srv/node/sda/containers/123',
|
|
|
|
'/srv/node/sda/containers/123/abc',
|
|
|
|
'/srv/node/sdb/containers',
|
|
|
|
'/srv/node/sdb/containers/123',
|
|
|
|
'/srv/node/sdb/containers/123/abc',
|
|
|
|
'/srv/node/sda/containers/789',
|
|
|
|
'/srv/node/sda/containers/789/ghi',
|
|
|
|
'/srv/node/sdb/containers/789',
|
|
|
|
'/srv/node/sdb/containers/789/ghi'])
|
|
|
|
# The isdir calls show that we did ask about the things pretending
|
|
|
|
# to be files at various levels.
|
|
|
|
self.assertEquals(isdir_calls, [
|
|
|
|
'/srv/node/sda/containers/123',
|
|
|
|
'/srv/node/sda/containers/123/abc',
|
|
|
|
('/srv/node/sda/containers/123/abc/'
|
|
|
|
'00000000000000000000000000000abc'),
|
|
|
|
'/srv/node/sdb/containers/123',
|
|
|
|
'/srv/node/sdb/containers/123/abc',
|
|
|
|
('/srv/node/sdb/containers/123/abc/'
|
|
|
|
'00000000000000000000000000000abc'),
|
|
|
|
('/srv/node/sda/containers/123/abc/'
|
|
|
|
'11111111111111111111111111111abc'),
|
|
|
|
'/srv/node/sda/containers/123/def.db',
|
|
|
|
'/srv/node/sda/containers/456',
|
|
|
|
'/srv/node/sda/containers/789',
|
|
|
|
'/srv/node/sda/containers/789/ghi',
|
|
|
|
('/srv/node/sda/containers/789/ghi/'
|
|
|
|
'22222222222222222222222222222ghi'),
|
|
|
|
('/srv/node/sdb/containers/123/abc/'
|
|
|
|
'11111111111111111111111111111abc'),
|
|
|
|
'/srv/node/sdb/containers/123/def.db',
|
|
|
|
'/srv/node/sdb/containers/456',
|
|
|
|
'/srv/node/sdb/containers/789',
|
|
|
|
'/srv/node/sdb/containers/789/ghi',
|
|
|
|
('/srv/node/sdb/containers/789/ghi/'
|
|
|
|
'22222222222222222222222222222ghi'),
|
|
|
|
('/srv/node/sda/containers/789/ghi/'
|
|
|
|
'33333333333333333333333333333ghi'),
|
|
|
|
'/srv/node/sda/containers/789/jkl',
|
|
|
|
('/srv/node/sdb/containers/789/ghi/'
|
|
|
|
'33333333333333333333333333333ghi'),
|
|
|
|
'/srv/node/sdb/containers/789/jkl'])
|
|
|
|
# The exists calls are the .db files we looked for as we walked the
|
|
|
|
# structure.
|
|
|
|
self.assertEquals(exists_calls, [
|
|
|
|
('/srv/node/sda/containers/123/abc/'
|
|
|
|
'00000000000000000000000000000abc/'
|
|
|
|
'00000000000000000000000000000abc.db'),
|
|
|
|
('/srv/node/sdb/containers/123/abc/'
|
|
|
|
'00000000000000000000000000000abc/'
|
|
|
|
'00000000000000000000000000000abc.db'),
|
|
|
|
('/srv/node/sda/containers/789/ghi/'
|
|
|
|
'22222222222222222222222222222ghi/'
|
|
|
|
'22222222222222222222222222222ghi.db'),
|
|
|
|
('/srv/node/sdb/containers/789/ghi/'
|
|
|
|
'22222222222222222222222222222ghi/'
|
|
|
|
'22222222222222222222222222222ghi.db')])
|
|
|
|
# Shows that we called shuffle twice, once for each device.
|
|
|
|
self.assertEquals(
|
|
|
|
shuffle_calls, [['123', '456', '789'], ['123', '456', '789']])
|
|
|
|
finally:
|
|
|
|
db_replicator.os.listdir = orig_listdir
|
|
|
|
db_replicator.os.path.isdir = orig_isdir
|
|
|
|
db_replicator.os.path.exists = orig_exists
|
|
|
|
db_replicator.random.shuffle = orig_shuffle
|
|
|
|
|
|
|
|
@mock.patch("swift.common.db_replicator.ReplConnection", mock.Mock())
|
|
|
|
def test_http_connect(self):
|
|
|
|
node = "node"
|
|
|
|
partition = "partition"
|
|
|
|
db_file = __file__
|
|
|
|
replicator = TestReplicator({})
|
|
|
|
replicator._http_connect(node, partition, db_file)
|
|
|
|
db_replicator.ReplConnection.assert_has_calls(
|
|
|
|
mock.call(node, partition,
|
|
|
|
os.path.basename(db_file).split('.', 1)[0],
|
|
|
|
replicator.logger))
|
|
|
|
|
|
|
|
|
|
|
|
class TestReplToNode(unittest.TestCase):
    """Exercise Replicator._repl_to_node: the usync/rsync decision, the
    already-in-sync short cut, and the HTTP failure/error paths."""

    def setUp(self):
        db_replicator.ring = FakeRing()
        self.delete_db_calls = []
        self.broker = FakeBroker()
        self.replicator = TestReplicator({})
        self.fake_node = {'ip': '127.0.0.1', 'device': 'sda1', 'port': 1000}
        # Local view of the db: 10 rows, hash 'b'.
        self.fake_info = {'id': 'a', 'point': -1, 'max_row': 10, 'hash': 'b',
                          'created_at': 100, 'put_timestamp': 0,
                          'delete_timestamp': 0, 'count': 0,
                          'metadata': {'Test': ('Value',
                                                normalize_timestamp(1))}}
        # PEP8 fix: "logger= " -> "logger = ".
        self.replicator.logger = mock.Mock()
        self.replicator._rsync_db = mock.Mock(return_value=True)
        self.replicator._usync_db = mock.Mock(return_value=True)
        self.http = ReplHttp('{"id": 3, "point": -1}')
        # The lambda reads self.http at call time, so each test can install
        # its own canned remote response before invoking _repl_to_node.
        self.replicator._http_connect = lambda *args: self.http

    def test_repl_to_node_usync_success(self):
        # With this remote info _repl_to_node is expected to usync from the
        # later of the remote point and the locally recorded sync point.
        rinfo = {"id": 3, "point": -1, "max_row": 5, "hash": "c"}
        self.http = ReplHttp(simplejson.dumps(rinfo))
        local_sync = self.broker.get_sync()
        self.assertEqual(self.replicator._repl_to_node(
            self.fake_node, self.broker, '0', self.fake_info), True)
        self.replicator._usync_db.assert_has_calls([
            mock.call(max(rinfo['point'], local_sync), self.broker,
                      self.http, rinfo['id'], self.fake_info['id'])
        ])

    def test_repl_to_node_rsync_success(self):
        # With this remote info _repl_to_node is expected to fall back to a
        # full rsync_then_merge and bump the remote_merges stat.
        rinfo = {"id": 3, "point": -1, "max_row": 4, "hash": "c"}
        self.http = ReplHttp(simplejson.dumps(rinfo))
        self.assertEqual(self.replicator._repl_to_node(
            self.fake_node, self.broker, '0', self.fake_info), True)
        self.replicator.logger.increment.assert_has_calls([
            mock.call.increment('remote_merges')
        ])
        self.replicator._rsync_db.assert_has_calls([
            mock.call(self.broker, self.fake_node, self.http,
                      self.fake_info['id'],
                      replicate_method='rsync_then_merge',
                      replicate_timeout=(self.fake_info['count'] / 2000))
        ])

    def test_repl_to_node_already_in_sync(self):
        # Remote reports the same max_row and hash as our fake_info, so
        # neither rsync nor usync should happen.
        rinfo = {"id": 3, "point": -1, "max_row": 10, "hash": "b"}
        self.http = ReplHttp(simplejson.dumps(rinfo))
        self.assertEqual(self.replicator._repl_to_node(
            self.fake_node, self.broker, '0', self.fake_info), True)
        self.assertEqual(self.replicator._rsync_db.call_count, 0)
        self.assertEqual(self.replicator._usync_db.call_count, 0)

    def test_repl_to_node_not_found(self):
        # A 404 from the remote: ship the whole db over with a plain rsync
        # and bump the rsyncs stat.
        self.http = ReplHttp('{"id": 3, "point": -1}', set_status=404)
        self.assertEqual(self.replicator._repl_to_node(
            self.fake_node, self.broker, '0', self.fake_info), True)
        self.replicator.logger.increment.assert_has_calls([
            mock.call.increment('rsyncs')
        ])
        self.replicator._rsync_db.assert_has_calls([
            mock.call(self.broker, self.fake_node, self.http,
                      self.fake_info['id'])
        ])

    def test_repl_to_node_drive_not_mounted(self):
        # A 507 response surfaces as DriveNotMounted.
        self.http = ReplHttp('{"id": 3, "point": -1}', set_status=507)
        self.assertRaises(DriveNotMounted, self.replicator._repl_to_node,
                          self.fake_node, FakeBroker(), '0', self.fake_info)

    def test_repl_to_node_300_status(self):
        # An unexpected status (300) makes _repl_to_node return None.
        self.http = ReplHttp('{"id": 3, "point": -1}', set_status=300)
        self.assertEqual(self.replicator._repl_to_node(
            self.fake_node, FakeBroker(), '0', self.fake_info), None)

    def test_repl_to_node_http_connect_fails(self):
        # No connection at all -> False.
        self.replicator._http_connect = lambda *args: None
        self.assertEqual(self.replicator._repl_to_node(
            self.fake_node, FakeBroker(), '0', self.fake_info), False)

    def test_repl_to_node_not_response(self):
        # A connection whose replicate() yields no response -> False.
        self.http = mock.Mock(replicate=mock.Mock(return_value=None))
        self.assertEqual(self.replicator._repl_to_node(
            self.fake_node, FakeBroker(), '0', self.fake_info), False)
|
|
|
|
|
|
|
|
# Allow this test module to be run directly with the stdlib test runner.
if __name__ == '__main__':
    unittest.main()
|