# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
from contextlib import nested
from textwrap import dedent

import mock

from test.unit import debug_logger
from swift.container import sync
from swift.common import utils
from swift.common.wsgi import ConfigString
from swift.common.exceptions import ClientException
from swift.common.storage_policy import StoragePolicy
import test
from test.unit import patch_policies, with_tempdir

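# Pin the hash path suffix and prefix so any path hashing the tests
# trigger is deterministic.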
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'endcap'


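# Minimal ring stand-in: three devices, with get_nodes() always returning
# partition 1 and every device.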
class FakeRing(object):

    def __init__(self):
        self.devs = [{'ip': '10.0.0.%s' % x, 'port': 1000 + x, 'device': 'sda'}
                     for x in xrange(3)]

    def get_nodes(self, account, container=None, obj=None):
        return 1, list(self.devs)


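# Stand-in for the container DB broker: serves canned info, metadata and
# rows, and records the sync points handed to it.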
class FakeContainerBroker(object):

    def __init__(self, path, metadata=None, info=None, deleted=False,
                 items_since=None):
        self.db_file = path
        self.metadata = metadata if metadata else {}
        self.info = info if info else {}
        self.deleted = deleted
        self.items_since = items_since if items_since else []
        self.sync_point1 = -1
        self.sync_point2 = -1

    def get_info(self):
        return self.info

    def is_deleted(self):
        return self.deleted

    def get_items_since(self, sync_point, limit):
        if sync_point < 0:
            sync_point = 0
        return self.items_since[sync_point:sync_point + limit]

    def set_x_container_sync_points(self, sync_point1, sync_point2):
        self.sync_point1 = sync_point1
        self.sync_point2 = sync_point2


@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestContainerSync(unittest.TestCase):

    def setUp(self):
        self.logger = debug_logger('test-container-sync')

    def test_FileLikeIter(self):
        # Retained test to show new FileLikeIter acts just like the removed
        # _Iter2FileLikeObject did.
        flo = sync.FileLikeIter(iter(['123', '4567', '89', '0']))
        expect = '1234567890'

        got = flo.read(2)
        self.assertTrue(len(got) <= 2)
        self.assertEquals(got, expect[:len(got)])
        expect = expect[len(got):]

        got = flo.read(5)
        self.assertTrue(len(got) <= 5)
        self.assertEquals(got, expect[:len(got)])
        expect = expect[len(got):]

        self.assertEquals(flo.read(), expect)
        self.assertEquals(flo.read(), '')
        self.assertEquals(flo.read(2), '')

        flo = sync.FileLikeIter(iter(['123', '4567', '89', '0']))
        self.assertEquals(flo.read(), '1234567890')
        self.assertEquals(flo.read(), '')
        self.assertEquals(flo.read(2), '')

    def assertLogMessage(self, msg_level, expected, skip=0):
        for line in self.logger.get_lines_for_level(msg_level)[skip:]:
            msg = 'expected %r not in %r' % (expected, line)
            self.assertTrue(expected in line, msg)

    @with_tempdir
    def test_init(self, tempdir):
        ic_conf_path = os.path.join(tempdir, 'internal-client.conf')
        cring = FakeRing()

        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        self.assertTrue(cs.container_ring is cring)

        # specified but not exists will not start
        conf = {'internal_client_conf_path': ic_conf_path}
        self.assertRaises(SystemExit, sync.ContainerSync, conf,
                          container_ring=cring, logger=self.logger)

        # not specified will use default conf
        with mock.patch('swift.container.sync.InternalClient') as mock_ic:
            cs = sync.ContainerSync({}, container_ring=cring,
                                    logger=self.logger)
        self.assertTrue(cs.container_ring is cring)
        self.assertTrue(mock_ic.called)
        conf_path, name, retry = mock_ic.call_args[0]
        self.assertTrue(isinstance(conf_path, ConfigString))
        self.assertEquals(conf_path.contents.getvalue(),
                          dedent(sync.ic_conf_body))
        self.assertLogMessage('warning', 'internal_client_conf_path')
        self.assertLogMessage('warning', 'internal-client.conf-sample')

        # correct
        contents = dedent(sync.ic_conf_body)
        with open(ic_conf_path, 'w') as f:
            f.write(contents)
        with mock.patch('swift.container.sync.InternalClient') as mock_ic:
            cs = sync.ContainerSync(conf, container_ring=cring)
        self.assertTrue(cs.container_ring is cring)
        self.assertTrue(mock_ic.called)
        conf_path, name, retry = mock_ic.call_args[0]
        self.assertEquals(conf_path, ic_conf_path)

        sample_conf_filename = os.path.join(
            os.path.dirname(test.__file__),
            '../etc/internal-client.conf-sample')
        with open(sample_conf_filename) as sample_conf_file:
            sample_conf = sample_conf_file.read()
        self.assertEqual(contents, sample_conf)

    def test_run_forever(self):
        # This runs run_forever with fakes to succeed for two loops, the first
        # causing a report but no interval sleep, the second no report but an
        # interval sleep.
        time_calls = [0]
        sleep_calls = []
        audit_location_generator_calls = [0]

        def fake_time():
            time_calls[0] += 1
            returns = [1,     # Initialized reported time
                       1,     # Start time
                       3602,  # Is it report time (yes)
                       3602,  # Report time
                       3602,  # Elapsed time for "under interval" (no)
                       3602,  # Start time
                       3603,  # Is it report time (no)
                       3603]  # Elapsed time for "under interval" (yes)
            if time_calls[0] == len(returns) + 1:
                raise Exception('we are now done')
            return returns[time_calls[0] - 1]

        def fake_sleep(amount):
            sleep_calls.append(amount)

        def fake_audit_location_generator(*args, **kwargs):
            audit_location_generator_calls[0] += 1
            # Makes .container_sync() short-circuit
            yield 'container.db', 'device', 'partition'
            return

        orig_time = sync.time
        orig_sleep = sync.sleep
        orig_ContainerBroker = sync.ContainerBroker
        orig_audit_location_generator = sync.audit_location_generator
        try:
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0})
            sync.time = fake_time
            sync.sleep = fake_sleep

            with mock.patch('swift.container.sync.InternalClient'):
                cs = sync.ContainerSync({}, container_ring=FakeRing())
            sync.audit_location_generator = fake_audit_location_generator
            cs.run_forever(1, 2, a=3, b=4, verbose=True)
        except Exception as err:
            if str(err) != 'we are now done':
                raise
        finally:
            sync.time = orig_time
            sync.sleep = orig_sleep
            sync.audit_location_generator = orig_audit_location_generator
            sync.ContainerBroker = orig_ContainerBroker

        self.assertEquals(time_calls, [9])
        self.assertEquals(len(sleep_calls), 2)
        self.assertTrue(sleep_calls[0] <= cs.interval)
        self.assertTrue(sleep_calls[1] == cs.interval - 1)
        self.assertEquals(audit_location_generator_calls, [2])
        self.assertEquals(cs.reported, 3602)

    def test_run_once(self):
        # This runs run_once with fakes twice, the first causing an interim
        # report, the second with no interim report.
        time_calls = [0]
        audit_location_generator_calls = [0]

        def fake_time():
            time_calls[0] += 1
            returns = [1,     # Initialized reported time
                       1,     # Start time
                       3602,  # Is it report time (yes)
                       3602,  # Report time
                       3602,  # End report time
                       3602,  # For elapsed
                       3602,  # Start time
                       3603,  # Is it report time (no)
                       3604,  # End report time
                       3605]  # For elapsed
            if time_calls[0] == len(returns) + 1:
                raise Exception('we are now done')
            return returns[time_calls[0] - 1]

        def fake_audit_location_generator(*args, **kwargs):
            audit_location_generator_calls[0] += 1
            # Makes .container_sync() short-circuit
            yield 'container.db', 'device', 'partition'
            return

        orig_time = sync.time
        orig_audit_location_generator = sync.audit_location_generator
        orig_ContainerBroker = sync.ContainerBroker
        try:
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0})
            sync.time = fake_time

            with mock.patch('swift.container.sync.InternalClient'):
                cs = sync.ContainerSync({}, container_ring=FakeRing())
            sync.audit_location_generator = fake_audit_location_generator
            cs.run_once(1, 2, a=3, b=4, verbose=True)
            self.assertEquals(time_calls, [6])
            self.assertEquals(audit_location_generator_calls, [1])
            self.assertEquals(cs.reported, 3602)
            cs.run_once()
        except Exception as err:
            if str(err) != 'we are now done':
                raise
        finally:
            sync.time = orig_time
            sync.audit_location_generator = orig_audit_location_generator
            sync.ContainerBroker = orig_ContainerBroker

        self.assertEquals(time_calls, [10])
        self.assertEquals(audit_location_generator_calls, [2])
        self.assertEquals(cs.reported, 3604)

    def test_container_sync_not_db(self):
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        self.assertEquals(cs.container_failures, 0)

    def test_container_sync_missing_db(self):
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        cs.container_sync('isa.db')
        self.assertEquals(cs.container_failures, 1)

    def test_container_sync_not_my_db(self):
        # Db could be there due to handoff replication so test that we ignore
        # those.
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({
                'bind_ip': '10.0.0.0',
            }, container_ring=cring)
        # Plumbing test for bind_ip and whataremyips()
        self.assertEqual(['10.0.0.0'], cs._myips)
        orig_ContainerBroker = sync.ContainerBroker
        try:
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0})
            cs._myips = ['127.0.0.1']   # No match
            cs._myport = 1              # No match
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 0)

            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1              # No match
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 0)

            cs._myips = ['127.0.0.1']   # No match
            cs._myport = 1000           # Match
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 0)

            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            # This complete match will cause the 1 container failure since the
            # broker's info doesn't contain sync point keys
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 1)
        finally:
            sync.ContainerBroker = orig_ContainerBroker

    def test_container_sync_deleted(self):
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        orig_ContainerBroker = sync.ContainerBroker
        try:
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0}, deleted=False)
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            # This complete match will cause the 1 container failure since the
            # broker's info doesn't contain sync point keys
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 1)

            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0}, deleted=True)
            # This complete match will not cause any more container failures
            # since the broker indicates deletion
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 1)
        finally:
            sync.ContainerBroker = orig_ContainerBroker

    def test_container_sync_no_to_or_key(self):
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        orig_ContainerBroker = sync.ContainerBroker
        try:
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1})
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            # This complete match will be skipped since the broker's metadata
            # has no x-container-sync-to or x-container-sync-key
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 0)
            self.assertEquals(cs.container_skips, 1)

            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1)})
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            # This complete match will be skipped since the broker's metadata
            # has no x-container-sync-key
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 0)
            self.assertEquals(cs.container_skips, 2)

            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1},
                metadata={'x-container-sync-key': ('key', 1)})
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            # This complete match will be skipped since the broker's metadata
            # has no x-container-sync-to
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 0)
            self.assertEquals(cs.container_skips, 3)

            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)})
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            cs.allowed_sync_hosts = []
            # This complete match will cause a container failure since the
            # sync-to won't validate as allowed.
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 1)
            self.assertEquals(cs.container_skips, 3)

            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)})
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            # This complete match will succeed completely since the broker
            # get_items_since will return no new rows.
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 1)
            self.assertEquals(cs.container_skips, 3)
        finally:
            sync.ContainerBroker = orig_ContainerBroker

    def test_container_stop_at(self):
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)
        orig_ContainerBroker = sync.ContainerBroker
        orig_time = sync.time
        try:
            sync.ContainerBroker = lambda p: FakeContainerBroker(
                p, info={'account': 'a', 'container': 'c',
                         'storage_policy_index': 0,
                         'x_container_sync_point1': -1,
                         'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)},
                items_since=['erroneous data'])
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            # This sync will fail since the items_since data is bad.
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 1)
            self.assertEquals(cs.container_skips, 0)

            # Set up fake times to make the sync short-circuit as having taken
            # too long
            fake_times = [
                1.0,        # Compute the time to move on
                100000.0,   # Compute if it's time to move on from first loop
                100000.0]   # Compute if it's time to move on from second loop

            def fake_time():
                return fake_times.pop(0)

            sync.time = fake_time
            # This same sync won't fail since it will look like it took so long
            # as to be time to move on (before it ever actually tries to do
            # anything).
            cs.container_sync('isa.db')
            self.assertEquals(cs.container_failures, 1)
            self.assertEquals(cs.container_skips, 0)
        finally:
            sync.ContainerBroker = orig_ContainerBroker
            sync.time = orig_time

    def test_container_first_loop(self):
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring)

        def fake_hash_path(account, container, obj, raw_digest=False):
            # Ensures that no rows match for full syncing, ordinal is 0 and
            # all hashes are 0
            return '\x00' * 16

        fcb = FakeContainerBroker(
            'path',
            info={'account': 'a', 'container': 'c',
                  'storage_policy_index': 0,
                  'x_container_sync_point1': 2,
                  'x_container_sync_point2': -1},
            metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                      'x-container-sync-key': ('key', 1)},
            items_since=[{'ROWID': 1, 'name': 'o'}])
        with nested(
                mock.patch('swift.container.sync.ContainerBroker',
                           lambda p: fcb),
                mock.patch('swift.container.sync.hash_path', fake_hash_path)):
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Succeeds because no rows match
            self.assertEquals(cs.container_failures, 1)
            self.assertEquals(cs.container_skips, 0)
            self.assertEquals(fcb.sync_point1, None)
            self.assertEquals(fcb.sync_point2, -1)

        def fake_hash_path(account, container, obj, raw_digest=False):
            # Ensures that all rows match for full syncing, ordinal is 0
            # and all hashes are 1
            return '\x01' * 16

        fcb = FakeContainerBroker('path', info={'account': 'a',
                                                'container': 'c',
                                                'storage_policy_index': 0,
                                                'x_container_sync_point1': 1,
                                                'x_container_sync_point2': 1},
                                  metadata={'x-container-sync-to':
                                            ('http://127.0.0.1/a/c', 1),
                                            'x-container-sync-key':
                                            ('key', 1)},
                                  items_since=[{'ROWID': 1, 'name': 'o'}])
        with nested(
                mock.patch('swift.container.sync.ContainerBroker',
                           lambda p: fcb),
                mock.patch('swift.container.sync.hash_path', fake_hash_path)):
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Succeeds because the two sync points haven't deviated yet
            self.assertEquals(cs.container_failures, 1)
            self.assertEquals(cs.container_skips, 0)
            self.assertEquals(fcb.sync_point1, -1)
            self.assertEquals(fcb.sync_point2, -1)

        fcb = FakeContainerBroker(
            'path',
            info={'account': 'a', 'container': 'c',
                  'storage_policy_index': 0,
                  'x_container_sync_point1': 2,
                  'x_container_sync_point2': -1},
            metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                      'x-container-sync-key': ('key', 1)},
            items_since=[{'ROWID': 1, 'name': 'o'}])
        with mock.patch('swift.container.sync.ContainerBroker', lambda p: fcb):
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Fails because container_sync_row will fail since the row has no
            # 'deleted' key
            self.assertEquals(cs.container_failures, 2)
            self.assertEquals(cs.container_skips, 0)
            self.assertEquals(fcb.sync_point1, None)
            self.assertEquals(fcb.sync_point2, -1)

        def fake_delete_object(*args, **kwargs):
            raise ClientException

        fcb = FakeContainerBroker(
            'path',
            info={'account': 'a', 'container': 'c',
                  'storage_policy_index': 0,
                  'x_container_sync_point1': 2,
                  'x_container_sync_point2': -1},
            metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                      'x-container-sync-key': ('key', 1)},
            items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
                          'deleted': True}])
        with nested(
                mock.patch('swift.container.sync.ContainerBroker',
                           lambda p: fcb),
                mock.patch('swift.container.sync.delete_object',
                           fake_delete_object)):
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Fails because delete_object fails
            self.assertEquals(cs.container_failures, 3)
            self.assertEquals(cs.container_skips, 0)
            self.assertEquals(fcb.sync_point1, None)
            self.assertEquals(fcb.sync_point2, -1)

        fcb = FakeContainerBroker(
            'path',
            info={'account': 'a', 'container': 'c',
                  'storage_policy_index': 0,
                  'x_container_sync_point1': 2,
                  'x_container_sync_point2': -1},
            metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                      'x-container-sync-key': ('key', 1)},
            items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
                          'deleted': True}])
        with nested(
                mock.patch('swift.container.sync.ContainerBroker',
                           lambda p: fcb),
                mock.patch('swift.container.sync.delete_object',
                           lambda *x, **y: None)):
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Succeeds because delete_object succeeds
            self.assertEquals(cs.container_failures, 3)
            self.assertEquals(cs.container_skips, 0)
            self.assertEquals(fcb.sync_point1, None)
            self.assertEquals(fcb.sync_point2, 1)

    def test_container_second_loop(self):
        cring = FakeRing()
        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync({}, container_ring=cring,
                                    logger=self.logger)
        orig_ContainerBroker = sync.ContainerBroker
        orig_hash_path = sync.hash_path
        orig_delete_object = sync.delete_object
        try:
            # We'll ensure the first loop is always skipped by keeping the two
            # sync points equal

            def fake_hash_path(account, container, obj, raw_digest=False):
                # Ensures that no rows match for second loop, ordinal is 0 and
                # all hashes are 1
                return '\x01' * 16

            sync.hash_path = fake_hash_path
            fcb = FakeContainerBroker(
                'path',
                info={'account': 'a', 'container': 'c',
                      'storage_policy_index': 0,
                      'x_container_sync_point1': -1,
                      'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)},
                items_since=[{'ROWID': 1, 'name': 'o'}])
            sync.ContainerBroker = lambda p: fcb
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Succeeds because no rows match
            self.assertEquals(cs.container_failures, 0)
            self.assertEquals(cs.container_skips, 0)
            self.assertEquals(fcb.sync_point1, 1)
            self.assertEquals(fcb.sync_point2, None)

            def fake_hash_path(account, container, obj, raw_digest=False):
                # Ensures that all rows match for second loop, ordinal is 0 and
                # all hashes are 0
                return '\x00' * 16

            def fake_delete_object(*args, **kwargs):
                pass

            sync.hash_path = fake_hash_path
            sync.delete_object = fake_delete_object
            fcb = FakeContainerBroker(
                'path',
                info={'account': 'a', 'container': 'c',
                      'storage_policy_index': 0,
                      'x_container_sync_point1': -1,
                      'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)},
                items_since=[{'ROWID': 1, 'name': 'o'}])
            sync.ContainerBroker = lambda p: fcb
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Fails because row is missing 'deleted' key
            # Nevertheless the fault is skipped
            self.assertEquals(cs.container_failures, 1)
            self.assertEquals(cs.container_skips, 0)
            self.assertEquals(fcb.sync_point1, 1)
            self.assertEquals(fcb.sync_point2, None)

            fcb = FakeContainerBroker(
                'path',
                info={'account': 'a', 'container': 'c',
                      'storage_policy_index': 0,
                      'x_container_sync_point1': -1,
                      'x_container_sync_point2': -1},
                metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
                          'x-container-sync-key': ('key', 1)},
                items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
                              'deleted': True}])
            sync.ContainerBroker = lambda p: fcb
            cs._myips = ['10.0.0.0']    # Match
            cs._myport = 1000           # Match
            cs.allowed_sync_hosts = ['127.0.0.1']
            cs.container_sync('isa.db')
            # Succeeds because row now has 'deleted' key and delete_object
            # succeeds
            self.assertEquals(cs.container_failures, 1)
            self.assertEquals(cs.container_skips, 0)
            self.assertEquals(fcb.sync_point1, 1)
            self.assertEquals(fcb.sync_point2, None)
        finally:
            sync.ContainerBroker = orig_ContainerBroker
            sync.hash_path = orig_hash_path
            sync.delete_object = orig_delete_object

    def test_container_sync_row_delete(self):
        self._test_container_sync_row_delete(None, None)

    def test_container_sync_row_delete_using_realms(self):
        self._test_container_sync_row_delete('US', 'realm_key')

    def _test_container_sync_row_delete(self, realm, realm_key):
        orig_uuid = sync.uuid
        orig_delete_object = sync.delete_object
        try:
            class FakeUUID(object):
                class uuid4(object):
                    hex = 'abcdef'

            sync.uuid = FakeUUID

            def fake_delete_object(path, name=None, headers=None, proxy=None,
                                   logger=None, timeout=None):
                self.assertEquals(path, 'http://sync/to/path')
                self.assertEquals(name, 'object')
                if realm:
                    self.assertEquals(headers, {
                        'x-container-sync-auth':
                        'US abcdef 90e95aabb45a6cdc0892a3db5535e7f918428c90',
                        'x-timestamp': '1.2'})
                else:
                    self.assertEquals(
                        headers,
                        {'x-container-sync-key': 'key', 'x-timestamp': '1.2'})
                self.assertEquals(proxy, 'http://proxy')
                self.assertEqual(timeout, 5.0)
                self.assertEqual(logger, self.logger)

            sync.delete_object = fake_delete_object

            with mock.patch('swift.container.sync.InternalClient'):
                cs = sync.ContainerSync({}, container_ring=FakeRing(),
                                        logger=self.logger)
            cs.http_proxies = ['http://proxy']
            # Success
            self.assertTrue(cs.container_sync_row(
                {'deleted': True,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEquals(cs.container_deletes, 1)

            exc = []

            def fake_delete_object(*args, **kwargs):
                exc.append(Exception('test exception'))
                raise exc[-1]

            sync.delete_object = fake_delete_object
            # Failure because of delete_object exception
            self.assertFalse(cs.container_sync_row(
                {'deleted': True,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEquals(cs.container_deletes, 1)
            self.assertEquals(len(exc), 1)
            self.assertEquals(str(exc[-1]), 'test exception')

            def fake_delete_object(*args, **kwargs):
                exc.append(ClientException('test client exception'))
                raise exc[-1]

            sync.delete_object = fake_delete_object
            # Failure because of delete_object exception
            self.assertFalse(cs.container_sync_row(
                {'deleted': True,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEquals(cs.container_deletes, 1)
            self.assertEquals(len(exc), 2)
            self.assertEquals(str(exc[-1]), 'test client exception')

            def fake_delete_object(*args, **kwargs):
                exc.append(ClientException('test client exception',
                                           http_status=404))
                raise exc[-1]

            sync.delete_object = fake_delete_object
            # Success because the object wasn't even found
            self.assertTrue(cs.container_sync_row(
                {'deleted': True,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEquals(cs.container_deletes, 2)
            self.assertEquals(len(exc), 3)
            self.assertEquals(str(exc[-1]), 'test client exception: 404')
        finally:
            sync.uuid = orig_uuid
            sync.delete_object = orig_delete_object

    def test_container_sync_row_put(self):
        self._test_container_sync_row_put(None, None)

    def test_container_sync_row_put_using_realms(self):
        self._test_container_sync_row_put('US', 'realm_key')

    def _test_container_sync_row_put(self, realm, realm_key):
        orig_uuid = sync.uuid
        orig_shuffle = sync.shuffle
        orig_put_object = sync.put_object
        try:
            class FakeUUID(object):
                class uuid4(object):
                    hex = 'abcdef'

            sync.uuid = FakeUUID
            sync.shuffle = lambda x: x

            def fake_put_object(sync_to, name=None, headers=None,
                                contents=None, proxy=None, logger=None,
                                timeout=None):
                self.assertEquals(sync_to, 'http://sync/to/path')
                self.assertEquals(name, 'object')
                if realm:
                    self.assertEqual(headers, {
                        'x-container-sync-auth':
                        'US abcdef ef62c64bb88a33fa00722daa23d5d43253164962',
                        'x-timestamp': '1.2',
                        'etag': 'etagvalue',
                        'other-header': 'other header value',
                        'content-type': 'text/plain'})
                else:
                    self.assertEquals(headers, {
                        'x-container-sync-key': 'key',
                        'x-timestamp': '1.2',
                        'other-header': 'other header value',
                        'etag': 'etagvalue',
                        'content-type': 'text/plain'})
                self.assertEquals(contents.read(), 'contents')
                self.assertEquals(proxy, 'http://proxy')
                self.assertEqual(timeout, 5.0)
                self.assertEqual(logger, self.logger)

            sync.put_object = fake_put_object

            with mock.patch('swift.container.sync.InternalClient'):
                cs = sync.ContainerSync({}, container_ring=FakeRing(),
                                        logger=self.logger)
            cs.http_proxies = ['http://proxy']

            def fake_get_object(acct, con, obj, headers, acceptable_statuses):
                self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                                 '0')
                return (200, {'other-header': 'other header value',
                              'etag': '"etagvalue"', 'x-timestamp': '1.2',
                              'content-type': 'text/plain; swift_bytes=123'},
                        iter('contents'))

            cs.swift.get_object = fake_get_object
            # Success as everything says it worked
            self.assertTrue(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEquals(cs.container_puts, 1)

            def fake_get_object(acct, con, obj, headers, acceptable_statuses):
                self.assertEquals(headers['X-Newest'], True)
                self.assertEquals(headers['X-Backend-Storage-Policy-Index'],
                                  '0')
                return (200, {'date': 'date value',
                              'last-modified': 'last modified value',
                              'x-timestamp': '1.2',
                              'other-header': 'other header value',
                              'etag': '"etagvalue"',
                              'content-type': 'text/plain; swift_bytes=123'},
                        iter('contents'))

            cs.swift.get_object = fake_get_object
            # Success as everything says it worked, also checks 'date' and
            # 'last-modified' headers are removed and that 'etag' header is
            # stripped of double quotes.
            self.assertTrue(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEquals(cs.container_puts, 2)

            exc = []

            def fake_get_object(acct, con, obj, headers, acceptable_statuses):
                self.assertEquals(headers['X-Newest'], True)
                self.assertEquals(headers['X-Backend-Storage-Policy-Index'],
                                  '0')
                exc.append(Exception('test exception'))
                raise exc[-1]

            cs.swift.get_object = fake_get_object
            # Fail due to completely unexpected exception
            self.assertFalse(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEquals(cs.container_puts, 2)
            self.assertEquals(len(exc), 1)
            self.assertEquals(str(exc[-1]), 'test exception')

            exc = []

            def fake_get_object(acct, con, obj, headers, acceptable_statuses):
                self.assertEquals(headers['X-Newest'], True)
                self.assertEquals(headers['X-Backend-Storage-Policy-Index'],
                                  '0')
                exc.append(ClientException('test client exception'))
                raise exc[-1]

            cs.swift.get_object = fake_get_object
            # Fail due to all direct_get_object calls failing
            self.assertFalse(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEquals(cs.container_puts, 2)
            self.assertEquals(len(exc), 1)
            self.assertEquals(str(exc[-1]), 'test client exception')

            def fake_get_object(acct, con, obj, headers, acceptable_statuses):
                self.assertEquals(headers['X-Newest'], True)
                self.assertEquals(headers['X-Backend-Storage-Policy-Index'],
                                  '0')
                return (200, {'other-header': 'other header value',
                              'x-timestamp': '1.2', 'etag': '"etagvalue"'},
                        iter('contents'))

            def fake_put_object(*args, **kwargs):
                raise ClientException('test client exception', http_status=401)

            cs.swift.get_object = fake_get_object
            sync.put_object = fake_put_object
            # Fail due to 401
            self.assertFalse(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEquals(cs.container_puts, 2)
            self.assertLogMessage('info', 'Unauth')

            def fake_put_object(*args, **kwargs):
                raise ClientException('test client exception', http_status=404)

            sync.put_object = fake_put_object
            # Fail due to 404
            self.assertFalse(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEquals(cs.container_puts, 2)
            self.assertLogMessage('info', 'Not found', 1)

            def fake_put_object(*args, **kwargs):
                raise ClientException('test client exception', http_status=503)

            sync.put_object = fake_put_object
            # Fail due to 503
            self.assertFalse(cs.container_sync_row(
                {'deleted': False,
                 'name': 'object',
                 'created_at': '1.2'}, 'http://sync/to/path',
                'key', FakeContainerBroker('broker'),
                {'account': 'a', 'container': 'c', 'storage_policy_index': 0},
                realm, realm_key))
            self.assertEquals(cs.container_puts, 2)
            self.assertLogMessage('error', 'ERROR Syncing')
        finally:
            sync.uuid = orig_uuid
            sync.shuffle = orig_shuffle
            sync.put_object = orig_put_object

    def test_select_http_proxy_None(self):

        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync(
                {'sync_proxy': ''}, container_ring=FakeRing())
        self.assertEqual(cs.select_http_proxy(), None)

    def test_select_http_proxy_one(self):

        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync(
                {'sync_proxy': 'http://one'}, container_ring=FakeRing())
        self.assertEqual(cs.select_http_proxy(), 'http://one')

    def test_select_http_proxy_multiple(self):

        with mock.patch('swift.container.sync.InternalClient'):
            cs = sync.ContainerSync(
                {'sync_proxy': 'http://one,http://two,http://three'},
                container_ring=FakeRing())
        self.assertEqual(
            set(cs.http_proxies),
            set(['http://one', 'http://two', 'http://three']))


if __name__ == '__main__':
    unittest.main()