more probe test refactoring

* move get_to_final_state into ProbeTest
* get rid of kill_servers
* add replicators manager and updaters manager to ProbeTest
  (this is all going someplace, i promise)

Change-Id: I8393a2ebc0d04051cae48cc3c49580f70818dbf2
parent 9196c31f64
commit ca0fce8542
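
The net effect of this commit: probe tests stop importing a module-level get_to_final_state helper and instead call a method inherited from ProbeTest. A minimal sketch of how a probe test reads after the change (the test class and its body are hypothetical, not code from this commit):

from swiftclient import client

from test.probe.common import ReplProbeTest


class TestExample(ReplProbeTest):  # hypothetical test class
    def test_account_sees_new_container(self):
        # write through the proxy; self.url/self.token come from setUp
        client.put_container(self.url, self.token, 'container1')

        # run replicators and updaters until the cluster is consistent;
        # before this commit, tests imported get_to_final_state() from
        # test.probe.common and called it as a free function
        self.get_to_final_state()

        # the account listing should now include the new container
        headers, containers = client.get_account(self.url, self.token)
        assert 'container1' in [c['name'] for c in containers]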
.gitignore (vendored)

@@ -14,3 +14,5 @@ ChangeLog
 pycscope.*
 .idea
 MANIFEST
+
+test/probe/.noseids

test/probe/common.py

@@ -124,10 +124,6 @@ def kill_server(port, port2server, pids):
         sleep(0.1)
 
 
-def kill_servers(port2server, pids):
-    Manager(['all']).kill()
-
-
 def kill_nonprimary_server(primary_nodes, port2server, pids):
     primary_ports = [n['port'] for n in primary_nodes]
     for port, server in port2server.iteritems():
@@ -217,18 +213,6 @@ def get_policy(**kwargs):
     raise SkipTest('No policy matching %s' % kwargs)
 
 
-def get_to_final_state():
-    replicators = Manager(['account-replicator', 'container-replicator',
-                           'object-replicator'])
-    replicators.stop()
-    updaters = Manager(['container-updater', 'object-updater'])
-    updaters.stop()
-
-    replicators.once()
-    updaters.once()
-    replicators.once()
-
-
 class ProbeTest(unittest.TestCase):
     """
     Don't instantiate this directly, use a child class instead.
@@ -273,17 +257,31 @@ class ProbeTest(unittest.TestCase):
             for server in Manager([server_name]):
                 for i, conf in enumerate(server.conf_files(), 1):
                     self.configs[server.server][i] = conf
+            self.replicators = Manager(
+                ['account-replicator', 'container-replicator',
+                 'object-replicator'])
+            self.updaters = Manager(['container-updater', 'object-updater'])
         except BaseException:
             try:
                 raise
             finally:
                 try:
-                    kill_servers(self.port2server, self.pids)
+                    Manager(['all']).kill()
                 except Exception:
                     pass
 
     def tearDown(self):
-        kill_servers(self.port2server, self.pids)
+        Manager(['all']).kill()
+
+    def get_to_final_state(self):
+        # these .stop()s are probably not strictly necessary,
+        # but may prevent race conditions
+        self.replicators.stop()
+        self.updaters.stop()
+
+        self.replicators.once()
+        self.updaters.once()
+        self.replicators.once()
 
 
 class ReplProbeTest(ProbeTest):
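
Design note on the new attributes: swift.common.manager.Manager wraps a list of Swift daemon names and exposes the lifecycle verbs used throughout this diff (.start(), .stop(), .kill(), .once()). A hedged sketch of the pattern get_to_final_state() builds on, with the daemon names taken from the code above:

from swift.common.manager import Manager

# one Manager per daemon group; ProbeTest.setUp now builds these once,
# and tests reuse them as self.replicators / self.updaters
replicators = Manager(['account-replicator', 'container-replicator',
                       'object-replicator'])

replicators.stop()   # stop background daemons so a stray pass cannot
                     # race with the explicit single passes below
replicators.once()   # run exactly one replication pass, then return

This is also why kill_servers() could be deleted: its body had already collapsed to the one-liner Manager(['all']).kill(), which the setUp error path and tearDown now call directly.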

test/probe/test_account_failures.py

@@ -20,7 +20,7 @@ from swiftclient import client
 
 from swift.common import direct_client
 from swift.common.manager import Manager
-from test.probe.common import get_to_final_state, kill_nonprimary_server, \
+from test.probe.common import kill_nonprimary_server, \
     kill_server, ReplProbeTest, start_server
 
 
@@ -75,7 +75,7 @@ class TestAccountFailures(ReplProbeTest):
         self.assert_(found2)
 
         # Get to final state
-        get_to_final_state()
+        self.get_to_final_state()
 
         # Assert account level now sees the container2/object1
         headers, containers = client.get_account(self.url, self.token)
@@ -168,7 +168,7 @@ class TestAccountFailures(ReplProbeTest):
         self.assert_(found2)
 
         # Get to final state
-        get_to_final_state()
+        self.get_to_final_state()
 
         # Assert that server is now up to date
         headers, containers = \

test/probe/test_account_reaper.py

@@ -21,8 +21,7 @@ from swift.common.storage_policy import POLICIES
 from swift.common.manager import Manager
 from swift.common.direct_client import direct_delete_account, \
     direct_get_object, direct_head_container, ClientException
-from test.probe.common import ReplProbeTest, \
-    get_to_final_state, ENABLED_POLICIES
+from test.probe.common import ReplProbeTest, ENABLED_POLICIES
 
 
 class TestAccountReaper(ReplProbeTest):
@@ -56,7 +55,7 @@ class TestAccountReaper(ReplProbeTest):
 
         Manager(['account-reaper']).once()
 
-        get_to_final_state()
+        self.get_to_final_state()
 
         for policy, container, obj in all_objects:
             cpart, cnodes = self.container_ring.get_nodes(

test/probe/test_container_failures.py

@@ -27,7 +27,7 @@ from swiftclient import client
 from swift.common import direct_client
 from swift.common.exceptions import ClientException
 from swift.common.utils import hash_path, readconf
-from test.probe.common import get_to_final_state, kill_nonprimary_server, \
+from test.probe.common import kill_nonprimary_server, \
     kill_server, ReplProbeTest, start_server
 
 eventlet.monkey_patch(all=False, socket=True)
@@ -63,7 +63,7 @@ class TestContainerFailures(ReplProbeTest):
         client.put_object(self.url, self.token, container1, 'object1', '123')
 
         # Get to a final state
-        get_to_final_state()
+        self.get_to_final_state()
 
         # Assert all container1 servers indicate container1 is alive and
         # well with object1
@@ -101,7 +101,7 @@ class TestContainerFailures(ReplProbeTest):
         start_server(cnp_port, self.port2server, self.pids)
 
         # Get to a final state
-        get_to_final_state()
+        self.get_to_final_state()
 
         # Assert all container1 servers indicate container1 is gone (happens
         # because the one node that knew about the delete replicated to the

test/probe/test_container_merge_policy_index.py

@@ -26,8 +26,7 @@ from swift.common import utils, direct_client
 from swift.common.storage_policy import POLICIES
 from swift.common.http import HTTP_NOT_FOUND
 from test.probe.brain import BrainSplitter
-from test.probe.common import ReplProbeTest, get_to_final_state, \
-    ENABLED_POLICIES
+from test.probe.common import ReplProbeTest, ENABLED_POLICIES
 
 from swiftclient import client, ClientException
 
@@ -91,7 +90,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
             self.fail('Unable to find /%s/%s/%s in %r' % (
                 self.account, self.container_name, self.object_name,
                 found_policy_indexes))
-        get_to_final_state()
+        self.get_to_final_state()
         Manager(['container-reconciler']).once()
         # validate containers
         head_responses = []
@@ -196,7 +195,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
             self.fail('Unable to find tombstone /%s/%s/%s in %r' % (
                 self.account, self.container_name, self.object_name,
                 found_policy_indexes))
-        get_to_final_state()
+        self.get_to_final_state()
         Manager(['container-reconciler']).once()
         # validate containers
         head_responses = []
@@ -313,7 +312,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
            break  # one should do it...
 
         self.brain.start_handoff_half()
-        get_to_final_state()
+        self.get_to_final_state()
         Manager(['container-reconciler']).once()
         # clear proxy cache
         client.post_container(self.url, self.token, self.container_name, {})
@@ -424,7 +423,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
             acceptable_statuses=(4,),
             headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
 
-        get_to_final_state()
+        self.get_to_final_state()
 
         # verify entry in the queue
         client = InternalClient(conf_file, 'probe-test', 3)
@@ -448,7 +447,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
             headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
 
         # make sure the queue is settled
-        get_to_final_state()
+        self.get_to_final_state()
         for container in client.iter_containers('.misplaced_objects'):
             for obj in client.iter_objects('.misplaced_objects',
                                            container['name']):

test/probe/test_object_expirer.py

@@ -22,8 +22,7 @@ from swift.common.internal_client import InternalClient
 from swift.common.manager import Manager
 from swift.common.utils import Timestamp
 
-from test.probe.common import ReplProbeTest, get_to_final_state, \
-    ENABLED_POLICIES
+from test.probe.common import ReplProbeTest, ENABLED_POLICIES
 from test.probe.test_container_merge_policy_index import BrainSplitter
 
 from swiftclient import client
@@ -80,7 +79,7 @@ class TestObjectExpirer(ReplProbeTest):
         self.expirer.once()
 
         self.brain.start_handoff_half()
-        get_to_final_state()
+        self.get_to_final_state()
 
         # validate object is expired
         found_in_policy = None

test/probe/test_object_metadata_replication.py

@@ -26,7 +26,7 @@ import uuid
 from swift.common import internal_client, utils
 
 from test.probe.brain import BrainSplitter
-from test.probe.common import ReplProbeTest, get_to_final_state
+from test.probe.common import ReplProbeTest
 
 
 def _sync_methods(object_server_config_paths):
@@ -143,7 +143,7 @@ class Test(ReplProbeTest):
         self.brain.start_handoff_half()
 
         # run replicator
-        get_to_final_state()
+        self.get_to_final_state()
 
         # check object deletion has been replicated on first server set
         self.brain.stop_primary_half()
@@ -159,7 +159,7 @@ class Test(ReplProbeTest):
         self.brain.start_handoff_half()
 
         # run replicator
-        get_to_final_state()
+        self.get_to_final_state()
 
         # check new object has been replicated on first server set
         self.brain.stop_primary_half()
@@ -198,7 +198,7 @@ class Test(ReplProbeTest):
         self.brain.start_handoff_half()
 
         # run replicator
-        get_to_final_state()
+        self.get_to_final_state()
 
         # check user metadata has been replicated to first server subset
         # and sysmeta is unchanged
@@ -244,7 +244,7 @@ class Test(ReplProbeTest):
         self.brain.start_primary_half()
 
         # run replicator
-        get_to_final_state()
+        self.get_to_final_state()
 
         # check stale user metadata is not replicated to first server subset
         # and sysmeta is unchanged

test/probe/test_replication_servers_working.py

@@ -26,7 +26,6 @@ from swift.obj.diskfile import get_data_dir
 
 from test.probe.common import ReplProbeTest
 from swift.common.utils import readconf
-from swift.common.manager import Manager
 
 
 def collect_info(path_list):
@@ -120,8 +119,7 @@ class TestReplicatorFunctions(ReplProbeTest):
            test_node_dir_list.append(d)
         # Run all replicators
         try:
-            Manager(['object-replicator', 'container-replicator',
-                     'account-replicator']).start()
+            self.replicators.start()
 
             # Delete some files
             for directory in os.listdir(test_node):
@@ -195,8 +193,7 @@ class TestReplicatorFunctions(ReplProbeTest):
                     raise
                 time.sleep(1)
         finally:
-            Manager(['object-replicator', 'container-replicator',
-                     'account-replicator']).stop()
+            self.replicators.stop()
 
 
 if __name__ == '__main__':
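
With the managers created once in ProbeTest.setUp, the try/finally above shrinks to attribute access. A sketch of the resulting pattern (hypothetical test class and body; only self.replicators comes from this commit):

from test.probe.common import ReplProbeTest


class TestReplicatorReuse(ReplProbeTest):  # hypothetical test class
    def test_damage_is_repaired(self):
        try:
            # start the long-running replicators built by the base class
            self.replicators.start()
            # ... delete files under a node's datadir, then poll until
            # replication restores them ...
        finally:
            # stop the daemons whether or not the assertions passed
            self.replicators.stop()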