Merge "sharding: Make replicator logging less scary"
@@ -20,7 +20,7 @@ from collections import defaultdict
 from eventlet import Timeout
 
 from swift.container.sync_store import ContainerSyncStore
-from swift.container.backend import ContainerBroker, DATADIR
+from swift.container.backend import ContainerBroker, DATADIR, SHARDED
 from swift.container.reconciler import (
     MISPLACED_OBJECTS_ACCOUNT, incorrect_policy_index,
     get_reconciler_container_name, get_row_to_q_entry_translator)
@@ -113,9 +113,18 @@ class ContainerReplicator(db_replicator.Replicator):
                 'peer may need upgrading', broker.db_file,
                 '%(ip)s:%(port)s/%(device)s' % node)
         if broker.sharding_initiated():
-            self.logger.warning(
-                '%s is able to shard -- refusing to replicate objects to peer '
-                '%s; have shard ranges and will wait for cleaving',
-                broker.db_file,
-                '%(ip)s:%(port)s/%(device)s' % node)
+            if info['db_state'] == SHARDED and len(
+                    broker.get_objects(limit=1)) == 0:
+                self.logger.debug('%s is sharded and has nothing more to '
+                                  'replicate to peer %s',
+                                  broker.db_file,
+                                  '%(ip)s:%(port)s/%(device)s' % node)
+            else:
+                # Only print the scary warning if there was something that
+                # didn't get replicated
+                self.logger.warning(
+                    '%s is able to shard -- refusing to replicate objects to '
+                    'peer %s; have shard ranges and will wait for cleaving',
+                    broker.db_file,
+                    '%(ip)s:%(port)s/%(device)s' % node)
             self.stats['deferred'] += 1
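Why the hunk above demotes the log level: once a container DB has reached the SHARDED state and its own object table is empty, every row has already been cleaved into shard DBs, so the replicator refusing to push object rows is routine housekeeping rather than a problem worth a warning. Below is a minimal, runnable sketch of that decision; FakeBroker, the info dict, the node dict and the paths are hypothetical stand-ins for illustration, not Swift's real ContainerBroker API.

    import logging

    logging.basicConfig(format='%(levelname)s: %(message)s',
                        level=logging.DEBUG)
    logger = logging.getLogger('container-replicator')

    SHARDED = 'sharded'  # stand-in for swift.container.backend.SHARDED

    class FakeBroker(object):
        """Hypothetical stand-in exposing just what the sketch needs."""
        def __init__(self, db_file, rows):
            self.db_file = db_file
            self._rows = rows

        def get_objects(self, limit=None):
            return self._rows[:limit]

    def log_deferral(broker, info, node):
        # Same guard as the hunk above: a fully sharded, drained DB has
        # nothing left to replicate, so the deferral is logged at debug.
        peer = '%(ip)s:%(port)s/%(device)s' % node
        if info['db_state'] == SHARDED and len(
                broker.get_objects(limit=1)) == 0:
            logger.debug('%s is sharded and has nothing more to '
                         'replicate to peer %s', broker.db_file, peer)
        else:
            logger.warning('%s is able to shard -- refusing to replicate '
                           'objects to peer %s; have shard ranges and '
                           'will wait for cleaving', broker.db_file, peer)

    node = {'ip': '10.0.0.2', 'port': 6201, 'device': 'sdb1'}
    drained = FakeBroker('/srv/node/sdb1/containers/a.db', [])
    busy = FakeBroker('/srv/node/sdb1/containers/b.db', [{'name': 'o1'}])
    log_deferral(drained, {'db_state': SHARDED}, node)    # -> DEBUG
    log_deferral(busy, {'db_state': 'unsharded'}, node)   # -> WARNING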
@@ -1902,7 +1902,7 @@ class TestContainerSharding(BaseTestContainerSharding):
         old_primary_dir, container_hash = self.get_storage_dir(
             self.brain.part, handoff_node)
         utils.mkdirs(os.path.dirname(old_primary_dir))
-        os.rename(new_primary_dir, old_primary_dir)
+        shutil.move(new_primary_dir, old_primary_dir)
 
         # make the cluster more or less "healthy" again
         self.brain.servers.start(number=new_primary_node_number)
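A note on the os.rename -> shutil.move swap, here and in the identical hunk below: os.rename raises OSError(EXDEV) when source and destination live on different filesystems, which can happen when the probe test shuffles DB directories between devices that are separate mounts; shutil.move transparently falls back to copy-then-delete in that case. A small sketch of the difference, with hypothetical paths:

    import errno
    import os
    import shutil

    def move_db(src, dst):
        # Fast path: an atomic rename only works within one filesystem.
        try:
            os.rename(src, dst)
        except OSError as e:
            if e.errno != errno.EXDEV:
                raise
            # Cross-device: shutil.move copies the file or tree to dst
            # and then removes the source, which is what the test wants.
            shutil.move(src, dst)

    # e.g. move_db('/srv/1/node/sdb1/containers/x',
    #              '/srv/2/node/sdb2/containers/x')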
@@ -2009,7 +2009,7 @@ class TestContainerSharding(BaseTestContainerSharding):
         old_primary_dir, container_hash = self.get_storage_dir(
             self.brain.part, handoff_node)
         utils.mkdirs(os.path.dirname(old_primary_dir))
-        os.rename(new_primary_dir, old_primary_dir)
+        shutil.move(new_primary_dir, old_primary_dir)
         self.assert_container_state(handoff_node, 'sharding', 3)
 
         # run replicator on handoff node to create a fresh db on new primary