applied post-review changes
parent 059be25657
commit 2e9514ff3f
config.yaml (35 lines changed)
@@ -33,12 +33,20 @@ options:
     default: 0
     type: int
     description: |
       This is the Swift ring builder min_part_hours parameter. This
       setting represents the amount of time in hours that Swift will wait
-      between subsequent ring rebalances in order to avoid large IO loads as
-      data is rebalanced when new devices are added to the cluster. Once your
+      between subsequent ring re-balances in order to avoid large i/o loads as
+      data is re-balanced when new devices are added to the cluster. Once your
       cluster has been built, you can set this to a higher value e.g. 1
-      (upstream default).
+      (upstream default). Note that changing this value will result in an
+      attempt to re-balance and if successful, rings will be redistributed.
+  disable-ring-balance:
+    type: boolean
+    default: False
+    description: |
+      This provides similar support to min-hours but without having to modify
+      the builders. If True, any changes to the builders will not result in a
+      ring re-balance and sync until this value is set back to False.
   zone-assignment:
     default: "manual"
     type: string

@@ -52,14 +60,6 @@ options:
       zones before the storage ring will be initially balance. Deployment
       requirements differ based on the zone-assignment policy configured, see
       this charm's README for details.
-  force-cluster-ring-sync:
-    default: False
-    type: boolean
-    description: |
-      There are some cases where one might want to resync rings and
-      builders across storage relations perhaps as a result of a manual
-      ring (e.g. rebalance) update so this toggle can be used to trigger a
-      resync across the entire cluster.
   # User provided SSL cert and key
   ssl_cert:
     type: string

@@ -85,7 +85,8 @@ options:
   workers:
     default: 0
     type: int
-    description: Number of TCP workers to launch (0 for the number of system cores)
+    description: |
+      Number of TCP workers to launch (0 for the number of system cores).
   operator-roles:
     default: "Member,Admin"
     type: string

@@ -101,7 +102,9 @@ options:
   node-timeout:
     default: 60
     type: int
-    description: How long the proxy server will wait on responses from the a/c/o servers.
+    description: |
+      How long the proxy server will wait on responses from the
+      account/container/object servers.
   recoverable-node-timeout:
     default: 30
     type: int
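A usage sketch for the new option (names here are assumptions: the application is taken to be deployed as "swift-proxy", and the juju 1.x "juju set" syntax is shown; newer juju releases use "juju config" instead):

    # Pause ring re-balance/sync while adding swift-storage devices.
    juju set swift-proxy disable-ring-balance=True
    # ... add/relate swift-storage units ...
    # Re-enable; the next config-changed hook will attempt a balance and,
    # if the rings actually change, a sync across the cluster.
    juju set swift-proxy disable-ring-balance=False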
@@ -16,19 +16,15 @@ from swift_utils import (
     swift_user,
     SWIFT_HA_RES,
     get_zone,
-    exists_in_ring,
-    add_to_ring,
-    should_balance,
     do_openstack_upgrade,
     setup_ipv6,
+    update_rings,
     balance_rings,
     builders_synced,
     sync_proxy_rings,
     update_min_part_hours,
     broadcast_rings_available,
     mark_www_rings_deleted,
-    cluster_sync_rings,
-    update_www_rings,
     SwiftProxyClusterRPC,
     get_first_available_value,
     all_responses_equal,

@@ -77,6 +73,7 @@ from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     get_ipv6_addr,
     is_ipv6,
+    format_ipv6_addr,
 )
 from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
 

@@ -136,9 +133,9 @@ def config_changed():
 
     update_min_part_hours()
 
-    if config('force-cluster-ring-sync'):
-        log("Disabling peer proxy apis and syncing rings across cluster.")
-        cluster_sync_rings()
+    if not config('disable-ring-balance'):
+        # Try ring balance. If rings are balanced, no sync will occur.
+        balance_rings()
 
     for r_id in relation_ids('identity-service'):
         keystone_joined(relid=r_id)

@@ -187,16 +184,26 @@ def storage_joined():
 @hooks.hook('swift-storage-relation-changed')
 @restart_on_change(restart_map())
 def storage_changed():
+    """Storage relation.
+
+    Only the leader unit can update and distribute rings so if we are not the
+    leader we ignore this event and wait for a resync request from the leader.
+    """
     if not is_elected_leader(SWIFT_HA_RES):
         log("Not the leader - ignoring storage relation until leader ready.",
             level=DEBUG)
         return
 
     log("Leader established, updating ring builders", level=INFO)
+    addr = relation_get('private-address')
     if config('prefer-ipv6'):
-        host_ip = '[%s]' % relation_get('private-address')
+        host_ip = format_ipv6_addr(addr)
+        if not host_ip:
+            errmsg = ("Did not get IPv6 address from storage relation "
+                      "(got=%s)" % (addr))
+            raise SwiftProxyCharmException(errmsg)
     else:
-        host_ip = openstack.get_host_ip(relation_get('private-address'))
+        host_ip = openstack.get_host_ip(addr)
 
     zone = get_zone(config('zone-assignment'))
     node_settings = {

@@ -219,23 +226,11 @@ def storage_changed():
     CONFIGS.write_all()
 
     # Allow for multiple devs per unit, passed along as a : separated list
-    devs = relation_get('device').split(':')
-    for dev in devs:
-        node_settings['device'] = dev
-        for ring in SWIFT_RINGS.itervalues():
-            if not exists_in_ring(ring, node_settings):
-                add_to_ring(ring, node_settings)
-
-    if should_balance([r for r in SWIFT_RINGS.itervalues()]):
-        balance_rings()
-        update_www_rings()
-        cluster_sync_rings()
-        # Restart proxy here in case no config changes made (so
-        # restart_on_change() ineffective).
-        service_restart('swift-proxy')
-    else:
-        log("Not yet ready to balance rings - insufficient replicas?",
-            level=INFO)
+    # Update and balance rings.
+    update_rings(devs=relation_get('device').split(':'))
+    # Restart proxy here in case no config changes made (so
+    # restart_on_change() ineffective).
+    service_restart('swift-proxy')
 
 
 @hooks.hook('swift-storage-relation-broken')
@@ -1,4 +1,5 @@
 import copy
+import hashlib
 import os
 import pwd
 import shutil

@@ -56,8 +57,9 @@ from charmhelpers.contrib.network.ip import (
 
 
 # Various config files that are managed via templating.
-SWIFT_CONF = '/etc/swift/swift.conf'
-SWIFT_PROXY_CONF = '/etc/swift/proxy-server.conf'
+SWIFT_CONF_DIR = '/etc/swift'
+SWIFT_CONF = os.path.join(SWIFT_CONF_DIR, 'swift.conf')
+SWIFT_PROXY_CONF = os.path.join(SWIFT_CONF_DIR, 'proxy-server.conf')
 SWIFT_CONF_DIR = os.path.dirname(SWIFT_CONF)
 MEMCACHED_CONF = '/etc/memcached.conf'
 SWIFT_RINGS_CONF = '/etc/apache2/conf.d/swift-rings'

@@ -81,13 +83,13 @@ def get_www_dir():
 
 
 SWIFT_RINGS = {
-    'account': '/etc/swift/account.builder',
-    'container': '/etc/swift/container.builder',
-    'object': '/etc/swift/object.builder'
+    'account': os.path.join(SWIFT_CONF_DIR, 'account.builder'),
+    'container': os.path.join(SWIFT_CONF_DIR, 'container.builder'),
+    'object': os.path.join(SWIFT_CONF_DIR, 'object.builder')
 }
 
-SSL_CERT = '/etc/swift/cert.crt'
-SSL_KEY = '/etc/swift/cert.key'
+SSL_CERT = os.path.join(SWIFT_CONF_DIR, 'cert.crt')
+SSL_KEY = os.path.join(SWIFT_CONF_DIR, 'cert.key')
 
 # Essex packages
 BASE_PACKAGES = [

@@ -469,7 +471,10 @@ def get_zone(assignment_policy):
 
 
 def balance_ring(ring_path):
-    """Balance a ring. return True if it needs redistribution."""
+    """Balance a ring.
+
+    Returns True if it needs redistribution.
+    """
     # shell out to swift-ring-builder instead, since the balancing code there
     # does a bunch of un-importable validation.'''
     cmd = ['swift-ring-builder', ring_path, 'rebalance']

@@ -478,7 +483,8 @@ def balance_ring(ring_path):
     rc = p.returncode
     if rc == 0:
         return True
-    elif rc == 1:
+
+    if rc == 1:
         # Ring builder exit-code=1 is supposed to indicate warning but I have
         # noticed that it can also return 1 with the following sort of message:
         #

@@ -488,23 +494,31 @@ def balance_ring(ring_path):
         # This indicates that a balance has occurred and a resync would be
         # required so not sure why 1 is returned in this case.
         return False
-    else:
-        msg = ('balance_ring: %s returned %s' % (cmd, rc))
-        raise SwiftProxyCharmException(msg)
+
+    msg = ('balance_ring: %s returned %s' % (cmd, rc))
+    raise SwiftProxyCharmException(msg)
 
 
 def should_balance(rings):
-    """Based on zones vs min. replicas, determine whether or not the rings
-    should be balanced during initial configuration.
+    """Determine whether or not a re-balance is required and allowed.
+
+    Ring balance can be disabled/postponed using the disable-ring-balance
+    config option.
+
+    Otherwise, using zones vs min. replicas, determine whether or not the rings
+    should be balanced.
     """
+    if config('disable-ring-balance'):
+        return False
+
     for ring in rings:
         builder = _load_builder(ring).to_dict()
         replicas = builder['replicas']
         zones = [dev['zone'] for dev in builder['devs']]
         num_zones = len(set(zones))
         if num_zones < replicas:
-            log("Not enough zones (%d) defined to allow rebalance "
-                "(need >= %d)" % (num_zones, replicas), level=DEBUG)
+            log("Not enough zones (%d) defined to allow ring balance "
+                "(need >= %d)" % (num_zones, replicas), level=INFO)
             return False
 
     return True

@@ -527,6 +541,11 @@ def do_openstack_upgrade(configs):
 
 
 def setup_ipv6():
+    """Validate that we can support IPv6 mode.
+
+    This should be called if prefer-ipv6 is True to ensure that we are running
+    in an environment that supports ipv6.
+    """
     ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
     if ubuntu_rel < "trusty":
         msg = ("IPv6 is not supported in the charms for Ubuntu versions less "

@@ -553,7 +572,7 @@ def sync_proxy_rings(broker_url):
     """
     log('Fetching swift rings & builders from proxy @ %s.' % broker_url,
         level=DEBUG)
-    target = '/etc/swift'
+    target = SWIFT_CONF_DIR
     synced = []
     tmpdir = tempfile.mkdtemp(prefix='swiftrings')
     for server in ['account', 'object', 'container']:

@@ -589,12 +608,93 @@ def update_www_rings():
                     os.path.join(www_dir, os.path.basename(builder_path)))
 
 
+def get_rings_checksum():
+    """Returns sha256 checksum for rings in /etc/swift."""
+    hash = hashlib.sha256()
+    for ring in SWIFT_RINGS.iterkeys():
+        path = os.path.join(SWIFT_CONF_DIR, '%s.ring.gz' % ring)
+        if not os.path.isfile(path):
+            continue
+
+        with open(path, 'rb') as fd:
+            hash.update(fd.read())
+
+    return hash.hexdigest()
+
+
+def get_builders_checksum():
+    """Returns sha256 checksum for builders in /etc/swift."""
+    hash = hashlib.sha256()
+    for builder in SWIFT_RINGS.itervalues():
+        if not os.path.exists(builder):
+            continue
+
+        with open(builder, 'rb') as fd:
+            hash.update(fd.read())
+
+    return hash.hexdigest()
+
+
+def sync_builders_and_rings_if_changed(f):
+    """Only trigger a ring or builder sync if they have changed as a result of
+    the decorated operation.
+    """
+    def _inner_sync_builders_and_rings_if_changed(*args, **kwargs):
+        if not is_elected_leader(SWIFT_HA_RES):
+            log("Sync rings called by non-leader - skipping", level=WARNING)
+            return
+
+        rings_before = get_rings_checksum()
+        builders_before = get_builders_checksum()
+
+        ret = f(*args, **kwargs)
+
+        rings_after = get_rings_checksum()
+        builders_after = get_builders_checksum()
+
+        rings_changed = rings_after != rings_before
+        builders_changed = builders_after != builders_before
+        if rings_changed or builders_changed:
+            # Copy to www dir
+            update_www_rings()
+            # Trigger sync
+            cluster_sync_rings(peers_only=not rings_changed)
+        else:
+            log("Rings/builders unchanged so skipping sync", level=DEBUG)
+
+        return ret
+
+    return _inner_sync_builders_and_rings_if_changed
+
+
+@sync_builders_and_rings_if_changed
+def update_rings(node_settings=None):
+    """Update builder with node settings and balance rings if necessary."""
+    if not is_elected_leader(SWIFT_HA_RES):
+        log("Update rings called by non-leader - skipping", level=WARNING)
+        return
+
+    if node_settings:
+        if 'device' in node_settings:
+            for ring in SWIFT_RINGS.itervalues():
+                if not exists_in_ring(ring, node_settings):
+                    add_to_ring(ring, node_settings)
+
+    balance_rings()
+
+
+@sync_builders_and_rings_if_changed
 def balance_rings():
     """Rebalance each ring and notify peers that new rings are available."""
     if not is_elected_leader(SWIFT_HA_RES):
         log("Balance rings called by non-leader - skipping", level=WARNING)
         return
 
+    if not should_balance([r for r in SWIFT_RINGS.itervalues()]):
+        log("Not yet ready to balance rings - insufficient replicas?",
+            level=INFO)
+        return
+
     rebalanced = False
     for path in SWIFT_RINGS.itervalues():
         if balance_ring(path):

@@ -604,7 +704,8 @@ def balance_rings():
         log('Ring %s not rebalanced' % path, level=DEBUG)
 
     if not rebalanced:
-        log("Rings unchanged by rebalance", level=INFO)
+        log("Rings unchanged by rebalance", level=DEBUG)
+        # NOTE: checksum will tell for sure
 
 
 def mark_www_rings_deleted():

@@ -777,5 +878,4 @@ def update_min_part_hours():
             resync_builders = True
 
     if resync_builders:
-        update_www_rings()
-        cluster_sync_rings(peers_only=True)
+        balance_rings()
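For context on the exit-code handling in balance_ring() above: the function shells out to Swift's own ring builder CLI, so a rough manual equivalent on the proxy node (builder paths as defined in SWIFT_RINGS) would be something like:

    # Rebalance one builder by hand; balance_ring() treats exit code 0 as
    # "rebalanced, resync required" and 1 as the warning/no-op case noted
    # in the comments above.
    swift-ring-builder /etc/swift/account.builder rebalance
    echo $?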
@@ -1,6 +1,8 @@
 import mock
+import os
 import shutil
 import tempfile
+import uuid
 import unittest
 
 

@@ -8,26 +10,36 @@ with mock.patch('charmhelpers.core.hookenv.config'):
     import swift_utils
 
 
+def init_ring_paths(tmpdir):
+    swift_utils.SWIFT_CONF_DIR = tmpdir
+    for ring in swift_utils.SWIFT_RINGS.iterkeys():
+        path = os.path.join(tmpdir, "%s.builder" % ring)
+        swift_utils.SWIFT_RINGS[ring] = path
+        with open(path, 'w') as fd:
+            fd.write("0\n")
+
+
 class SwiftUtilsTestCase(unittest.TestCase):
 
+    @mock.patch('swift_utils.sync_builders_and_rings_if_changed')
+    @mock.patch('swift_utils.balance_rings')
     @mock.patch('swift_utils.log')
     @mock.patch('swift_utils.os.path.exists')
     @mock.patch('swift_utils.is_elected_leader')
     @mock.patch('swift_utils.config')
     @mock.patch('swift_utils.get_min_part_hours')
     @mock.patch('swift_utils.set_min_part_hours')
-    @mock.patch('swift_utils.update_www_rings')
-    @mock.patch('swift_utils.cluster_sync_rings')
-    def test_update_min_part_hours(self, mock_cluster_sync_rings,
-                                   mock_update_www_rings,
-                                   mock_set_min_hours, mock_get_min_hours,
-                                   mock_config, mock_is_elected_leader,
-                                   mock_path_exists, mock_log):
+    def test_update_min_part_hours(self, mock_set_min_hours,
+                                   mock_get_min_hours, mock_config,
+                                   mock_is_elected_leader, mock_path_exists,
+                                   mock_log, mock_balance_rings,
+                                   mock_sync_builders_and_rings_if_changed):
 
         # Test blocker 1
         mock_is_elected_leader.return_value = False
         swift_utils.update_min_part_hours()
         self.assertFalse(mock_config.called)
+        self.assertFalse(mock_balance_rings.called)
 
         # Test blocker 2
         mock_path_exists.return_value = False

@@ -35,6 +47,7 @@ class SwiftUtilsTestCase(unittest.TestCase):
         swift_utils.update_min_part_hours()
         self.assertTrue(mock_config.called)
         self.assertFalse(mock_get_min_hours.called)
+        self.assertFalse(mock_balance_rings.called)
 
         # Test blocker 3
         mock_path_exists.return_value = True

@@ -42,19 +55,53 @@ class SwiftUtilsTestCase(unittest.TestCase):
         mock_config.return_value = 10
         mock_get_min_hours.return_value = 10
         swift_utils.update_min_part_hours()
-        self.assertTrue(mock_config.called)
         self.assertTrue(mock_get_min_hours.called)
         self.assertFalse(mock_set_min_hours.called)
+        self.assertFalse(mock_balance_rings.called)
 
         # Test go through
         mock_path_exists.return_value = True
         mock_is_elected_leader.return_value = True
         mock_config.return_value = 10
-        mock_get_min_hours.return_value = 11
+        mock_get_min_hours.return_value = 0
        swift_utils.update_min_part_hours()
         self.assertTrue(mock_config.called)
         self.assertTrue(mock_get_min_hours.called)
         self.assertTrue(mock_set_min_hours.called)
+        self.assertTrue(mock_balance_rings.called)
 
+    @mock.patch('swift_utils.balance_rings')
+    @mock.patch('swift_utils.log')
+    @mock.patch('swift_utils.is_elected_leader')
+    @mock.patch('swift_utils.config')
+    @mock.patch('swift_utils.update_www_rings')
+    @mock.patch('swift_utils.cluster_sync_rings')
+    def test_sync_builders_and_rings_if_changed(self, mock_cluster_sync_rings,
+                                                mock_update_www_rings,
+                                                mock_config,
+                                                mock_is_elected_leader,
+                                                mock_log,
+                                                mock_balance_rings):
+
+        @swift_utils.sync_builders_and_rings_if_changed
+        def mock_balance():
+            for ring, builder in swift_utils.SWIFT_RINGS.iteritems():
+                ring = os.path.join(swift_utils.SWIFT_CONF_DIR,
+                                    '%s.ring.gz' % ring)
+                with open(ring, 'w') as fd:
+                    fd.write(str(uuid.uuid4()))
+
+                with open(builder, 'w') as fd:
+                    fd.write(str(uuid.uuid4()))
+
+        mock_balance_rings.side_effect = mock_balance
+
+        init_ring_paths(tempfile.mkdtemp())
+        try:
+            swift_utils.balance_rings()
+        finally:
+            shutil.rmtree(swift_utils.SWIFT_CONF_DIR)
+
         self.assertTrue(mock_update_www_rings.called)
         self.assertTrue(mock_cluster_sync_rings.called)