Convert charm to Python 3 only
* Needed to add a swift_manager/manager.py file which uses the payload
  software's python modules to perform certain functions on behalf of the
  charm. These were part of the main charm but couldn't be retained there
  once the charm changed to Py3.
* Changed to absolute imports, using the charm root as the root for all
  charm modules.
* The py2 target in tox.ini is used to test the swift_manager/manager.py
  file only.
* The .testr.conf file has been migrated to .stestr.conf.

Change-Id: If37a393aa6ed27651b04810aa0bbf69eda37d7b4
parent bd84e2cb15
commit 4336b8d644
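The diff that follows shows how the Python 3 charm reaches the Python 2 payload code: get_manager() in lib/swift_utils.py returns a ManagerProxy whose attribute accesses accumulate a call path, and calling it serialises the path and arguments to JSON and runs swift_manager/manager.py as a subprocess. As a rough, self-contained sketch of that pattern (the run_manager helper and the example values are illustrative only, not code from the commit):

import json
import subprocess


def run_manager(script, payload):
    # Illustrative helper: run the Python 2 manager script with a JSON
    # argument and decode the JSON result it prints on stdout.
    out = subprocess.check_output([script, json.dumps(payload)])
    return json.loads(out.decode('UTF-8'))


class ManagerProxy(object):
    """Accumulate attribute accesses into a call path, then dispatch it."""

    def __init__(self, script, path=None):
        self._script = script
        self._path = path or []

    def __getattr__(self, attr):
        # Each attribute access extends the call path, e.g. proxy.add_dev
        return ManagerProxy(self._script, self._path + [attr])

    def __call__(self, *args, **kwargs):
        payload = {'path': self._path, 'args': args, 'kwargs': kwargs}
        result = run_manager(self._script, payload)
        if 'error' in result:
            raise RuntimeError(result['error'])
        return result['result']


# e.g. ManagerProxy('swift_manager/manager.py').get_min_part_hours(path)
# would invoke manager.py with a payload such as
# {"path": ["get_min_part_hours"], "args": ["/etc/swift/account.builder"], "kwargs": {}}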
.gitignore (vendored, 1 line changed)
@@ -6,3 +6,4 @@ bin
tags
*.sw[nop]
*.pyc
func-results.json
.stestr.conf (new file, 3 lines)
@@ -0,0 +1,3 @@
[DEFAULT]
test_path=./unit_tests
top_dir=./
.testr.conf (deleted)
@@ -1,8 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION

test_id_option=--load-list $IDFILE
test_list_option=--list
@ -1,4 +1,4 @@
|
||||
#!/usr/bin/python
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
@ -13,31 +13,39 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import os
|
||||
from subprocess import (
|
||||
check_output,
|
||||
CalledProcessError,
|
||||
)
|
||||
import sys
|
||||
import yaml
|
||||
|
||||
sys.path.append('hooks/')
|
||||
|
||||
_path = os.path.dirname(os.path.realpath(__file__))
|
||||
_parent = os.path.abspath(os.path.join(_path, '..'))
|
||||
|
||||
|
||||
def _add_path(path):
|
||||
if path not in sys.path:
|
||||
sys.path.insert(1, path)
|
||||
|
||||
|
||||
_add_path(_parent)
|
||||
|
||||
|
||||
from charmhelpers.core.host import service_pause, service_resume
|
||||
from charmhelpers.core.hookenv import (
|
||||
action_fail,
|
||||
action_set,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.utils import (
|
||||
set_unit_paused,
|
||||
clear_unit_paused,
|
||||
)
|
||||
from hooks.swift_hooks import CONFIGS
|
||||
from lib.swift_utils import assess_status, services
|
||||
from swift_hooks import CONFIGS
|
||||
|
||||
from subprocess import (
|
||||
check_output,
|
||||
CalledProcessError,
|
||||
)
|
||||
|
||||
|
||||
def get_action_parser(actions_yaml_path, action_name,
|
||||
@ -88,14 +96,14 @@ def diskusage(args):
|
||||
@raises Exception on any other failure
|
||||
"""
|
||||
try:
|
||||
raw_output = check_output(['swift-recon', '-d'])
|
||||
raw_output = check_output(['swift-recon', '-d']).decode('UTF-8')
|
||||
recon_result = list(line.strip().split(' ')
|
||||
for line in raw_output.splitlines()
|
||||
if 'Disk' in line)
|
||||
for line in recon_result:
|
||||
if 'space' in line:
|
||||
line[4] = str(int(line[4]) / 1024 / 1024 / 1024) + 'GB'
|
||||
line[6] = str(int(line[6]) / 1024 / 1024 / 1024) + 'GB'
|
||||
line[4] = str(int(line[4]) // (1024 * 1024 * 1024)) + 'GB'
|
||||
line[6] = str(int(line[6]) // (1024 * 1024 * 1024)) + 'GB'
|
||||
result = [' '.join(x) for x in recon_result]
|
||||
action_set({'output': result})
|
||||
except CalledProcessError as e:
|
||||
@ -118,7 +126,7 @@ def main(argv):
|
||||
try:
|
||||
action = ACTIONS[action_name]
|
||||
except KeyError:
|
||||
return "Action %s undefined" % action_name
|
||||
return "Action {} undefined".format(action_name)
|
||||
else:
|
||||
try:
|
||||
action(args)
|
||||
|
@ -1,4 +1,4 @@
|
||||
#!/usr/bin/python
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
@ -14,6 +14,21 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
_path = os.path.dirname(os.path.realpath(__file__))
|
||||
_parent = os.path.abspath(os.path.join(_path, '..'))
|
||||
|
||||
|
||||
def _add_path(path):
|
||||
if path not in sys.path:
|
||||
sys.path.insert(1, path)
|
||||
|
||||
|
||||
_add_path(_parent)
|
||||
|
||||
|
||||
from subprocess import (
|
||||
check_call,
|
||||
CalledProcessError
|
||||
@ -58,7 +73,7 @@ def add_user():
|
||||
log("Has a problem adding user: {}".format(e.output))
|
||||
action_fail(
|
||||
"Adding user {} failed with: \"{}\""
|
||||
.format(username, e.message))
|
||||
.format(username, str(e)))
|
||||
if success:
|
||||
message = "Successfully added the user {}".format(username)
|
||||
action_set({
|
||||
|
@@ -1 +0,0 @@
../charmhelpers/
@@ -1 +0,0 @@
../hooks/
@@ -1 +0,0 @@
../lib/
@ -1,4 +1,4 @@
|
||||
#!/usr/bin/python
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
@ -13,16 +13,26 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.append('hooks/')
|
||||
_path = os.path.dirname(os.path.realpath(__file__))
|
||||
_parent = os.path.abspath(os.path.join(_path, '..'))
|
||||
|
||||
|
||||
def _add_path(path):
|
||||
if path not in sys.path:
|
||||
sys.path.insert(1, path)
|
||||
|
||||
|
||||
_add_path(_parent)
|
||||
|
||||
|
||||
from charmhelpers.contrib.openstack.utils import (
|
||||
do_action_openstack_upgrade,
|
||||
)
|
||||
|
||||
from swift_hooks import (
|
||||
from hooks.swift_hooks import (
|
||||
config_changed,
|
||||
CONFIGS,
|
||||
)
|
||||
|
@ -13,6 +13,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import six
|
||||
@ -185,7 +186,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||
self.d.configure(service, config)
|
||||
|
||||
def _auto_wait_for_status(self, message=None, exclude_services=None,
|
||||
include_only=None, timeout=1800):
|
||||
include_only=None, timeout=None):
|
||||
"""Wait for all units to have a specific extended status, except
|
||||
for any defined as excluded. Unless specified via message, any
|
||||
status containing any case of 'ready' will be considered a match.
|
||||
@ -215,7 +216,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||
:param timeout: Maximum time in seconds to wait for status match
|
||||
:returns: None. Raises if timeout is hit.
|
||||
"""
|
||||
self.log.info('Waiting for extended status on units...')
|
||||
if not timeout:
|
||||
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
|
||||
self.log.info('Waiting for extended status on units for {}s...'
|
||||
''.format(timeout))
|
||||
|
||||
all_services = self.d.services.keys()
|
||||
|
||||
@ -252,9 +256,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||
service_messages = {service: message for service in services}
|
||||
|
||||
# Check for idleness
|
||||
self.d.sentry.wait()
|
||||
self.d.sentry.wait(timeout=timeout)
|
||||
# Check for error states and bail early
|
||||
self.d.sentry.wait_for_status(self.d.juju_env, services)
|
||||
self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
|
||||
# Check for ready messages
|
||||
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
|
||||
|
||||
|
@ -392,6 +392,8 @@ def get_swift_codename(version):
|
||||
releases = UBUNTU_OPENSTACK_RELEASE
|
||||
release = [k for k, v in six.iteritems(releases) if codename in v]
|
||||
ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
|
||||
if six.PY3:
|
||||
ret = ret.decode('UTF-8')
|
||||
if codename in ret or release[0] in ret:
|
||||
return codename
|
||||
elif len(codenames) == 1:
|
||||
|
@ -377,12 +377,12 @@ def get_mon_map(service):
|
||||
try:
|
||||
return json.loads(mon_status)
|
||||
except ValueError as v:
|
||||
log("Unable to parse mon_status json: {}. Error: {}".format(
|
||||
mon_status, v.message))
|
||||
log("Unable to parse mon_status json: {}. Error: {}"
|
||||
.format(mon_status, str(v)))
|
||||
raise
|
||||
except CalledProcessError as e:
|
||||
log("mon_status command failed with message: {}".format(
|
||||
e.message))
|
||||
log("mon_status command failed with message: {}"
|
||||
.format(str(e)))
|
||||
raise
|
||||
|
||||
|
||||
|
@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
|
||||
with open(path, 'wb') as target:
|
||||
os.fchown(target.fileno(), uid, gid)
|
||||
os.fchmod(target.fileno(), perms)
|
||||
if six.PY3 and isinstance(content, six.string_types):
|
||||
content = content.encode('UTF-8')
|
||||
target.write(content)
|
||||
return
|
||||
# the contents were the same, but we might still need to change the
|
||||
|
@@ -1 +0,0 @@
../charmhelpers/
@@ -11,7 +11,7 @@ check_and_install() {
fi
}

PYTHON="python"
PYTHON="python3"

for dep in ${DEPS[@]}; do
check_and_install ${PYTHON} ${dep}
@ -1,4 +1,4 @@
|
||||
#!/usr/bin/python
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
@ -13,15 +13,26 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
from subprocess import (
|
||||
check_call,
|
||||
CalledProcessError,
|
||||
)
|
||||
import time
|
||||
|
||||
|
||||
_path = os.path.dirname(os.path.realpath(__file__))
|
||||
_parent = os.path.abspath(os.path.join(_path, '..'))
|
||||
|
||||
|
||||
def _add_path(path):
|
||||
if path not in sys.path:
|
||||
sys.path.insert(1, path)
|
||||
|
||||
|
||||
_add_path(_parent)
|
||||
|
||||
|
||||
from lib.swift_utils import (
|
||||
SwiftProxyCharmException,
|
||||
@ -149,7 +160,7 @@ def config_changed():
|
||||
if is_elected_leader(SWIFT_HA_RES):
|
||||
log("Leader established, generating ring builders", level=INFO)
|
||||
# initialize new storage rings.
|
||||
for path in SWIFT_RINGS.itervalues():
|
||||
for path in SWIFT_RINGS.values():
|
||||
if not os.path.exists(path):
|
||||
initialize_ring(path,
|
||||
config('partition-power'),
|
||||
@ -195,18 +206,16 @@ def config_changed():
|
||||
@hooks.hook('identity-service-relation-joined')
|
||||
def keystone_joined(relid=None):
|
||||
port = config('bind-port')
|
||||
admin_url = '%s:%s' % (canonical_url(CONFIGS, ADMIN), port)
|
||||
internal_url = ('%s:%s/v1/AUTH_$(tenant_id)s' %
|
||||
(canonical_url(CONFIGS, INTERNAL), port))
|
||||
public_url = ('%s:%s/v1/AUTH_$(tenant_id)s' %
|
||||
(canonical_url(CONFIGS, PUBLIC), port))
|
||||
admin_url = '{}:{}'.format(canonical_url(CONFIGS, ADMIN), port)
|
||||
internal_url = ('{}:{}/v1/AUTH_$(tenant_id)s'
|
||||
.format(canonical_url(CONFIGS, INTERNAL), port))
|
||||
public_url = ('{}:{}/v1/AUTH_$(tenant_id)s'
|
||||
.format(canonical_url(CONFIGS, PUBLIC), port))
|
||||
region = config('region')
|
||||
|
||||
s3_public_url = ('%s:%s' %
|
||||
(canonical_url(CONFIGS, PUBLIC), port))
|
||||
s3_internal_url = ('%s:%s' %
|
||||
(canonical_url(CONFIGS, INTERNAL), port))
|
||||
s3_admin_url = '%s:%s' % (canonical_url(CONFIGS, ADMIN), port)
|
||||
s3_public_url = ('{}:{}'.format(canonical_url(CONFIGS, PUBLIC), port))
|
||||
s3_internal_url = ('{}:{}'.format(canonical_url(CONFIGS, INTERNAL), port))
|
||||
s3_admin_url = '{}:{}'.format(canonical_url(CONFIGS, ADMIN), port)
|
||||
|
||||
relation_set(relation_id=relid,
|
||||
region=None, public_url=None,
|
||||
@ -257,7 +266,7 @@ def get_host_ip(rid=None, unit=None):
|
||||
return host_ip
|
||||
else:
|
||||
msg = ("Did not get IPv6 address from storage relation "
|
||||
"(got=%s)" % (addr))
|
||||
"(got={})".format(addr))
|
||||
log(msg, level=WARNING)
|
||||
|
||||
return openstack.get_host_ip(addr)
|
||||
@ -277,7 +286,7 @@ def update_rsync_acls():
|
||||
hosts.append(get_host_ip(rid=rid, unit=unit))
|
||||
|
||||
rsync_hosts = ' '.join(hosts)
|
||||
log("Broadcasting acl '%s' to all storage units" % (rsync_hosts),
|
||||
log("Broadcasting acl '{}' to all storage units".format(rsync_hosts),
|
||||
level=DEBUG)
|
||||
# We add a timestamp so that the storage units know which is the newest
|
||||
settings = {'rsync_allowed_hosts': rsync_hosts,
|
||||
@ -317,10 +326,10 @@ def storage_changed():
|
||||
'container_port': relation_get('container_port'),
|
||||
}
|
||||
|
||||
if None in node_settings.itervalues():
|
||||
missing = [k for k, v in node_settings.iteritems() if v is None]
|
||||
if None in node_settings.values():
|
||||
missing = [k for k, v in node_settings.items() if v is None]
|
||||
log("Relation not ready - some required values not provided by "
|
||||
"relation (missing=%s)" % (', '.join(missing)), level=INFO)
|
||||
"relation (missing={})".format(', '.join(missing)), level=INFO)
|
||||
return None
|
||||
|
||||
for k in ['zone', 'account_port', 'object_port', 'container_port']:
|
||||
@ -391,9 +400,8 @@ def is_all_peers_stopped(responses):
|
||||
ack_key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK
|
||||
token = relation_get(attribute=rq_key, unit=local_unit())
|
||||
if not token or token != responses[0].get(ack_key):
|
||||
log("Token mismatch, rq and ack tokens differ (expected ack=%s, "
|
||||
"got=%s)" %
|
||||
(token, responses[0].get(ack_key)), level=DEBUG)
|
||||
log("Token mismatch, rq and ack tokens differ (expected ack={}, "
|
||||
"got={})".format(token, responses[0].get(ack_key)), level=DEBUG)
|
||||
return False
|
||||
|
||||
if not all_responses_equal(responses, ack_key):
|
||||
@ -410,7 +418,7 @@ def cluster_leader_actions():
|
||||
|
||||
NOTE: must be called by leader from cluster relation hook.
|
||||
"""
|
||||
log("Cluster changed by unit=%s (local is leader)" % (remote_unit()),
|
||||
log("Cluster changed by unit={} (local is leader)".format(remote_unit()),
|
||||
level=DEBUG)
|
||||
|
||||
rx_settings = relation_get() or {}
|
||||
@ -438,7 +446,7 @@ def cluster_leader_actions():
|
||||
resync_request_ack_key = SwiftProxyClusterRPC.KEY_REQUEST_RESYNC_ACK
|
||||
tx_resync_request_ack = tx_settings.get(resync_request_ack_key)
|
||||
if rx_resync_request and tx_resync_request_ack != rx_resync_request:
|
||||
log("Unit '%s' has requested a resync" % (remote_unit()),
|
||||
log("Unit '{}' has requested a resync".format(remote_unit()),
|
||||
level=INFO)
|
||||
cluster_sync_rings(peers_only=True)
|
||||
relation_set(**{resync_request_ack_key: rx_resync_request})
|
||||
@ -462,20 +470,20 @@ def cluster_leader_actions():
|
||||
key = 'peers-only'
|
||||
if not all_responses_equal(responses, key, must_exist=False):
|
||||
msg = ("Did not get equal response from every peer unit for "
|
||||
"'%s'" % (key))
|
||||
"'{}'".format(key))
|
||||
raise SwiftProxyCharmException(msg)
|
||||
|
||||
peers_only = bool(get_first_available_value(responses, key,
|
||||
default=0))
|
||||
log("Syncing rings and builders (peers-only=%s)" % (peers_only),
|
||||
level=DEBUG)
|
||||
log("Syncing rings and builders (peers-only={})"
|
||||
.format(peers_only), level=DEBUG)
|
||||
broadcast_rings_available(broker_token=rx_ack_token,
|
||||
storage=not peers_only)
|
||||
else:
|
||||
key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK
|
||||
acks = ', '.join([rsp[key] for rsp in responses if key in rsp])
|
||||
log("Not all peer apis stopped - skipping sync until all peers "
|
||||
"ready (current='%s', token='%s')" % (acks, tx_ack_token),
|
||||
"ready (current='{}', token='{}')".format(acks, tx_ack_token),
|
||||
level=INFO)
|
||||
elif ((rx_ack_token and (rx_ack_token == tx_ack_token)) or
|
||||
(rx_rq_token and (rx_rq_token == rx_ack_token))):
|
||||
@ -486,13 +494,13 @@ def cluster_leader_actions():
|
||||
if broker:
|
||||
# If we get here, manual intervention will be required in order
|
||||
# to restore the cluster.
|
||||
msg = ("Failed to restore previous broker '%s' as leader" %
|
||||
(broker))
|
||||
raise SwiftProxyCharmException(msg)
|
||||
raise SwiftProxyCharmException(
|
||||
"Failed to restore previous broker '{}' as leader"
|
||||
.format(broker))
|
||||
else:
|
||||
msg = ("No builder-broker on rx_settings relation from '%s' - "
|
||||
"unable to attempt leader restore" % (remote_unit()))
|
||||
raise SwiftProxyCharmException(msg)
|
||||
raise SwiftProxyCharmException(
|
||||
"No builder-broker on rx_settings relation from '{}' - "
|
||||
"unable to attempt leader restore".format(remote_unit()))
|
||||
else:
|
||||
log("Not taking any sync actions", level=DEBUG)
|
||||
|
||||
@ -504,8 +512,8 @@ def cluster_non_leader_actions():
|
||||
|
||||
NOTE: must be called by non-leader from cluster relation hook.
|
||||
"""
|
||||
log("Cluster changed by unit=%s (local is non-leader)" % (remote_unit()),
|
||||
level=DEBUG)
|
||||
log("Cluster changed by unit={} (local is non-leader)"
|
||||
.format(remote_unit()), level=DEBUG)
|
||||
rx_settings = relation_get() or {}
|
||||
tx_settings = relation_get(unit=local_unit()) or {}
|
||||
|
||||
@ -522,8 +530,8 @@ def cluster_non_leader_actions():
|
||||
|
||||
# Check whether we have been requested to stop proxy service
|
||||
if rx_rq_token:
|
||||
log("Peer request to stop proxy service received (%s) - sending ack" %
|
||||
(rx_rq_token), level=INFO)
|
||||
log("Peer request to stop proxy service received ({}) - sending ack"
|
||||
.format(rx_rq_token), level=INFO)
|
||||
service_stop('swift-proxy')
|
||||
peers_only = rx_settings.get('peers-only', None)
|
||||
rq = SwiftProxyClusterRPC().stop_proxy_ack(echo_token=rx_rq_token,
|
||||
@ -545,12 +553,12 @@ def cluster_non_leader_actions():
|
||||
elif broker_token:
|
||||
if tx_ack_token:
|
||||
if broker_token == tx_ack_token:
|
||||
log("Broker and ACK tokens match (%s)" % (broker_token),
|
||||
log("Broker and ACK tokens match ({})".format(broker_token),
|
||||
level=DEBUG)
|
||||
else:
|
||||
log("Received ring/builder update notification but tokens do "
|
||||
"not match (broker-token=%s/ack-token=%s)" %
|
||||
(broker_token, tx_ack_token), level=WARNING)
|
||||
"not match (broker-token={}/ack-token={})"
|
||||
.format(broker_token, tx_ack_token), level=WARNING)
|
||||
return
|
||||
else:
|
||||
log("Broker token available without handshake, assuming we just "
|
||||
@ -576,7 +584,7 @@ def cluster_non_leader_actions():
|
||||
builders_only = int(rx_settings.get('sync-only-builders', 0))
|
||||
path = os.path.basename(get_www_dir())
|
||||
try:
|
||||
sync_proxy_rings('http://%s/%s' % (broker, path),
|
||||
sync_proxy_rings('http://{}/{}'.format(broker, path),
|
||||
rings=not builders_only)
|
||||
except CalledProcessError:
|
||||
log("Ring builder sync failed, builders not yet available - "
|
||||
@ -647,8 +655,9 @@ def ha_relation_joined(relation_id=None):
|
||||
if vip not in resource_params[vip_key]:
|
||||
vip_key = '{}_{}'.format(vip_key, vip_params)
|
||||
else:
|
||||
log("Resource '%s' (vip='%s') already exists in "
|
||||
"vip group - skipping" % (vip_key, vip), WARNING)
|
||||
log("Resource '{}' (vip='{}') already exists in "
|
||||
"vip group - skipping".format(vip_key, vip),
|
||||
WARNING)
|
||||
continue
|
||||
|
||||
resources[vip_key] = res_swift_vip
|
||||
|
@ -92,7 +92,8 @@ class SwiftIdentityContext(OSContextGenerator):
|
||||
import multiprocessing
|
||||
workers = multiprocessing.cpu_count()
|
||||
if config('prefer-ipv6'):
|
||||
proxy_ip = '[%s]' % get_ipv6_addr(exc_list=[config('vip')])[0]
|
||||
proxy_ip = ('[{}]'
|
||||
.format(get_ipv6_addr(exc_list=[config('vip')])[0]))
|
||||
memcached_ip = 'ip6-localhost'
|
||||
else:
|
||||
proxy_ip = get_host_ip(unit_get('private-address'))
|
||||
|
@ -1,17 +1,20 @@
|
||||
import copy
|
||||
from collections import OrderedDict
|
||||
import functools
|
||||
import glob
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import pwd
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from collections import OrderedDict
|
||||
from swift_context import (
|
||||
from lib.swift_context import (
|
||||
get_swift_hash,
|
||||
SwiftHashContext,
|
||||
SwiftIdentityContext,
|
||||
@ -40,6 +43,7 @@ from charmhelpers.contrib.hahelpers.cluster import (
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
DEBUG,
|
||||
ERROR,
|
||||
INFO,
|
||||
WARNING,
|
||||
local_unit,
|
||||
@ -78,7 +82,6 @@ SWIFT_CONF_DIR = '/etc/swift'
|
||||
SWIFT_RING_EXT = 'ring.gz'
|
||||
SWIFT_CONF = os.path.join(SWIFT_CONF_DIR, 'swift.conf')
|
||||
SWIFT_PROXY_CONF = os.path.join(SWIFT_CONF_DIR, 'proxy-server.conf')
|
||||
SWIFT_CONF_DIR = os.path.dirname(SWIFT_CONF)
|
||||
MEMCACHED_CONF = '/etc/memcached.conf'
|
||||
SWIFT_RINGS_CONF = '/etc/apache2/conf.d/swift-rings'
|
||||
SWIFT_RINGS_24_CONF = '/etc/apache2/conf-available/swift-rings.conf'
|
||||
@ -104,11 +107,11 @@ def get_www_dir():
|
||||
return WWW_DIR
|
||||
|
||||
|
||||
SWIFT_RINGS = {
|
||||
'account': os.path.join(SWIFT_CONF_DIR, 'account.builder'),
|
||||
'container': os.path.join(SWIFT_CONF_DIR, 'container.builder'),
|
||||
'object': os.path.join(SWIFT_CONF_DIR, 'object.builder')
|
||||
}
|
||||
SWIFT_RINGS = OrderedDict((
|
||||
('account', os.path.join(SWIFT_CONF_DIR, 'account.builder')),
|
||||
('container', os.path.join(SWIFT_CONF_DIR, 'container.builder')),
|
||||
('object', os.path.join(SWIFT_CONF_DIR, 'object.builder')),
|
||||
))
|
||||
|
||||
SSL_CERT = os.path.join(SWIFT_CONF_DIR, 'cert.crt')
|
||||
SSL_KEY = os.path.join(SWIFT_CONF_DIR, 'cert.key')
|
||||
@ -278,7 +281,7 @@ class SwiftProxyClusterRPC(object):
|
||||
rq['sync-only-builders'] = 1
|
||||
|
||||
rq['broker-token'] = broker_token
|
||||
rq['broker-timestamp'] = "%f" % time.time()
|
||||
rq['broker-timestamp'] = "{:f}".format(time.time())
|
||||
rq['builder-broker'] = self._hostname
|
||||
return rq
|
||||
|
||||
@ -367,7 +370,7 @@ def all_responses_equal(responses, key, must_exist=True):
|
||||
if all_equal:
|
||||
return True
|
||||
|
||||
log("Responses not all equal for key '%s'" % (key), level=DEBUG)
|
||||
log("Responses not all equal for key '{}'".format(key), level=DEBUG)
|
||||
return False
|
||||
|
||||
|
||||
@ -413,7 +416,7 @@ def restart_map():
|
||||
that should be restarted when file changes.
|
||||
"""
|
||||
_map = []
|
||||
for f, ctxt in CONFIG_FILES.iteritems():
|
||||
for f, ctxt in CONFIG_FILES.items():
|
||||
svcs = []
|
||||
for svc in ctxt['services']:
|
||||
svcs.append(svc)
|
||||
@ -427,7 +430,7 @@ def services():
|
||||
''' Returns a list of services associate with this charm '''
|
||||
_services = []
|
||||
for v in restart_map().values():
|
||||
_services = _services + v
|
||||
_services.extend(v)
|
||||
return list(set(_services))
|
||||
|
||||
|
||||
@ -455,67 +458,21 @@ def determine_packages(release):
|
||||
return BASE_PACKAGES
|
||||
|
||||
|
||||
def _load_builder(path):
|
||||
# lifted straight from /usr/bin/swift-ring-builder
|
||||
from swift.common.ring import RingBuilder
|
||||
import cPickle as pickle
|
||||
try:
|
||||
builder = pickle.load(open(path, 'rb'))
|
||||
if not hasattr(builder, 'devs'):
|
||||
builder_dict = builder
|
||||
builder = RingBuilder(1, 1, 1)
|
||||
builder.copy_from(builder_dict)
|
||||
except ImportError: # Happens with really old builder pickles
|
||||
builder = RingBuilder(1, 1, 1)
|
||||
builder.copy_from(pickle.load(open(path, 'rb')))
|
||||
for dev in builder.devs:
|
||||
if dev and 'meta' not in dev:
|
||||
dev['meta'] = ''
|
||||
|
||||
return builder
|
||||
|
||||
|
||||
def _write_ring(ring, ring_path):
|
||||
import cPickle as pickle
|
||||
with open(ring_path, "wb") as fd:
|
||||
pickle.dump(ring.to_dict(), fd, protocol=2)
|
||||
|
||||
|
||||
def ring_port(ring_path, node):
|
||||
"""Determine correct port from relation settings for a given ring file."""
|
||||
for name in ['account', 'object', 'container']:
|
||||
if name in ring_path:
|
||||
return node[('%s_port' % name)]
|
||||
|
||||
|
||||
def initialize_ring(path, part_power, replicas, min_hours):
|
||||
"""Initialize a new swift ring with given parameters."""
|
||||
from swift.common.ring import RingBuilder
|
||||
ring = RingBuilder(part_power, replicas, min_hours)
|
||||
_write_ring(ring, path)
|
||||
get_manager().initialize_ring(path, part_power, replicas, min_hours)
|
||||
|
||||
|
||||
def exists_in_ring(ring_path, node):
|
||||
ring = _load_builder(ring_path).to_dict()
|
||||
node['port'] = ring_port(ring_path, node)
|
||||
|
||||
for dev in ring['devs']:
|
||||
# Devices in the ring can be None if there are holes from previously
|
||||
# removed devices so skip any that are None.
|
||||
if not dev:
|
||||
continue
|
||||
d = [(i, dev[i]) for i in dev if i in node and i != 'zone']
|
||||
n = [(i, node[i]) for i in node if i in dev and i != 'zone']
|
||||
if sorted(d) == sorted(n):
|
||||
log('Node already exists in ring (%s).' % ring_path, level=INFO)
|
||||
return True
|
||||
|
||||
return False
|
||||
node['port'] = _ring_port(ring_path, node)
|
||||
result = get_manager().exists_in_ring(ring_path, node)
|
||||
if result:
|
||||
log('Node already exists in ring ({}).'
|
||||
.format(ring_path), level=INFO)
|
||||
return result
|
||||
|
||||
|
||||
def add_to_ring(ring_path, node):
|
||||
ring = _load_builder(ring_path)
|
||||
port = ring_port(ring_path, node)
|
||||
port = _ring_port(ring_path, node)
|
||||
|
||||
# Note: this code used to attempt to calculate new dev ids, but made
|
||||
# various assumptions (e.g. in order devices, all devices in the ring
|
||||
@ -530,50 +487,16 @@ def add_to_ring(ring_path, node):
|
||||
'weight': 100,
|
||||
'meta': '',
|
||||
}
|
||||
ring.add_dev(new_dev)
|
||||
_write_ring(ring, ring_path)
|
||||
msg = 'Added new device to ring %s: %s' % (ring_path, new_dev)
|
||||
get_manager().add_dev(ring_path, new_dev)
|
||||
msg = 'Added new device to ring {}: {}'.format(ring_path, new_dev)
|
||||
log(msg, level=INFO)
|
||||
|
||||
|
||||
def _get_zone(ring_builder):
|
||||
replicas = ring_builder.replicas
|
||||
zones = [d['zone'] for d in ring_builder.devs]
|
||||
if not zones:
|
||||
return 1
|
||||
|
||||
# zones is a per-device list, so we may have one
|
||||
# node with 3 devices in zone 1. For balancing
|
||||
# we need to track the unique zones being used
|
||||
# not necessarily the number of devices
|
||||
unique_zones = list(set(zones))
|
||||
if len(unique_zones) < replicas:
|
||||
return sorted(unique_zones).pop() + 1
|
||||
|
||||
zone_distrib = {}
|
||||
for z in zones:
|
||||
zone_distrib[z] = zone_distrib.get(z, 0) + 1
|
||||
|
||||
if len(set([total for total in zone_distrib.itervalues()])) == 1:
|
||||
# all zones are equal, start assigning to zone 1 again.
|
||||
return 1
|
||||
|
||||
return sorted(zone_distrib, key=zone_distrib.get).pop(0)
|
||||
|
||||
|
||||
def get_min_part_hours(ring):
|
||||
builder = _load_builder(ring)
|
||||
return builder.min_part_hours
|
||||
|
||||
|
||||
def set_min_part_hours(path, value):
|
||||
cmd = ['swift-ring-builder', path, 'set_min_part_hours', str(value)]
|
||||
p = subprocess.Popen(cmd)
|
||||
p.communicate()
|
||||
rc = p.returncode
|
||||
if rc != 0:
|
||||
msg = ("Failed to set min_part_hours=%s on %s" % (value, path))
|
||||
raise SwiftProxyCharmException(msg)
|
||||
def _ring_port(ring_path, node):
|
||||
"""Determine correct port from relation settings for a given ring file."""
|
||||
for name in ['account', 'object', 'container']:
|
||||
if name in ring_path:
|
||||
return node[('{}_port'.format(name))]
|
||||
|
||||
|
||||
def get_zone(assignment_policy):
|
||||
@ -587,18 +510,20 @@ def get_zone(assignment_policy):
|
||||
of zones equal to the configured minimum replicas. This allows for a
|
||||
single swift-storage service unit, with each 'add-unit'd machine unit
|
||||
being assigned to a different zone.
|
||||
|
||||
:param assignment_policy: <string> the policy
|
||||
:returns: <integer> zone id
|
||||
"""
|
||||
if assignment_policy == 'manual':
|
||||
return relation_get('zone')
|
||||
elif assignment_policy == 'auto':
|
||||
potential_zones = []
|
||||
for ring in SWIFT_RINGS.itervalues():
|
||||
builder = _load_builder(ring)
|
||||
potential_zones.append(_get_zone(builder))
|
||||
_manager = get_manager()
|
||||
potential_zones = [_manager.get_zone(ring_path)
|
||||
for ring_path in SWIFT_RINGS.values()]
|
||||
return set(potential_zones).pop()
|
||||
else:
|
||||
msg = ('Invalid zone assignment policy: %s' % assignment_policy)
|
||||
raise SwiftProxyCharmException(msg)
|
||||
raise SwiftProxyCharmException(
|
||||
'Invalid zone assignment policy: {}'.format(assignment_policy))
|
||||
|
||||
|
||||
def balance_ring(ring_path):
|
||||
@ -607,27 +532,28 @@ def balance_ring(ring_path):
|
||||
Returns True if it needs redistribution.
|
||||
"""
|
||||
# shell out to swift-ring-builder instead, since the balancing code there
|
||||
# does a bunch of un-importable validation.'''
|
||||
# does a bunch of un-importable validation.
|
||||
cmd = ['swift-ring-builder', ring_path, 'rebalance']
|
||||
p = subprocess.Popen(cmd)
|
||||
p.communicate()
|
||||
rc = p.returncode
|
||||
if rc == 0:
|
||||
return True
|
||||
try:
|
||||
subprocess.check_call(cmd)
|
||||
except subprocess.CalledProcessError as e:
|
||||
if e.returncode == 1:
|
||||
# Ring builder exit-code=1 is supposed to indicate warning but I
|
||||
# have noticed that it can also return 1 with the following sort of
|
||||
# message:
|
||||
#
|
||||
# NOTE: Balance of 166.67 indicates you should push this ring,
|
||||
# wait at least 0 hours, and rebalance/repush.
|
||||
#
|
||||
# This indicates that a balance has occurred and a resync would be
|
||||
# required so not sure why 1 is returned in this case.
|
||||
return False
|
||||
|
||||
if rc == 1:
|
||||
# Ring builder exit-code=1 is supposed to indicate warning but I have
|
||||
# noticed that it can also return 1 with the following sort of message:
|
||||
#
|
||||
# NOTE: Balance of 166.67 indicates you should push this ring, wait
|
||||
# at least 0 hours, and rebalance/repush.
|
||||
#
|
||||
# This indicates that a balance has occurred and a resync would be
|
||||
# required so not sure why 1 is returned in this case.
|
||||
return False
|
||||
raise SwiftProxyCharmException(
|
||||
'balance_ring: {} returned {}'.format(cmd, e.returncode))
|
||||
|
||||
msg = ('balance_ring: %s returned %s' % (cmd, rc))
|
||||
raise SwiftProxyCharmException(msg)
|
||||
# return True if it needs redistribution
|
||||
return True
|
||||
|
||||
|
||||
def should_balance(rings):
|
||||
@ -649,7 +575,7 @@ def do_openstack_upgrade(configs):
|
||||
new_src = config('openstack-origin')
|
||||
new_os_rel = get_os_codename_install_source(new_src)
|
||||
|
||||
log('Performing OpenStack upgrade to %s.' % (new_os_rel), level=DEBUG)
|
||||
log('Performing OpenStack upgrade to {}.'.format(new_os_rel), level=DEBUG)
|
||||
configure_installation_source(new_src)
|
||||
dpkg_opts = [
|
||||
'--option', 'Dpkg::Options::=--force-confnew',
|
||||
@ -692,7 +618,7 @@ def sync_proxy_rings(broker_url, builders=True, rings=True):
|
||||
Note that we sync the ring builder and .gz files since the builder itself
|
||||
is linked to the underlying .gz ring.
|
||||
"""
|
||||
log('Fetching swift rings & builders from proxy @ %s.' % broker_url,
|
||||
log('Fetching swift rings & builders from proxy @ {}.'.format(broker_url),
|
||||
level=DEBUG)
|
||||
target = SWIFT_CONF_DIR
|
||||
synced = []
|
||||
@ -700,18 +626,18 @@ def sync_proxy_rings(broker_url, builders=True, rings=True):
|
||||
try:
|
||||
for server in ['account', 'object', 'container']:
|
||||
if builders:
|
||||
url = '%s/%s.builder' % (broker_url, server)
|
||||
log('Fetching %s.' % url, level=DEBUG)
|
||||
builder = "%s.builder" % (server)
|
||||
url = '{}/{}.builder'.format(broker_url, server)
|
||||
log('Fetching {}.'.format(url), level=DEBUG)
|
||||
builder = "{}.builder".format(server)
|
||||
cmd = ['wget', url, '--retry-connrefused', '-t', '10', '-O',
|
||||
os.path.join(tmpdir, builder)]
|
||||
subprocess.check_call(cmd)
|
||||
synced.append(builder)
|
||||
|
||||
if rings:
|
||||
url = '%s/%s.%s' % (broker_url, server, SWIFT_RING_EXT)
|
||||
log('Fetching %s.' % url, level=DEBUG)
|
||||
ring = '%s.%s' % (server, SWIFT_RING_EXT)
|
||||
url = '{}/{}.{}'.format(broker_url, server, SWIFT_RING_EXT)
|
||||
log('Fetching {}.'.format(url), level=DEBUG)
|
||||
ring = '{}.{}'.format(server, SWIFT_RING_EXT)
|
||||
cmd = ['wget', url, '--retry-connrefused', '-t', '10', '-O',
|
||||
os.path.join(tmpdir, ring)]
|
||||
subprocess.check_call(cmd)
|
||||
@ -745,9 +671,9 @@ def update_www_rings(rings=True, builders=True):
|
||||
return
|
||||
|
||||
tmp_dir = tempfile.mkdtemp(prefix='swift-rings-www-tmp')
|
||||
for ring, builder_path in SWIFT_RINGS.iteritems():
|
||||
for ring, builder_path in SWIFT_RINGS.items():
|
||||
if rings:
|
||||
ringfile = '%s.%s' % (ring, SWIFT_RING_EXT)
|
||||
ringfile = '{}.{}'.format(ring, SWIFT_RING_EXT)
|
||||
src = os.path.join(SWIFT_CONF_DIR, ringfile)
|
||||
dst = os.path.join(tmp_dir, ringfile)
|
||||
shutil.copyfile(src, dst)
|
||||
@ -758,7 +684,7 @@ def update_www_rings(rings=True, builders=True):
|
||||
shutil.copyfile(src, dst)
|
||||
|
||||
www_dir = get_www_dir()
|
||||
deleted = "%s.deleted" % (www_dir)
|
||||
deleted = "{}.deleted".format(www_dir)
|
||||
ensure_www_dir_permissions(tmp_dir)
|
||||
os.rename(www_dir, deleted)
|
||||
os.rename(tmp_dir, www_dir)
|
||||
@ -768,8 +694,9 @@ def update_www_rings(rings=True, builders=True):
|
||||
def get_rings_checksum():
|
||||
"""Returns sha256 checksum for rings in /etc/swift."""
|
||||
sha = hashlib.sha256()
|
||||
for ring in SWIFT_RINGS.iterkeys():
|
||||
path = os.path.join(SWIFT_CONF_DIR, '%s.%s' % (ring, SWIFT_RING_EXT))
|
||||
for ring in SWIFT_RINGS.keys():
|
||||
path = os.path.join(SWIFT_CONF_DIR, '{}.{}'
|
||||
.format(ring, SWIFT_RING_EXT))
|
||||
if not os.path.isfile(path):
|
||||
continue
|
||||
|
||||
@ -782,7 +709,7 @@ def get_rings_checksum():
|
||||
def get_builders_checksum():
|
||||
"""Returns sha256 checksum for builders in /etc/swift."""
|
||||
sha = hashlib.sha256()
|
||||
for builder in SWIFT_RINGS.itervalues():
|
||||
for builder in SWIFT_RINGS.values():
|
||||
if not os.path.exists(builder):
|
||||
continue
|
||||
|
||||
@ -819,6 +746,7 @@ def sync_builders_and_rings_if_changed(f):
|
||||
"""Only trigger a ring or builder sync if they have changed as a result of
|
||||
the decorated operation.
|
||||
"""
|
||||
@functools.wraps(f)
|
||||
def _inner_sync_builders_and_rings_if_changed(*args, **kwargs):
|
||||
if not is_elected_leader(SWIFT_HA_RES):
|
||||
log("Sync rings called by non-leader - skipping", level=WARNING)
|
||||
@ -840,8 +768,8 @@ def sync_builders_and_rings_if_changed(f):
|
||||
rings_after = get_rings_checksum()
|
||||
builders_after = get_builders_checksum()
|
||||
|
||||
rings_path = os.path.join(SWIFT_CONF_DIR, '*.%s' %
|
||||
(SWIFT_RING_EXT))
|
||||
rings_path = os.path.join(SWIFT_CONF_DIR, '*.{}'
|
||||
.format(SWIFT_RING_EXT))
|
||||
rings_ready = len(glob.glob(rings_path)) == len(SWIFT_RINGS)
|
||||
rings_changed = ((rings_after != rings_before) or
|
||||
not previously_synced())
|
||||
@ -867,7 +795,7 @@ def sync_builders_and_rings_if_changed(f):
|
||||
|
||||
|
||||
@sync_builders_and_rings_if_changed
|
||||
def update_rings(nodes=[], min_part_hours=None):
|
||||
def update_rings(nodes=None, min_part_hours=None):
|
||||
"""Update builder with node settings and balance rings if necessary.
|
||||
|
||||
Also update min_part_hours if provided.
|
||||
@ -883,12 +811,12 @@ def update_rings(nodes=[], min_part_hours=None):
|
||||
# only the builder.
|
||||
|
||||
# Only update if all exist
|
||||
if all([os.path.exists(p) for p in SWIFT_RINGS.itervalues()]):
|
||||
for ring, path in SWIFT_RINGS.iteritems():
|
||||
if all(os.path.exists(p) for p in SWIFT_RINGS.values()):
|
||||
for ring, path in SWIFT_RINGS.items():
|
||||
current_min_part_hours = get_min_part_hours(path)
|
||||
if min_part_hours != current_min_part_hours:
|
||||
log("Setting ring %s min_part_hours to %s" %
|
||||
(ring, min_part_hours), level=INFO)
|
||||
log("Setting ring {} min_part_hours to {}"
|
||||
.format(ring, min_part_hours), level=INFO)
|
||||
try:
|
||||
set_min_part_hours(path, min_part_hours)
|
||||
except SwiftProxyCharmException as exc:
|
||||
@ -899,16 +827,35 @@ def update_rings(nodes=[], min_part_hours=None):
|
||||
else:
|
||||
balance_required = True
|
||||
|
||||
for node in nodes:
|
||||
for ring in SWIFT_RINGS.itervalues():
|
||||
if not exists_in_ring(ring, node):
|
||||
add_to_ring(ring, node)
|
||||
balance_required = True
|
||||
if nodes is not None:
|
||||
for node in nodes:
|
||||
for ring in SWIFT_RINGS.values():
|
||||
if not exists_in_ring(ring, node):
|
||||
add_to_ring(ring, node)
|
||||
balance_required = True
|
||||
|
||||
if balance_required:
|
||||
balance_rings()
|
||||
|
||||
|
||||
def get_min_part_hours(path):
|
||||
"""Just a proxy to the manager.py:get_min_part_hours() function
|
||||
|
||||
:param path: the path to get the min_part_hours for
|
||||
:returns: integer
|
||||
"""
|
||||
return get_manager().get_min_part_hours(path)
|
||||
|
||||
|
||||
def set_min_part_hours(path, value):
|
||||
cmd = ['swift-ring-builder', path, 'set_min_part_hours', str(value)]
|
||||
try:
|
||||
subprocess.check_call(cmd)
|
||||
except subprocess.CalledProcessError:
|
||||
raise SwiftProxyCharmException(
|
||||
"Failed to set min_part_hours={} on {}".format(value, path))
|
||||
|
||||
|
||||
@sync_builders_and_rings_if_changed
|
||||
def balance_rings():
|
||||
"""Rebalance each ring and notify peers that new rings are available."""
|
||||
@ -916,19 +863,19 @@ def balance_rings():
|
||||
log("Balance rings called by non-leader - skipping", level=WARNING)
|
||||
return
|
||||
|
||||
if not should_balance([r for r in SWIFT_RINGS.itervalues()]):
|
||||
if not should_balance([r for r in SWIFT_RINGS.values()]):
|
||||
log("Not yet ready to balance rings - insufficient replicas?",
|
||||
level=INFO)
|
||||
return
|
||||
|
||||
rebalanced = False
|
||||
log("Rebalancing rings", level=INFO)
|
||||
for path in SWIFT_RINGS.itervalues():
|
||||
for path in SWIFT_RINGS.values():
|
||||
if balance_ring(path):
|
||||
log('Balanced ring %s' % path, level=DEBUG)
|
||||
log('Balanced ring {}'.format(path), level=DEBUG)
|
||||
rebalanced = True
|
||||
else:
|
||||
log('Ring %s not rebalanced' % path, level=DEBUG)
|
||||
log('Ring {} not rebalanced'.format(path), level=DEBUG)
|
||||
|
||||
if not rebalanced:
|
||||
log("Rings unchanged by rebalance", level=DEBUG)
|
||||
@ -940,10 +887,10 @@ def mark_www_rings_deleted():
|
||||
storage units won't see them.
|
||||
"""
|
||||
www_dir = get_www_dir()
|
||||
for ring, _ in SWIFT_RINGS.iteritems():
|
||||
path = os.path.join(www_dir, '%s.ring.gz' % ring)
|
||||
for ring in SWIFT_RINGS.keys():
|
||||
path = os.path.join(www_dir, '{}.ring.gz'.format(ring))
|
||||
if os.path.exists(path):
|
||||
os.rename(path, "%s.deleted" % (path))
|
||||
os.rename(path, "{}.deleted".format(path))
|
||||
|
||||
|
||||
def notify_peers_builders_available(broker_token, builders_only=False):
|
||||
@ -967,16 +914,17 @@ def notify_peers_builders_available(broker_token, builders_only=False):
|
||||
return
|
||||
|
||||
if builders_only:
|
||||
type = "builders"
|
||||
_type = "builders"
|
||||
else:
|
||||
type = "builders & rings"
|
||||
_type = "builders & rings"
|
||||
|
||||
# Notify peers that builders are available
|
||||
log("Notifying peer(s) that %s are ready for sync." % type, level=INFO)
|
||||
log("Notifying peer(s) that {} are ready for sync."
|
||||
.format(_type), level=INFO)
|
||||
rq = SwiftProxyClusterRPC().sync_rings_request(broker_token,
|
||||
builders_only=builders_only)
|
||||
for rid in cluster_rids:
|
||||
log("Notifying rid=%s (%s)" % (rid, rq), level=DEBUG)
|
||||
log("Notifying rid={} ({})".format(rid, rq), level=DEBUG)
|
||||
relation_set(relation_id=rid, relation_settings=rq)
|
||||
|
||||
|
||||
@ -1053,7 +1001,7 @@ def notify_storage_rings_available():
|
||||
hostname = get_hostaddr()
|
||||
hostname = format_ipv6_addr(hostname) or hostname
|
||||
path = os.path.basename(get_www_dir())
|
||||
rings_url = 'http://%s/%s' % (hostname, path)
|
||||
rings_url = 'http://{}/{}'.format(hostname, path)
|
||||
trigger = uuid.uuid4()
|
||||
# Notify storage nodes that there is a new ring to fetch.
|
||||
log("Notifying storage nodes that new rings are ready for sync.",
|
||||
@ -1069,17 +1017,17 @@ def fully_synced():
|
||||
Returns True if we have all rings and builders.
|
||||
"""
|
||||
not_synced = []
|
||||
for ring, builder in SWIFT_RINGS.iteritems():
|
||||
for ring, builder in SWIFT_RINGS.items():
|
||||
if not os.path.exists(builder):
|
||||
not_synced.append(builder)
|
||||
|
||||
ringfile = os.path.join(SWIFT_CONF_DIR,
|
||||
'%s.%s' % (ring, SWIFT_RING_EXT))
|
||||
'{}.{}'.format(ring, SWIFT_RING_EXT))
|
||||
if not os.path.exists(ringfile):
|
||||
not_synced.append(ringfile)
|
||||
|
||||
if not_synced:
|
||||
log("Not yet synced: %s" % ', '.join(not_synced), level=INFO)
|
||||
log("Not yet synced: {}".format(', '.join(not_synced)), level=INFO)
|
||||
return False
|
||||
|
||||
return True
|
||||
@ -1120,21 +1068,85 @@ def timestamps_available(excluded_unit):
|
||||
return False
|
||||
|
||||
|
||||
def has_minimum_zones(rings):
|
||||
"""Determine if enough zones exist to satisfy minimum replicas"""
|
||||
for ring in rings:
|
||||
if not os.path.isfile(ring):
|
||||
return False
|
||||
builder = _load_builder(ring).to_dict()
|
||||
replicas = builder['replicas']
|
||||
zones = [dev['zone'] for dev in builder['devs'] if dev]
|
||||
num_zones = len(set(zones))
|
||||
if num_zones < replicas:
|
||||
log("Not enough zones (%d) defined to satisfy minimum replicas "
|
||||
"(need >= %d)" % (num_zones, replicas), level=INFO)
|
||||
return False
|
||||
def get_manager():
|
||||
return ManagerProxy()
|
||||
|
||||
return True
|
||||
|
||||
class ManagerProxy(object):
|
||||
|
||||
def __init__(self, path=None):
|
||||
self._path = path or []
|
||||
|
||||
def __getattribute__(self, attr):
|
||||
if attr in ['__class__', '_path', 'api_version']:
|
||||
return super().__getattribute__(attr)
|
||||
return self.__class__(path=self._path + [attr])
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
# Following line retained commented-out for future debugging
|
||||
# print("Called: {} ({}, {})".format(self._path, args, kwargs))
|
||||
return _proxy_manager_call(self._path, args, kwargs)
|
||||
|
||||
|
||||
JSON_ENCODE_OPTIONS = dict(
|
||||
sort_keys=True,
|
||||
allow_nan=False,
|
||||
indent=None,
|
||||
separators=(',', ':'),
|
||||
)
|
||||
|
||||
|
||||
def _proxy_manager_call(path, args, kwargs):
|
||||
package = dict(path=path,
|
||||
args=args,
|
||||
kwargs=kwargs)
|
||||
serialized = json.dumps(package, **JSON_ENCODE_OPTIONS)
|
||||
script = os.path.abspath(os.path.join(os.path.dirname(__file__),
|
||||
'..',
|
||||
'swift_manager',
|
||||
'manager.py'))
|
||||
env = os.environ
|
||||
try:
|
||||
if sys.version_info < (3, 5):
|
||||
# remove this after trusty support is removed. No subprocess.run
|
||||
# in Python 3.4
|
||||
process = subprocess.Popen([script, serialized],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
env=env)
|
||||
out, err = process.communicate()
|
||||
result = json.loads(out.decode('UTF-8'))
|
||||
else:
|
||||
completed = subprocess.run([script, serialized],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
env=env)
|
||||
result = json.loads(completed.stdout.decode('UTF-8'))
|
||||
if 'error' in result:
|
||||
s = ("The call within manager.py failed with the error: '{}'. "
|
||||
"The call was: path={}, args={}, kwargs={}"
|
||||
.format(result['error'], path, args, kwargs))
|
||||
log(s, level=ERROR)
|
||||
raise RuntimeError(s)
|
||||
return result['result']
|
||||
except subprocess.CalledProcessError as e:
|
||||
s = ("manager.py failed when called with path={}, args={}, kwargs={},"
|
||||
" with the error: {}".format(path, args, kwargs, str(e)))
|
||||
log(s, level=ERROR)
|
||||
if sys.version_info < (3, 5):
|
||||
# remove this after trusty support is removed.
|
||||
log("stderr was:\n{}\n".format(err.decode('UTF-8')),
|
||||
level=ERROR)
|
||||
else:
|
||||
log("stderr was:\n{}\n".format(completed.stderr.decode('UTF-8')),
|
||||
level=ERROR)
|
||||
raise RuntimeError(s)
|
||||
except Exception as e:
|
||||
s = ("Decoding the result from the call to manager.py resulted in "
|
||||
"error '{}' (command: path={}, args={}, kwargs={}"
|
||||
.format(str(e), path, args, kwargs))
|
||||
log(s, level=ERROR)
|
||||
raise RuntimeError(s)
|
||||
|
||||
|
||||
def customer_check_assess_status(configs):
|
||||
@ -1155,7 +1167,7 @@ def customer_check_assess_status(configs):
|
||||
return ('blocked', 'Not enough related storage nodes')
|
||||
|
||||
# Verify there are enough storage zones to satisfy minimum replicas
|
||||
rings = [r for r in SWIFT_RINGS.itervalues()]
|
||||
rings = [r for r in SWIFT_RINGS.values()]
|
||||
if not has_minimum_zones(rings):
|
||||
return ('blocked', 'Not enough storage zones for minimum replicas')
|
||||
|
||||
@ -1167,11 +1179,25 @@ def customer_check_assess_status(configs):
|
||||
if not is_ipv6(addr):
|
||||
return ('blocked',
|
||||
'Did not get IPv6 address from '
|
||||
'storage relation (got=%s)' % (addr))
|
||||
'storage relation (got={})'.format(addr))
|
||||
|
||||
return 'active', 'Unit is ready'
|
||||
|
||||
|
||||
def has_minimum_zones(rings):
|
||||
"""Determine if enough zones exist to satisfy minimum replicas
|
||||
|
||||
Uses manager.py as it accesses the ring_builder object in swift
|
||||
|
||||
:param rings: the list of ring_paths to check
|
||||
:returns: Boolean
|
||||
"""
|
||||
result = get_manager().has_minimum_zones(rings)
|
||||
if 'log' in result:
|
||||
log(result['log'], level=result['level'])
|
||||
return result['result']
|
||||
|
||||
|
||||
def assess_status(configs, check_services=None):
|
||||
"""Assess status of current unit
|
||||
Decides what the state of the unit should be based on the current
|
||||
|
swift_manager/.stestr.conf (new file, 3 lines)
@@ -0,0 +1,3 @@
[DEFAULT]
test_path=./
top_dir=./
swift_manager/manager.py (new executable file, 275 lines)
@@ -0,0 +1,275 @@
#!/usr/bin/env python2
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# NOTE(tinwood): This file needs to remain Python2 as it uses the payload
# software's python modules (swift.common.ring) to do its work.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import cPickle as pickle
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
_usage = """This file is called from the swift_utils.py file to implement
|
||||
various swift ring builder calls and functions. It is called with one
|
||||
parameter which is a JSON encoded string with the following keys:
|
||||
|
||||
{
|
||||
'path': The function that needs to be performed
|
||||
'args': the non-keyword argument to supply to the swift manager call.
|
||||
'kwargs': any keyword args to supply to the swift manager call.
|
||||
}
|
||||
|
||||
The result of the call, or an error, is returned as a json encoded result that
|
||||
is printed to STDOUT. Any errors are printed to STDERR.
|
||||
|
||||
The output is JSON encoded with the following keys, in a compressed form:
|
||||
|
||||
{
|
||||
'result': <whatever the result of the function call was>
|
||||
'error': <if an error occurred, the text of the error>
|
||||
}
|
||||
|
||||
This system is currently needed to decouple the majority of the charm from the
|
||||
underlying package being used for keystone.
|
||||
"""
|
||||
|
||||
JSON_ENCODE_OPTIONS = dict(
|
||||
sort_keys=True,
|
||||
allow_nan=False,
|
||||
indent=None,
|
||||
separators=(',', ':'),
|
||||
)
|
||||
|
||||
|
||||
# These are the main 'API' functions that are called in the manager.py file
|
||||
|
||||
def initialize_ring(path, part_power, replicas, min_hours):
|
||||
"""Initialize a new swift ring with given parameters."""
|
||||
from swift.common.ring import RingBuilder
|
||||
ring = RingBuilder(part_power, replicas, min_hours)
|
||||
_write_ring(ring, path)
|
||||
|
||||
|
||||
def exists_in_ring(ring_path, node):
|
||||
"""Return boolean True if the node exists in the ring defined by the
|
||||
ring_path.
|
||||
|
||||
:param ring_path: the file representing the ring
|
||||
:param node: a dictionary of the node (ip, region, port, zone, weight,
|
||||
device)
|
||||
:returns: boolean
|
||||
"""
|
||||
ring = _load_builder(ring_path).to_dict()
|
||||
|
||||
for dev in ring['devs']:
|
||||
# Devices in the ring can be None if there are holes from previously
|
||||
# removed devices so skip any that are None.
|
||||
if not dev:
|
||||
continue
|
||||
d = [(i, dev[i]) for i in dev if i in node and i != 'zone']
|
||||
n = [(i, node[i]) for i in node if i in dev and i != 'zone']
|
||||
if sorted(d) == sorted(n):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def add_dev(ring_path, dev):
|
||||
"""Add a new device to the ring_path
|
||||
|
||||
The dev is in the form of:
|
||||
|
||||
new_dev = {
|
||||
'zone': node['zone'],
|
||||
'ip': node['ip'],
|
||||
'port': port,
|
||||
'device': node['device'],
|
||||
'weight': 100,
|
||||
'meta': '',
|
||||
}
|
||||
|
||||
:param ring_path: a ring_path for _load_builder
|
||||
:param dev: the device in the above format
|
||||
"""
|
||||
ring = _load_builder(ring_path)
|
||||
ring.add_dev(dev)
|
||||
_write_ring(ring, ring_path)
|
||||
|
||||
|
||||
def get_min_part_hours(ring_path):
|
||||
"""Get the min_part_hours for a ring
|
||||
|
||||
:param ring_path: The path for the ring
|
||||
:returns: integer that is the min_part_hours
|
||||
"""
|
||||
builder = _load_builder(ring_path)
|
||||
return builder.min_part_hours
|
||||
|
||||
|
||||
def get_zone(ring_path):
|
||||
"""Determine the zone for the ring_path
|
||||
|
||||
If there is no zone in the ring's devices, then simply return 1 as the
|
||||
first zone.
|
||||
|
||||
Otherwise, return the lowest numerically ordered unique zone being used
|
||||
across the devices of the ring if the number of unique zones is less than
|
||||
the number of replicas for that ring.
|
||||
|
||||
If the replicas are >= the number of unique zones, then, if all the zones
are used equally, start again at 1.
|
||||
|
||||
Otherwise, if the zones aren't used equally, return the least used zone
across the devices.
|
||||
|
||||
:param ring_path: The path to the ring to get the zone for.
|
||||
:returns: <integer> zone id
|
||||
"""
|
||||
builder = _load_builder(ring_path)
|
||||
replicas = builder.replicas
|
||||
zones = [d['zone'] for d in builder.devs]
|
||||
if not zones:
|
||||
return 1
|
||||
|
||||
# zones is a per-device list, so we may have one
|
||||
# node with 3 devices in zone 1. For balancing
|
||||
# we need to track the unique zones being used
|
||||
# not necessarily the number of devices
|
||||
unique_zones = list(set(zones))
|
||||
if len(unique_zones) < replicas:
|
||||
return sorted(unique_zones).pop() + 1
|
||||
|
||||
zone_distrib = {}
|
||||
for z in zones:
|
||||
zone_distrib[z] = zone_distrib.get(z, 0) + 1
|
||||
|
||||
if len(set(zone_distrib.values())) == 1:
|
||||
# all zones are equal, start assigning to zone 1 again.
|
||||
return 1
|
||||
|
||||
return sorted(zone_distrib, key=zone_distrib.get).pop(0)
|
||||
|
||||
|
||||
def has_minimum_zones(rings):
|
||||
"""Determine if enough zones exist to satisfy minimum replicas
|
||||
|
||||
Returns a structure with:
|
||||
|
||||
{
|
||||
"result": boolean,
|
||||
"log": <Not present> | string to log to the debug_log
|
||||
"level": <string>
|
||||
}
|
||||
|
||||
:param rings: list of strings of the ring_path
|
||||
:returns: structure with boolean and possible log
|
||||
"""
|
||||
for ring in rings:
|
||||
if not os.path.isfile(ring):
|
||||
return {
|
||||
"result": False
|
||||
}
|
||||
builder = _load_builder(ring).to_dict()
|
||||
replicas = builder['replicas']
|
||||
zones = [dev['zone'] for dev in builder['devs'] if dev]
|
||||
num_zones = len(set(zones))
|
||||
if num_zones < replicas:
|
||||
log = ("Not enough zones ({:d}) defined to satisfy minimum "
|
||||
"replicas (need >= {:d})".format(num_zones, replicas))
|
||||
return {
|
||||
"result": False,
|
||||
"log": log,
|
||||
"level": "INFO",
|
||||
}
|
||||
|
||||
return {
|
||||
"result": True
|
||||
}
|
||||
|
||||
|
||||
# These are utility functions that are for the 'API' functions above (i.e. they
|
||||
# are not called from the main function)
|
||||
|
||||
def _load_builder(path):
|
||||
# lifted straight from /usr/bin/swift-ring-builder
|
||||
from swift.common.ring import RingBuilder
|
||||
try:
|
||||
builder = pickle.load(open(path, 'rb'))
|
||||
if not hasattr(builder, 'devs'):
|
||||
builder_dict = builder
|
||||
builder = RingBuilder(1, 1, 1)
|
||||
builder.copy_from(builder_dict)
|
||||
except ImportError: # Happens with really old builder pickles
|
||||
builder = RingBuilder(1, 1, 1)
|
||||
builder.copy_from(pickle.load(open(path, 'rb')))
|
||||
for dev in builder.devs:
|
||||
if dev and 'meta' not in dev:
|
||||
dev['meta'] = ''
|
||||
|
||||
return builder
|
||||
|
||||
|
||||
def _write_ring(ring, ring_path):
|
||||
with open(ring_path, "wb") as fd:
|
||||
pickle.dump(ring.to_dict(), fd, protocol=2)
|
||||
|
||||
|
||||
# The following code is just the glue to link the manager.py and swift_utils.py
|
||||
# files together at a 'python' function level.
|
||||
|
||||
|
||||
class ManagerException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# This script needs 1 argument which is the input json. See file header
|
||||
# for details on how it is called. It prints a JSON encoded result to
# STDOUT.
|
||||
result = None
|
||||
try:
|
||||
if len(sys.argv) != 2:
|
||||
raise ManagerException(
|
||||
"{} called without 2 arguments: must pass the filename"
|
||||
.format(__file__))
|
||||
spec = json.loads(sys.argv[1])
|
||||
_callable = sys.modules[__name__]
|
||||
for attr in spec['path']:
|
||||
_callable = getattr(_callable, attr)
|
||||
# now make the call and return the arguments
|
||||
result = {'result': _callable(*spec['args'], **spec['kwargs'])}
|
||||
except ManagerException as e:
|
||||
# deal with sending an error back.
|
||||
print(str(e), file=sys.stderr)
|
||||
import traceback
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
result = {'error': str(e)}
|
||||
except Exception as e:
|
||||
print("{}: something went wrong: {}".format(__file__, str(e)),
|
||||
file=sys.stderr)
|
||||
import traceback
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
result = {'error': str(e)}
|
||||
finally:
|
||||
if result is not None:
|
||||
result_json = json.dumps(result, **JSON_ENCODE_OPTIONS)
|
||||
print(result_json)
|
||||
|
||||
# normal exit
|
||||
sys.exit(0)
|
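The header docstring of swift_manager/manager.py above describes a JSON request/response contract between the charm and this script. As a hedged illustration of driving the script directly (the ring path and the initialize_ring parameters are example values, not taken from the commit):

import json
import subprocess

# Illustrative request in the format documented at the top of manager.py;
# initialize_ring(path, part_power, replicas, min_hours) returns None.
request = {
    "path": ["initialize_ring"],
    "args": ["/etc/swift/account.builder", 8, 3, 1],
    "kwargs": {},
}
out = subprocess.check_output(
    ["swift_manager/manager.py", json.dumps(request)])
print(json.loads(out.decode("UTF-8")))
# prints {"result": null} on success, or {"error": "..."} on failure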
swift_manager/test_manager.py (new file, 120 lines)
@@ -0,0 +1,120 @@
import mock
|
||||
import unittest
|
||||
|
||||
import manager
|
||||
|
||||
|
||||
def create_mock_load_builder_fn(mock_rings):
|
||||
"""To avoid the need for swift.common.ring library, mock a basic rings
|
||||
dictionary, keyed by path. Each ring has enough logic to hold a dictionary
|
||||
with a single 'devs' key, which stores the list of passed dev(s) by
|
||||
add_dev().
|
||||
|
||||
If swift (actual) ring representation diverges (see _load_builder),
|
||||
this mock will need to be adapted.
|
||||
|
||||
:param mock_rings: a dict containing the dict form of the rings
|
||||
"""
|
||||
def mock_load_builder_fn(path):
class mock_ring(object):
def __init__(self, path):
self.path = path

def to_dict(self):
return mock_rings[self.path]

def add_dev(self, dev):
mock_rings[self.path]['devs'].append(dev)

return mock_ring(path)
return mock_load_builder_fn


MOCK_SWIFT_RINGS = {
'account': 'account.builder',
'container': 'container.builder',
'object': 'object.builder'
}


class TestSwiftManager(unittest.TestCase):

@mock.patch('os.path.isfile')
@mock.patch.object(manager, '_load_builder')
def test_has_minimum_zones(self, mock_load_builder, mock_is_file):
mock_rings = {}

mock_load_builder.side_effect = create_mock_load_builder_fn(mock_rings)
for ring in MOCK_SWIFT_RINGS:
mock_rings[ring] = {
'replicas': 3,
'devs': [{'zone': 1}, {'zone': 2}, None, {'zone': 3}],
}
ret = manager.has_minimum_zones(MOCK_SWIFT_RINGS)
self.assertTrue(ret['result'])

# Increase the replicas to make sure that it returns false
for ring in MOCK_SWIFT_RINGS:
mock_rings[ring]['replicas'] = 4

ret = manager.has_minimum_zones(MOCK_SWIFT_RINGS)
self.assertFalse(ret['result'])

@mock.patch.object(manager, '_load_builder')
def test_exists_in_ring(self, mock_load_builder):
mock_rings = {}

mock_load_builder.side_effect = create_mock_load_builder_fn(mock_rings)
ring = 'account'
mock_rings[ring] = {
'devs': [
{'replication_port': 6000, 'zone': 1, 'weight': 100.0,
'ip': '172.16.0.2', 'region': 1, 'port': 6000,
'replication_ip': '172.16.0.2', 'parts': 2, 'meta': '',
'device': u'bcache10', 'parts_wanted': 0, 'id': 199},
None,  # Ring can have holes, so add None to simulate
{'replication_port': 6000, 'zone': 1, 'weight': 100.0,
'ip': '172.16.0.2', 'region': 1, 'id': 198,
'replication_ip': '172.16.0.2', 'parts': 2, 'meta': '',
'device': u'bcache13', 'parts_wanted': 0, 'port': 6000},
]
}

node = {
'ip': '172.16.0.2',
'region': 1,
'account_port': 6000,
'zone': 1,
'replication_port': 6000,
'weight': 100.0,
'device': u'bcache10',
}

ret = manager.exists_in_ring(ring, node)
self.assertTrue(ret)

node['region'] = 2
ret = manager.exists_in_ring(ring, node)
self.assertFalse(ret)

@mock.patch.object(manager, '_write_ring')
@mock.patch.object(manager, '_load_builder')
def test_add_dev(self, mock_load_builder, mock_write_ring):
mock_rings = {}
mock_load_builder.side_effect = create_mock_load_builder_fn(mock_rings)
ring = 'account'
mock_rings[ring] = {
'devs': []
}

new_dev = {
'meta': '',
'zone': 1,
'ip': '172.16.0.2',
'device': '/dev/sdb',
'port': 6000,
'weight': 100
}
manager.add_dev(ring, new_dev)
mock_write_ring.assert_called_once()
self.assertTrue('id' not in mock_rings[ring]['devs'][0])
@ -5,12 +5,15 @@ coverage>=3.6
mock>=1.2
flake8>=2.2.4,<=2.4.1
os-testr>=0.4.1
charm-tools>=2.0.0
charm-tools>=2.0.0;python_version=='2.7' # cheetah templates aren't available in Python 3+
requests==2.6.0
# BEGIN: Amulet OpenStack Charm Helper Requirements
# Liberty client lower constraints
# The websocket-client issue should be resolved in the jujulib/theblues
# Temporarily work around it
websocket-client<=0.40.0
amulet>=1.14.3,<2.0
bundletester>=0.6.1,<1.0
bundletester>=0.6.1,<1.0;python_version=='2.7' # cheetah templates aren't available in Python 3+
python-ceilometerclient>=1.5.0
python-cinderclient>=1.4.0
python-glanceclient>=1.1.0
@ -532,7 +532,7 @@ class SwiftProxyBasicDeployment(OpenStackAmuletDeployment):
'admin_token': keystone_relation['admin_token']
}

for section, pairs in expected.iteritems():
for section, pairs in expected.items():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "proxy-server config error: {}".format(ret)

@ -596,13 +596,13 @@ class SwiftProxyBasicDeployment(OpenStackAmuletDeployment):
if not (ks_gl_rel['api_version'] == api_version and
ks_sw_rel['api_version'] == api_version):
u.log.info("change of api_version not propagated yet "
"retries left: '%d' "
"glance:identity-service api_version: '%s' "
"swift-proxy:identity-service api_version: '%s' "
% (i,
ks_gl_rel['api_version'],
ks_sw_rel['api_version']))
u.log.info("sleeping %d seconds..." % i)
"retries left: '{}' "
"glance:identity-service api_version: '{}' "
"swift-proxy:identity-service api_version: '{}' "
.format(i,
ks_gl_rel['api_version'],
ks_sw_rel['api_version']))
u.log.info("sleeping {} seconds...".format(i))
time.sleep(i)
elif not u.validate_service_config_changed(
self.swift_proxy_sentry,

@ -655,7 +655,7 @@ class SwiftProxyBasicDeployment(OpenStackAmuletDeployment):
self.d.configure(juju_service, set_alternate)

sleep_time = 40
for s, conf_file in services.iteritems():
for s, conf_file in services.items():
u.log.debug("Checking that service restarted: {}".format(s))
if not u.validate_service_config_changed(sentry, mtime, s,
conf_file,
@ -13,6 +13,7 @@
# limitations under the License.

import logging
import os
import re
import sys
import six

@ -185,7 +186,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.d.configure(service, config)

def _auto_wait_for_status(self, message=None, exclude_services=None,
include_only=None, timeout=1800):
include_only=None, timeout=None):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.

@ -215,7 +216,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
self.log.info('Waiting for extended status on units...')
if not timeout:
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
self.log.info('Waiting for extended status on units for {}s...'
''.format(timeout))

all_services = self.d.services.keys()

@ -252,9 +256,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
service_messages = {service: message for service in services}

# Check for idleness
self.d.sentry.wait()
self.d.sentry.wait(timeout=timeout)
# Check for error states and bail early
self.d.sentry.wait_for_status(self.d.juju_env, services)
self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
# Check for ready messages
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
@ -377,12 +377,12 @@ def get_mon_map(service):
try:
return json.loads(mon_status)
except ValueError as v:
log("Unable to parse mon_status json: {}. Error: {}".format(
mon_status, v.message))
log("Unable to parse mon_status json: {}. Error: {}"
.format(mon_status, str(v)))
raise
except CalledProcessError as e:
log("mon_status command failed with message: {}".format(
e.message))
log("mon_status command failed with message: {}"
.format(str(e)))
raise
@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
if six.PY3 and isinstance(content, six.string_types):
content = content.encode('UTF-8')
target.write(content)
return
# the contents were the same, but we might still need to change the
10
tox.ini
@ -2,8 +2,9 @@
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos.
[tox]
envlist = pep8,py27
envlist = pep8,py27,py35,py36
skipsdist = True
skip_missing_interpreters = True

[testenv]
setenv = VIRTUAL_ENV={envdir}

@ -18,14 +19,21 @@ passenv = HOME TERM AMULET_* CS_API_*

[testenv:py27]
basepython = python2.7
changedir = swift_manager
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands = ostestr --path . {posargs}

[testenv:py35]
basepython = python3.5
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt

[testenv:py36]
basepython = python3.6
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt

[testenv:pep8]
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
@ -11,3 +11,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

_path = os.path.dirname(os.path.realpath(__file__))
_parent = os.path.abspath(os.path.join(_path, '..'))


def _add_path(path):
if path not in sys.path:
sys.path.insert(1, path)


_add_path(_parent)
@ -156,7 +156,8 @@ class GetActionParserTestCase(unittest.TestCase):
"""ArgumentParser is seeded from actions.yaml."""
actions_yaml = tempfile.NamedTemporaryFile(
prefix="GetActionParserTestCase", suffix="yaml")
actions_yaml.write(yaml.dump({"foo": {"description": "Foo is bar"}}))
actions_yaml.write(
yaml.dump({"foo": {"description": "Foo is bar"}}).encode('UTF-8'))
actions_yaml.seek(0)
parser = actions.actions.get_action_parser(actions_yaml.name, "foo",
get_services=lambda: [])

@ -236,9 +237,8 @@ class AddUserTestCase(CharmTestCase):
self.determine_api_port.return_value = 8070
self.CalledProcessError = ValueError

self.check_call.side_effect = subprocess.CalledProcessError(0,
"hi",
"no")
self.check_call.side_effect = subprocess.CalledProcessError(
0, "hi", "no")
actions.add_user.add_user()
self.leader_get.assert_called_with("swauth-admin-key")
calls = [call("account"), call("username"), call("password")]

@ -246,26 +246,28 @@ class AddUserTestCase(CharmTestCase):
self.action_set.assert_not_called()

self.action_fail.assert_called_once_with(
'Adding user test failed with: ""')
'Adding user test failed with: "Command \'hi\' returned non-zero '
'exit status 0"')


class DiskUsageTestCase(CharmTestCase):

TEST_RECON_OUTPUT = '===================================================' \
'============================\n--> Starting ' \
'reconnaissance on 9 hosts\n========================' \
'===================================================' \
'====\n[2017-11-03 21:50:30] Checking disk usage now' \
'\nDistribution Graph:\n 40% 108 ******************' \
'***************************************************' \
'\n 41% 15 *********\n 42% 50 ******************' \
'*************\n 43% 5 ***\n 44% 1 \n 45% ' \
'1 \nDisk usage: space used: 89358060716032 of ' \
'215829411840000\nDisk usage: space free: ' \
'126471351123968 of 215829411840000\nDisk usage: ' \
'lowest: 40.64%, highest: 45.63%, avg: ' \
'41.4021703318%\n===================================' \
'============================================\n'
TEST_RECON_OUTPUT = (
b'==================================================='
b'============================\n--> Starting '
b'reconnaissance on 9 hosts\n========================'
b'==================================================='
b'====\n[2017-11-03 21:50:30] Checking disk usage now'
b'\nDistribution Graph:\n 40% 108 ******************'
b'***************************************************'
b'\n 41% 15 *********\n 42% 50 ******************'
b'*************\n 43% 5 ***\n 44% 1 \n 45% '
b'1 \nDisk usage: space used: 89358060716032 of '
b'215829411840000\nDisk usage: space free: '
b'126471351123968 of 215829411840000\nDisk usage: '
b'lowest: 40.64%, highest: 45.63%, avg: '
b'41.4021703318%\n==================================='
b'============================================\n')

TEST_RESULT = ['Disk usage: space used: 83221GB of 201006GB',
'Disk usage: space free: 117785GB of 201006GB',

@ -278,7 +280,7 @@ class DiskUsageTestCase(CharmTestCase):

def test_success(self):
"""Ensure that the action_set is called on success."""
self.check_output.return_value = 'Swift recon ran OK'
self.check_output.return_value = b'Swift recon ran OK'
actions.actions.diskusage([])
self.check_output.assert_called_once_with(['swift-recon', '-d'])
@ -64,12 +64,10 @@ class TestSwiftUpgradeActions(CharmTestCase):
super(TestSwiftUpgradeActions, self).setUp(openstack_upgrade,
TO_PATCH)

@patch('actions.charmhelpers.contrib.openstack.utils.config')
@patch('actions.charmhelpers.contrib.openstack.utils.action_set')
@patch('actions.charmhelpers.contrib.openstack.utils.'
'git_install_requested')
@patch('actions.charmhelpers.contrib.openstack.utils.'
'openstack_upgrade_available')
@patch('charmhelpers.contrib.openstack.utils.config')
@patch('charmhelpers.contrib.openstack.utils.action_set')
@patch('charmhelpers.contrib.openstack.utils.git_install_requested')
@patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
def test_openstack_upgrade_true(self, upgrade_avail, git_requested,
action_set, config):
git_requested.return_value = False

@ -81,12 +79,10 @@ class TestSwiftUpgradeActions(CharmTestCase):
self.assertTrue(self.do_openstack_upgrade.called)
self.assertTrue(self.config_changed.called)

@patch('actions.charmhelpers.contrib.openstack.utils.config')
@patch('actions.charmhelpers.contrib.openstack.utils.action_set')
@patch('actions.charmhelpers.contrib.openstack.utils.'
'git_install_requested')
@patch('actions.charmhelpers.contrib.openstack.utils.'
'openstack_upgrade_available')
@patch('charmhelpers.contrib.openstack.utils.config')
@patch('charmhelpers.contrib.openstack.utils.action_set')
@patch('charmhelpers.contrib.openstack.utils.git_install_requested')
@patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
def test_openstack_upgrade_false(self, upgrade_avail, git_requested,
action_set, config):
git_requested.return_value = False
@ -109,7 +109,7 @@ class SwiftContextTestCase(unittest.TestCase):
expected = '##FILEHASH##'
with tempfile.NamedTemporaryFile() as tmpfile:
swift_context.SWIFT_HASH_FILE = tmpfile.name
tmpfile.write(expected)
tmpfile.write(expected.encode('UTF-8'))
tmpfile.seek(0)
os.fsync(tmpfile)
hash = swift_context.get_swift_hash()
@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib
import sys
import uuid

@ -23,24 +24,25 @@ from mock import (
MagicMock,
)

sys.path.append("hooks")

# python-apt is not installed as part of test-requirements but is imported by
# some charmhelpers modules so create a fake import.
sys.modules['apt'] = MagicMock()
sys.modules['apt_pkg'] = MagicMock()

with patch('hooks.charmhelpers.contrib.hardening.harden.harden') as mock_dec, \
patch('hooks.charmhelpers.core.hookenv.log'):
with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec, \
patch('charmhelpers.core.hookenv.log'), \
patch('lib.swift_utils.register_configs'):
mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
lambda *args, **kwargs: f(*args, **kwargs))
import swift_hooks
import hooks.swift_hooks as swift_hooks
importlib.reload(swift_hooks)


# @unittest.skip("debugging ...")
class SwiftHooksTestCase(unittest.TestCase):

@patch("swift_hooks.relation_get")
@patch("swift_hooks.local_unit")
@patch.object(swift_hooks, "relation_get")
@patch.object(swift_hooks, "local_unit")
def test_is_all_peers_stopped(self, mock_local_unit, mock_relation_get):
token1 = str(uuid.uuid4())
token2 = str(uuid.uuid4())
@ -26,8 +26,8 @@ with mock.patch('charmhelpers.core.hookenv.config'):

def init_ring_paths(tmpdir):
swift_utils.SWIFT_CONF_DIR = tmpdir
for ring in swift_utils.SWIFT_RINGS.iterkeys():
path = os.path.join(tmpdir, "%s.builder" % ring)
for ring in swift_utils.SWIFT_RINGS.keys():
path = os.path.join(tmpdir, "{}.builder".format(ring))
swift_utils.SWIFT_RINGS[ring] = path
with open(path, 'w') as fd:
fd.write("0\n")

@ -117,32 +117,18 @@ class SwiftUtilsTestCase(unittest.TestCase):
self.assertTrue(mock_balance_rings.called)

@mock.patch('lib.swift_utils.previously_synced')
@mock.patch('lib.swift_utils._load_builder')
@mock.patch('lib.swift_utils.initialize_ring')
@mock.patch('lib.swift_utils.update_www_rings')
@mock.patch('lib.swift_utils.get_builders_checksum')
@mock.patch('lib.swift_utils.get_rings_checksum')
@mock.patch('lib.swift_utils.balance_rings')
@mock.patch('lib.swift_utils.log')
@mock.patch('lib.swift_utils.add_to_ring')
@mock.patch('lib.swift_utils.exists_in_ring')
@mock.patch('lib.swift_utils.is_elected_leader')
def test_update_rings_multiple_devs(self, mock_is_elected_leader,
mock_log, mock_balance_rings,
mock_get_rings_checksum,
mock_get_builders_checksum,
mock_update_www_rings,
mock_initialize_ring,
mock_load_builder,
def test_update_rings_multiple_devs(self,
mock_is_leader_elected,
mock_exists_in_ring,
mock_add_to_ring,
mock_balance_rings,
mock_previously_synced):
mock_rings = {}

def mock_initialize_ring_fn(path, *args):
mock_rings.setdefault(path, {'devs': []})

mock_is_elected_leader.return_value = True
mock_load_builder.side_effect = create_mock_load_builder_fn(mock_rings)
mock_initialize_ring.side_effect = mock_initialize_ring_fn

init_ring_paths(tempfile.mkdtemp())
# note that this test does not (and neither did its predecessor) test
# the 'min_part_hours is not None' part of update_rings()
devices = ['sdb', 'sdc']
node_settings = {
'object_port': 6000,
@ -151,26 +137,81 @@ class SwiftUtilsTestCase(unittest.TestCase):
'zone': 1,
'ip': '1.2.3.4',
}
for path in swift_utils.SWIFT_RINGS.itervalues():
swift_utils.initialize_ring(path, 8, 3, 0)

# verify all devices added to each ring
nodes = []
for dev in devices:
node = {k: v for k, v in node_settings.items()}
node = node_settings.copy()
node['device'] = dev
nodes.append(node)

mock_is_leader_elected.return_value = True
mock_previously_synced.return_value = True
mock_exists_in_ring.side_effect = lambda *args: False

swift_utils.update_rings(nodes)
for path in swift_utils.SWIFT_RINGS.itervalues():
devs = swift_utils._load_builder(path).to_dict()['devs']
added_devices = [dev['device'] for dev in devs]
self.assertEqual(devices, added_devices)
calls = [mock.call(os.path.join(swift_utils.SWIFT_CONF_DIR,
'account.builder'),
{
'zone': 1,
'object_port': 6000,
'ip': '1.2.3.4',
'container_port': 6001,
'device': 'sdb',
'account_port': 6002}),
mock.call(os.path.join(swift_utils.SWIFT_CONF_DIR,
'container.builder'),
{
'zone': 1,
'object_port': 6000,
'ip': '1.2.3.4',
'container_port': 6001,
'device': 'sdb',
'account_port': 6002}),
mock.call(os.path.join(swift_utils.SWIFT_CONF_DIR,
'object.builder'),
{
'zone': 1,
'object_port': 6000,
'ip': '1.2.3.4',
'container_port': 6001,
'device': 'sdb',
'account_port': 6002}),
mock.call(os.path.join(swift_utils.SWIFT_CONF_DIR,
'account.builder'),
{
'zone': 1,
'object_port': 6000,
'ip': '1.2.3.4',
'container_port': 6001,
'device': 'sdc',
'account_port': 6002}),
mock.call(os.path.join(swift_utils.SWIFT_CONF_DIR,
'container.builder'),
{
'zone': 1,
'object_port': 6000,
'ip': '1.2.3.4',
'container_port': 6001,
'device': 'sdc',
'account_port': 6002}),
mock.call(os.path.join(swift_utils.SWIFT_CONF_DIR,
'object.builder'),
{
'zone': 1,
'object_port': 6000,
'ip': '1.2.3.4',
'container_port': 6001,
'device': 'sdc',
'account_port': 6002})]
mock_exists_in_ring.assert_has_calls(calls)
mock_balance_rings.assert_called_once_with()
mock_add_to_ring.assert_called()

# try re-adding, assert add_to_ring was not called
with mock.patch('lib.swift_utils.add_to_ring') as mock_add_to_ring:
swift_utils.update_rings(nodes)
self.assertFalse(mock_add_to_ring.called)
mock_add_to_ring.reset_mock()
mock_exists_in_ring.side_effect = lambda *args: True
swift_utils.update_rings(nodes)
mock_add_to_ring.assert_not_called()

@mock.patch('lib.swift_utils.balance_rings')
@mock.patch('lib.swift_utils.log')

@ -187,9 +228,9 @@ class SwiftUtilsTestCase(unittest.TestCase):

@swift_utils.sync_builders_and_rings_if_changed
def mock_balance():
for ring, builder in swift_utils.SWIFT_RINGS.iteritems():
for ring, builder in swift_utils.SWIFT_RINGS.items():
ring = os.path.join(swift_utils.SWIFT_CONF_DIR,
'%s.ring.gz' % ring)
'{}.ring.gz'.format(ring))
with open(ring, 'w') as fd:
fd.write(str(uuid.uuid4()))

@ -370,53 +411,9 @@ class SwiftUtilsTestCase(unittest.TestCase):
mock_rel_get.return_value = {'broker-timestamp': '1234'}
self.assertTrue(swift_utils.timestamps_available('proxy/2'))

@mock.patch.object(swift_utils, '_load_builder')
def test_exists_in_ring(self, mock_load_builder):
mock_rings = {}

mock_load_builder.side_effect = create_mock_load_builder_fn(mock_rings)
@mock.patch.object(swift_utils, 'get_manager')
def test_add_to_ring(self, mock_get_manager):
ring = 'account'
mock_rings[ring] = {
'devs': [
{'replication_port': 6000, 'zone': 1, 'weight': 100.0,
'ip': '172.16.0.2', 'region': 1, 'port': 6000,
'replication_ip': '172.16.0.2', 'parts': 2, 'meta': '',
'device': u'bcache10', 'parts_wanted': 0, 'id': 199},
None,  # Ring can have holes, so add None to simulate
{'replication_port': 6000, 'zone': 1, 'weight': 100.0,
'ip': '172.16.0.2', 'region': 1, 'id': 198,
'replication_ip': '172.16.0.2', 'parts': 2, 'meta': '',
'device': u'bcache13', 'parts_wanted': 0, 'port': 6000},
]
}

node = {
'ip': '172.16.0.2',
'region': 1,
'account_port': 6000,
'zone': 1,
'replication_port': 6000,
'weight': 100.0,
'device': u'bcache10',
}

ret = swift_utils.exists_in_ring(ring, node)
self.assertTrue(ret)

node['region'] = 2
ret = swift_utils.exists_in_ring(ring, node)
self.assertFalse(ret)

@mock.patch.object(swift_utils, '_write_ring')
@mock.patch.object(swift_utils, '_load_builder')
def test_add_to_ring(self, mock_load_builder, mock_write_ring):
mock_rings = {}
mock_load_builder.side_effect = create_mock_load_builder_fn(mock_rings)
ring = 'account'
mock_rings[ring] = {
'devs': []
}

node = {
'ip': '172.16.0.2',
'region': 1,

@ -424,31 +421,15 @@ class SwiftUtilsTestCase(unittest.TestCase):
'zone': 1,
'device': '/dev/sdb',
}

swift_utils.add_to_ring(ring, node)
mock_write_ring.assert_called_once()
self.assertTrue('id' not in mock_rings[ring]['devs'][0])

@mock.patch('os.path.isfile')
@mock.patch.object(swift_utils, '_load_builder')
def test_has_minimum_zones(self, mock_load_builder, mock_is_file):
mock_rings = {}

mock_load_builder.side_effect = create_mock_load_builder_fn(mock_rings)
for ring in swift_utils.SWIFT_RINGS:
mock_rings[ring] = {
'replicas': 3,
'devs': [{'zone': 1}, {'zone': 2}, None, {'zone': 3}],
}
ret = swift_utils.has_minimum_zones(swift_utils.SWIFT_RINGS)
self.assertTrue(ret)

# Increase the replicas to make sure that it returns false
for ring in swift_utils.SWIFT_RINGS:
mock_rings[ring]['replicas'] = 4

ret = swift_utils.has_minimum_zones(swift_utils.SWIFT_RINGS)
self.assertFalse(ret)
mock_get_manager().add_dev.assert_called_once_with('account', {
'meta': '',
'zone': 1,
'ip': '172.16.0.2',
'device': '/dev/sdb',
'port': 6000,
'weight': 100
})

@mock.patch('lib.swift_utils.config')
@mock.patch('lib.swift_utils.set_os_workload_status')