Merge "py3: Switch to using Python 3 for rocky or later"

Zuul 2018-09-21 10:19:54 +00:00 committed by Gerrit Code Review
commit 82ac621a00
18 changed files with 882 additions and 106 deletions

View File

@@ -14,24 +14,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import traceback
import subprocess
sys.path.append('hooks/')
from cinder import context
from cinder import db
from cinder.db.sqlalchemy.api import model_query, get_session
from cinder.db.sqlalchemy import models
from charmhelpers.contrib.openstack.utils import (
os_release,
CompareOpenStackReleases,
)
from sqlalchemy import and_
from charmhelpers.core.hookenv import (
action_set,
action_fail,
@@ -44,16 +37,16 @@ DEFAULT_SERVICES = (
"cinder@cinder-ceph",
)
try:
from cinder import flags
cfg = flags.FLAGS
except ImportError:
from cinder.common.config import CONF
cfg = CONF
class CinderService():
def load_config_file(conf):
cfg(args=[], project='cinder', default_config_files=[conf])
def __init__(self, binary, host,
zone, status, state):
self.binary = binary
self.host = host
self.zone = zone
self.status = status
self.active = (state != 'XXX')
def cinder_manage_remove(binary, hostname):
@@ -67,31 +60,35 @@ def cinder_manage_volume_update_host(currenthost, newhost):
"--newhost", newhost])
def remove_services(args):
load_config_file(os.path.join(os.path.sep, "etc", "cinder", "cinder.conf"))
def cinder_manage_service_list():
service_list = []
services = subprocess.check_output(
["cinder-manage", "service", "list"]).decode('UTF-8')
for service in services.splitlines():
if not service.startswith('cinder'):
continue
service_list.append(CinderService(*service.split()[:5]))
return service_list
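Aside: the parsing above keys off the first five whitespace-separated columns of a `cinder-manage service list` row. A minimal sketch of the mapping, using a row from the sample output in the unit tests later in this commit:

# Illustrative only; the row comes from SERVICE_LIST in the new unit test.
row = "cinder-volume cinder@cinder-ceph nova enabled :-) 2018-09-07 09:56:02 3.16 1.37"
binary, host, zone, status, state = row.split()[:5]
svc = CinderService(binary, host, zone, status, state)
assert svc.active  # state is ':-)' rather than 'XXX', so the service is up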
def remove_services(args):
host = action_get(key="host")
services = model_query({}, models.Service, read_deleted="no",
session=get_session())
services = cinder_manage_service_list()
if host not in ("unused", "",):
services = services.filter(models.Service.host == host)
services = [s for s in services if s.host == host]
else:
ands = []
for service in DEFAULT_SERVICES:
ands.append(and_(models.Service.host != service))
services = services.filter(*ands)
services = [s for s in services if s.host not in DEFAULT_SERVICES]
removed_services = []
ctxt = context.get_admin_context()
for service in services.all():
log("Removing service:%d, hostname:%s" % (service.id, service.host))
for service in services:
log("Removing binary:%s, hostname:%s" % (service.binary, service.host))
try:
if CompareOpenStackReleases(os_release("cinder")) >= "liberty":
cinder_manage_remove(service.binary, service.host)
else:
db.service_destroy(ctxt, service.id)
action_fail("Cannot remove service: %s" % service.host)
except:
action_set({'traceback': traceback.format_exc()})
action_fail("Cannot remove service: %s" % service.host)
@@ -102,11 +99,9 @@ def remove_services(args):
def _rename_volume_host(currenthost, newhost):
load_config_file(os.path.join(os.path.sep, "etc", "cinder", "cinder.conf"))
services = model_query({}, models.Service, read_deleted="no",
session=get_session())
services = services.filter(models.Service.host == currenthost)
if services.all():
services = cinder_manage_service_list()
services = [s for s in services if s.host == currenthost]
if services:
try:
cinder_manage_volume_update_host(currenthost, newhost)
except:

View File

@@ -24,7 +24,8 @@ import urlparse
import cinderclient.v1.client as cinder_client
import cinderclient.v2.client as cinder_clientv2
import glanceclient.v1.client as glance_client
import glanceclient.v1 as glance_client
import glanceclient.v2 as glance_clientv2
import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client
from keystoneauth1.identity import (
@@ -623,7 +624,7 @@ class OpenStackAmuletUtils(AmuletUtils):
ep = keystone.service_catalog.url_for(service_type='image',
interface='adminURL')
if keystone.session:
return glance_client.Client(ep, session=keystone.session)
return glance_clientv2.Client("2", session=keystone.session)
else:
return glance_client.Client(ep, token=keystone.auth_token)
@@ -711,10 +712,19 @@ class OpenStackAmuletUtils(AmuletUtils):
f.close()
# Create glance image
with open(local_path) as f:
if float(glance.version) < 2.0:
with open(local_path) as fimage:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
container_format='bare',
data=fimage)
else:
image = glance.images.create(
name=image_name,
disk_format="qcow2",
visibility="public",
container_format="bare")
glance.images.upload(image.id, open(local_path, 'rb'))
# Wait for image to reach active status
img_id = image.id
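One nit on the new v2 branch: it hands upload() an open file object that is never explicitly closed. A tidier variant, assuming the same glanceclient v2 API, would be:

# Equivalent to glance.images.upload(image.id, open(local_path, 'rb')),
# but closes the file handle deterministically.
with open(local_path, 'rb') as fimage:
    glance.images.upload(image.id, fimage)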
@@ -729,9 +739,14 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Validating image attributes...')
val_img_name = glance.images.get(img_id).name
val_img_stat = glance.images.get(img_id).status
val_img_pub = glance.images.get(img_id).is_public
val_img_cfmt = glance.images.get(img_id).container_format
val_img_dfmt = glance.images.get(img_id).disk_format
if float(glance.version) < 2.0:
val_img_pub = glance.images.get(img_id).is_public
else:
val_img_pub = glance.images.get(img_id).visibility == "public"
msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
'container fmt:{} disk fmt:{}'.format(
val_img_name, val_img_pub, img_id,
@@ -998,6 +1013,9 @@ class OpenStackAmuletUtils(AmuletUtils):
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
# Normalize Ceph Mimic's newline-separated 'ceph osd lspools' output
output = output.replace("\n", ",")
# Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
for pool in str(output).split(','):
pool_id_name = pool.split(' ')
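Context for this hunk: before Mimic, `ceph osd lspools` printed all pools comma-separated on one line; Mimic switched to one pool per line, so the replace() normalises both shapes. A small sketch of the transformation (pool names illustrative):

# Mimic-style output, one pool per line
output = "0 data\n1 metadata\n2 rbd"
output = output.replace("\n", ",")  # -> "0 data,1 metadata,2 rbd"
pools = dict(p.split(' ') for p in output.split(',') if p)
# pools == {'0': 'data', '1': 'metadata', '2': 'rbd'}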

View File

@@ -1389,11 +1389,12 @@ class WorkerConfigContext(OSContextGenerator):
class WSGIWorkerConfigContext(WorkerConfigContext):
def __init__(self, name=None, script=None, admin_script=None,
public_script=None, process_weight=1.00,
public_script=None, user=None, group=None,
process_weight=1.00,
admin_process_weight=0.25, public_process_weight=0.75):
self.service_name = name
self.user = name
self.group = name
self.user = user or name
self.group = group or name
self.script = script
self.admin_script = admin_script
self.public_script = public_script
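The effect of the new user/group parameters, as a hedged sketch (the cinder names are illustrative, not taken from this commit):

# Default: user and group fall back to the service name.
ctxt = WSGIWorkerConfigContext(name='cinder-api', script='/usr/bin/cinder-wsgi')
# ctxt.user == ctxt.group == 'cinder-api'

# Override: run the WSGI daemon processes as a different system user/group.
ctxt = WSGIWorkerConfigContext(name='cinder-api', script='/usr/bin/cinder-wsgi',
                               user='cinder', group='cinder')
# ctxt.user == 'cinder'; ctxt.group == 'cinder'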
@@ -1518,6 +1519,14 @@ class NeutronAPIContext(OSContextGenerator):
'rel_key': 'enable-qos',
'default': False,
},
'enable_nsg_logging': {
'rel_key': 'enable-nsg-logging',
'default': False,
},
'nsg_log_output_base': {
'rel_key': 'nsg-log-output-base',
'default': None,
},
}
ctxt = self.get_neutron_options({})
for rid in relation_ids('neutron-plugin-api'):

View File

@@ -0,0 +1,412 @@
# Copyright 2018 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from charmhelpers.core.hookenv import (
ERROR,
log,
relation_get,
)
from charmhelpers.contrib.network.ip import (
is_ipv6,
ns_query,
)
from charmhelpers.contrib.openstack.utils import (
get_hostname,
get_host_ip,
is_ip,
)
NOVA_SSH_DIR = '/etc/nova/compute_ssh/'
def ssh_directory_for_unit(application_name, user=None):
"""Return the directory used to store ssh assets for the application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Fully qualified directory path.
:rtype: str
"""
if user:
application_name = "{}_{}".format(application_name, user)
_dir = os.path.join(NOVA_SSH_DIR, application_name)
for d in [NOVA_SSH_DIR, _dir]:
if not os.path.isdir(d):
os.mkdir(d)
for f in ['authorized_keys', 'known_hosts']:
f = os.path.join(_dir, f)
if not os.path.isfile(f):
open(f, 'w').close()
return _dir
def known_hosts(application_name, user=None):
"""Return the known hosts file for the application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Fully qualified path to file.
:rtype: str
"""
return os.path.join(
ssh_directory_for_unit(application_name, user),
'known_hosts')
def authorized_keys(application_name, user=None):
"""Return the authorized keys file for the application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Fully qualified path to file.
:rtype: str
"""
return os.path.join(
ssh_directory_for_unit(application_name, user),
'authorized_keys')
def ssh_known_host_key(host, application_name, user=None):
"""Return the first entry in known_hosts for host.
:param host: hostname to lookup in file.
:type host: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Host key
:rtype: str or None
"""
cmd = [
'ssh-keygen',
'-f', known_hosts(application_name, user),
'-H',
'-F',
host]
try:
# The first line of output is like '# Host xx found: line 1 type RSA',
# which should be excluded.
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
# RC of 1 seems to be legitimate for most ssh-keygen -F calls.
if e.returncode == 1:
output = e.output
else:
raise
output = output.strip()
if output:
# Bug #1500589 cmd has 0 rc on precise if entry not present
lines = output.split('\n')
if len(lines) >= 1:
return lines[0]
return None
def remove_known_host(host, application_name, user=None):
"""Remove the entry in known_hosts for host.
:param host: hostname to lookup in file.
:type host: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
log('Removing SSH known host entry for compute host at %s' % host)
cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host]
subprocess.check_call(cmd)
def is_same_key(key_1, key_2):
"""Extract the key from two host entries and compare them.
:param key_1: Host key
:type key_1: str
:param key_2: Host key
:type key_2: str
"""
# The retrieved key entry will look like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'
# 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB'. We only compare the part
# starting with 'ssh-rsa' after the '= ', because the hashed hostname at
# the beginning changes on every scan.
k_1 = key_1.split('= ')[1]
k_2 = key_2.split('= ')[1]
return k_1 == k_2
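To make that concrete, a hedged example with two hashed known_hosts entries for the same host (salts and key material are made up and truncated):

# The '|1|salt|hash=' prefix differs per scan because the salt is random;
# only the 'ssh-rsa ...' tail is stable, and that is what gets compared.
key_1 = '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|CpEL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB3Nza'
key_2 = '|1|Fbl4rPqt7qkqesG2ivmuLvRYimo=|e99QF0ZIc39VoG2AhY/AFgHfkMk= ssh-rsa AAAAB3Nza'
assert is_same_key(key_1, key_2)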
def add_known_host(host, application_name, user=None):
"""Add the given host key to the known hosts file.
:param host: host name
:type host: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
try:
remote_key = subprocess.check_output(cmd).strip()
except Exception as e:
log('Could not obtain SSH host key from %s' % host, level=ERROR)
raise e
current_key = ssh_known_host_key(host, application_name, user)
if current_key and remote_key:
if is_same_key(remote_key, current_key):
log('Known host key for compute host %s up to date.' % host)
return
else:
remove_known_host(host, application_name, user)
log('Adding SSH host key to known hosts for compute node at %s.' % host)
with open(known_hosts(application_name, user), 'a') as out:
out.write("{}\n".format(remote_key))
def ssh_authorized_key_exists(public_key, application_name, user=None):
"""Check if given key is in the authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Whether given key is in the authorized_key file.
:rtype: boolean
"""
with open(authorized_keys(application_name, user)) as keys:
return ('%s' % public_key) in keys.read()
def add_authorized_key(public_key, application_name, user=None):
"""Add given key to the authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
with open(authorized_keys(application_name, user), 'a') as keys:
keys.write("{}\n".format(public_key))
def ssh_compute_add_host_and_key(public_key, hostname, private_address,
application_name, user=None):
"""Add a compute nodes ssh details to local cache.
Collect various hostname variations and add the corresponding host keys to
the local known hosts file. Finally, add the supplied public key to the
authorized_key file.
:param public_key: Public key.
:type public_key: str
:param hostname: Hostname to collect host keys from.
:type hostname: str
:param private_address: Corresponding private address for hostname
:type private_address: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
# If remote compute node hands us a hostname, ensure we have a
# known hosts entry for its IP, hostname and FQDN.
hosts = [private_address]
if not is_ipv6(private_address):
if hostname:
hosts.append(hostname)
if is_ip(private_address):
hn = get_hostname(private_address)
if hn:
hosts.append(hn)
short = hn.split('.')[0]
if ns_query(short):
hosts.append(short)
else:
hosts.append(get_host_ip(private_address))
short = private_address.split('.')[0]
if ns_query(short):
hosts.append(short)
for host in list(set(hosts)):
add_known_host(host, application_name, user)
if not ssh_authorized_key_exists(public_key, application_name, user):
log('Saving SSH authorized key for compute host at %s.' %
private_address)
add_authorized_key(public_key, application_name, user)
def ssh_compute_add(public_key, application_name, rid=None, unit=None,
user=None):
"""Add a compute nodes ssh details to local cache.
Collect various hostname variations and add the corresponding host keys to
the local known hosts file. Finally, add the supplied public key to the
authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param rid: Relation id of the relation between this charm and the app. If
none is supplied it is assumed to be the relation relating to
the current hook context.
:type rid: str
:param unit: Unit to add ssh assets for. If none is supplied it is
assumed to be the unit relating to the current hook context.
:type unit: str
:param user: The user that the ssh assets are for.
:type user: str
"""
relation_data = relation_get(rid=rid, unit=unit)
ssh_compute_add_host_and_key(
public_key,
relation_data.get('hostname'),
relation_data.get('private-address'),
application_name,
user=user)
def ssh_known_hosts_lines(application_name, user=None):
"""Return contents of known_hosts file for given application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
known_hosts_list = []
with open(known_hosts(application_name, user)) as hosts:
for hosts_line in hosts:
if hosts_line.rstrip():
known_hosts_list.append(hosts_line.rstrip())
return known_hosts_list
def ssh_authorized_keys_lines(application_name, user=None):
"""Return contents of authorized_keys file for given application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
authorized_keys_list = []
with open(authorized_keys(application_name, user)) as keys:
for authkey_line in keys:
if authkey_line.rstrip():
authorized_keys_list.append(authkey_line.rstrip())
return authorized_keys_list
def ssh_compute_remove(public_key, application_name, user=None):
"""Remove given public key from authorized_keys file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
if not (os.path.isfile(authorized_keys(application_name, user)) or
os.path.isfile(known_hosts(application_name, user))):
return
keys = ssh_authorized_keys_lines(application_name, user=user)
keys = [k.strip() for k in keys]
if public_key not in keys:
return
keys = [k for k in keys if k != public_key]
with open(authorized_keys(application_name, user), 'w') as _keys:
keys = '\n'.join(keys)
if not keys.endswith('\n'):
keys += '\n'
_keys.write(keys)
def get_ssh_settings(application_name, user=None):
"""Retrieve the known host entries and public keys for application
Retrieve the known host entries and public keys for all units of the
given application that are related to this application, for the
app + user combination.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Public keys + host keys for all units for app + user combination.
:rtype: dict
"""
settings = {}
keys = {}
prefix = ''
if user:
prefix = '{}_'.format(user)
for i, line in enumerate(ssh_known_hosts_lines(
application_name=application_name, user=user)):
settings['{}known_hosts_{}'.format(prefix, i)] = line
if settings:
settings['{}known_hosts_max_index'.format(prefix)] = len(
settings.keys())
for i, line in enumerate(ssh_authorized_keys_lines(
application_name=application_name, user=user)):
keys['{}authorized_keys_{}'.format(prefix, i)] = line
if keys:
keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys())
settings.update(keys)
return settings
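The relation data this produces looks roughly as follows, assuming one known host line and one authorized key for user='nova' (key material elided):

# Approximate shape of get_ssh_settings('nova-compute', user='nova'):
expected = {
    'nova_known_hosts_0': '|1|...= ssh-rsa AAAA...',
    'nova_known_hosts_max_index': 1,
    'nova_authorized_keys_0': 'ssh-rsa AAAA... nova@compute-host',
    'nova_authorized_keys_max_index': 1,
}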
def get_all_user_ssh_settings(application_name):
"""Retrieve the known host entries and public keys for application
Retrieve the known host entries and public keys for all units of the
given application that are related to this application, for both the
root user and the nova user.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:returns: Public keys + host keys for all units for app + user combination.
:rtype: dict
"""
settings = get_ssh_settings(application_name)
settings.update(get_ssh_settings(application_name, user='nova'))
return settings

View File

@@ -14,7 +14,7 @@ Listen {{ public_port }}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
@@ -40,7 +40,7 @@ Listen {{ public_port }}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
@@ -66,7 +66,7 @@ Listen {{ public_port }}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}

View File

@@ -0,0 +1,91 @@
# Configuration file maintained by Juju. Local changes may be overwritten.
{% if port -%}
Listen {{ port }}
{% endif -%}
{% if admin_port -%}
Listen {{ admin_port }}
{% endif -%}
{% if public_port -%}
Listen {{ public_port }}
{% endif -%}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
{% endif -%}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
{% endif -%}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
{% endif -%}

View File

@@ -1736,7 +1736,12 @@ def is_unit_upgrading_set():
def series_upgrade_prepare(pause_unit_helper=None, configs=None):
""" Run common series upgrade prepare tasks."""
""" Run common series upgrade prepare tasks.
:param pause_unit_helper: function: Function to pause unit
:param configs: OSConfigRenderer object: Configurations
:returns: None
"""
set_unit_upgrading()
if pause_unit_helper and configs:
if not is_unit_paused_set():
@@ -1744,7 +1749,12 @@ def series_upgrade_prepare(pause_unit_helper=None, configs=None):
def series_upgrade_complete(resume_unit_helper=None, configs=None):
""" Run common series upgrade complete tasks."""
""" Run common series upgrade complete tasks.
:param resume_unit_helper: function: Function to resume unit
:param configs: OSConfigRenderer object: Configurations
:returns: None
"""
clear_unit_paused()
clear_unit_upgrading()
if configs:

View File

@@ -48,6 +48,7 @@ INFO = "INFO"
DEBUG = "DEBUG"
TRACE = "TRACE"
MARKER = object()
SH_MAX_ARG = 131071
cache = {}
@@ -98,7 +99,7 @@ def log(message, level=None):
command += ['-l', level]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message]
command += [message[:SH_MAX_ARG]]
# Missing juju-log should not cause failures in unit tests
# Send log output to stderr
try:
@@ -201,11 +202,35 @@ def remote_unit():
return os.environ.get('JUJU_REMOTE_UNIT', None)
def service_name():
"""The name service group this unit belongs to"""
def application_name():
"""
The name of the deployed application this unit belongs to.
"""
return local_unit().split('/')[0]
def service_name():
"""
.. deprecated:: 0.19.1
Alias for :func:`application_name`.
"""
return application_name()
def model_name():
"""
Name of the model that this unit is deployed in.
"""
return os.environ['JUJU_MODEL_NAME']
def model_uuid():
"""
UUID of the model that this unit is deployed in.
"""
return os.environ['JUJU_MODEL_UUID']
def principal_unit():
"""Returns the principal unit of this unit, otherwise None"""
# Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
@@ -1297,3 +1322,33 @@ def egress_subnets(rid=None, unit=None):
if 'private-address' in settings:
return [_to_range(settings['private-address'])]
return [] # Should never happen
def unit_doomed(unit=None):
"""Determines if the unit is being removed from the model
Requires Juju 2.4.1.
:param unit: string unit name, defaults to local_unit
:side effect: calls goal_state
:side effect: calls local_unit
:side effect: calls has_juju_version
:return: True if the unit is being removed, already gone, or never existed
"""
if not has_juju_version("2.4.1"):
# We cannot risk blindly returning False for 'we don't know',
# because that could cause data loss; if call sites don't
# need an accurate answer, they likely don't need this helper
# at all.
# goal-state existed in 2.4.0, but did not handle removals
# correctly until 2.4.1.
raise NotImplementedError("is_doomed")
if unit is None:
unit = local_unit()
gs = goal_state()
units = gs.get('units', {})
if unit not in units:
return True
# I don't think 'dead' units ever show up in the goal-state, but
# check anyway in addition to 'dying'.
return units[unit]['status'] in ('dying', 'dead')
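A hedged usage sketch for unit_doomed, e.g. guarding destructive cleanup in a stop hook (decommission_storage is a hypothetical helper, not part of this commit):

try:
    if unit_doomed():
        log('Unit is leaving the model; running full cleanup')
        decommission_storage()  # hypothetical charm-specific helper
except NotImplementedError:
    # Juju < 2.4.1 has no reliable goal-state, so skip the risky path.
    log('Cannot determine unit fate on this Juju version')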

View File

@@ -84,6 +84,7 @@ module = "charmhelpers.fetch.%s" % __platform__
fetch = importlib.import_module(module)
filter_installed_packages = fetch.filter_installed_packages
filter_missing_packages = fetch.filter_missing_packages
install = fetch.apt_install
upgrade = fetch.apt_upgrade
update = _fetch_update = fetch.apt_update
@@ -96,6 +97,7 @@ if __platform__ == "ubuntu":
apt_update = fetch.apt_update
apt_upgrade = fetch.apt_upgrade
apt_purge = fetch.apt_purge
apt_autoremove = fetch.apt_autoremove
apt_mark = fetch.apt_mark
apt_hold = fetch.apt_hold
apt_unhold = fetch.apt_unhold

View File

@@ -13,7 +13,7 @@
# limitations under the License.
import os
from subprocess import check_call
from subprocess import STDOUT, check_output
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
@@ -55,7 +55,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
cmd = ['bzr', 'branch']
cmd += cmd_opts
cmd += [source, dest]
check_call(cmd)
check_output(cmd, stderr=STDOUT)
def install(self, source, dest=None, revno=None):
url_parts = self.parse_url(source)

View File

@@ -13,7 +13,7 @@
# limitations under the License.
import os
from subprocess import check_call, CalledProcessError
from subprocess import check_output, CalledProcessError, STDOUT
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
@@ -50,7 +50,7 @@ class GitUrlFetchHandler(BaseFetchHandler):
cmd = ['git', 'clone', source, dest, '--branch', branch]
if depth:
cmd.extend(['--depth', depth])
check_call(cmd)
check_output(cmd, stderr=STDOUT)
def install(self, source, branch="master", dest=None, depth=None):
url_parts = self.parse_url(source)

View File

@@ -189,6 +189,18 @@ def filter_installed_packages(packages):
return _pkgs
def filter_missing_packages(packages):
"""Return a list of packages that are installed.
:param packages: list of packages to evaluate.
:returns list: Packages that are installed.
"""
return list(
set(packages) -
set(filter_installed_packages(packages))
)
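Note the naming here: filter_installed_packages() returns the packages not yet installed (those still requiring installation), so the set difference yields the ones that are installed, which is exactly what a purge path wants. A small hedged sketch:

# Suppose python-cinder is installed and python-foo is not (illustrative).
to_purge = filter_missing_packages(['python-cinder', 'python-foo'])
# to_purge == ['python-cinder']; only packages actually present get purged
apt_purge(to_purge, fatal=True)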
def apt_cache(in_memory=True, progress=None):
"""Build and return an apt cache."""
from apt import apt_pkg
@@ -248,6 +260,14 @@ def apt_purge(packages, fatal=False):
_run_apt_command(cmd, fatal)
def apt_autoremove(purge=True, fatal=False):
"""Purge one or more packages."""
cmd = ['apt-get', '--assume-yes', 'autoremove']
if purge:
cmd.append('--purge')
_run_apt_command(cmd, fatal)
def apt_mark(packages, mark, fatal=False):
"""Flag one or more packages using apt-mark."""
log("Marking {} as {}".format(packages, mark))

View File

@@ -43,7 +43,10 @@ from charmhelpers.fetch import (
apt_upgrade,
apt_update,
apt_install,
add_source
add_source,
apt_purge,
apt_autoremove,
filter_missing_packages,
)
from charmhelpers.core.host import (
@@ -126,6 +129,14 @@ COMMON_PACKAGES = [
'thin-provisioning-tools',
]
PY3_PACKAGES = [
'python3-cinder',
'python3-memcache',
'python3-rados',
'python3-rbd',
'libapache2-mod-wsgi-py3',
]
API_PACKAGES = ['cinder-api']
VOLUME_PACKAGES = ['cinder-volume']
SCHEDULER_PACKAGES = ['cinder-scheduler']
@@ -341,9 +352,29 @@ def determine_packages():
pkgs += p
pkgs.extend(token_cache_pkgs(source=config()['openstack-origin']))
if CompareOpenStackReleases(os_release('cinder')) >= 'rocky':
pkgs = [p for p in pkgs if not p.startswith('python-')]
pkgs.extend(PY3_PACKAGES)
return pkgs
def determine_purge_packages():
'''
Determine the list of packages that were previously installed but are no
longer needed.
:returns: list of package names
'''
if CompareOpenStackReleases(os_release('cinder')) >= 'rocky':
pkgs = [p for p in COMMON_PACKAGES if p.startswith('python-')]
pkgs.append('python-cinder')
pkgs.append('python-memcache')
return pkgs
return []
def service_enabled(service):
'''Determine if a specific cinder service is enabled in
charm configuration.
@@ -716,6 +747,11 @@ def do_openstack_upgrade(configs=None):
reset_os_release()
apt_install(determine_packages(), fatal=True)
installed_packages = filter_missing_packages(determine_purge_packages())
if installed_packages:
apt_purge(installed_packages, fatal=True)
apt_autoremove(purge=True, fatal=True)
# NOTE(hopem): must do this after packages have been upgraded so that
# we ensure that correct configs are selected for the target release.
# See LP 1726527.
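Taken together, the rocky upgrade path now amounts to: install the py3 package set, purge whichever legacy python- packages are actually present, then autoremove the orphans. A condensed sketch of the flow added above:

apt_install(determine_packages(), fatal=True)  # now the python3 package set
installed = filter_missing_packages(determine_purge_packages())
if installed:
    apt_purge(installed, fatal=True)        # drop the python2 packages
    apt_autoremove(purge=True, fatal=True)  # and their orphaned dependencies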

View File

@@ -1013,6 +1013,9 @@ class OpenStackAmuletUtils(AmuletUtils):
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
# Normalize Ceph Mimic's newline-separated 'ceph osd lspools' output
output = output.replace("\n", ",")
# Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
for pool in str(output).split(','):
pool_id_name = pool.split(' ')

View File

@@ -48,6 +48,7 @@ INFO = "INFO"
DEBUG = "DEBUG"
TRACE = "TRACE"
MARKER = object()
SH_MAX_ARG = 131071
cache = {}
@@ -98,7 +99,7 @@ def log(message, level=None):
command += ['-l', level]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message]
command += [message[:SH_MAX_ARG]]
# Missing juju-log should not cause failures in unit tests
# Send log output to stderr
try:
@@ -201,11 +202,35 @@ def remote_unit():
return os.environ.get('JUJU_REMOTE_UNIT', None)
def service_name():
"""The name service group this unit belongs to"""
def application_name():
"""
The name of the deployed application this unit belongs to.
"""
return local_unit().split('/')[0]
def service_name():
"""
.. deprecated:: 0.19.1
Alias for :func:`application_name`.
"""
return application_name()
def model_name():
"""
Name of the model that this unit is deployed in.
"""
return os.environ['JUJU_MODEL_NAME']
def model_uuid():
"""
UUID of the model that this unit is deployed in.
"""
return os.environ['JUJU_MODEL_UUID']
def principal_unit():
"""Returns the principal unit of this unit, otherwise None"""
# Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
@@ -1297,3 +1322,33 @@ def egress_subnets(rid=None, unit=None):
if 'private-address' in settings:
return [_to_range(settings['private-address'])]
return [] # Should never happen
def unit_doomed(unit=None):
"""Determines if the unit is being removed from the model
Requires Juju 2.4.1.
:param unit: string unit name, defaults to local_unit
:side effect: calls goal_state
:side effect: calls local_unit
:side effect: calls has_juju_version
:return: True if the unit is being removed, already gone, or never existed
"""
if not has_juju_version("2.4.1"):
# We cannot risk blindly returning False for 'we don't know',
# because that could cause data loss; if call sites don't
# need an accurate answer, they likely don't need this helper
# at all.
# goal-state existed in 2.4.0, but did not handle removals
# correctly until 2.4.1.
raise NotImplementedError("is_doomed")
if unit is None:
unit = local_unit()
gs = goal_state()
units = gs.get('units', {})
if unit not in units:
return True
# I don't think 'dead' units ever show up in the goal-state, but
# check anyway in addition to 'dying'.
return units[unit]['status'] in ('dying', 'dead')

View File

@@ -65,7 +65,7 @@ basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-rocky --no-destroy
[testenv:func27-dfs]
# Charm Functional Test

View File

@@ -17,7 +17,6 @@ from test_utils import (
CharmTestCase
)
import cinder_manage
import cinder
TO_PATCH = [
'subprocess',
@@ -27,26 +26,26 @@ TO_PATCH = [
'os_release',
]
SERVICE_LIST = """2018-09-07 09:56:08.469 29766 WARNING oslo_db.sqlalchemy.engines [req-80f93eba-3e8f-4c4f-af66-491f91e5821d - - - - -] URL mysql://cinder:***@10.5.0.39/cinder does not contain a '+drivername' portion, and will make use of a default driver. A full dbname+drivername:// protocol is recommended. For MySQL, it is strongly recommended that mysql+pymysql:// be specified for maximum service compatibility
Binary Host Zone Status State Updated At RPC Version Object Version Cluster
cinder-volume juju-d15fa6-py3-upgrade-testing-9@LVM nova enabled XXX 2018-09-06 08:55:13 3.15 1.35
cinder-scheduler juju-d15fa6-py3-upgrade-testing-9 nova enabled XXX 2018-09-06 08:55:12 3.10 1.35
cinder-volume cinder@cinder-ceph nova enabled :-) 2018-09-07 09:56:02 3.16 1.37
cinder-scheduler cinder nova enabled :-) 2018-09-07 09:56:02 3.11 1.37
cinder-volume juju-d15fa6-py3-upgrade-testing-30@LVM nova enabled XXX 2018-09-06 09:30:40 3.15 1.35
cinder-scheduler juju-d15fa6-py3-upgrade-testing-30 nova enabled XXX 2018-09-06 09:30:47 3.10 1.35
cinder-volume juju-d15fa6-py3-upgrade-testing-32@LVM nova enabled XXX 2018-09-06 15:04:13 3.16 1.37
cinder-scheduler juju-d15fa6-py3-upgrade-testing-32 nova enabled XXX 2018-09-06 15:04:21 3.11 1.37
cinder-volume juju-d15fa6-py3-upgrade-testing-31@LVM nova enabled XXX 2018-09-06 15:04:15 3.16 1.37
cinder-scheduler juju-d15fa6-py3-upgrade-testing-31 nova enabled XXX 2018-09-06 15:04:35 3.11 1.37
""" # noqa
class CinderManageTestCase(CharmTestCase):
def setUp(self):
super(CinderManageTestCase, self).setUp(cinder_manage, TO_PATCH)
def tearDown(self):
cinder.reset_mock()
cinder.context.reset_mock()
cinder.db.reset_mock()
cinder.db.sqlalchemy.reset_mock()
cinder.db.sqlalchemy.api.reset_mock()
def test_load_config_file(self):
cinder_manage.load_config_file('/cinder.conf')
cinder.flags.FLAGS.assert_called_once_with(
args=[],
default_config_files=['/cinder.conf'],
project='cinder')
def test_cinder_manage_remove(self):
cinder_manage.cinder_manage_remove('mybin', 'myhost')
self.subprocess.check_call.assert_called_once_with(
@@ -57,82 +56,85 @@ class CinderManageTestCase(CharmTestCase):
self.subprocess.check_call.assert_called_once_with(
['cinder-manage', 'service', 'remove', 'host', 'host@this#that'])
@patch.object(cinder_manage, 'cinder_manage_service_list')
@patch.object(cinder_manage, 'cinder_manage_remove')
def test_remove_services(self, cinder_manage_remove):
self.action_get.return_value = 'sv1host'
def test_remove_services(self, cinder_manage_remove,
cinder_manage_service_list):
self.action_get.return_value = 'svc1host'
svc1_mock = mock.MagicMock()
svc1_mock.binary = "svc1bin"
svc1_mock.host = "svc1host"
query_mock = mock.MagicMock()
query_mock.filter().all.return_value = [svc1_mock]
cinder.db.sqlalchemy.api.model_query.return_value = query_mock
self.os_release.return_value = 'liberty'
cinder_manage_service_list.return_value = [svc1_mock]
cinder_manage.remove_services('arg')
cinder_manage_remove.assert_called_once_with('svc1bin', 'svc1host')
self.action_set.assert_called_once_with({'removed': 'svc1host'})
@patch.object(cinder_manage, 'cinder_manage_service_list')
@patch.object(cinder_manage, 'cinder_manage_remove')
def test_remove_services_kilo(self, cinder_manage_remove):
self.action_get.return_value = 'sv1host'
def test_remove_services_kilo(self, cinder_manage_remove,
cinder_manage_service_list):
self.action_get.return_value = 'svc1host'
svc1_mock = mock.MagicMock()
svc1_mock.binary = "svc1bin"
svc1_mock.host = "svc1host"
svc1_mock.id = 42
cinder.context.get_admin_context.return_value = 'admctxt'
query_mock = mock.MagicMock()
query_mock.filter().all.return_value = [svc1_mock]
cinder.db.sqlalchemy.api.model_query.return_value = query_mock
self.os_release.return_value = 'kilo'
cinder_manage_service_list.return_value = [svc1_mock]
cinder_manage.remove_services('arg')
cinder.db.service_destroy.assert_called_once_with('admctxt', 42)
self.action_set.assert_called_once_with({'removed': 'svc1host'})
self.action_fail.assert_called_once()
@patch.object(cinder_manage, 'cinder_manage_service_list')
@patch.object(cinder_manage, 'cinder_manage_remove')
def test_remove_services_fail(self, cinder_manage_remove):
def test_remove_services_fail(self, cinder_manage_remove,
cinder_manage_service_list):
cinder_manage_remove.side_effect = Exception()
self.action_get.return_value = 'sv1host'
self.action_get.return_value = 'svc1host'
svc1_mock = mock.MagicMock()
svc1_mock.binary = "svc1bin"
svc1_mock.host = "svc1host"
query_mock = mock.MagicMock()
query_mock.filter().all.return_value = [svc1_mock]
cinder.db.sqlalchemy.api.model_query.return_value = query_mock
self.os_release.return_value = 'liberty'
cinder_manage_service_list.return_value = [svc1_mock]
cinder_manage.remove_services('arg')
cinder_manage_remove.assert_called_once_with('svc1bin', 'svc1host')
self.action_fail.assert_called_once_with(
'Cannot remove service: svc1host')
@patch.object(cinder_manage, 'cinder_manage_service_list')
@patch.object(cinder_manage, 'cinder_manage_volume_update_host')
def test__rename_volume_host(self, cinder_manage_volume_update_host):
def test__rename_volume_host(self, cinder_manage_volume_update_host,
cinder_manage_service_list):
self.action_get.return_value = 'myhost'
query_mock = mock.MagicMock()
query_mock.filter().all.return_value = ['myhost']
cinder.db.sqlalchemy.api.model_query.return_value = query_mock
cinder.db.sqlalchemy.api.model_query.return_value = query_mock
svc1_mock = mock.MagicMock()
svc1_mock.binary = "cinder-volume"
svc1_mock.host = "a"
cinder_manage_service_list.return_value = [svc1_mock]
cinder_manage._rename_volume_host('a', 'b')
cinder_manage_volume_update_host.assert_called_once_with('a', 'b')
@patch.object(cinder_manage, 'cinder_manage_service_list')
@patch.object(cinder_manage, 'cinder_manage_volume_update_host')
def test__rename_volume_host_missing(self,
cinder_manage_volume_update_host):
cinder_manage_volume_update_host,
cinder_manage_service_list):
self.action_get.return_value = 'myhost'
query_mock = mock.MagicMock()
query_mock.filter().all.return_value = []
cinder.db.sqlalchemy.api.model_query.return_value = query_mock
cinder_manage_service_list.return_value = []
cinder_manage._rename_volume_host('a', 'b')
self.assertFalse(cinder_manage_volume_update_host.called)
self.action_fail.assert_called_once_with(
'Cannot update host attribute from a, a not found')
@patch.object(cinder_manage, 'cinder_manage_service_list')
@patch.object(cinder_manage, 'cinder_manage_volume_update_host')
def test__rename_volume_host_fail(self,
cinder_manage_volume_update_host):
cinder_manage_volume_update_host,
cinder_manage_service_list):
cinder_manage_volume_update_host.side_effect = Exception()
self.action_get.return_value = 'myhost'
query_mock = mock.MagicMock()
query_mock.filter().all().return_value = ['myhost']
cinder.db.sqlalchemy.api.model_query.return_value = query_mock
svc1_mock = mock.MagicMock()
svc1_mock.binary = "cinder-volume"
svc1_mock.host = "a"
cinder_manage_service_list.return_value = [svc1_mock]
cinder_manage._rename_volume_host('a', 'b')
cinder_manage_volume_update_host.assert_called_once_with('a', 'b')
self.action_fail.assert_called_once_with('Cannot update host a')
@@ -164,3 +166,8 @@ class CinderManageTestCase(CharmTestCase):
cinder_manage.volume_host_add_driver('arg')
_rename_volume_host.assert_called_once_with(
'orghost', 'orghost@lvmdriver-1')
@patch.object(cinder_manage, 'subprocess')
def test_cinder_manage_service_list(self, subprocess):
subprocess.check_output.return_value = SERVICE_LIST.encode()
self.assertEqual(len(cinder_manage.cinder_manage_service_list()), 10)

View File

@@ -61,6 +61,9 @@ TO_PATCH = [
'apt_update',
'apt_upgrade',
'apt_install',
'apt_purge',
'apt_autoremove',
'filter_missing_packages',
'service_stop',
'service_start',
# cinder
@@ -123,10 +126,24 @@ class TestCinderUtils(CharmTestCase):
self.test_config.set('enabled-services', 'api,scheduler')
self.assertFalse(cinder_utils.service_enabled('volume'))
def test_determine_purge_packages(self):
'Ensure no packages are identified for purge prior to rocky'
self.os_release.return_value = 'queens'
self.assertEqual(cinder_utils.determine_purge_packages(), [])
def test_determine_purge_packages_rocky(self):
'Ensure python packages are identified for purge at rocky'
self.os_release.return_value = 'rocky'
self.assertEqual(cinder_utils.determine_purge_packages(),
[p for p in cinder_utils.COMMON_PACKAGES
if p.startswith('python-')] +
['python-cinder', 'python-memcache'])
@patch('cinder_utils.service_enabled')
def test_determine_packages_all(self, service_enabled):
'It determines all packages required when all services enabled'
service_enabled.return_value = True
self.os_release.return_value = 'icehouse'
pkgs = cinder_utils.determine_packages()
self.assertEqual(sorted(pkgs),
sorted(cinder_utils.COMMON_PACKAGES +
@@ -134,11 +151,27 @@ class TestCinderUtils(CharmTestCase):
cinder_utils.API_PACKAGES +
cinder_utils.SCHEDULER_PACKAGES))
@patch('cinder_utils.service_enabled')
def test_determine_packages_all_rocky(self, service_enabled):
'Check python3 packages are installed @ rocky'
service_enabled.return_value = True
self.os_release.return_value = 'rocky'
pkgs = cinder_utils.determine_packages()
self.assertEqual(
sorted(pkgs),
sorted([p for p in cinder_utils.COMMON_PACKAGES
if not p.startswith('python-')] +
cinder_utils.VOLUME_PACKAGES +
cinder_utils.API_PACKAGES +
cinder_utils.SCHEDULER_PACKAGES +
cinder_utils.PY3_PACKAGES))
@patch('cinder_utils.service_enabled')
def test_determine_packages_subset(self, service_enabled):
'It determines packages required for a subset of enabled services'
service_enabled.side_effect = self.svc_enabled
self.test_config.set('openstack-origin', 'cloud:xenial-newton')
self.os_release.return_value = 'newton'
self.token_cache_pkgs.return_value = ['memcached']
self.test_config.set('enabled-services', 'api')
@@ -770,6 +803,36 @@ class TestCinderUtils(CharmTestCase):
configs.set_release.assert_called_with(openstack_release='havana')
self.assertFalse(migrate.called)
@patch.object(cinder_utils, 'register_configs')
@patch.object(cinder_utils, 'services')
@patch.object(cinder_utils, 'migrate_database')
@patch.object(cinder_utils, 'determine_packages')
def test_openstack_upgrade_rocky(self, pkgs, migrate, services,
mock_register_configs):
pkgs.return_value = ['mypackage']
self.os_release.return_value = 'rocky'
self.config.side_effect = None
self.config.return_value = 'cloud:bionic-rocky'
services.return_value = ['cinder-api', 'cinder-volume']
self.is_elected_leader.return_value = True
self.get_os_codename_install_source.return_value = 'rocky'
configs = mock_register_configs.return_value
self.filter_missing_packages.return_value = [
'python-cinder',
]
cinder_utils.do_openstack_upgrade(configs)
self.assertTrue(mock_register_configs.called)
self.assertTrue(configs.write_all.called)
self.apt_upgrade.assert_called_with(options=DPKG_OPTIONS,
fatal=True, dist=True)
self.apt_install.assert_called_with(['mypackage'], fatal=True)
self.apt_purge.assert_called_with(
['python-cinder'],
fatal=True)
self.apt_autoremove.assert_called_with(purge=True, fatal=True)
configs.set_release.assert_called_with(openstack_release='rocky')
self.assertTrue(migrate.called)
@patch.object(cinder_utils, 'local_unit', lambda *args: 'unit/0')
def test_check_local_db_actions_complete_by_self(self):
self.relation_get.return_value = {}