Update functional test definitions

Change-Id: I99f6950b2b9ecefb2182298b384594ffb262df13

parent 1e8374351a
commit 6ed5385c8c
@@ -24,7 +24,8 @@ import urlparse

import cinderclient.v1.client as cinder_client
import cinderclient.v2.client as cinder_clientv2
import glanceclient.v1.client as glance_client
import glanceclient.v1 as glance_client
import glanceclient.v2 as glance_clientv2
import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client
from keystoneauth1.identity import (

@@ -623,7 +624,7 @@ class OpenStackAmuletUtils(AmuletUtils):

        ep = keystone.service_catalog.url_for(service_type='image',
                                              interface='adminURL')
        if keystone.session:
            return glance_client.Client(ep, session=keystone.session)
            return glance_clientv2.Client("2", session=keystone.session)
        else:
            return glance_client.Client(ep, token=keystone.auth_token)

@@ -711,10 +712,19 @@ class OpenStackAmuletUtils(AmuletUtils):

        f.close()

        # Create glance image
        with open(local_path) as f:
        if float(glance.version) < 2.0:
            with open(local_path) as fimage:
                image = glance.images.create(name=image_name, is_public=True,
                                              disk_format='qcow2',
                                              container_format='bare', data=f)
                                              container_format='bare',
                                              data=fimage)
        else:
            image = glance.images.create(
                name=image_name,
                disk_format="qcow2",
                visibility="public",
                container_format="bare")
            glance.images.upload(image.id, open(local_path, 'rb'))

        # Wait for image to reach active status
        img_id = image.id

@@ -729,9 +739,14 @@ class OpenStackAmuletUtils(AmuletUtils):

        self.log.debug('Validating image attributes...')
        val_img_name = glance.images.get(img_id).name
        val_img_stat = glance.images.get(img_id).status
        val_img_pub = glance.images.get(img_id).is_public
        val_img_cfmt = glance.images.get(img_id).container_format
        val_img_dfmt = glance.images.get(img_id).disk_format

        if float(glance.version) < 2.0:
            val_img_pub = glance.images.get(img_id).is_public
        else:
            val_img_pub = glance.images.get(img_id).visibility == "public"

        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
                    'container fmt:{} disk fmt:{}'.format(
                        val_img_name, val_img_pub, img_id,
@@ -1389,11 +1389,12 @@ class WorkerConfigContext(OSContextGenerator):

class WSGIWorkerConfigContext(WorkerConfigContext):

    def __init__(self, name=None, script=None, admin_script=None,
                 public_script=None, process_weight=1.00,
                 public_script=None, user=None, group=None,
                 process_weight=1.00,
                 admin_process_weight=0.25, public_process_weight=0.75):
        self.service_name = name
        self.user = name
        self.group = name
        self.user = user or name
        self.group = group or name
        self.script = script
        self.admin_script = admin_script
        self.public_script = public_script
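For context, a minimal sketch of how a charm might use the new user/group arguments; the 'cinder' names and path below are illustrative assumptions, not part of this change:

# Illustrative sketch only: run the WSGI vhost as an explicit user/group
# instead of one derived from the service name.
from charmhelpers.contrib.openstack.context import WSGIWorkerConfigContext

wsgi_ctxt = WSGIWorkerConfigContext(
    name='cinder',
    script='/usr/bin/cinder-wsgi',
    user='cinder',   # new argument; previously always fell back to `name`
    group='cinder',  # new argument; previously always fell back to `name`
)
# These are the values the {{ user }} / {{ group }} variables in the wsgi
# templates further down are expected to pick up.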
charmhelpers/contrib/openstack/ssh_migrations.py (new file, 412 lines)

@@ -0,0 +1,412 @@
# Copyright 2018 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import subprocess

from charmhelpers.core.hookenv import (
    ERROR,
    log,
    relation_get,
)
from charmhelpers.contrib.network.ip import (
    is_ipv6,
    ns_query,
)
from charmhelpers.contrib.openstack.utils import (
    get_hostname,
    get_host_ip,
    is_ip,
)

NOVA_SSH_DIR = '/etc/nova/compute_ssh/'


def ssh_directory_for_unit(application_name, user=None):
    """Return the directory used to store ssh assets for the application.

    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    :returns: Fully qualified directory path.
    :rtype: str
    """
    if user:
        application_name = "{}_{}".format(application_name, user)
    _dir = os.path.join(NOVA_SSH_DIR, application_name)
    for d in [NOVA_SSH_DIR, _dir]:
        if not os.path.isdir(d):
            os.mkdir(d)
    for f in ['authorized_keys', 'known_hosts']:
        f = os.path.join(_dir, f)
        if not os.path.isfile(f):
            open(f, 'w').close()
    return _dir


def known_hosts(application_name, user=None):
    """Return the known hosts file for the application.

    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    :returns: Fully qualified path to file.
    :rtype: str
    """
    return os.path.join(
        ssh_directory_for_unit(application_name, user),
        'known_hosts')


def authorized_keys(application_name, user=None):
    """Return the authorized keys file for the application.

    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    :returns: Fully qualified path to file.
    :rtype: str
    """
    return os.path.join(
        ssh_directory_for_unit(application_name, user),
        'authorized_keys')


def ssh_known_host_key(host, application_name, user=None):
    """Return the first entry in known_hosts for host.

    :param host: hostname to lookup in file.
    :type host: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    :returns: Host key
    :rtype: str or None
    """
    cmd = [
        'ssh-keygen',
        '-f', known_hosts(application_name, user),
        '-H',
        '-F',
        host]
    try:
        # The first line of output is like '# Host xx found: line 1 type RSA',
        # which should be excluded.
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        # RC of 1 seems to be legitimate for most ssh-keygen -F calls.
        if e.returncode == 1:
            output = e.output
        else:
            raise
    output = output.strip()

    if output:
        # Bug #1500589 cmd has 0 rc on precise if entry not present
        lines = output.split('\n')
        if len(lines) >= 1:
            return lines[0]

    return None


def remove_known_host(host, application_name, user=None):
    """Remove the entry in known_hosts for host.

    :param host: hostname to lookup in file.
    :type host: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    """
    log('Removing SSH known host entry for compute host at %s' % host)
    cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host]
    subprocess.check_call(cmd)


def is_same_key(key_1, key_2):
    """Extract the key from two host entries and compare them.

    :param key_1: Host key
    :type key_1: str
    :param key_2: Host key
    :type key_2: str
    """
    # The key format get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'
    # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare
    # the part start with 'ssh-rsa' followed with '= ', because the hash
    # value in the beginning will change each time.
    k_1 = key_1.split('= ')[1]
    k_2 = key_2.split('= ')[1]
    return k_1 == k_2


def add_known_host(host, application_name, user=None):
    """Add the given host key to the known hosts file.

    :param host: host name
    :type host: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    """
    cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
    try:
        remote_key = subprocess.check_output(cmd).strip()
    except Exception as e:
        log('Could not obtain SSH host key from %s' % host, level=ERROR)
        raise e

    current_key = ssh_known_host_key(host, application_name, user)
    if current_key and remote_key:
        if is_same_key(remote_key, current_key):
            log('Known host key for compute host %s up to date.' % host)
            return
        else:
            remove_known_host(host, application_name, user)

    log('Adding SSH host key to known hosts for compute node at %s.' % host)
    with open(known_hosts(application_name, user), 'a') as out:
        out.write("{}\n".format(remote_key))


def ssh_authorized_key_exists(public_key, application_name, user=None):
    """Check if given key is in the authorized_key file.

    :param public_key: Public key.
    :type public_key: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    :returns: Whether given key is in the authorized_key file.
    :rtype: boolean
    """
    with open(authorized_keys(application_name, user)) as keys:
        return ('%s' % public_key) in keys.read()


def add_authorized_key(public_key, application_name, user=None):
    """Add given key to the authorized_key file.

    :param public_key: Public key.
    :type public_key: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    """
    with open(authorized_keys(application_name, user), 'a') as keys:
        keys.write("{}\n".format(public_key))


def ssh_compute_add_host_and_key(public_key, hostname, private_address,
                                 application_name, user=None):
    """Add a compute node's ssh details to the local cache.

    Collect various hostname variations and add the corresponding host keys to
    the local known hosts file. Finally, add the supplied public key to the
    authorized_key file.

    :param public_key: Public key.
    :type public_key: str
    :param hostname: Hostname to collect host keys from.
    :type hostname: str
    :param private_address: Corresponding private address for hostname
    :type private_address: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    """
    # If remote compute node hands us a hostname, ensure we have a
    # known hosts entry for its IP, hostname and FQDN.
    hosts = [private_address]

    if not is_ipv6(private_address):
        if hostname:
            hosts.append(hostname)

        if is_ip(private_address):
            hn = get_hostname(private_address)
            if hn:
                hosts.append(hn)
                short = hn.split('.')[0]
                if ns_query(short):
                    hosts.append(short)
        else:
            hosts.append(get_host_ip(private_address))
            short = private_address.split('.')[0]
            if ns_query(short):
                hosts.append(short)

    for host in list(set(hosts)):
        add_known_host(host, application_name, user)

    if not ssh_authorized_key_exists(public_key, application_name, user):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key, application_name, user)


def ssh_compute_add(public_key, application_name, rid=None, unit=None,
                    user=None):
    """Add a compute node's ssh details to the local cache.

    Collect various hostname variations and add the corresponding host keys to
    the local known hosts file. Finally, add the supplied public key to the
    authorized_key file.

    :param public_key: Public key.
    :type public_key: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param rid: Relation id of the relation between this charm and the app. If
                none is supplied it is assumed to be the relation relating to
                the current hook context.
    :type rid: str
    :param unit: Unit to add ssh assets for. If none is supplied it is assumed
                 to be the unit relating to the current hook context.
    :type unit: str
    :param user: The user that the ssh assets are for.
    :type user: str
    """
    relation_data = relation_get(rid=rid, unit=unit)
    ssh_compute_add_host_and_key(
        public_key,
        relation_data.get('hostname'),
        relation_data.get('private-address'),
        application_name,
        user=user)


def ssh_known_hosts_lines(application_name, user=None):
    """Return contents of known_hosts file for given application.

    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    """
    known_hosts_list = []
    with open(known_hosts(application_name, user)) as hosts:
        for hosts_line in hosts:
            if hosts_line.rstrip():
                known_hosts_list.append(hosts_line.rstrip())
    return known_hosts_list


def ssh_authorized_keys_lines(application_name, user=None):
    """Return contents of authorized_keys file for given application.

    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    """
    authorized_keys_list = []

    with open(authorized_keys(application_name, user)) as keys:
        for authkey_line in keys:
            if authkey_line.rstrip():
                authorized_keys_list.append(authkey_line.rstrip())
    return authorized_keys_list


def ssh_compute_remove(public_key, application_name, user=None):
    """Remove given public key from authorized_keys file.

    :param public_key: Public key.
    :type public_key: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    """
    if not (os.path.isfile(authorized_keys(application_name, user)) or
            os.path.isfile(known_hosts(application_name, user))):
        return

    keys = ssh_authorized_keys_lines(application_name, user=user)
    keys = [k.strip() for k in keys]

    if public_key not in keys:
        return

    [keys.remove(key) for key in keys if key == public_key]

    with open(authorized_keys(application_name, user), 'w') as _keys:
        keys = '\n'.join(keys)
        if not keys.endswith('\n'):
            keys += '\n'
        _keys.write(keys)


def get_ssh_settings(application_name, user=None):
    """Retrieve the known host entries and public keys for application

    Retrieve the known host entries and public keys for application for all
    units of the given application related to this application for the
    app + user combination.

    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    :returns: Public keys + host keys for all units for app + user combination.
    :rtype: dict
    """
    settings = {}
    keys = {}
    prefix = ''
    if user:
        prefix = '{}_'.format(user)

    for i, line in enumerate(ssh_known_hosts_lines(
            application_name=application_name, user=user)):
        settings['{}known_hosts_{}'.format(prefix, i)] = line
    if settings:
        settings['{}known_hosts_max_index'.format(prefix)] = len(
            settings.keys())

    for i, line in enumerate(ssh_authorized_keys_lines(
            application_name=application_name, user=user)):
        keys['{}authorized_keys_{}'.format(prefix, i)] = line
    if keys:
        keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys())
    settings.update(keys)
    return settings


def get_all_user_ssh_settings(application_name):
    """Retrieve the known host entries and public keys for application

    Retrieve the known host entries and public keys for application for all
    units of the given application related to this application for root user
    and nova user.

    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :returns: Public keys + host keys for all units for app + user combination.
    :rtype: dict
    """
    settings = get_ssh_settings(application_name)
    settings.update(get_ssh_settings(application_name, user='nova'))
    return settings
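A minimal usage sketch of the new ssh_migrations helpers follows; the hook name, relation key and 'nova-compute' application below are illustrative assumptions, not part of this module:

# Hypothetical hook-side sketch: cache a compute unit's SSH details and
# publish the aggregated assets back on the relation.
from charmhelpers.core.hookenv import relation_get, relation_set
from charmhelpers.contrib.openstack.ssh_migrations import (
    ssh_compute_add,
    get_all_user_ssh_settings,
)

def compute_changed(rid=None, unit=None):
    # Assumes the remote unit published 'ssh_public_key' on the relation.
    public_key = relation_get('ssh_public_key', rid=rid, unit=unit)
    if public_key:
        ssh_compute_add(public_key, 'nova-compute', rid=rid, unit=unit)
        # Hand every unit's known_hosts/authorized_keys entries back.
        relation_set(relation_id=rid,
                     **get_all_user_ssh_settings('nova-compute'))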
@@ -14,7 +14,7 @@ Listen {{ public_port }}

{% if port -%}
<VirtualHost *:{{ port }}>
    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
                      display-name=%{GROUP}
    WSGIProcessGroup {{ service_name }}
    WSGIScriptAlias / {{ script }}

@@ -40,7 +40,7 @@ Listen {{ public_port }}

{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
    WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
    WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
                      display-name=%{GROUP}
    WSGIProcessGroup {{ service_name }}-admin
    WSGIScriptAlias / {{ admin_script }}

@@ -66,7 +66,7 @@ Listen {{ public_port }}

{% if public_port -%}
<VirtualHost *:{{ public_port }}>
    WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
    WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
                      display-name=%{GROUP}
    WSGIProcessGroup {{ service_name }}-public
    WSGIScriptAlias / {{ public_script }}
@@ -0,0 +1,91 @@

# Configuration file maintained by Juju. Local changes may be overwritten.

{% if port -%}
Listen {{ port }}
{% endif -%}

{% if admin_port -%}
Listen {{ admin_port }}
{% endif -%}

{% if public_port -%}
Listen {{ public_port }}
{% endif -%}

{% if port -%}
<VirtualHost *:{{ port }}>
    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
                      display-name=%{GROUP}
    WSGIProcessGroup {{ service_name }}
    WSGIScriptAlias / {{ script }}
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/apache2/{{ service_name }}_error.log
    CustomLog /var/log/apache2/{{ service_name }}_access.log combined

    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
{% endif -%}

{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
    WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
                      display-name=%{GROUP}
    WSGIProcessGroup {{ service_name }}-admin
    WSGIScriptAlias / {{ admin_script }}
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/apache2/{{ service_name }}_error.log
    CustomLog /var/log/apache2/{{ service_name }}_access.log combined

    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
{% endif -%}

{% if public_port -%}
<VirtualHost *:{{ public_port }}>
    WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
                      display-name=%{GROUP}
    WSGIProcessGroup {{ service_name }}-public
    WSGIScriptAlias / {{ public_script }}
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/apache2/{{ service_name }}_error.log
    CustomLog /var/log/apache2/{{ service_name }}_access.log combined

    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
{% endif -%}
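As a quick sanity check of the template change, a standalone Jinja2 sketch (the context values are made up for illustration) of how the new {{ user }} and {{ group }} variables are substituted:

# Illustrative only; renders one line of the vhost template with a fake context.
from jinja2 import Template

line = ("WSGIDaemonProcess {{ service_name }} processes={{ processes }} "
        "threads={{ threads }} user={{ user }} group={{ group }}")
print(Template(line).render(service_name='keystone', processes=3, threads=1,
                            user='keystone', group='keystone'))
# -> WSGIDaemonProcess keystone processes=3 threads=1 user=keystone group=keystone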
@@ -831,12 +831,25 @@ def _ows_check_if_paused(services=None, ports=None):

    """Check if the unit is supposed to be paused, and if so check that the
    services/ports (if passed) are actually stopped/not being listened to.

    if the unit isn't supposed to be paused, just return None, None
    If the unit isn't supposed to be paused, just return None, None

    If the unit is performing a series upgrade, return a message indicating
    this.

    @param services: OPTIONAL services spec or list of service names.
    @param ports: OPTIONAL list of port numbers.
    @returns state, message or None, None
    """
    if is_unit_upgrading_set():
        state, message = check_actually_paused(services=services,
                                               ports=ports)
        if state is None:
            # we're paused okay, so set maintenance and return
            state = "blocked"
            message = ("Ready for do-release-upgrade and reboot. "
                       "Set complete when finished.")
        return state, message

    if is_unit_paused_set():
        state, message = check_actually_paused(services=services,
                                               ports=ports)

@@ -1339,7 +1352,7 @@ def pause_unit(assess_status_func, services=None, ports=None,

        message = assess_status_func()
        if message:
            messages.append(message)
    if messages:
    if messages and not is_unit_upgrading_set():
        raise Exception("Couldn't pause: {}".format("; ".join(messages)))

@@ -1689,3 +1702,34 @@ def install_os_snaps(snaps, refresh=False):

        snap_install(snap,
                     _ensure_flag(snaps[snap]['channel']),
                     _ensure_flag(snaps[snap]['mode']))


def set_unit_upgrading():
    """Set the unit to an upgrading state in the local kv() store.
    """
    with unitdata.HookData()() as t:
        kv = t[0]
        kv.set('unit-upgrading', True)


def clear_unit_upgrading():
    """Clear the unit from an upgrading state in the local kv() store
    """
    with unitdata.HookData()() as t:
        kv = t[0]
        kv.set('unit-upgrading', False)


def is_unit_upgrading_set():
    """Return the state of the kv().get('unit-upgrading').

    To help with units that don't have HookData() (testing)
    if it raises, return False
    """
    try:
        with unitdata.HookData()() as t:
            kv = t[0]
            # transform something truth-y into a Boolean.
            return not(not(kv.get('unit-upgrading')))
    except Exception:
        return False
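A hedged sketch of how a charm's series-upgrade actions might drive the new helpers; the handler names are hypothetical and the pause/resume details are elided:

# Hypothetical action handlers, not part of this commit.
from charmhelpers.contrib.openstack.utils import (
    set_unit_upgrading,
    clear_unit_upgrading,
)

def pre_series_upgrade():
    # Mark the unit as upgrading so _ows_check_if_paused reports
    # "Ready for do-release-upgrade and reboot." instead of a pause failure.
    set_unit_upgrading()
    # ... pause services here (e.g. via the pause_unit helpers above) ...

def post_series_upgrade():
    clear_unit_upgrading()
    # ... resume services and re-run the charm's status assessment ...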
@@ -201,11 +201,35 @@ def remote_unit():

    return os.environ.get('JUJU_REMOTE_UNIT', None)


def service_name():
    """The name service group this unit belongs to"""
def application_name():
    """
    The name of the deployed application this unit belongs to.
    """
    return local_unit().split('/')[0]


def service_name():
    """
    .. deprecated:: 0.19.1
       Alias for :func:`application_name`.
    """
    return application_name()


def model_name():
    """
    Name of the model that this unit is deployed in.
    """
    return os.environ['JUJU_MODEL_NAME']


def model_uuid():
    """
    UUID of the model that this unit is deployed in.
    """
    return os.environ['JUJU_MODEL_UUID']


def principal_unit():
    """Returns the principal unit of this unit, otherwise None"""
    # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT

@@ -1297,3 +1321,33 @@ def egress_subnets(rid=None, unit=None):

    if 'private-address' in settings:
        return [_to_range(settings['private-address'])]
    return []  # Should never happen


def unit_doomed(unit=None):
    """Determines if the unit is being removed from the model

    Requires Juju 2.4.1.

    :param unit: string unit name, defaults to local_unit
    :side effect: calls goal_state
    :side effect: calls local_unit
    :side effect: calls has_juju_version
    :return: True if the unit is being removed, already gone, or never existed
    """
    if not has_juju_version("2.4.1"):
        # We cannot risk blindly returning False for 'we don't know',
        # because that could cause data loss; if call sites don't
        # need an accurate answer, they likely don't need this helper
        # at all.
        # goal-state existed in 2.4.0, but did not handle removals
        # correctly until 2.4.1.
        raise NotImplementedError("is_doomed")
    if unit is None:
        unit = local_unit()
    gs = goal_state()
    units = gs.get('units', {})
    if unit not in units:
        return True
    # I don't think 'dead' units ever show up in the goal-state, but
    # check anyway in addition to 'dying'.
    return units[unit]['status'] in ('dying', 'dead')
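A brief sketch of the intended use of unit_doomed; the hook name is hypothetical and the actual teardown is elided:

# Hypothetical relation-departed handler; charm-specific cleanup is elided.
from charmhelpers.core.hookenv import log, unit_doomed

def cluster_relation_departed():
    if not unit_doomed():
        # The peer merely left a relation; keep its data.
        return
    # The unit is leaving the model for good, so it is safe to purge its
    # local data / drop it from the cluster here.
    log('unit is doomed, running teardown')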
@@ -430,10 +430,11 @@ class SwiftStorageBasicDeployment(OpenStackAmuletDeployment):

                    'swift-backed glance image...')

        # Create swift-backed glance image
        img_new = u.create_cirros_image(self.glance, "cirros-image-1")
        img_id = img_new.id
        img_md5 = img_new.checksum
        img_size = img_new.size
        img_id = u.create_cirros_image(self.glance, "cirros-image-1").id

        # Get the image from glance by ID
        img_md5 = self.glance.images.get(img_id).checksum
        img_size = self.glance.images.get(img_id).size

        # Validate that swift object's checksum/size match that from glance
        headers, containers = self.swift.get_account()

@@ -444,13 +445,19 @@ class SwiftStorageBasicDeployment(OpenStackAmuletDeployment):

        container_name = containers[0].get('name')

        # Until the glance v2 and swift bug is resolved
        # https://bugs.launchpad.net/glance/+bug/1789748
        read_headers = {'X-Container-Read': ".r:*,.rlistings"}
        self.swift.post_container(container_name, headers=read_headers)

        headers, objects = self.swift.get_container(container_name)
        if len(objects) != 1:
            msg = "Expected 1 swift object, found {}".format(len(objects))
        if len(objects) != 2:
            msg = "Expected 2 swift objects, found {}".format(len(objects))
            amulet.raise_status(amulet.FAIL, msg=msg)

        swift_object_size = objects[0].get('bytes')
        swift_object_md5 = objects[0].get('hash')
        swift_object_size = objects[1].get('bytes')
        swift_object_md5 = objects[1].get('hash')

        if img_size != swift_object_size:
            msg = "Glance image size {} != swift object size {}".format(