Enable swift payload upgrades for wallaby+
Sync charm-helpers to pick up fix for openstack_upgrade_available that
enables successful payload upgrades. This will need backporting to
stable/wallaby.

https://github.com/juju/charm-helpers/pull/869

Closes-Bug: #2040606
Change-Id: Ib6c68845516ebaffe7759e2394ca284567076a74
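For background, the shape of the payload-upgrade check this fix targets is roughly the following. This is an illustrative sketch only: payload_upgrade_available and its arguments are hypothetical stand-ins, not the charm-helpers API; LooseVersion is used because charm-helpers itself compares versions with it.

# Illustrative sketch only -- not the charm-helpers implementation.
from distutils.version import LooseVersion


def payload_upgrade_available(installed_version, available_version):
    """Return True when the configured source offers a newer payload.

    :param installed_version: version currently installed, e.g. '2.26.0'
    :param available_version: version offered by the target install source
    """
    if not installed_version:
        # Package not installed yet - nothing to upgrade.
        return False
    return LooseVersion(available_version) > LooseVersion(installed_version)


# Example: a swift payload at 2.26.0 with 2.28.0 available from the target
# cloud archive reports that an upgrade is available.
print(payload_upgrade_available('2.26.0', '2.28.0'))  # True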
parent 9725234908
commit cbbd244f2c
@@ -19,6 +19,7 @@

import glob
import grp
import json
import os
import pwd
import re
@@ -30,6 +31,7 @@ import yaml
from charmhelpers.core.hookenv import (
    application_name,
    config,
    ERROR,
    hook_name,
    local_unit,
    log,
@@ -416,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
    :param str unit_name: Unit name to use in check description
    :param bool immediate_check: For sysv init, run the service check immediately
    """
    # check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601 for details.
    # just remove check_haproxy if haproxy is added as a lsb resource in hacluster.
    for rid in relation_ids("ha"):
        ha_resources = relation_get("json_resources", rid=rid, unit=local_unit())
        if ha_resources:
            try:
                ha_resources_parsed = json.loads(ha_resources)
            except ValueError as e:
                log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR)
                raise
            if "lsb:haproxy" in ha_resources_parsed.values():
                if "haproxy" in services:
                    log("removed check_haproxy. This service will be monitored by check_crm")
                    services.remove("haproxy")
    for svc in services:
        # Don't add a check for these services from neutron-gateway
        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
@@ -25,6 +25,7 @@ import socket
import time

from base64 import b64decode
from distutils.version import LooseVersion
from subprocess import (
    check_call,
    check_output,
@@ -39,6 +40,7 @@ from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
from charmhelpers.fetch import (
    apt_install,
    filter_installed_packages,
    get_installed_version,
)
from charmhelpers.core.hookenv import (
    NoNetworkBinding,
@@ -59,6 +61,7 @@ from charmhelpers.core.hookenv import (
    network_get_primary_address,
    WARNING,
    service_name,
    remote_service_name,
)

from charmhelpers.core.sysctl import create as sysctl_create
@@ -130,6 +133,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
ADDRESS_TYPES = ['admin', 'internal', 'public']
HAPROXY_RUN_DIR = '/var/run/haproxy/'
DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"
DEFAULT_HAPROXY_EXPORTER_STATS_PORT = 8404


def ensure_packages(packages):
@@ -345,6 +349,14 @@ def db_ssl(rdata, ctxt, ssl_dir):

class IdentityServiceContext(OSContextGenerator):

    _forward_compat_remaps = {
        'admin_user': 'admin-user-name',
        'service_username': 'service-user-name',
        'service_tenant': 'service-project-name',
        'service_tenant_id': 'service-project-id',
        'service_domain': 'service-domain-name',
    }

    def __init__(self,
                 service=None,
                 service_user=None,
@@ -397,6 +409,11 @@ class IdentityServiceContext(OSContextGenerator):
        # 'www_authenticate_uri' replaced 'auth_uri' since Stein,
        # see keystonemiddleware upstream sources for more info
        if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
            if 'public_auth_url' in ctxt:
                c.update((
                    ('www_authenticate_uri', '{}/v3'.format(
                        ctxt.get('public_auth_url'))),))
            else:
                c.update((
                    ('www_authenticate_uri', "{}://{}:{}/v3".format(
                        ctxt.get('service_protocol', ''),
@@ -409,11 +426,17 @@ class IdentityServiceContext(OSContextGenerator):
                        ctxt.get('service_host', ''),
                        ctxt.get('service_port', ''))),))

        if 'internal_auth_url' in ctxt:
            c.update((
                ('auth_url', ctxt.get('internal_auth_url')),))
        else:
            c.update((
                ('auth_url', "{}://{}:{}/v3".format(
                    ctxt.get('auth_protocol', ''),
                    ctxt.get('auth_host', ''),
                    ctxt.get('auth_port', ''))),
                    ctxt.get('auth_port', ''))),))

        c.update((
            ('project_domain_name', ctxt.get('admin_domain_name', '')),
            ('user_domain_name', ctxt.get('admin_domain_name', '')),
            ('project_name', ctxt.get('admin_tenant_name', '')),
@@ -441,41 +464,86 @@ class IdentityServiceContext(OSContextGenerator):
        for rid in relation_ids(self.rel_name):
            self.related = True
            for unit in related_units(rid):
                rdata = {}
                # NOTE(jamespage):
                # forwards compat with application data
                # bag driven approach to relation.
                _adata = relation_get(rid=rid, app=remote_service_name(rid))
                adata = {}
                # if no app data bag presented - fallback
                # to legacy unit based relation data
                rdata = relation_get(rid=rid, unit=unit)
                serv_host = rdata.get('service_host')
                if _adata:
                    # New app data bag uses - instead of _
                    # in key names - remap for compat with
                    # existing relation data keys
                    for key, value in _adata.items():
                        if key == 'api-version':
                            adata[key.replace('-', '_')] = value.strip('v')
                        else:
                            adata[key.replace('-', '_')] = value
                    # Re-map some keys for backwards compatibility
                    for target, source in self._forward_compat_remaps.items():
                        adata[target] = _adata.get(source)
                # Now preferentially get data from the app data bag, but if
                # it's not available, get it from the legacy based relation
                # data.

                def _resolve(key):
                    return adata.get(key) or rdata.get(key)

                serv_host = _resolve('service_host')
                serv_host = format_ipv6_addr(serv_host) or serv_host
                auth_host = rdata.get('auth_host')
                auth_host = _resolve('auth_host')
                auth_host = format_ipv6_addr(auth_host) or auth_host
                int_host = rdata.get('internal_host')
                int_host = _resolve('internal_host',)
                int_host = format_ipv6_addr(int_host) or int_host
                svc_protocol = rdata.get('service_protocol') or 'http'
                auth_protocol = rdata.get('auth_protocol') or 'http'
                admin_role = rdata.get('admin_role') or 'Admin'
                int_protocol = rdata.get('internal_protocol') or 'http'
                api_version = rdata.get('api_version') or '2.0'
                ctxt.update({'service_port': rdata.get('service_port'),
                svc_protocol = _resolve('service_protocol') or 'http'
                auth_protocol = _resolve('auth_protocol') or 'http'
                admin_role = _resolve('admin_role') or 'Admin'
                int_protocol = _resolve('internal_protocol') or 'http'
                api_version = _resolve('api_version') or '2.0'
                ctxt.update({'service_port': _resolve('service_port'),
                             'service_host': serv_host,
                             'auth_host': auth_host,
                             'auth_port': rdata.get('auth_port'),
                             'auth_port': _resolve('auth_port'),
                             'internal_host': int_host,
                             'internal_port': rdata.get('internal_port'),
                             'admin_tenant_name': rdata.get('service_tenant'),
                             'admin_user': rdata.get('service_username'),
                             'admin_password': rdata.get('service_password'),
                             'internal_port': _resolve('internal_port'),
                             'admin_tenant_name': _resolve('service_tenant'),
                             'admin_user': _resolve('service_username'),
                             'admin_password': _resolve('service_password'),
                             'admin_role': admin_role,
                             'service_protocol': svc_protocol,
                             'auth_protocol': auth_protocol,
                             'internal_protocol': int_protocol,
                             'api_version': api_version})

                if rdata.get('service_type'):
                    ctxt['service_type'] = rdata.get('service_type')
                service_type = _resolve('service_type')
                if service_type:
                    ctxt['service_type'] = service_type

                if float(api_version) > 2:
                    ctxt.update({
                        'admin_domain_name': rdata.get('service_domain'),
                        'service_project_id': rdata.get('service_tenant_id'),
                        'service_domain_id': rdata.get('service_domain_id')})
                        'admin_domain_name': _resolve('service_domain'),
                        'service_project_id': _resolve('service_tenant_id'),
                        'service_domain_id': _resolve('service_domain_id')})

                # NOTE:
                # keystone-k8s operator presents full URLS
                # for all three endpoints - public and internal are
                # externally addressable for machine based charm
                public_auth_url = _resolve('public_auth_url')
                # if 'public_auth_url' in rdata:
                if public_auth_url:
                    ctxt.update({
                        'public_auth_url': public_auth_url,
                    })
                internal_auth_url = _resolve('internal_auth_url')
                # if 'internal_auth_url' in rdata:
                if internal_auth_url:
                    ctxt.update({
                        'internal_auth_url': internal_auth_url,
                    })

                # we keep all veriables in ctxt for compatibility and
                # add nested dictionary for keystone_authtoken generic
@@ -489,8 +557,8 @@ class IdentityServiceContext(OSContextGenerator):
                # NOTE(jamespage) this is required for >= icehouse
                # so a missing value just indicates keystone needs
                # upgrading
                ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
                ctxt['admin_domain_id'] = rdata.get('service_domain_id')
                ctxt['admin_tenant_id'] = _resolve('service_tenant_id')
                ctxt['admin_domain_id'] = _resolve('service_domain_id')
                return ctxt

        return {}
@@ -862,9 +930,14 @@ class HAProxyContext(OSContextGenerator):
    interfaces = ['cluster']

    def __init__(self, singlenode_mode=False,
                 address_types=ADDRESS_TYPES):
                 address_types=None,
                 exporter_stats_port=DEFAULT_HAPROXY_EXPORTER_STATS_PORT):
        if address_types is None:
            address_types = ADDRESS_TYPES[:]

        self.address_types = address_types
        self.singlenode_mode = singlenode_mode
        self.exporter_stats_port = exporter_stats_port

    def __call__(self):
        if not os.path.isdir(HAPROXY_RUN_DIR):
@@ -959,10 +1032,20 @@ class HAProxyContext(OSContextGenerator):
            db = kv()
            ctxt['stat_password'] = db.get('stat-password')
            if not ctxt['stat_password']:
                ctxt['stat_password'] = db.set('stat-password',
                                               pwgen(32))
                ctxt['stat_password'] = db.set('stat-password', pwgen(32))
                db.flush()

        # NOTE(rgildein): configure prometheus exporter for haproxy > 2.0.0
        # New bind will be created and a prometheus-exporter
        # will be used for path /metrics. At the same time,
        # prometheus-exporter avoids using auth.
        haproxy_version = get_installed_version("haproxy")
        if (haproxy_version and
                haproxy_version.ver_str >= LooseVersion("2.0.0") and
                is_relation_made("haproxy-exporter")):
            ctxt["stats_exporter_host"] = get_relation_ip("haproxy-exporter")
            ctxt["stats_exporter_port"] = self.exporter_stats_port

        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1 or
                    self.singlenode_mode):
@@ -1665,6 +1748,9 @@ class WSGIWorkerConfigContext(WorkerConfigContext):

    def __call__(self):
        total_processes = _calculate_workers()
        enable_wsgi_socket_rotation = config('wsgi-socket-rotation')
        if enable_wsgi_socket_rotation is None:
            enable_wsgi_socket_rotation = True
        ctxt = {
            "service_name": self.service_name,
            "user": self.user,
@@ -1678,6 +1764,7 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
            "public_processes": int(math.ceil(self.public_process_weight *
                                              total_processes)),
            "threads": 1,
            "wsgi_socket_rotation": enable_wsgi_socket_rotation,
        }
        return ctxt
@@ -25,6 +25,7 @@ Helpers for high availability.

import hashlib
import json
import os
import re

@@ -36,6 +37,7 @@ from charmhelpers.core.hookenv import (
    config,
    status_set,
    DEBUG,
    application_name,
)

from charmhelpers.core.host import (
@@ -65,6 +67,7 @@ JSON_ENCODE_OPTIONS = dict(

VIP_GROUP_NAME = 'grp_{service}_vips'
DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard"


class DNSHAException(Exception):
@@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data):
        relation_data['groups'] = {
            key: ' '.join(vip_group)
        }


def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard):
    """Load grafana dashboard json model and insert prometheus datasource.

    :param prometheus_app_name: name of the 'prometheus' application that will
        be used as datasource in grafana dashboard
    :type prometheus_app_name: str
    :param haproxy_dashboard: path to haproxy dashboard
    :type haproxy_dashboard: str
    :return: Grafana dashboard json model as a str.
    :rtype: str
    """
    from charmhelpers.contrib.templating import jinja

    dashboard_template = os.path.basename(haproxy_dashboard)
    dashboard_template_dir = os.path.dirname(haproxy_dashboard)
    app_name = application_name()
    datasource = "{} - Juju generated source".format(prometheus_app_name)
    return jinja.render(dashboard_template,
                        {"datasource": datasource,
                         "app_name": app_name,
                         "prometheus_app_name": prometheus_app_name},
                        template_dir=dashboard_template_dir,
                        jinja_env_args={"variable_start_string": "<< ",
                                        "variable_end_string": " >>"})
@@ -25,6 +25,7 @@ from charmhelpers.contrib.network.ip import (
    is_ipv6,
    get_ipv6_addr,
    resolve_network_cidr,
    get_iface_for_address
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered

@@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'):
    return unit_get(unit_get_fallback)


def get_invalid_vips():
    """Check if any of the provided vips are invalid.
    A vip is invalid if it doesn't belong to the subnet in any interface.
    If all vips are valid, this returns an empty list.

    :returns: A list of strings, where each string is an invalid vip address.
    :rtype: list
    """

    clustered = is_clustered()
    vips = config('vip')
    if vips:
        vips = vips.split()
    invalid_vips = []

    if clustered and vips:
        for vip in vips:
            iface_for_vip = get_iface_for_address(vip)
            if iface_for_vip is None:
                invalid_vips.append(vip)

    return invalid_vips


def resolve_address(endpoint_type=PUBLIC, override=True):
    """Return unit address depending on net config.
@@ -49,6 +49,11 @@ defaults

listen stats
    bind {{ local_host }}:{{ stat_port }}
{%- if stats_exporter_host and stats_exporter_port %}
    bind {{ stats_exporter_host }}:{{ stats_exporter_port }}
    option http-use-htx
    http-request use-service prometheus-exporter if { path /metrics }
{%- endif %}
    mode http
    stats enable
    stats hide-version
@@ -12,6 +12,12 @@ Listen {{ admin_port }}
Listen {{ public_port }}
{% endif -%}

{% if wsgi_socket_rotation -%}
WSGISocketRotation On
{% else -%}
WSGISocketRotation Off
{% endif -%}

{% if port -%}
<VirtualHost *:{{ port }}>
    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
@@ -12,6 +12,12 @@ Listen {{ admin_port }}
Listen {{ public_port }}
{% endif -%}

{% if wsgi_socket_rotation -%}
WSGISocketRotation On
{% else -%}
WSGISocketRotation Off
{% endif -%}

{% if port -%}
<VirtualHost *:{{ port }}>
    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
@@ -158,6 +158,7 @@ OPENSTACK_CODENAMES = OrderedDict([
    ('2021.1', 'wallaby'),
    ('2021.2', 'xena'),
    ('2022.1', 'yoga'),
    ('2022.2', 'zed'),
])

# The ugly duckling - must list releases oldest to newest
@@ -400,24 +401,16 @@ def get_os_codename_version(vers):
    error_out(e)


def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES,
                            raise_exception=False):
    '''Determine OpenStack version number from codename.'''
    for k, v in version_map.items():
        if v == codename:
            return k
    e = 'Could not derive OpenStack version for '\
        'codename: %s' % codename
    error_out(e)


def get_os_version_codename_swift(codename):
    '''Determine OpenStack version number of swift from codename.'''
    # for k, v in six.iteritems(SWIFT_CODENAMES):
    for k, v in SWIFT_CODENAMES.items():
        if k == codename:
            return v[-1]
    e = 'Could not derive swift version for '\
        'codename: %s' % codename
    if raise_exception:
        raise ValueError(str(e))
    error_out(e)


@@ -841,10 +834,6 @@ def openstack_upgrade_available(package):
    if not cur_vers:
        # The package has not been installed yet do not attempt upgrade
        return False
    if "swift" in package:
        codename = get_os_codename_install_source(src)
        avail_vers = get_os_version_codename_swift(codename)
    else:
        try:
            avail_vers = get_os_version_install_source(src)
        except Exception:
@@ -23,6 +23,12 @@ from subprocess import (
    call
)

from charmhelpers.core.hookenv import (
    log,
    WARNING,
    INFO
)


def _luks_uuid(dev):
    """
@@ -110,7 +116,7 @@ def is_device_mounted(device):
    return bool(re.search(r'MOUNTPOINT=".+"', out))


def mkfs_xfs(device, force=False, inode_size=1024):
def mkfs_xfs(device, force=False, inode_size=None):
    """Format device with XFS filesystem.

    By default this should fail if the device already has a filesystem on it.
@@ -118,11 +124,20 @@ def mkfs_xfs(device, force=False, inode_size=1024):
    :ptype device: tr
    :param force: Force operation
    :ptype: force: boolean
    :param inode_size: XFS inode size in bytes
    :param inode_size: XFS inode size in bytes; if set to 0 or None,
        the value used will be the XFS system default
    :ptype inode_size: int"""
    cmd = ['mkfs.xfs']
    if force:
        cmd.append("-f")

    cmd += ['-i', "size={}".format(inode_size), device]
    if inode_size:
        if inode_size >= 256 and inode_size <= 2048:
            cmd += ['-i', "size={}".format(inode_size)]
        else:
            log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING)
    else:
        log("Using XFS filesystem with system default inode size.", level=INFO)

    cmd += [device]
    check_call(cmd)
@@ -277,7 +277,7 @@ def service_resume(service_name, init_dir="/etc/init",
    return started


def service(action, service_name, **kwargs):
def service(action, service_name=None, **kwargs):
    """Control a system service.

    :param action: the action to take on the service
@@ -286,7 +286,9 @@ def service(action, service_name, **kwargs):
        the form of key=value.
    """
    if init_is_systemd(service_name=service_name):
        cmd = ['systemctl', action, service_name]
        cmd = ['systemctl', action]
        if service_name is not None:
            cmd.append(service_name)
    else:
        cmd = ['service', service_name, action]
    for key, value in kwargs.items():
@@ -30,6 +30,7 @@ UBUNTU_RELEASES = (
    'hirsute',
    'impish',
    'jammy',
    'kinetic',
)
@@ -15,7 +15,8 @@
import os
import json
import inspect
from collections import Iterable, OrderedDict
from collections import OrderedDict
from collections.abc import Iterable

from charmhelpers.core import host
from charmhelpers.core import hookenv
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import os
import hashlib
import re
@@ -24,11 +25,15 @@ from charmhelpers.payload.archive import (
    get_archive_handler,
    extract,
)
from charmhelpers.core.hookenv import (
    env_proxy_settings,
)
from charmhelpers.core.host import mkdir, check_hash

from urllib.request import (
    build_opener, install_opener, urlopen, urlretrieve,
    HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
    ProxyHandler
)
from urllib.parse import urlparse, urlunparse, parse_qs
from urllib.error import URLError
@@ -50,6 +55,20 @@ def splitpasswd(user):
    return user, None


@contextlib.contextmanager
def proxy_env():
    """
    Creates a context which temporarily modifies the proxy settings in os.environ.
    """
    restore = {**os.environ}  # Copy the current os.environ
    juju_proxies = env_proxy_settings() or {}
    os.environ.update(**juju_proxies)  # Insert or Update the os.environ
    yield os.environ
    for key in juju_proxies:
        del os.environ[key]  # remove any keys which were added or updated
    os.environ.update(**restore)  # restore any original values


class ArchiveUrlFetchHandler(BaseFetchHandler):
    """
    Handler to download archive files from arbitrary URLs.
@@ -80,6 +99,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
        # propagate all exceptions
        # URLError, OSError, etc
        proto, netloc, path, params, query, fragment = urlparse(source)
        handlers = []
        if proto in ('http', 'https'):
            auth, barehost = splituser(netloc)
            if auth is not None:
@@ -89,8 +109,11 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
                # Realm is set to None in add_password to force the username and password
                # to be used whatever the realm
                passman.add_password(None, source, username, password)
                authhandler = HTTPBasicAuthHandler(passman)
                opener = build_opener(authhandler)
                handlers.append(HTTPBasicAuthHandler(passman))

        with proxy_env():
            handlers.append(ProxyHandler())
            opener = build_opener(*handlers)
            install_opener(opener)
            response = urlopen(source)
            try:
@@ -52,7 +52,7 @@ def _snap_exec(commands):
    :param commands: List commands
    :return: Integer exit code
    """
    assert type(commands) == list
    assert type(commands) is list

    retry_count = 0
    return_code = None
@@ -222,6 +222,14 @@ CLOUD_ARCHIVE_POCKETS = {
    'yoga/proposed': 'focal-proposed/yoga',
    'focal-yoga/proposed': 'focal-proposed/yoga',
    'focal-proposed/yoga': 'focal-proposed/yoga',
    # Zed
    'zed': 'jammy-updates/zed',
    'jammy-zed': 'jammy-updates/zed',
    'jammy-zed/updates': 'jammy-updates/zed',
    'jammy-updates/zed': 'jammy-updates/zed',
    'zed/proposed': 'jammy-proposed/zed',
    'jammy-zed/proposed': 'jammy-proposed/zed',
    'jammy-proposed/zed': 'jammy-proposed/zed',

    # OVN
    'focal-ovn-22.03': 'focal-updates/ovn-22.03',
@@ -252,6 +260,7 @@ OPENSTACK_RELEASES = (
    'wallaby',
    'xena',
    'yoga',
    'zed',
)


@@ -278,6 +287,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
    ('hirsute', 'wallaby'),
    ('impish', 'xena'),
    ('jammy', 'yoga'),
    ('kinetic', 'zed'),
])


@@ -357,6 +367,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
    :type quiet: bool
    :raises: subprocess.CalledProcessError
    """
    if not packages:
        log("Nothing to install", level=DEBUG)
        return
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']