Charm-helpers sync

Change-Id: I0053f28aaaecc6b9d60dfbccbb7c308c929cb046
commit dc2d4d32cb (parent 093383bb28)
@@ -19,6 +19,7 @@
 import glob
 import grp
+import json
 import os
 import pwd
 import re
@@ -30,6 +31,7 @@ import yaml
 from charmhelpers.core.hookenv import (
     application_name,
     config,
+    ERROR,
     hook_name,
     local_unit,
     log,
@@ -416,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
     :param str unit_name: Unit name to use in check description
     :param bool immediate_check: For sysv init, run the service check immediately
     """
+    # check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601 for details.
+    # just remove check_haproxy if haproxy is added as a lsb resource in hacluster.
+    for rid in relation_ids("ha"):
+        ha_resources = relation_get("json_resources", rid=rid, unit=local_unit())
+        if ha_resources:
+            try:
+                ha_resources_parsed = json.loads(ha_resources)
+            except ValueError as e:
+                log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR)
+                raise
+            if "lsb:haproxy" in ha_resources_parsed.values():
+                if "haproxy" in services:
+                    log("removed check_haproxy. This service will be monitored by check_crm")
+                    services.remove("haproxy")
     for svc in services:
         # Don't add a check for these services from neutron-gateway
         if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
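A minimal sketch of the new logic in isolation (the relation value below is a hypothetical hacluster json_resources payload): when hacluster manages haproxy as an lsb resource, the plain service check is dropped because check_crm already covers it.

    import json

    ha_resources = '{"res_haproxy": "lsb:haproxy"}'  # hypothetical relation data
    services = ['haproxy', 'apache2']

    if "lsb:haproxy" in json.loads(ha_resources).values():
        if "haproxy" in services:
            services.remove("haproxy")
    print(services)  # ['apache2']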
@@ -221,6 +221,13 @@ def https():
         return True
     if config_get('ssl_cert') and config_get('ssl_key'):
         return True
+    # Local import to avoid circular dependency.
+    import charmhelpers.contrib.openstack.cert_utils as cert_utils
+    if (
+        cert_utils.get_certificate_request() and not
+        cert_utils.get_requests_for_local_unit("certificates")
+    ):
+        return False
     for r_id in relation_ids('certificates'):
         for unit in relation_list(r_id):
             ca = relation_get('ca', rid=r_id, unit=unit)
@@ -324,7 +331,7 @@ def valid_hacluster_config():
     '''
     vip = config_get('vip')
     dns = config_get('dns-ha')
-    if not(bool(vip) ^ bool(dns)):
+    if not (bool(vip) ^ bool(dns)):
         msg = ('HA: Either vip or dns-ha must be set but not both in order to '
                'use high availability')
         status_set('blocked', msg)
@@ -539,7 +539,7 @@ def port_has_listener(address, port):
     """
     cmd = ['nc', '-z', address, str(port)]
     result = subprocess.call(cmd)
-    return not(bool(result))
+    return not (bool(result))
 
 
 def assert_charm_supports_ipv6():
@@ -127,7 +127,9 @@ def deferred_events():
     """
     events = []
     for defer_file in deferred_events_files():
-        events.append((defer_file, read_event_file(defer_file)))
+        event = read_event_file(defer_file)
+        if event.policy_requestor_name == hookenv.service_name():
+            events.append((defer_file, event))
     return events
 
 
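A hedged sketch of the new filtering, with hypothetical stand-ins for the event records and service name: only deferred events raised by this unit's own charm are returned now.

    class Event:  # stand-in for a deferred-event record
        def __init__(self, policy_requestor_name):
            self.policy_requestor_name = policy_requestor_name

    service_name = 'neutron-gateway'  # hypothetical hookenv.service_name() result
    found = [('a.deferred', Event('neutron-gateway')),
             ('b.deferred', Event('ovn-central'))]

    events = [(path, ev) for path, ev in found
              if ev.policy_requestor_name == service_name]
    print([path for path, _ in events])  # ['a.deferred']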
@@ -25,6 +25,7 @@ Helpers for high availability.
 
 import hashlib
 import json
+import os
 
 import re
 
@@ -36,6 +37,7 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
+    application_name,
 )
 
 from charmhelpers.core.host import (
@@ -65,6 +67,7 @@ JSON_ENCODE_OPTIONS = dict(
 
 VIP_GROUP_NAME = 'grp_{service}_vips'
 DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
+HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard"
 
 
 class DNSHAException(Exception):
@@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data):
         relation_data['groups'] = {
             key: ' '.join(vip_group)
         }
+
+
+def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard):
+    """Load grafana dashboard json model and insert prometheus datasource.
+
+    :param prometheus_app_name: name of the 'prometheus' application that will
+                                be used as datasource in grafana dashboard
+    :type prometheus_app_name: str
+    :param haproxy_dashboard: path to haproxy dashboard
+    :type haproxy_dashboard: str
+    :return: Grafana dashboard json model as a str.
+    :rtype: str
+    """
+    from charmhelpers.contrib.templating import jinja
+
+    dashboard_template = os.path.basename(haproxy_dashboard)
+    dashboard_template_dir = os.path.dirname(haproxy_dashboard)
+    app_name = application_name()
+    datasource = "{} - Juju generated source".format(prometheus_app_name)
+    return jinja.render(dashboard_template,
+                        {"datasource": datasource,
+                         "app_name": app_name,
+                         "prometheus_app_name": prometheus_app_name},
+                        template_dir=dashboard_template_dir,
+                        jinja_env_args={"variable_start_string": "<< ",
+                                        "variable_end_string": " >>"})
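The unusual `<< `/` >>` delimiters keep Grafana's native `{{ }}` placeholders intact during rendering. A standalone sketch of the trick (assumes the jinja2 package is available; the template string is illustrative):

    import jinja2

    env = jinja2.Environment(variable_start_string="<< ",
                             variable_end_string=" >>")
    tmpl = env.from_string(
        '{"datasource": "<< datasource >>", "legend": "{{hostname}}"}')
    print(tmpl.render(datasource="prometheus - Juju generated source"))
    # {"datasource": "prometheus - Juju generated source", "legend": "{{hostname}}"}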
@@ -25,6 +25,7 @@ from charmhelpers.contrib.network.ip import (
     is_ipv6,
     get_ipv6_addr,
     resolve_network_cidr,
+    get_iface_for_address
 )
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
 
@@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'):
     return unit_get(unit_get_fallback)
 
 
+def get_invalid_vips():
+    """Check if any of the provided vips are invalid.
+    A vip is invalid if it doesn't belong to the subnet in any interface.
+    If all vips are valid, this returns an empty list.
+
+    :returns: A list of strings, where each string is an invalid vip address.
+    :rtype: list
+    """
+
+    clustered = is_clustered()
+    vips = config('vip')
+    if vips:
+        vips = vips.split()
+    invalid_vips = []
+
+    if clustered and vips:
+        for vip in vips:
+            iface_for_vip = get_iface_for_address(vip)
+            if iface_for_vip is None:
+                invalid_vips.append(vip)
+
+    return invalid_vips
+
+
 def resolve_address(endpoint_type=PUBLIC, override=True):
     """Return unit address depending on net config.
 
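A hedged usage sketch (the status handling is illustrative, not part of this change; status_set comes from charmhelpers.core.hookenv): a charm can surface any offending VIPs as a blocked workload status.

    from charmhelpers.core.hookenv import status_set

    invalid = get_invalid_vips()
    if invalid:
        status_set('blocked', 'Invalid vips: {}'.format(' '.join(invalid)))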
@@ -159,6 +159,7 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2021.2', 'xena'),
     ('2022.1', 'yoga'),
     ('2022.2', 'zed'),
+    ('2023.1', 'antelope'),
 ])
 
 # The ugly duckling - must list releases oldest to newest
@@ -956,7 +957,7 @@ def os_requires_version(ostack_release, pkg):
     def wrap(f):
         @wraps(f)
         def wrapped_f(*args):
-            if os_release(pkg) < ostack_release:
+            if CompareOpenStackReleases(os_release(pkg)) < ostack_release:
                 raise Exception("This hook is not supported on releases"
                                 " before %s" % ostack_release)
             f(*args)
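The wrap matters because plain string comparison orders codenames alphabetically, which breaks once the release cycle wraps from 'zed' back to 'antelope'. A standalone sketch of the idea behind CompareOpenStackReleases (the release tuple is abridged, mirroring OPENSTACK_RELEASES, oldest to newest):

    RELEASES = ('xena', 'yoga', 'zed', 'antelope')  # abridged, oldest to newest

    print('antelope' < 'zed')  # True lexically, yet antelope is the newer release
    print(RELEASES.index('antelope') < RELEASES.index('zed'))  # False: correct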
@@ -1327,7 +1328,7 @@ def _check_listening_on_services_ports(services, test=False):
     @param test: default=False, if False, test for closed, otherwise open.
     @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
     """
-    test = not(not(test))  # ensure test is True or False
+    test = not (not (test))  # ensure test is True or False
     all_ports = list(itertools.chain(*services.values()))
     ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
     map_ports = OrderedDict()
@@ -1583,7 +1584,7 @@ def is_unit_paused_set():
         with unitdata.HookData()() as t:
             kv = t[0]
             # transform something truth-y into a Boolean.
-            return not(not(kv.get('unit-paused')))
+            return not (not (kv.get('unit-paused')))
     except Exception:
         return False
 
@@ -2181,7 +2182,7 @@ def is_unit_upgrading_set():
         with unitdata.HookData()() as t:
             kv = t[0]
             # transform something truth-y into a Boolean.
-            return not(not(kv.get('unit-upgrading')))
+            return not (not (kv.get('unit-upgrading')))
     except Exception:
         return False
 
@@ -28,7 +28,6 @@ import os
 import shutil
 import json
 import time
-import uuid
 
 from subprocess import (
     check_call,
@@ -1677,6 +1676,10 @@ class CephBrokerRq(object):
     The API is versioned and defaults to version 1.
     """
+
+    # The below hash is the result of running
+    # `hashlib.sha1('[]'.encode()).hexdigest()`
+    EMPTY_LIST_SHA = '97d170e1550eee4afc0af065b78cda302a97674c'
 
     def __init__(self, api_version=1, request_id=None, raw_request_data=None):
         """Initialize CephBrokerRq object.
 
@@ -1685,8 +1688,12 @@ class CephBrokerRq(object):
 
         :param api_version: API version for request (default: 1).
         :type api_version: Optional[int]
-        :param request_id: Unique identifier for request.
-                           (default: string representation of generated UUID)
+        :param request_id: Unique identifier for request. The identifier will
+                           be updated as ops are added or removed from the
+                           broker request. This ensures that Ceph will
+                           correctly process requests where operations are
+                           added after the initial request is processed.
+                           (default: sha1 of operations)
         :type request_id: Optional[str]
         :param raw_request_data: JSON-encoded string to build request from.
         :type raw_request_data: Optional[str]
@@ -1695,16 +1702,20 @@ class CephBrokerRq(object):
         if raw_request_data:
             request_data = json.loads(raw_request_data)
             self.api_version = request_data['api-version']
-            self.request_id = request_data['request-id']
             self.set_ops(request_data['ops'])
+            self.request_id = request_data['request-id']
         else:
             self.api_version = api_version
             if request_id:
                 self.request_id = request_id
             else:
-                self.request_id = str(uuid.uuid1())
+                self.request_id = CephBrokerRq.EMPTY_LIST_SHA
             self.ops = []
 
+    def _hash_ops(self):
+        """Return the sha1 of the requested Broker ops."""
+        return hashlib.sha1(json.dumps(self.ops, sort_keys=True).encode()).hexdigest()
+
     def add_op(self, op):
         """Add an op if it is not already in the list.
 
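A standalone sketch of the deterministic request-id scheme introduced here: the id is the sha1 of the JSON-serialised op list, so an empty request hashes to EMPTY_LIST_SHA and the id changes whenever the ops change (the sample op is hypothetical):

    import hashlib
    import json

    ops = []
    print(hashlib.sha1(json.dumps(ops, sort_keys=True).encode()).hexdigest())
    # 97d170e1550eee4afc0af065b78cda302a97674c -- matches EMPTY_LIST_SHA

    ops.append({'op': 'create-pool', 'name': 'mypool'})  # hypothetical op
    print(hashlib.sha1(json.dumps(ops, sort_keys=True).encode()).hexdigest())
    # a different id, so Ceph treats the grown request as new work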
@@ -1713,6 +1724,7 @@ class CephBrokerRq(object):
         """
         if op not in self.ops:
             self.ops.append(op)
+            self.request_id = self._hash_ops()
 
     def add_op_request_access_to_group(self, name, namespace=None,
                                        permission=None, key_name=None,
@@ -1991,6 +2003,7 @@ class CephBrokerRq(object):
         to allow comparisons to ensure validity.
         """
         self.ops = ops
+        self.request_id = self._hash_ops()
 
     @property
     def request(self):
@@ -23,6 +23,12 @@ from subprocess import (
     call
 )
+
+from charmhelpers.core.hookenv import (
+    log,
+    WARNING,
+    INFO
+)
 
 
 def _luks_uuid(dev):
     """
@@ -110,7 +116,7 @@ def is_device_mounted(device):
     return bool(re.search(r'MOUNTPOINT=".+"', out))
 
 
-def mkfs_xfs(device, force=False, inode_size=1024):
+def mkfs_xfs(device, force=False, inode_size=None):
     """Format device with XFS filesystem.
 
     By default this should fail if the device already has a filesystem on it.
@@ -118,11 +124,20 @@ def mkfs_xfs(device, force=False, inode_size=1024):
     :ptype device: tr
     :param force: Force operation
     :ptype: force: boolean
-    :param inode_size: XFS inode size in bytes
+    :param inode_size: XFS inode size in bytes; if set to 0 or None,
+        the value used will be the XFS system default
     :ptype inode_size: int"""
     cmd = ['mkfs.xfs']
     if force:
         cmd.append("-f")
 
-    cmd += ['-i', "size={}".format(inode_size), device]
+    if inode_size:
+        if inode_size >= 256 and inode_size <= 2048:
+            cmd += ['-i', "size={}".format(inode_size)]
+        else:
+            log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING)
+    else:
+        log("Using XFS filesystem with system default inode size.", level=INFO)
+
+    cmd += [device]
     check_call(cmd)
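A hedged usage sketch of the new parameter handling (the device path is hypothetical, and calling this really does format the device):

    mkfs_xfs('/dev/vdb')                   # XFS system-default inode size
    mkfs_xfs('/dev/vdb', inode_size=512)   # explicit size inside [256, 2048]
    mkfs_xfs('/dev/vdb', inode_size=9999)  # out of range: warns, uses default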
|
@ -954,7 +954,7 @@ def pwgen(length=None):
|
|||||||
random_generator = random.SystemRandom()
|
random_generator = random.SystemRandom()
|
||||||
random_chars = [
|
random_chars = [
|
||||||
random_generator.choice(alphanumeric_chars) for _ in range(length)]
|
random_generator.choice(alphanumeric_chars) for _ in range(length)]
|
||||||
return(''.join(random_chars))
|
return ''.join(random_chars)
|
||||||
|
|
||||||
|
|
||||||
def is_phy_iface(interface):
|
def is_phy_iface(interface):
|
||||||
|
@ -31,6 +31,7 @@ UBUNTU_RELEASES = (
|
|||||||
'impish',
|
'impish',
|
||||||
'jammy',
|
'jammy',
|
||||||
'kinetic',
|
'kinetic',
|
||||||
|
'lunar',
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -171,8 +171,9 @@ class Storage(object):
|
|||||||
path parameter which causes sqlite3 to only build the db in memory.
|
path parameter which causes sqlite3 to only build the db in memory.
|
||||||
This should only be used for testing purposes.
|
This should only be used for testing purposes.
|
||||||
"""
|
"""
|
||||||
def __init__(self, path=None):
|
def __init__(self, path=None, keep_revisions=False):
|
||||||
self.db_path = path
|
self.db_path = path
|
||||||
|
self.keep_revisions = keep_revisions
|
||||||
if path is None:
|
if path is None:
|
||||||
if 'UNIT_STATE_DB' in os.environ:
|
if 'UNIT_STATE_DB' in os.environ:
|
||||||
self.db_path = os.environ['UNIT_STATE_DB']
|
self.db_path = os.environ['UNIT_STATE_DB']
|
||||||
@@ -242,7 +243,7 @@ class Storage(object):
         Remove a key from the database entirely.
         """
         self.cursor.execute('delete from kv where key=?', [key])
-        if self.revision and self.cursor.rowcount:
+        if self.keep_revisions and self.revision and self.cursor.rowcount:
             self.cursor.execute(
                 'insert into kv_revisions values (?, ?, ?)',
                 [key, self.revision, json.dumps('DELETED')])
@@ -259,14 +260,14 @@ class Storage(object):
         if keys is not None:
             keys = ['%s%s' % (prefix, key) for key in keys]
             self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
-            if self.revision and self.cursor.rowcount:
+            if self.keep_revisions and self.revision and self.cursor.rowcount:
                 self.cursor.execute(
                     'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
                     list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
         else:
             self.cursor.execute('delete from kv where key like ?',
                                 ['%s%%' % prefix])
-            if self.revision and self.cursor.rowcount:
+            if self.keep_revisions and self.revision and self.cursor.rowcount:
                 self.cursor.execute(
                     'insert into kv_revisions values (?, ?, ?)',
                     ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
@@ -299,7 +300,7 @@ class Storage(object):
                 where key = ?''', [serialized, key])
 
         # Save
-        if not self.revision:
+        if (not self.keep_revisions) or (not self.revision):
            return value
 
         self.cursor.execute(
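A hedged usage sketch: revision history is now opt-in, so with the default keep_revisions=False the kv_revisions table stays untouched, and callers that want the old audit trail pass the new flag (the keys and values here are illustrative).

    from charmhelpers.core.unitdata import Storage

    db = Storage(':memory:')  # in-memory database, for testing only
    db.set('foo', 'bar')      # stored, but no revision row is written

    audited = Storage(':memory:', keep_revisions=True)  # opt back in to history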
@@ -230,6 +230,18 @@ CLOUD_ARCHIVE_POCKETS = {
     'zed/proposed': 'jammy-proposed/zed',
     'jammy-zed/proposed': 'jammy-proposed/zed',
     'jammy-proposed/zed': 'jammy-proposed/zed',
+    # antelope
+    'antelope': 'jammy-updates/antelope',
+    'jammy-antelope': 'jammy-updates/antelope',
+    'jammy-antelope/updates': 'jammy-updates/antelope',
+    'jammy-updates/antelope': 'jammy-updates/antelope',
+    'antelope/proposed': 'jammy-proposed/antelope',
+    'jammy-antelope/proposed': 'jammy-proposed/antelope',
+    'jammy-proposed/antelope': 'jammy-proposed/antelope',
+
+    # OVN
+    'focal-ovn-22.03': 'focal-updates/ovn-22.03',
+    'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03',
 }
 
 
@@ -257,6 +269,7 @@ OPENSTACK_RELEASES = (
     'xena',
     'yoga',
     'zed',
+    'antelope',
 )
 
 
@@ -284,6 +297,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('impish', 'xena'),
     ('jammy', 'yoga'),
     ('kinetic', 'zed'),
+    ('lunar', 'antelope'),
 ])
 
 
@@ -363,6 +377,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
     :type quiet: bool
     :raises: subprocess.CalledProcessError
     """
+    if not packages:
+        log("Nothing to install", level=DEBUG)
+        return
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']
 
@@ -574,7 +591,7 @@ def _get_key_by_keyid(keyid):
     curl_cmd = ['curl', keyserver_url.format(keyid)]
     # use proxy server settings in order to retrieve the key
     return subprocess.check_output(curl_cmd,
-                                   env=env_proxy_settings(['https']))
+                                   env=env_proxy_settings(['https', 'no_proxy']))
 
 
 def _dearmor_gpg_key(key_asc):
@@ -687,6 +704,7 @@ def add_source(source, key=None, fail_invalid=False):
         (r"^cloud-archive:(.*)$", _add_apt_repository),
         (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
         (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(ovn-.*)$", _add_cloud_distro_check),
         (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
         (r"^cloud:(.*)$", _add_cloud_pocket),
         (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
@@ -750,6 +768,11 @@ def _add_apt_repository(spec):
     )
 
 
+def __write_sources_list_d_actual_pocket(file, actual_pocket):
+    with open('/etc/apt/sources.list.d/{}'.format(file), 'w') as apt:
+        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+
+
 def _add_cloud_pocket(pocket):
     """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
 
@@ -769,8 +792,9 @@ def _add_cloud_pocket(pocket):
                         'Unsupported cloud: source option %s' %
                         pocket)
     actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
-    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
-        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+    __write_sources_list_d_actual_pocket(
+        'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'),
+        actual_pocket)
 
 
 def _add_cloud_staging(cloud_archive_release, openstack_release):
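A standalone sketch of the resulting file naming: OVN pockets now land in their own sources file, so they can coexist with a regular cloud-archive pocket.

    for pocket in ('antelope', 'focal-ovn-22.03'):
        fname = 'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn')
        print(pocket, '->', fname)
    # antelope -> cloud-archive.list
    # focal-ovn-22.03 -> cloud-archive-ovn.list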
@@ -931,10 +955,14 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
         try:
             result = subprocess.check_call(cmd, env=env, **kwargs)
         except subprocess.CalledProcessError as e:
-            retry_count = retry_count + 1
-            if retry_count > max_retries:
-                raise
             result = e.returncode
+            if result not in retry_results:
+                # a non-retriable exitcode was produced
+                raise
+            retry_count += 1
+            if retry_count > max_retries:
+                # a retriable exitcode was produced more than {max_retries} times
+                raise
         log(retry_message)
         time.sleep(CMD_RETRY_DELAY)
 
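A standalone sketch of the new control flow (the helper below is illustrative, not part of charm-helpers): non-retriable exit codes now fail fast, while retriable ones raise only once max_retries is exhausted.

    def should_raise(returncode, retry_count, retry_results=(1,), max_retries=3):
        if returncode not in retry_results:
            return True                    # non-retriable exit code: fail fast
        return retry_count > max_retries   # retriable, but retries exhausted

    print(should_raise(100, 0))  # True  -- exit code 100 is not retriable
    print(should_raise(1, 2))    # False -- retriable, keep retrying
    print(should_raise(1, 4))    # True  -- retriable, but gave up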
@@ -122,13 +122,12 @@ class Cache(object):
         :raises: subprocess.CalledProcessError
         """
         pkgs = {}
-        cmd = ['dpkg-query', '--list']
+        cmd = [
+            'dpkg-query', '--show',
+            '--showformat',
+            r'${db:Status-Abbrev}\t${Package}\t${Version}\t${Architecture}\t${binary:Summary}\n'
+        ]
         cmd.extend(packages)
-        if locale.getlocale() == (None, None):
-            # subprocess calls out to locale.getpreferredencoding(False) to
-            # determine encoding. Workaround for Trusty where the
-            # environment appears to not be set up correctly.
-            locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
         try:
             output = subprocess.check_output(cmd,
                                              stderr=subprocess.STDOUT,
@@ -140,24 +139,17 @@ class Cache(object):
             if cp.returncode != 1:
                 raise
             output = cp.output
-        headings = []
         for line in output.splitlines():
-            if line.startswith('||/'):
-                headings = line.split()
-                headings.pop(0)
-                continue
-            elif (line.startswith('|') or line.startswith('+') or
-                  line.startswith('dpkg-query:')):
-                continue
-            else:
-                data = line.split(None, 4)
-                status = data.pop(0)
-                if status not in ('ii', 'hi'):
-                    continue
-                pkg = {}
-                pkg.update({k.lower(): v for k, v in zip(headings, data)})
-                if 'name' in pkg:
-                    pkgs.update({pkg['name']: pkg})
+            # only process lines for successfully installed packages
+            if not (line.startswith('ii ') or line.startswith('hi ')):
+                continue
+            status, name, version, arch, desc = line.split('\t', 4)
+            pkgs[name] = {
+                'name': name,
+                'version': version,
+                'architecture': arch,
+                'description': desc,
+            }
         return pkgs
 
     def _apt_cache_show(self, packages):
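A standalone sketch of why the new format is simpler to parse (the sample line mimics dpkg-query --show output and is illustrative): every record is one tab-separated line, so a single split() replaces the old column-heading heuristics.

    line = 'ii \tbash\t5.1-6ubuntu1\tamd64\tGNU Bourne Again SHell'
    status, name, version, arch, desc = line.split('\t', 4)
    if status.startswith(('ii', 'hi')):
        print({name: {'name': name, 'version': version,
                      'architecture': arch, 'description': desc}})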