# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import ConfigParser
import uuid
from base64 import b64encode
from collections import OrderedDict
from copy import deepcopy
from urlparse import urlparse
from uuid import uuid1
import json
from charmhelpers.contrib.openstack import context, templating
from charmhelpers.contrib.hahelpers.cluster import (
get_hacluster_config,
)
from charmhelpers.contrib.peerstorage import (
peer_retrieve,
peer_store,
)
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
get_host_ip,
get_hostname,
get_os_codename_install_source,
incomplete_relation_data,
is_ip,
os_release,
reset_os_release,
save_script_rc as _save_script_rc,
is_unit_paused_set,
make_assess_status_func,
pause_unit,
resume_unit,
os_application_version_set,
token_cache_pkgs,
enable_memcache,
CompareOpenStackReleases,
)
from charmhelpers.fetch import (
apt_upgrade,
apt_update,
apt_install,
apt_purge,
apt_autoremove,
add_source,
filter_installed_packages,
filter_missing_packages,
)
from charmhelpers.core.hookenv import (
config,
is_leader,
log,
leader_get,
leader_set,
relation_get,
relation_id,
2013-08-01 20:42:16 -07:00
relation_ids,
remote_unit,
DEBUG,
ERROR,
INFO,
status_set,
related_units,
local_unit,
)
from charmhelpers.core.host import (
service_pause,
service_resume,
service_running,
service_start,
service_stop,
service_restart,
lsb_release,
CompareHostReleases,
)
from charmhelpers.contrib.network.ip import (
is_ipv6,
ns_query,
)
from charmhelpers.core.decorators import (
retry_on_exception,
)
from charmhelpers.contrib.openstack.ip import (
canonical_url,
INTERNAL,
)
import nova_cc_context
TEMPLATES = 'templates/'
CLUSTER_RES = 'grp_nova_vips'
SHARED_METADATA_SECRET_KEY = 'shared-metadata-secret'
# The interface is said to be satisfied if any one of the interfaces in
# the list has a complete context.
REQUIRED_INTERFACES = {
'database': ['shared-db'],
'messaging': ['amqp'],
'identity': ['identity-service'],
'image': ['image-service'],
'compute': ['nova-compute'],
}
# removed from original: charm-helper-sh
BASE_PACKAGES = [
'apache2',
'haproxy',
'libapache2-mod-wsgi',
'python-keystoneclient',
'python-mysqldb',
'python-psycopg2',
'python-psutil',
'python-six',
'python-memcache',
'uuid',
]
PY3_PACKAGES = [
'libapache2-mod-wsgi-py3',
'python3-nova',
'python3-keystoneclient',
'python3-psutil',
'python3-six',
'python3-memcache',
]
VERSION_PACKAGE = 'nova-common'
BASE_SERVICES = [
'nova-api-ec2',
'nova-api-os-compute',
'nova-placement-api',
'nova-objectstore',
'nova-cert',
'nova-scheduler',
'nova-conductor',
2013-08-01 20:42:16 -07:00
]
AWS_COMPAT_SERVICES = ['nova-api-ec2', 'nova-objectstore']
SERVICE_BLACKLIST = {
'liberty': AWS_COMPAT_SERVICES,
'newton': ['nova-cert'],
}
API_PORTS = {
'nova-api-ec2': 8773,
'nova-api-os-compute': 8774,
'nova-api-metadata': 8775,
'nova-api-os-volume': 8776,
'nova-placement-api': 8778,
'nova-objectstore': 3333,
}
NOVA_CONF_DIR = "/etc/nova"
NEUTRON_CONF_DIR = "/etc/neutron"
NOVA_CONF = '%s/nova.conf' % NOVA_CONF_DIR
NOVA_API_PASTE = '%s/api-paste.ini' % NOVA_CONF_DIR
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
APACHE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
APACHE_24_CONF = '/etc/apache2/sites-available/openstack_https_frontend.conf'
MEMCACHED_CONF = '/etc/memcached.conf'
WSGI_NOVA_PLACEMENT_API_CONF = \
'/etc/apache2/sites-enabled/wsgi-openstack-api.conf'
PACKAGE_NOVA_PLACEMENT_API_CONF = \
'/etc/apache2/sites-enabled/nova-placement-api.conf'
WSGI_NOVA_METADATA_API_CONF = \
'/etc/apache2/sites-enabled/wsgi-openstack-metadata.conf'
VENDORDATA_FILE = '/etc/nova/vendor_data.json'
def resolve_services():
    _services = deepcopy(BASE_SERVICES)
    os_rel = CompareOpenStackReleases(os_release('nova-common'))
    for release in SERVICE_BLACKLIST:
        if os_rel >= release or config('disable-aws-compat'):
            for service in SERVICE_BLACKLIST[release]:
                _services.remove(service)
    return _services
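
# Illustrative example (not executed): with os_release('nova-common') ==
# 'newton' and default charm config, resolve_services() drops every service
# blacklisted at or below newton, i.e. the AWS-compat services and
# 'nova-cert', leaving:
#
#   ['nova-api-os-compute', 'nova-placement-api',
#    'nova-scheduler', 'nova-conductor']
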
BASE_RESOURCE_MAP = OrderedDict([
(NOVA_CONF, {
'services': resolve_services(),
'contexts': [context.AMQPContext(ssl_dir=NOVA_CONF_DIR),
context.SharedDBContext(
relation_prefix='nova', ssl_dir=NOVA_CONF_DIR),
context.OSConfigFlagContext(
charm_flag='nova-alchemy-flags',
template_flag='nova_alchemy_flags'),
context.ImageServiceContext(),
context.OSConfigFlagContext(),
context.SubordinateConfigContext(
interface='nova-vmware',
service='nova',
config_file=NOVA_CONF),
context.SyslogContext(),
context.LogLevelContext(),
nova_cc_context.HAProxyContext(),
nova_cc_context.IdentityServiceContext(
service='nova',
service_user='nova'),
nova_cc_context.VolumeServiceContext(),
context.ZeroMQContext(),
context.NotificationDriverContext(),
nova_cc_context.NovaIPv6Context(),
nova_cc_context.NeutronCCContext(),
nova_cc_context.NovaConfigContext(),
nova_cc_context.InstanceConsoleContext(),
nova_cc_context.ConsoleSSLContext(),
nova_cc_context.CloudComputeContext(),
context.InternalEndpointContext(),
context.VolumeAPIContext('nova-common'),
nova_cc_context.NeutronAPIContext(),
nova_cc_context.SerialConsoleContext(),
context.MemcacheContext(),
nova_cc_context.NovaMetadataContext()],
}),
(NOVA_API_PASTE, {
'services': [s for s in resolve_services() if 'api' in s],
'contexts': [nova_cc_context.IdentityServiceContext(),
nova_cc_context.APIRateLimitingContext()],
}),
(HAPROXY_CONF, {
'contexts': [context.HAProxyContext(singlenode_mode=True),
nova_cc_context.HAProxyContext()],
'services': ['haproxy'],
}),
(APACHE_CONF, {
'contexts': [nova_cc_context.ApacheSSLContext()],
'services': ['apache2'],
}),
(APACHE_24_CONF, {
'contexts': [nova_cc_context.ApacheSSLContext()],
'services': ['apache2'],
}),
])
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
NOVA_SSH_DIR = '/etc/nova/compute_ssh/'
CONSOLE_CONFIG = {
'spice': {
'packages': ['nova-spiceproxy', 'nova-consoleauth'],
'services': ['nova-spiceproxy', 'nova-consoleauth'],
'proxy-page': '/spice_auto.html',
'proxy-port': 6082,
},
'novnc': {
'packages': ['nova-novncproxy', 'nova-consoleauth'],
'services': ['nova-novncproxy', 'nova-consoleauth'],
'proxy-page': '/vnc_auto.html',
'proxy-port': 6080,
},
'xvpvnc': {
'packages': ['nova-xvpvncproxy', 'nova-consoleauth'],
'services': ['nova-xvpvncproxy', 'nova-consoleauth'],
'proxy-page': '/console',
'proxy-port': 6081,
},
}
SERIAL_CONSOLE = {
'packages': ['nova-serialproxy', 'nova-consoleauth',
'websockify'],
'services': ['nova-serialproxy', 'nova-consoleauth'],
}
def resource_map(actual_services=True):
'''
Dynamically generate a map of resources that will be managed for a single
hook execution.
    :param actual_services: Whether to return the actual services that run
        on a unit (i.e. apache2) or the services defined in BASE_SERVICES
        (i.e. nova-placement-api).
'''
resource_map = deepcopy(BASE_RESOURCE_MAP)
if os.path.exists('/etc/apache2/conf-available'):
resource_map.pop(APACHE_CONF)
else:
resource_map.pop(APACHE_24_CONF)
resource_map[NOVA_CONF]['contexts'].append(
nova_cc_context.NeutronCCContext())
release = os_release('nova-common')
cmp_os_release = CompareOpenStackReleases(release)
if cmp_os_release >= 'mitaka':
resource_map[NOVA_CONF]['contexts'].append(
nova_cc_context.NovaAPISharedDBContext(relation_prefix='novaapi',
database='nova_api',
ssl_dir=NOVA_CONF_DIR)
)
if console_attributes('services'):
resource_map[NOVA_CONF]['services'] += console_attributes('services')
    # If single-nova-consoleauth is used, nova-consoleauth is managed by
    # pacemaker, so don't monitor for the nova-consoleauth service to be
    # started (LP: #1660244).
if config('single-nova-consoleauth') and relation_ids('ha'):
services = resource_map[NOVA_CONF]['services']
if 'nova-consoleauth' in services:
services.remove('nova-consoleauth')
if (config('enable-serial-console') and cmp_os_release >= 'juno'):
resource_map[NOVA_CONF]['services'] += SERIAL_CONSOLE['services']
# also manage any configs that are being updated by subordinates.
vmware_ctxt = context.SubordinateConfigContext(interface='nova-vmware',
service='nova',
config_file=NOVA_CONF)
vmware_ctxt = vmware_ctxt()
if vmware_ctxt and 'services' in vmware_ctxt:
for s in vmware_ctxt['services']:
if s not in resource_map[NOVA_CONF]['services']:
resource_map[NOVA_CONF]['services'].append(s)
if enable_memcache(release=release):
resource_map[MEMCACHED_CONF] = {
'contexts': [context.MemcacheContext()],
'services': ['memcached']}
if actual_services and placement_api_enabled():
for cfile in resource_map:
svcs = resource_map[cfile]['services']
if 'nova-placement-api' in svcs:
svcs.remove('nova-placement-api')
if 'apache2' not in svcs:
svcs.append('apache2')
wsgi_script = "/usr/bin/nova-placement-api"
resource_map[WSGI_NOVA_PLACEMENT_API_CONF] = {
'contexts': [context.WSGIWorkerConfigContext(name="nova",
script=wsgi_script),
nova_cc_context.HAProxyContext()],
'services': ['apache2']
}
elif not placement_api_enabled():
for cfile in resource_map:
svcs = resource_map[cfile]['services']
if 'nova-placement-api' in svcs:
svcs.remove('nova-placement-api')
if enable_metadata_api():
if actual_services:
svcs = ['apache2']
else:
svcs = ['nova-api-metadata']
resource_map[WSGI_NOVA_METADATA_API_CONF] = {
'contexts': [
context.WSGIWorkerConfigContext(
name="nova_meta",
user='nova',
group='nova',
script='/usr/bin/nova-metadata-wsgi'),
nova_cc_context.MetaDataHAProxyContext()],
'services': svcs}
return resource_map
def register_configs(release=None):
release = release or os_release('nova-common')
configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
openstack_release=release)
for cfg, rscs in resource_map().iteritems():
configs.register(cfg, rscs['contexts'])
return configs
def restart_map(actual_services=True):
'''
Constructs a restart map of config files and corresponding services
    :param actual_services: Whether to return the actual services that run
        on a unit (i.e. apache2) or the services defined in BASE_SERVICES
        (i.e. nova-placement-api).
'''
return OrderedDict(
[(cfg, v['services'])
for cfg, v in resource_map(actual_services).iteritems()
if v['services']])
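
# Illustrative shape of a restart_map() entry (config file -> services to
# restart when it changes); the exact contents vary with release and config:
#
#   {'/etc/nova/nova.conf': ['nova-api-os-compute', 'nova-scheduler',
#                            'nova-conductor'],
#    '/etc/haproxy/haproxy.cfg': ['haproxy']}
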
def services():
    ''' Returns a list of services associated with this charm '''
_services = []
for v in restart_map().values():
_services = _services + v
return list(set(_services))
def determine_ports():
'''Assemble a list of API ports for services we are managing'''
ports = []
    for svcs in restart_map(actual_services=False).values():
        for svc in svcs:
try:
ports.append(API_PORTS[svc])
except KeyError:
pass
return list(set(ports))
def api_port(service):
return API_PORTS[service]
def console_attributes(attr, proto=None):
    '''Leave proto unset to query attributes of the protocol specified at
    runtime'''
if proto:
console_proto = proto
else:
console_proto = config('console-access-protocol')
if console_proto is not None and console_proto.lower() in ('none', ''):
console_proto = None
if attr == 'protocol':
return console_proto
# 'vnc' is a virtual type made up of novnc and xvpvnc
if console_proto == 'vnc':
if attr in ['packages', 'services']:
return list(set(CONSOLE_CONFIG['novnc'][attr] +
CONSOLE_CONFIG['xvpvnc'][attr]))
else:
return None
if console_proto in CONSOLE_CONFIG:
return CONSOLE_CONFIG[console_proto][attr]
return None
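
# Example lookups against CONSOLE_CONFIG above, assuming
# console-access-protocol is set to 'spice':
#
#   console_attributes('proxy-port')  -> 6082
#   console_attributes('packages')    -> ['nova-spiceproxy',
#                                         'nova-consoleauth']
#
# With proto='vnc' the novnc and xvpvnc package/service lists are merged
# (order may vary), since 'vnc' is the virtual type handled above.
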
def determine_packages():
# currently all packages match service names
release = CompareOpenStackReleases(os_release('nova-common'))
packages = deepcopy(BASE_PACKAGES)
for v in resource_map(actual_services=False).values():
packages.extend(v['services'])
    # The nova-api-metadata service is served via wsgi, and the package is
    # only needed for the standalone service, so remove it to avoid port
    # clashes.
try:
packages.remove("nova-api-metadata")
except ValueError:
pass
if console_attributes('packages'):
packages.extend(console_attributes('packages'))
if (config('enable-serial-console') and release >= 'juno'):
packages.extend(SERIAL_CONSOLE['packages'])
packages.extend(token_cache_pkgs(source=config('openstack-origin')))
if release >= 'rocky':
packages = [p for p in packages if not p.startswith('python-')]
packages.extend(PY3_PACKAGES)
packages.remove('libapache2-mod-wsgi')
return list(set(packages))
def determine_purge_packages():
'''
    Determine list of packages that were previously installed which are no
longer needed.
:returns: list of package names
'''
    release = CompareOpenStackReleases(os_release('nova-common'))
if release >= 'rocky':
pkgs = [p for p in BASE_PACKAGES if p.startswith('python-')]
pkgs.extend(['python-nova', 'python-memcache', 'libapache2-mod-wsgi'])
return pkgs
return []
def save_script_rc():
env_vars = {
'OPENSTACK_PORT_MCASTPORT': config('ha-mcastport'),
'OPENSTACK_SERVICE_API_EC2': 'nova-api-ec2',
'OPENSTACK_SERVICE_API_OS_COMPUTE': 'nova-api-os-compute',
'OPENSTACK_SERVICE_CERT': 'nova-cert',
'OPENSTACK_SERVICE_CONDUCTOR': 'nova-conductor',
'OPENSTACK_SERVICE_OBJECTSTORE': 'nova-objectstore',
'OPENSTACK_SERVICE_SCHEDULER': 'nova-scheduler',
}
if relation_ids('nova-volume-service'):
env_vars['OPENSTACK_SERVICE_API_OS_VOL'] = 'nova-api-os-volume'
_save_script_rc(**env_vars)
def get_step_upgrade_source(new_src):
'''
Determine if upgrade skips a release and, if so, return source
of skipped release.
'''
sources = {
# target_src: (cur_pocket, step_src)
# NOTE: cur_pocket == * means all upgrades to target_src must step
# through step_src if step_src is higher than
# current release
'precise-icehouse': ('precise-updates/grizzly',
'cloud:precise-havana'),
'precise-icehouse/proposed': ('precise-proposed/grizzly',
'cloud:precise-havana/proposed'),
'trusty-liberty': ('*', 'cloud:trusty-kilo'),
'xenial-ocata': ('*', 'cloud:xenial-newton'), # LP: #1711209
}
try:
os_codename = get_os_codename_install_source(new_src)
ubuntu_series = lsb_release()['DISTRIB_CODENAME'].lower()
cur_pocket, step_src = sources['%s-%s' % (ubuntu_series, os_codename)]
current_src = os_release('nova-common')
step_src_codename = get_os_codename_install_source(step_src)
if cur_pocket == '*' and step_src_codename > current_src:
return step_src
except KeyError:
pass
configure_installation_source(new_src)
    # charmhelpers.contrib.openstack.utils.configure_installation_source()
    # configures the repository in juju_deb.list, while
    # charmhelpers.fetch.add_source() uses cloud-archive.list, so both
    # files need to be read when looking for the currently configured repo.
for fname in ['cloud-archive.list', 'juju_deb.list']:
fpath = os.path.join('/etc/apt/sources.list.d/', fname)
if not os.path.isfile(fpath):
            log('Missing %s, skipping it' % fpath, level=DEBUG)
continue
with open(fpath, 'r') as f:
for line in f.readlines():
for target_src, (cur_pocket, step_src) in sources.items():
if target_src != new_src:
continue
if cur_pocket in line:
return step_src
return None
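
# For example, switching openstack-origin to 'cloud:trusty-liberty' on a
# unit running a release older than kilo returns 'cloud:trusty-kilo' first,
# forcing the upgrade to step through kilo before reaching liberty.
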
POLICY_RC_D = """#!/bin/bash
set -e
case $1 in
nova-*)
[ $2 = "start" ] && exit 101
;;
*)
;;
esac
exit 0
"""
def enable_policy_rcd():
with open('/usr/sbin/policy-rc.d', 'w') as policy:
policy.write(POLICY_RC_D)
os.chmod('/usr/sbin/policy-rc.d', 0o755)
def disable_policy_rcd():
os.unlink('/usr/sbin/policy-rc.d')
def is_db_initialised():
if relation_ids('cluster'):
dbsync_state = peer_retrieve('dbsync_state')
if dbsync_state == 'complete':
log("Database is initialised", level=DEBUG)
return True
log("Database is NOT initialised", level=DEBUG)
return False
def is_cellv2_init_ready():
"""Determine if we're ready to initialize the cell v2 databases
Cells v2 init requires transport_url and database connections to be set
in nova.conf.
"""
amqp = context.AMQPContext()
shared_db = nova_cc_context.NovaCellV2SharedDBContext()
if (CompareOpenStackReleases(os_release('nova-common')) >= 'ocata' and
amqp() and shared_db()):
return True
log("OpenStack release, database, or rabbitmq not ready for Cells V2",
level=DEBUG)
return False
def _do_openstack_upgrade(new_src):
enable_policy_rcd()
    # All upgrades to Liberty are forced to step through Kilo. Liberty does
    # not have the migrate_flavor_data option (Bug #1511466) available, so
    # the flavor migration must be done pre-upgrade.
if (CompareOpenStackReleases(os_release('nova-common')) == 'kilo' and
is_leader()):
migrate_nova_flavors()
# 'nova-manage db online_data_migrations' needs to be run before moving to
# the next release for environments upgraded using old charms where this
# step was not being executed (LP: #1711209).
online_data_migrations_if_needed()
new_os_rel = get_os_codename_install_source(new_src)
cmp_new_os_rel = CompareOpenStackReleases(new_os_rel)
log('Performing OpenStack upgrade to %s.' % (new_os_rel))
configure_installation_source(new_src)
dpkg_opts = [
'--option', 'Dpkg::Options::=--force-confnew',
'--option', 'Dpkg::Options::=--force-confdef',
]
apt_update(fatal=True)
apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
reset_os_release()
apt_install(determine_packages(), fatal=True)
installed_pkgs = filter_missing_packages(determine_purge_packages())
if installed_pkgs:
apt_purge(installed_pkgs, fatal=True)
apt_autoremove(purge=True, fatal=True)
disable_policy_rcd()
# NOTE(jamespage) upgrade with existing config files as the
# havana->icehouse migration enables new service_plugins which
# create issues with db upgrades
configs = register_configs(release=new_os_rel)
configs.write_all()
if cmp_new_os_rel >= 'mitaka' and not database_setup(prefix='novaapi'):
# NOTE: Defer service restarts and database migrations for now
# as nova_api database is not yet created
if (relation_ids('cluster') and is_leader()):
# NOTE: reset dbsync state so that migration will complete
# when the nova_api database is setup.
peer_store('dbsync_state', None)
return configs
if cmp_new_os_rel >= 'ocata' and not database_setup(prefix='novacell0'):
# NOTE: Defer service restarts and database migrations for now
# as nova_cell0 database is not yet created
if (relation_ids('cluster') and is_leader()):
# NOTE: reset dbsync state so that migration will complete
# when the novacell0 database is setup.
peer_store('dbsync_state', None)
return configs
if is_leader():
status_set('maintenance', 'Running nova db migration')
migrate_nova_databases()
if not is_unit_paused_set():
        for svc in services():
            service_start(svc)
return configs
def database_setup(prefix):
'''
    Determine whether a specific database is set up
    and access is granted to the local unit.
This function only checks the MySQL shared-db
relation name using the provided prefix.
'''
key = '{}_allowed_units'.format(prefix)
for db_rid in relation_ids('shared-db'):
for unit in related_units(db_rid):
allowed_units = relation_get(key, rid=db_rid, unit=unit)
if allowed_units and local_unit() in allowed_units.split():
return True
return False
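
# Illustrative relation data checked above (unit names are hypothetical):
#
#   novaapi_allowed_units: 'nova-cloud-controller/0 nova-cloud-controller/1'
#
# database_setup(prefix='novaapi') returns True only when local_unit()
# appears in that space-separated list.
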
def do_openstack_upgrade(configs):
new_src = config('openstack-origin')
step_src = get_step_upgrade_source(new_src)
if step_src is not None:
_do_openstack_upgrade(step_src)
return _do_openstack_upgrade(new_src)
@retry_on_exception(5, base_delay=3, exc_type=subprocess.CalledProcessError)
def migrate_nova_flavors():
'''Runs nova-manage to migrate flavor data if needed'''
log('Migrating nova flavour information in database.', level=INFO)
cmd = ['nova-manage', 'db', 'migrate_flavor_data']
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
log('migrate_flavor_data failed\n{}'.format(e.output), level=ERROR)
raise
@retry_on_exception(5, base_delay=3, exc_type=subprocess.CalledProcessError)
def online_data_migrations_if_needed():
'''Runs nova-manage to run online data migrations available since Mitaka'''
if (is_leader() and
CompareOpenStackReleases(os_release('nova-common')) >= 'mitaka'):
log('Running online_data_migrations', level=INFO)
cmd = ['nova-manage', 'db', 'online_data_migrations']
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
log('online_data_migrations failed\n{}'.format(e.output),
level=ERROR)
raise
def migrate_nova_api_database():
'''Initialize or migrate the nova_api database'''
if CompareOpenStackReleases(os_release('nova-common')) >= 'mitaka':
log('Migrating the nova-api database.', level=INFO)
cmd = ['nova-manage', 'api_db', 'sync']
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
# NOTE(coreycb): sync of api_db on upgrade from newton->ocata
# fails but cell init is successful.
log('Ignoring CalledProcessError during nova-api database '
'migration\n{}'.format(e.output), level=INFO)
def migrate_nova_database():
'''Initialize or migrate the nova database'''
log('Migrating the nova database.', level=INFO)
cmd = ['nova-manage', 'db', 'sync']
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
log('db sync failed\n{}'.format(e.output), level=ERROR)
raise
def initialize_cell_databases():
'''Initialize the cell0 and cell1 databases
cell0 is stored in the database named 'nova_cell0'.
cell1 is stored in the database named 'nova'.
'''
log('Creating cell0 database records', level=INFO)
cmd = ['nova-manage', 'cell_v2', 'map_cell0']
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
log('map_cell0 failed\n{}'.format(e.output), level=ERROR)
raise
log('Creating cell1 database records', level=INFO)
cmd = ['nova-manage', 'cell_v2', 'create_cell', '--name', 'cell1',
'--verbose']
try:
subprocess.check_output(cmd)
log('cell1 was successfully created', level=INFO)
except subprocess.CalledProcessError as e:
if e.returncode == 1:
log('Cell1 create_cell failed\n{}'.format(e.output), level=ERROR)
raise
elif e.returncode == 2:
log('Cell1 create_cell failure ignored - a cell is already using '
'the transport_url/database combination.', level=INFO)
def get_cell_uuid(cell, fatal=True):
    '''Get cell uuid

    :param cell: string cell name i.e. 'cell1'
    :param fatal: whether to raise an exception if the cell is not found
    :returns: string cell uuid, or None if not found and fatal is False
    '''
log("Listing cell, '{}'".format(cell), level=INFO)
cells = get_cell_details()
cell_info = cells.get(cell)
if not cell_info:
if fatal:
raise Exception("Cannot find cell, '{}', in list_cells."
"".format(cell))
return None
return cell_info['uuid']
def get_cell_details():
    '''Get details of all cells

    :returns: dict of cells keyed by cell name, each entry a dict with
        uuid, amqp and db connection details
    '''
log("Getting details of cells", level=INFO)
cells = {}
cmd = ['sudo', 'nova-manage', 'cell_v2', 'list_cells', '--verbose']
try:
out = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
log('list_cells failed\n{}'.format(e.output), level=ERROR)
raise
for line in out.split('\n'):
columns = line.split('|')
if len(columns) < 2:
continue
columns = [c.strip() for c in columns]
try:
uuid.UUID(columns[2].strip())
cells[columns[1]] = {
'uuid': columns[2],
'amqp': columns[3],
'db': columns[4]}
except ValueError:
pass
return cells
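
# The parser above expects 'nova-manage cell_v2 list_cells --verbose' output
# shaped roughly like the following (values are illustrative):
#
#   +-------+--------------------------------------+---------------+-----+
#   | Name  | UUID                                 | Transport URL | ... |
#   +-------+--------------------------------------+---------------+-----+
#   | cell0 | 00000000-0000-0000-0000-000000000000 | none:///      | ... |
#   | cell1 | 83e9b1e8-...                         | rabbit://...  | ... |
#
# Rows whose UUID column does not parse as a UUID (e.g. the header row) are
# skipped.
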
def update_cell_database():
'''Update the cell1 database properties
This should be called whenever a database or rabbitmq-server relation is
changed to update the transport_url in the nova_api cell_mappings table.
'''
log('Updating cell1 properties', level=INFO)
cell1_uuid = get_cell_uuid('cell1')
cmd = ['nova-manage', 'cell_v2', 'update_cell', '--cell_uuid', cell1_uuid]
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
if e.returncode == 1:
log('Cell1 update_cell failed\n{}'.format(e.output), level=ERROR)
raise
elif e.returncode == 2:
log('Cell1 update_cell failure ignored - the properties cannot '
'be set.', level=INFO)
else:
log('cell1 was successfully updated', level=INFO)
def map_instances():
'''Map instances to cell
Updates nova_api.instance_mappings with pre-existing instances
:raises: Exception if Cell1 map_instances fails
'''
batch_size = '50000'
cell1_uuid = get_cell_uuid('cell1')
cmd = ['nova-manage', 'cell_v2', 'map_instances',
'--cell_uuid', cell1_uuid, '--max-count', batch_size]
iteration = 0
exit_code = 1
    # A return code of 0 indicates all instances have been mapped. A return
    # code of 1 indicates this batch is complete but there are more
    # instances that still need mapping.
while exit_code == 1:
msg = 'Mapping instances. Batch number: {}'.format(iteration)
status_set('maintenance', msg)
log(msg, level=INFO)
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
exit_code = process.wait()
if exit_code not in [0, 1]:
msg = 'Cell1 map_instances failed\nstdout: {}\nstderr: {}'.format(
stdout,
stderr)
log(msg, level=ERROR)
raise Exception(msg)
iteration += 1
msg = 'Mapping instances complete'
status_set('maintenance', msg)
log(msg, level=INFO)
def archive_deleted_rows(max_rows=None):
    '''Archive rows marked as deleted from production tables to the
    corresponding shadow tables.'''
    log('Archiving deleted rows', level=INFO)
cmd = ['nova-manage', 'db', 'archive_deleted_rows', '--verbose']
if max_rows:
cmd.extend(['--max_rows', str(max_rows)])
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
exit_code = process.wait()
if exit_code not in [0, 1]:
msg = 'Archiving deleted rows failed\nstdout: {}\nstderr: {}'.format(
stdout,
stderr)
log(msg, level=ERROR)
raise Exception(msg)
else:
return stdout
def add_hosts_to_cell():
'''Map compute hosts to cell'''
log('Cell1 discover_hosts', level=INFO)
cell1_uuid = get_cell_uuid('cell1')
cmd = ['nova-manage', 'cell_v2', 'discover_hosts', '--cell_uuid',
cell1_uuid, '--verbose']
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
log('Cell1 discover_hosts failed\n{}'.format(e.output), level=ERROR)
raise
def finalize_migrate_nova_databases():
if relation_ids('cluster'):
log('Informing peers that dbsync is complete', level=INFO)
peer_store('dbsync_state', 'complete')
log('Enabling services', level=INFO)
if not is_unit_paused_set():
for svc in services():
service_resume(svc)
else:
log('Unit is in paused state, not issuing start/resume to all '
'services')
# NOTE(jamespage): Retry deals with sync issues during one-shot HA deploys.
# mysql might be restarting or suchlike.
@retry_on_exception(5, base_delay=3, exc_type=subprocess.CalledProcessError)
def migrate_nova_databases():
'''Runs nova-manage to initialize new databases or migrate existing'''
release = CompareOpenStackReleases(os_release('nova-common'))
if release < 'ocata':
migrate_nova_api_database()
migrate_nova_database()
online_data_migrations_if_needed()
finalize_migrate_nova_databases()
elif is_cellv2_init_ready():
migrate_nova_api_database()
initialize_cell_databases()
migrate_nova_database()
online_data_migrations_if_needed()
add_hosts_to_cell()
        # Populate the cells mapping table if upgrading to a cells
        # environment for the first time, e.g. Newton -> Ocata
if release == 'ocata':
map_instances()
finalize_migrate_nova_databases()
# TODO: refactor to use unit storage or related data
def auth_token_config(setting):
"""
Returns currently configured value for setting in api-paste.ini's
authtoken section, or None.
"""
config = ConfigParser.RawConfigParser()
config.read('/etc/nova/api-paste.ini')
try:
value = config.get('filter:authtoken', setting)
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
return None
if value.startswith('%'):
return None
return value
def keystone_ca_cert_b64():
'''Returns the local Keystone-provided CA cert if it exists, or None.'''
if not os.path.isfile(CA_CERT_PATH):
return None
with open(CA_CERT_PATH) as _in:
return b64encode(_in.read())
def ssh_directory_for_unit(unit=None, user=None):
if unit:
remote_service = unit.split('/')[0]
else:
remote_service = remote_unit().split('/')[0]
if user:
remote_service = "{}_{}".format(remote_service, user)
_dir = os.path.join(NOVA_SSH_DIR, remote_service)
for d in [NOVA_SSH_DIR, _dir]:
if not os.path.isdir(d):
os.mkdir(d)
for f in ['authorized_keys', 'known_hosts']:
f = os.path.join(_dir, f)
if not os.path.isfile(f):
open(f, 'w').close()
return _dir
def known_hosts(unit=None, user=None):
return os.path.join(ssh_directory_for_unit(unit, user), 'known_hosts')
def authorized_keys(unit=None, user=None):
return os.path.join(ssh_directory_for_unit(unit, user), 'authorized_keys')
def ssh_known_host_key(host, unit=None, user=None):
cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-H', '-F', host]
try:
# The first line of output is like '# Host xx found: line 1 type RSA',
# which should be excluded.
output = subprocess.check_output(cmd).strip()
except subprocess.CalledProcessError:
return None
if output:
        # Bug #1500589: cmd has rc 0 on precise even if the entry is absent
lines = output.split('\n')
if len(lines) > 1:
return lines[1]
return None
def remove_known_host(host, unit=None, user=None):
log('Removing SSH known host entry for compute host at %s' % host)
cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-R', host]
subprocess.check_call(cmd)
def is_same_key(key_1, key_2):
    # The key format we get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'
    # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB'; we only need to compare
    # the part starting with 'ssh-rsa' after '= ', because the hashed
    # value at the beginning will change each time.
k_1 = key_1.split('= ')[1]
k_2 = key_2.split('= ')[1]
return k_1 == k_2
def add_known_host(host, unit=None, user=None):
'''Add variations of host to a known hosts file.'''
cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
try:
remote_key = subprocess.check_output(cmd).strip()
except Exception as e:
log('Could not obtain SSH host key from %s' % host, level=ERROR)
raise e
current_key = ssh_known_host_key(host, unit, user)
if current_key and remote_key:
if is_same_key(remote_key, current_key):
log('Known host key for compute host %s up to date.' % host)
return
else:
remove_known_host(host, unit, user)
log('Adding SSH host key to known hosts for compute node at %s.' % host)
with open(known_hosts(unit, user), 'a') as out:
out.write(remote_key + '\n')
def ssh_authorized_key_exists(public_key, unit=None, user=None):
with open(authorized_keys(unit, user)) as keys:
return (' %s ' % public_key) in keys.read()
def add_authorized_key(public_key, unit=None, user=None):
with open(authorized_keys(unit, user), 'a') as keys:
keys.write(public_key + '\n')
def ssh_compute_add(public_key, rid=None, unit=None, user=None):
# If remote compute node hands us a hostname, ensure we have a
# known hosts entry for its IP, hostname and FQDN.
private_address = relation_get(rid=rid, unit=unit,
attribute='private-address')
hosts = []
if not is_ipv6(private_address):
hostname = relation_get(rid=rid, unit=unit,
attribute='hostname')
if hostname:
hosts.append(hostname.lower())
if not is_ip(private_address):
hosts.append(private_address.lower())
hosts.append(get_host_ip(private_address))
short = private_address.split('.')[0]
if ns_query(short):
hosts.append(short.lower())
else:
hosts.append(private_address)
hn = get_hostname(private_address)
if hn:
hosts.append(hn.lower())
short = hn.split('.')[0]
if ns_query(short):
hosts.append(short.lower())
else:
hosts.append(private_address)
for host in list(set(hosts)):
add_known_host(host, unit, user)
if not ssh_authorized_key_exists(public_key, unit, user):
log('Saving SSH authorized key for compute host at %s.' %
private_address)
add_authorized_key(public_key, unit, user)
def ssh_known_hosts_lines(unit=None, user=None):
known_hosts_list = []
with open(known_hosts(unit, user)) as hosts:
for hosts_line in hosts:
if hosts_line.rstrip():
known_hosts_list.append(hosts_line.rstrip())
return(known_hosts_list)
def ssh_authorized_keys_lines(unit=None, user=None):
authorized_keys_list = []
with open(authorized_keys(unit, user)) as keys:
for authkey_line in keys:
if authkey_line.rstrip():
authorized_keys_list.append(authkey_line.rstrip())
return(authorized_keys_list)
def ssh_compute_remove(public_key, unit=None, user=None):
if not (os.path.isfile(authorized_keys(unit, user)) or
os.path.isfile(known_hosts(unit, user))):
return
with open(authorized_keys(unit, user)) as _keys:
keys = [k.strip() for k in _keys.readlines()]
if public_key not in keys:
return
    keys = [k for k in keys if k != public_key]
with open(authorized_keys(unit, user), 'w') as _keys:
keys = '\n'.join(keys)
if not keys.endswith('\n'):
keys += '\n'
_keys.write(keys)
def determine_endpoints(public_url, internal_url, admin_url):
'''Generates a dictionary containing all relevant endpoints to be
passed to keystone as relation settings.'''
region = config('region')
os_rel = os_release('nova-common')
cmp_os_rel = CompareOpenStackReleases(os_rel)
nova_public_url = ('%s:%s/v2/$(tenant_id)s' %
(public_url, api_port('nova-api-os-compute')))
nova_internal_url = ('%s:%s/v2/$(tenant_id)s' %
(internal_url, api_port('nova-api-os-compute')))
nova_admin_url = ('%s:%s/v2/$(tenant_id)s' %
(admin_url, api_port('nova-api-os-compute')))
if cmp_os_rel >= 'queens':
nova_public_url = (
'%s:%s/v2.1' %
(public_url, api_port('nova-api-os-compute'))
)
nova_internal_url = (
'%s:%s/v2.1' %
(internal_url, api_port('nova-api-os-compute'))
)
nova_admin_url = (
'%s:%s/v2.1' %
(admin_url, api_port('nova-api-os-compute'))
)
ec2_public_url = '%s:%s/services/Cloud' % (
public_url, api_port('nova-api-ec2'))
ec2_internal_url = '%s:%s/services/Cloud' % (
internal_url, api_port('nova-api-ec2'))
ec2_admin_url = '%s:%s/services/Cloud' % (admin_url,
api_port('nova-api-ec2'))
s3_public_url = '%s:%s' % (public_url, api_port('nova-objectstore'))
s3_internal_url = '%s:%s' % (internal_url, api_port('nova-objectstore'))
s3_admin_url = '%s:%s' % (admin_url, api_port('nova-objectstore'))
if cmp_os_rel >= 'ocata':
placement_public_url = '%s:%s' % (
public_url, api_port('nova-placement-api'))
placement_internal_url = '%s:%s' % (
internal_url, api_port('nova-placement-api'))
placement_admin_url = '%s:%s' % (
admin_url, api_port('nova-placement-api'))
# the base endpoints
endpoints = {
'nova_service': 'nova',
'nova_region': region,
'nova_public_url': nova_public_url,
'nova_admin_url': nova_admin_url,
'nova_internal_url': nova_internal_url,
'ec2_service': 'ec2',
'ec2_region': region,
'ec2_public_url': ec2_public_url,
'ec2_admin_url': ec2_admin_url,
'ec2_internal_url': ec2_internal_url,
's3_service': 's3',
's3_region': region,
's3_public_url': s3_public_url,
's3_admin_url': s3_admin_url,
's3_internal_url': s3_internal_url,
}
if cmp_os_rel >= 'kilo':
# NOTE(jamespage) drop endpoints for ec2 and s3
# ec2 is deprecated
# s3 is insecure and should die in flames
endpoints.update({
'ec2_service': None,
'ec2_region': None,
'ec2_public_url': None,
'ec2_admin_url': None,
'ec2_internal_url': None,
's3_service': None,
's3_region': None,
's3_public_url': None,
's3_admin_url': None,
's3_internal_url': None,
2015-03-25 13:34:27 +00:00
})
if cmp_os_rel >= 'ocata':
endpoints.update({
'placement_service': 'placement',
'placement_region': region,
'placement_public_url': placement_public_url,
'placement_admin_url': placement_admin_url,
'placement_internal_url': placement_internal_url,
})
return endpoints
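
# Illustrative subset of the dict returned for a queens deployment (region
# and host names are hypothetical):
#
#   {'nova_service': 'nova',
#    'nova_region': 'RegionOne',
#    'nova_public_url': 'https://nova.example.com:8774/v2.1',
#    'ec2_service': None,   # ec2/s3 endpoints dropped >= kilo
#    'placement_public_url': 'https://nova.example.com:8778',
#    ...}
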
def guard_map():
'''Map of services and required interfaces that must be present before
the service should be allowed to start'''
gmap = {}
nova_services = resolve_services()
if os_release('nova-common') not in ['essex', 'folsom']:
nova_services.append('nova-conductor')
    nova_interfaces = ['identity-service', 'amqp', 'shared-db']
for svc in nova_services:
gmap[svc] = nova_interfaces
return gmap
def service_guard(guard_map, contexts, active=False):
'''Inhibit services in guard_map from running unless
required interfaces are found complete in contexts.'''
def wrap(f):
def wrapped_f(*args):
if active is True:
incomplete_services = []
for svc in guard_map:
for interface in guard_map[svc]:
if interface not in contexts.complete_contexts():
incomplete_services.append(svc)
f(*args)
for svc in incomplete_services:
if service_running(svc):
log('Service {} has unfulfilled '
'interface requirements, stopping.'.format(svc))
service_stop(svc)
else:
f(*args)
return wrapped_f
return wrap
def setup_ipv6():
ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(ubuntu_rel) < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu "
"versions less than Trusty 14.04")
    # Need haproxy >= 1.5.3 for ipv6, so on Trusty if we are running Kilo or
    # earlier we need to use trusty-backports; otherwise we can use the UCA.
if (ubuntu_rel == 'trusty' and
CompareOpenStackReleases(os_release('nova-api')) < 'liberty'):
add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports '
'main')
apt_update()
apt_install('haproxy/trusty-backports', fatal=True)
def get_optional_interfaces():
"""Return the optional interfaces that should be checked if the relavent
relations have appeared.
:returns: {general_interface: [specific_int1, specific_int2, ...], ...}
"""
optional_interfaces = {}
if relation_ids('quantum-network-service'):
optional_interfaces['quantum'] = ['quantum-network-service']
if relation_ids('cinder-volume-service'):
optional_interfaces['cinder'] = ['cinder-volume-service']
if relation_ids('neutron-api'):
optional_interfaces['neutron-api'] = ['neutron-api']
return optional_interfaces
def check_optional_relations(configs):
"""Check that if we have a relation_id for high availability that we can
get the hacluster config. If we can't then we are blocked.
This function is called from assess_status/set_os_workload_status as the
charm_func and needs to return either None, None if there is no problem or
the status, message if there is a problem.
:param configs: an OSConfigRender() instance.
:return 2-tuple: (string, string) = (status, message)
"""
if relation_ids('ha'):
try:
get_hacluster_config()
        except Exception:
return ('blocked',
'hacluster missing configuration: '
'vip, vip_iface, vip_cidr')
# return 'unknown' as the lowest priority to not clobber an existing
# status.
return "unknown", ""
def is_api_ready(configs):
return (not incomplete_relation_data(configs, REQUIRED_INTERFACES))
def assess_status(configs):
"""Assess status of current unit
Decides what the state of the unit should be based on the current
configuration.
SIDE EFFECT: calls set_os_workload_status(...) which sets the workload
status of the unit.
Also calls status_set(...) directly if paused state isn't complete.
@param configs: a templating.OSConfigRenderer() object
@returns None - this function is executed for its side-effect
"""
assess_status_func(configs)()
os_application_version_set(VERSION_PACKAGE)
def assess_status_func(configs):
"""Helper function to create the function that will assess_status() for
the unit.
Uses charmhelpers.contrib.openstack.utils.make_assess_status_func() to
create the appropriate status function and then returns it.
Used directly by assess_status() and also for pausing and resuming
the unit.
NOTE: REQUIRED_INTERFACES is augmented with the optional interfaces
depending on the current config before being passed to the
make_assess_status_func() function.
NOTE(ajkavanagh) ports are not checked due to race hazards with services
    that don't behave synchronously w.r.t their service scripts. e.g.
apache2.
@param configs: a templating.OSConfigRenderer() object
@return f() -> None : a function that assesses the unit's workload status
"""
required_interfaces = REQUIRED_INTERFACES.copy()
required_interfaces.update(get_optional_interfaces())
return make_assess_status_func(
configs, required_interfaces,
charm_func=check_optional_relations,
services=services(), ports=None)
def pause_unit_helper(configs):
"""Helper function to pause a unit, and then call assess_status(...) in
effect, so that the status is correctly updated.
Uses charmhelpers.contrib.openstack.utils.pause_unit() to do the work.
@param configs: a templating.OSConfigRenderer() object
@returns None - this function is executed for its side-effect
"""
_pause_resume_helper(pause_unit, configs)
def resume_unit_helper(configs):
"""Helper function to resume a unit, and then call assess_status(...) in
effect, so that the status is correctly updated.
Uses charmhelpers.contrib.openstack.utils.resume_unit() to do the work.
@param configs: a templating.OSConfigRenderer() object
@returns None - this function is executed for its side-effect
"""
_pause_resume_helper(resume_unit, configs)
def _pause_resume_helper(f, configs):
"""Helper function that uses the make_assess_status_func(...) from
charmhelpers.contrib.openstack.utils to create an assess_status(...)
function that can be used with the pause/resume of the unit
@param f: the function to be used with the assess_status(...) function
@returns None - this function is executed for its side-effect
"""
# TODO(ajkavanagh) - ports= has been left off because of the race hazard
# that exists due to service_start()
f(assess_status_func(configs),
services=services(),
ports=None)
def update_aws_compat_services():
"""Depending on the configuration of `disable-aws-compatibility` config
option.
This will stop/start and disable/enable `nova-api-ec2` and
`nova-objectstore` services.
"""
# if packages aren't installed, then there is nothing to do
if filter_installed_packages(AWS_COMPAT_SERVICES) != []:
return
if config('disable-aws-compat'):
# TODO: the endpoints have to removed from keystone
for service_ in AWS_COMPAT_SERVICES:
service_pause(service_)
else:
for service_ in AWS_COMPAT_SERVICES:
service_resume(service_)
def serial_console_settings():
    '''Utility wrapper to retrieve serial console settings
    for use in the cloud-compute relation
    '''
return nova_cc_context.SerialConsoleContext()()
def placement_api_enabled():
"""Return true if nova-placement-api is enabled in this release"""
return CompareOpenStackReleases(os_release('nova-common')) >= 'ocata'
def enable_metadata_api(release=None):
    """Should nova-metadata-api be running on this unit for this release."""
    if not release:
        release = os_release('nova-common')
    return CompareOpenStackReleases(release) >= 'rocky'
def disable_package_apache_site():
"""Ensure that the package-provided apache configuration is disabled to
prevent it from conflicting with the charm-provided version.
"""
if os.path.exists(PACKAGE_NOVA_PLACEMENT_API_CONF):
subprocess.check_call(['a2dissite', 'nova-placement-api'])
def get_shared_metadatasecret():
"""Return the shared metadata secret."""
return leader_get(SHARED_METADATA_SECRET_KEY)
def set_shared_metadatasecret():
"""Store the shared metadata secret."""
leader_set({SHARED_METADATA_SECRET_KEY: uuid1()})
def get_metadata_settings(configs):
"""Return the settings for accessing the metadata service."""
if enable_metadata_api():
url = urlparse(canonical_url(configs, INTERNAL))
settings = {
'nova-metadata-host': url.netloc,
'nova-metadata-protocol': url.scheme,
'nova-metadata-port': API_PORTS['nova-api-metadata'],
'shared-metadata-secret': get_shared_metadatasecret()}
else:
settings = {}
return settings
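
# Illustrative settings returned on rocky or later (host is hypothetical):
#
#   {'nova-metadata-host': 'nova.example.com',
#    'nova-metadata-protocol': 'https',
#    'nova-metadata-port': 8775,
#    'shared-metadata-secret': '<uuid stored via leader_set>'}
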
def write_vendordata(vdata):
"""Write supplied vendor data out to a file."""
try:
json_vdata = json.loads(vdata)
    except (TypeError, ValueError) as e:
log('Error decoding vendor-data. {}'.format(e), level=ERROR)
return False
    with open(VENDORDATA_FILE, 'w') as vdata_file:
        vdata_file.write(json.dumps(json_vdata, sort_keys=True, indent=2))
    return True
def get_cell_db_context(db_service):
"""Return the database context for the given service name"""
db_rid = relation_id(
relation_name='shared-db-cell',
service_or_unit=db_service)
return context.SharedDBContext(
relation_prefix='nova',
ssl_dir=NOVA_CONF_DIR,
relation_id=db_rid)()
def get_cell_amqp_context(amqp_service):
"""Return the amqp context for the given service name"""
amq_rid = relation_id(
relation_name='amqp-cell',
service_or_unit=amqp_service)
return context.AMQPContext(
ssl_dir=NOVA_CONF_DIR,
relation_id=amq_rid)()
def get_sql_uri(db_ctxt):
"""Return the uri for conextind to the database in the supplied context"""
uri_template = ("{database_type}://{database_user}:{database_password}"
"@{database_host}/{database}")
uri = uri_template.format(**db_ctxt)
if db_ctxt.get('database_ssl_ca'):
uri = uri + '?ssl_ca={database_ssl_ca}'.format(**db_ctxt)
if db_ctxt.get('database_ssl_cert'):
uri = uri + ('&ssl_cert={database_ssl_cert}'
'&ssl_key={database_ssl_key}').format(**db_ctxt)
return uri
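
# Illustrative URI built from a typical shared-db context (credentials and
# host are hypothetical):
#
#   mysql://nova:s3cr3t@10.5.0.10/nova
#
# with '?ssl_ca=...' (and '&ssl_cert=...&ssl_key=...') appended when the
# context carries SSL material.
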
def update_child_cell(name, db_service, amqp_service, skip_acl_check=True):
"""Register cell.
Registering a cell requires:
1) Complete relation with api db service.
2) Complete relation with cells db service.
3) Complete relation with cells amqp service.
"""
if not is_db_initialised():
log(
            'Deferring registering Cell {}, api db not ready.'.format(name),
level=DEBUG)
return False
existing_cells = get_cell_details()
if not existing_cells.get('cell1'):
        log('Deferring registering cell {}, api cell setup is not complete.'
''.format(name),
level=DEBUG)
return False
db_ctxt = get_cell_db_context(db_service)
if not db_ctxt:
        log('Deferring registering cell {}, cell db relation not '
'ready.'.format(name),
level=DEBUG)
return False
sql_connection = get_sql_uri(db_ctxt)
amqp_ctxt = get_cell_amqp_context(amqp_service)
if not amqp_ctxt:
        log('Deferring registering cell {}, cell amqp relation not '
'ready.'.format(name),
level=DEBUG)
return False
cmd = [
'nova-manage',
'cell_v2',
]
if existing_cells.get(name):
log('Cell {} already registered, checking if details are correct.'
''.format(name), level=DEBUG)
if (amqp_ctxt['transport_url'] == existing_cells[name]['amqp'] and
sql_connection == existing_cells[name]['db']):
            log('Cell details are correct, no update needed', level=DEBUG)
return False
else:
log('Cell details have changed', level=DEBUG)
cmd.extend([
'update_cell',
'--cell_uuid', existing_cells[name]['uuid']])
else:
log(
'Cell {} is new and needs to be created.'.format(name),
level=DEBUG)
cmd.extend(['create_cell', '--verbose'])
cmd.extend([
'--name', name,
'--transport-url', amqp_ctxt['transport_url'],
'--database_connection', sql_connection])
try:
log('Updating cell {}'.format(name), level=DEBUG)
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
log('Register cell failed\n{}'.format(e.output), level=ERROR)
raise
service_restart('nova-scheduler')
return True