Propagate vendor_data from nova-cloud-controller

When using the DVR and L3HA neutron deployment options, Nova API
metadata requests are served from compute nodes instead of from
neutron-gateway nodes.

This change allows nova-cloud-controller to send vendor_data
configuration values over its relation to the nova-compute charm,
so that nova-compute can write them to its nova.conf appropriately.
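
As a rough sketch of the mechanism (the helper name below is
illustrative; the relation name and keys match the
nova-cloud-controller code further down in this change), the
controller-side context serializes the rendered values and sets them
on every cloud-compute relation:

import json

from charmhelpers.core import hookenv


def propagate_vendor_metadata(vdata_values):
    # Illustrative helper: push the rendered vendor metadata to every
    # related nova-compute unit over the cloud-compute relation, as
    # NovaMetadataContext below does on each render.
    for rid in hookenv.relation_ids('cloud-compute'):
        hookenv.relation_set(relation_id=rid,
                             vendor_data=json.dumps(vdata_values))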

Replaced the existing context logic with inheritance from new
contexts created in charm-helpers, so that the logic can be shared
across the several charms that write vendor metadata to nova.conf
and vendor_data.json.
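
For illustration only (the subclass names here are hypothetical; the
parent contexts and their constructor argument come from the
charm-helpers code added below), a charm now reuses the shared logic
by subclassing instead of re-implementing it:

from charmhelpers.contrib.openstack import context as ch_context


class MyVendorMetadataContext(ch_context.NovaVendorMetadataContext):
    """Renders the vendor metadata options written to nova.conf."""


class MyVendorMetadataJSONContext(ch_context.NovaVendorMetadataJSONContext):
    """Renders the payload written to vendor_data.json."""


# Both contexts take the package used to detect the OpenStack release.
nova_conf_ctxt = MyVendorMetadataContext('nova-common')()
vendor_json_ctxt = MyVendorMetadataJSONContext('nova-common')()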

Also includes a small fix to the vendor-data and vendor-data-url
descriptions, which incorrectly stated that such configuration
would be effective in nova-cloud-controller on the Queens release.

The values set in the vendor-data and vendor-data-url config
options will always be propagated to nova-compute regardless of
the OpenStack release. Those values will continue to be effective
on nova-cloud-controller nodes only on the Rocky release or later.
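
For example, with valid JSON in vendor-data and a vendor-data-url
set, the vendor metadata context resolves to values along these lines
(mirroring the unit tests in this change); on nova-cloud-controller
they only take effect in nova.conf from Rocky onward:

# Illustrative output of the shared NovaVendorMetadataContext when both
# options are set (values taken from the unit tests below):
expected_ctxt = {
    'vendor_data': True,
    'vendor_data_url': 'http://example.org/vdata',
    'vendordata_providers': 'StaticJSON,DynamicJSON',
}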

Included sync of charm-helpers code in order to inherit
the refactored vendor metadata contexts.

Change-Id: If8373fc6b2d04dbc29ed07896d385ac920cae3f4
Depends-On: I0c79e1bfac9fbe7009a7e862ad010cfa2de8cfda
Closes-Bug: #1777714
Rodrigo Barbieri 2019-05-10 11:38:53 -03:00
parent da247f22f9
commit a51efaf640
18 changed files with 445 additions and 105 deletions

View File

@ -294,8 +294,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('bionic', None): self.bionic_queens,
('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
('bionic', 'cloud:bionic-stein'): self.bionic_stein,
('bionic', 'cloud:bionic-train'): self.bionic_train,
('cosmic', None): self.cosmic_rocky,
('disco', None): self.disco_stein,
('eoan', None): self.eoan_train,
}
return releases[(self.series, self.openstack)]
@ -313,6 +315,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]

View File

@ -54,11 +54,15 @@ NOVA_CLIENT_VERSION = "2"
OPENSTACK_RELEASES_PAIRS = [
'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
'xenial_pike', 'artful_pike', 'xenial_queens',
'bionic_queens', 'bionic_rocky', 'cosmic_rocky',
'bionic_stein', 'disco_stein']
'trusty_mitaka', 'xenial_mitaka',
'xenial_newton', 'yakkety_newton',
'xenial_ocata', 'zesty_ocata',
'xenial_pike', 'artful_pike',
'xenial_queens', 'bionic_queens',
'bionic_rocky', 'cosmic_rocky',
'bionic_stein', 'disco_stein',
'bionic_train', 'eoan_train',
]
class OpenStackAmuletUtils(AmuletUtils):

View File

@ -220,6 +220,8 @@ def process_certificates(service_name, relation_id, unit,
:type user: str
:param group: (Optional) Group of certificate files. Defaults to 'root'
:type group: str
:returns: True if certificates processed for local unit or False
:rtype: bool
"""
data = relation_get(rid=relation_id, unit=unit)
ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
@ -235,6 +237,8 @@ def process_certificates(service_name, relation_id, unit,
create_ip_cert_links(
ssl_dir,
custom_hostname_link=custom_hostname_link)
return True
return False
def get_requests_for_local_unit(relation_name=None):

View File

@ -117,6 +117,7 @@ except ImportError:
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
ADDRESS_TYPES = ['admin', 'internal', 'public']
HAPROXY_RUN_DIR = '/var/run/haproxy/'
DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"
def ensure_packages(packages):
@ -351,10 +352,70 @@ class IdentityServiceContext(OSContextGenerator):
return cachedir
return None
def _get_pkg_name(self, python_name='keystonemiddleware'):
"""Get corresponding distro installed package for python
package name.
:param python_name: name of the python package
:type: string
"""
pkg_names = map(lambda x: x + python_name, ('python3-', 'python-'))
for pkg in pkg_names:
if not filter_installed_packages((pkg,)):
return pkg
return None
def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel):
"""Build Jinja2 context for full rendering of [keystone_authtoken]
section with variable names included. Re-constructed from former
template 'section-keystone-auth-mitaka'.
:param ctxt: Jinja2 context returned from self.__call__()
:type: dict
:param keystonemiddleware_os_rel: OpenStack release name of
keystonemiddleware package installed
"""
c = collections.OrderedDict((('auth_type', 'password'),))
# 'www_authenticate_uri' replaced 'auth_uri' since Stein,
# see keystonemiddleware upstream sources for more info
if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
c.update((
('www_authenticate_uri', "{}://{}:{}/v3".format(
ctxt.get('service_protocol', ''),
ctxt.get('service_host', ''),
ctxt.get('service_port', ''))),))
else:
c.update((
('auth_uri', "{}://{}:{}/v3".format(
ctxt.get('service_protocol', ''),
ctxt.get('service_host', ''),
ctxt.get('service_port', ''))),))
c.update((
('auth_url', "{}://{}:{}/v3".format(
ctxt.get('auth_protocol', ''),
ctxt.get('auth_host', ''),
ctxt.get('auth_port', ''))),
('project_domain_name', ctxt.get('admin_domain_name', '')),
('user_domain_name', ctxt.get('admin_domain_name', '')),
('project_name', ctxt.get('admin_tenant_name', '')),
('username', ctxt.get('admin_user', '')),
('password', ctxt.get('admin_password', '')),
('signing_dir', ctxt.get('signing_dir', '')),))
return c
def __call__(self):
log('Generating template context for ' + self.rel_name, level=DEBUG)
ctxt = {}
keystonemiddleware_os_release = None
if self._get_pkg_name():
keystonemiddleware_os_release = os_release(self._get_pkg_name())
cachedir = self._setup_pki_cache()
if cachedir:
ctxt['signing_dir'] = cachedir
@ -385,6 +446,14 @@ class IdentityServiceContext(OSContextGenerator):
ctxt.update({'admin_domain_name':
rdata.get('service_domain')})
# we keep all variables in ctxt for compatibility and
# add nested dictionary for keystone_authtoken generic
# templating
if keystonemiddleware_os_release:
ctxt['keystone_authtoken'] = \
self._get_keystone_authtoken_ctxt(
ctxt, keystonemiddleware_os_release)
if self.context_complete(ctxt):
# NOTE(jamespage) this is required for >= icehouse
# so a missing value just indicates keystone needs
@ -452,6 +521,86 @@ class IdentityCredentialsContext(IdentityServiceContext):
return {}
class NovaVendorMetadataContext(OSContextGenerator):
"""Context used for configuring nova vendor metadata on nova.conf file."""
def __init__(self, os_release_pkg, interfaces=None):
"""Initialize the NovaVendorMetadataContext object.
:param os_release_pkg: the package name to extract the OpenStack
release codename from.
:type os_release_pkg: str
:param interfaces: list of string values to be used as the Context's
relation interfaces.
:type interfaces: List[str]
"""
self.os_release_pkg = os_release_pkg
if interfaces is not None:
self.interfaces = interfaces
def __call__(self):
cmp_os_release = CompareOpenStackReleases(
os_release(self.os_release_pkg))
ctxt = {'vendor_data': False}
vdata_providers = []
vdata = config('vendor-data')
vdata_url = config('vendor-data-url')
if vdata:
try:
# validate the JSON. If invalid, we do not set anything here
json.loads(vdata)
except (TypeError, ValueError) as e:
log('Error decoding vendor-data. {}'.format(e), level=ERROR)
else:
ctxt['vendor_data'] = True
# Mitaka does not support DynamicJSON
# so vendordata_providers is not needed
if cmp_os_release > 'mitaka':
vdata_providers.append('StaticJSON')
if vdata_url:
if cmp_os_release > 'mitaka':
ctxt['vendor_data_url'] = vdata_url
vdata_providers.append('DynamicJSON')
else:
log('Dynamic vendor data unsupported'
' for {}.'.format(cmp_os_release), level=ERROR)
if vdata_providers:
ctxt['vendordata_providers'] = ','.join(vdata_providers)
return ctxt
class NovaVendorMetadataJSONContext(OSContextGenerator):
"""Context used for writing nova vendor metadata json file."""
def __init__(self, os_release_pkg):
"""Initialize the NovaVendorMetadataJSONContext object.
:param os_release_pkg: the package name to extract the OpenStack
release codename from.
:type os_release_pkg: str
"""
self.os_release_pkg = os_release_pkg
def __call__(self):
ctxt = {'vendor_data_json': '{}'}
vdata = config('vendor-data')
if vdata:
try:
# validate the JSON. If invalid, we return empty.
json.loads(vdata)
except (TypeError, ValueError) as e:
log('Error decoding vendor-data. {}'.format(e), level=ERROR)
else:
ctxt['vendor_data_json'] = vdata
return ctxt
class AMQPContext(OSContextGenerator):
def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None,
@ -569,6 +718,19 @@ class AMQPContext(OSContextGenerator):
ctxt['oslo_messaging_flags'] = config_flags_parser(
oslo_messaging_flags)
oslo_messaging_driver = conf.get(
'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER)
if oslo_messaging_driver:
ctxt['oslo_messaging_driver'] = oslo_messaging_driver
notification_format = conf.get('notification-format', None)
if notification_format:
ctxt['notification_format'] = notification_format
send_notifications_to_logs = conf.get('send-notifications-to-logs', None)
if send_notifications_to_logs:
ctxt['send_notifications_to_logs'] = send_notifications_to_logs
if not self.complete:
return {}
@ -620,6 +782,25 @@ class CephContext(OSContextGenerator):
ensure_packages(['ceph-common'])
return ctxt
def context_complete(self, ctxt):
"""Overridden here to ensure the context is actually complete.
We set `key` and `auth` to None here, by default, to ensure
that the context will always evaluate to incomplete until the
Ceph relation has actually sent these details; otherwise,
there is a potential race condition between the relation
appearing and the first unit actually setting this data on the
relation.
:param ctxt: The current context members
:type ctxt: Dict[str, ANY]
:returns: True if the context is complete
:rtype: bool
"""
if 'auth' not in ctxt or 'key' not in ctxt:
return False
return super(CephContext, self).context_complete(ctxt)
class HAProxyContext(OSContextGenerator):
"""Provides half a context for the haproxy template, which describes
@ -1110,7 +1291,9 @@ class NeutronPortContext(OSContextGenerator):
hwaddr_to_nic = {}
hwaddr_to_ip = {}
for nic in list_nics():
extant_nics = list_nics()
for nic in extant_nics:
# Ignore virtual interfaces (bond masters will be identified from
# their slaves)
if not is_phy_iface(nic):
@ -1141,10 +1324,11 @@ class NeutronPortContext(OSContextGenerator):
# Entry is a MAC address for a valid interface that doesn't
# have an IP address assigned yet.
resolved.append(hwaddr_to_nic[entry])
else:
# If the passed entry is not a MAC address, assume it's a valid
# interface, and that the user put it there on purpose (we can
# trust it to be the real external network).
elif entry in extant_nics:
# If the passed entry is not a MAC address and the interface
# exists, assume it's a valid interface, and that the user put
# it there on purpose (we can trust it to be the real external
# network).
resolved.append(entry)
# Ensure no duplicates
@ -1526,6 +1710,14 @@ class NeutronAPIContext(OSContextGenerator):
'rel_key': 'enable-nsg-logging',
'default': False,
},
'global_physnet_mtu': {
'rel_key': 'global-physnet-mtu',
'default': 1500,
},
'physical_network_mtus': {
'rel_key': 'physical-network-mtus',
'default': None,
},
}
ctxt = self.get_neutron_options({})
for rid in relation_ids('neutron-plugin-api'):
@ -1587,13 +1779,13 @@ class DataPortContext(NeutronPortContext):
def __call__(self):
ports = config('data-port')
if ports:
# Map of {port/mac:bridge}
# Map of {bridge:port/mac}
portmap = parse_data_port_mappings(ports)
ports = portmap.keys()
# Resolve provided ports or mac addresses and filter out those
# already attached to a bridge.
resolved = self.resolve_ports(ports)
# FIXME: is this necessary?
# Rebuild port index using resolved and filtered ports.
normalized = {get_nic_hwaddr(port): port for port in resolved
if port not in ports}
normalized.update({port: port for port in resolved

View File

@ -217,6 +217,11 @@ def neutron_plugins():
plugins['nsx']['config'] = '/etc/neutron/nsx.ini'
plugins['vsp']['driver'] = (
'nuage_neutron.plugins.nuage.plugin.NuagePlugin')
if CompareOpenStackReleases(release) >= 'newton':
plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['vsp']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
return plugins

View File

@ -0,0 +1,9 @@
{% if auth_host -%}
[keystone_authtoken]
{% for option_name, option_value in keystone_authtoken.items() -%}
{{ option_name }} = {{ option_value }}
{% endfor -%}
{% if use_memcache == true %}
memcached_servers = {{ memcache_url }}
{% endif -%}
{% endif -%}

View File

@ -1,11 +1,15 @@
{% if transport_url -%}
[oslo_messaging_notifications]
driver = messagingv2
driver = {{ oslo_messaging_driver }}
transport_url = {{ transport_url }}
{% if send_notifications_to_logs %}
driver = log
{% endif %}
{% if notification_topics -%}
topics = {{ notification_topics }}
{% endif -%}
{% if notification_format -%}
[notifications]
notification_format = {{ notification_format }}
{% endif -%}
{% endif -%}

View File

@ -0,0 +1 @@
{{ vendor_data_json }}

View File

@ -120,6 +120,7 @@ OPENSTACK_RELEASES = (
'queens',
'rocky',
'stein',
'train',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
@ -139,6 +140,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
])
@ -159,6 +161,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2018.1', 'queens'),
('2018.2', 'rocky'),
('2019.1', 'stein'),
('2019.2', 'train'),
])
# The ugly duckling - must list releases oldest to newest
@ -194,7 +197,9 @@ SWIFT_CODENAMES = OrderedDict([
('rocky',
['2.18.0', '2.19.0']),
('stein',
['2.20.0']),
['2.20.0', '2.21.0']),
('train',
['2.22.0']),
])
# >= Liberty version->codename mapping
@ -208,6 +213,7 @@ PACKAGE_CODENAMES = {
('17', 'queens'),
('18', 'rocky'),
('19', 'stein'),
('20', 'train'),
]),
'neutron-common': OrderedDict([
('7', 'liberty'),
@ -218,6 +224,7 @@ PACKAGE_CODENAMES = {
('12', 'queens'),
('13', 'rocky'),
('14', 'stein'),
('15', 'train'),
]),
'cinder-common': OrderedDict([
('7', 'liberty'),
@ -228,6 +235,7 @@ PACKAGE_CODENAMES = {
('12', 'queens'),
('13', 'rocky'),
('14', 'stein'),
('15', 'train'),
]),
'keystone': OrderedDict([
('8', 'liberty'),
@ -238,6 +246,7 @@ PACKAGE_CODENAMES = {
('13', 'queens'),
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
]),
'horizon-common': OrderedDict([
('8', 'liberty'),
@ -248,6 +257,7 @@ PACKAGE_CODENAMES = {
('13', 'queens'),
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
]),
'ceilometer-common': OrderedDict([
('5', 'liberty'),
@ -258,6 +268,7 @@ PACKAGE_CODENAMES = {
('10', 'queens'),
('11', 'rocky'),
('12', 'stein'),
('13', 'train'),
]),
'heat-common': OrderedDict([
('5', 'liberty'),
@ -268,6 +279,7 @@ PACKAGE_CODENAMES = {
('10', 'queens'),
('11', 'rocky'),
('12', 'stein'),
('13', 'train'),
]),
'glance-common': OrderedDict([
('11', 'liberty'),
@ -278,6 +290,7 @@ PACKAGE_CODENAMES = {
('16', 'queens'),
('17', 'rocky'),
('18', 'stein'),
('19', 'train'),
]),
'openstack-dashboard': OrderedDict([
('8', 'liberty'),
@ -288,6 +301,7 @@ PACKAGE_CODENAMES = {
('13', 'queens'),
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
]),
}

View File

@ -1488,7 +1488,7 @@ def is_broker_action_done(action, rid=None, unit=None):
@param action: name of action to be performed
@returns True if action complete otherwise False
"""
rdata = relation_get(rid, unit) or {}
rdata = relation_get(rid=rid, unit=unit) or {}
broker_rsp = rdata.get(get_broker_rsp_key())
if not broker_rsp:
return False
@ -1510,7 +1510,7 @@ def mark_broker_action_done(action, rid=None, unit=None):
@param action: name of action to be performed
@returns None
"""
rdata = relation_get(rid, unit) or {}
rdata = relation_get(rid=rid, unit=unit) or {}
broker_rsp = rdata.get(get_broker_rsp_key())
if not broker_rsp:
return

View File

@ -173,6 +173,14 @@ CLOUD_ARCHIVE_POCKETS = {
'stein/proposed': 'bionic-proposed/stein',
'bionic-stein/proposed': 'bionic-proposed/stein',
'bionic-proposed/stein': 'bionic-proposed/stein',
# Train
'train': 'bionic-updates/train',
'bionic-train': 'bionic-updates/train',
'bionic-train/updates': 'bionic-updates/train',
'bionic-updates/train': 'bionic-updates/train',
'train/proposed': 'bionic-proposed/train',
'bionic-train/proposed': 'bionic-proposed/train',
'bionic-proposed/train': 'bionic-proposed/train',
}
@ -522,14 +530,16 @@ def add_source(source, key=None, fail_invalid=False):
for r, fn in six.iteritems(_mapping):
m = re.match(r, source)
if m:
# call the assoicated function with the captured groups
# raises SourceConfigError on error.
fn(*m.groups())
if key:
# Import key before adding the source which depends on it,
# as refreshing packages could fail otherwise.
try:
import_key(key)
except GPGKeyError as e:
raise SourceConfigError(str(e))
# call the associated function with the captured groups
# raises SourceConfigError on error.
fn(*m.groups())
break
else:
# nothing matched. log an error and maybe sys.exit

View File

@ -460,7 +460,8 @@ options:
description: |
A JSON-formatted string that will serve as vendor metadata
(via "StaticJSON" provider) to all VM's within an OpenStack deployment,
regardless of project or domain. For deployments prior to Queens this
regardless of project or domain. For deployments prior to Rocky where
metadata is configured to be provided by neutron-gateway, this
value should be set in the neutron-gateway charm.
vendor-data-url:
type: string
@ -471,7 +472,8 @@ options:
regardless of project or domain.
.
Only supported in OpenStack Newton and higher. For deployments prior to
Queens this value should be set in the neutron-gateway charm.
Rocky where metadata is configured to be provided by neutron-gateway,
this value should be set in the neutron-gateway charm.
quota-instances:
type: int
default:

View File

@ -538,31 +538,59 @@ class NovaAPISharedDBContext(ch_context.SharedDBContext):
return ctxt
class NovaMetadataContext(ch_context.OSContextGenerator):
'''
Context used for configuring the nova metadata service.
'''
class NovaMetadataContext(ch_context.NovaVendorMetadataContext):
"""Context used for configuring the nova metadata service."""
def __call__(self):
cmp_os_release = ch_utils.CompareOpenStackReleases(
ch_utils.os_release('nova-common'))
vdata_values = super(NovaMetadataContext, self).__call__()
release = ch_utils.os_release('nova-common')
cmp_os_release = ch_utils.CompareOpenStackReleases(release)
ctxt = {}
if cmp_os_release >= 'rocky':
vdata_providers = []
vdata = hookenv.config('vendor-data')
vdata_url = hookenv.config('vendor-data-url')
ctxt.update(vdata_values)
if vdata:
ctxt['vendor_data'] = True
vdata_providers.append('StaticJSON')
if vdata_url:
ctxt['vendor_data_url'] = vdata_url
vdata_providers.append('DynamicJSON')
ctxt['vendordata_providers'] = ','.join(vdata_providers)
ctxt['metadata_proxy_shared_secret'] = hookenv.leader_get(
'shared-metadata-secret')
ctxt['enable_metadata'] = True
else:
hookenv.log("Vendor metadata has been configured but is not "
"effective in nova-cloud-controller because release "
"{} is prior to Rocky.".format(release),
level=hookenv.DEBUG)
ctxt['enable_metadata'] = False
# NOTE(ganso): always propagate config value for nova-compute since
# we need to apply it there for all releases, and we cannot determine
# whether nova-compute is really the one serving the vendor metadata
for rid in hookenv.relation_ids('cloud-compute'):
hookenv.relation_set(relation_id=rid,
vendor_data=json.dumps(vdata_values))
return ctxt
class NovaMetadataJSONContext(ch_context.NovaVendorMetadataJSONContext):
def __call__(self):
vdata_values = super(NovaMetadataJSONContext, self).__call__()
# NOTE(ganso): always propagate config value for nova-compute since
# we need to apply it there for releases prior to rocky
for rid in hookenv.relation_ids('cloud-compute'):
hookenv.relation_set(relation_id=rid,
vendor_json=vdata_values['vendor_data_json'])
release = ch_utils.os_release('nova-common')
cmp_os_release = ch_utils.CompareOpenStackReleases(release)
if cmp_os_release >= 'rocky':
return vdata_values
else:
hookenv.log("Vendor metadata has been configured but is not "
"effective in nova-cloud-controller because release "
"{} is prior to Rocky.".format(release),
level=hookenv.DEBUG)
return {'vendor_data_json': '{}'}

View File

@ -283,8 +283,6 @@ def config_changed():
ncc_utils.update_aws_compat_services()
if hookenv.config('vendor-data'):
ncc_utils.write_vendordata(hookenv.config('vendor-data'))
if hookenv.is_leader() and not ncc_utils.get_shared_metadatasecret():
ncc_utils.set_shared_metadatasecret()
for rid in hookenv.relation_ids('ha'):

View File

@ -16,7 +16,6 @@ import base64
import collections
import configparser
import copy
import json
import os
import subprocess
from urllib.parse import urlparse
@ -101,6 +100,7 @@ NEUTRON_CONF_DIR = "/etc/neutron"
NOVA_CONF = '%s/nova.conf' % NOVA_CONF_DIR
NOVA_API_PASTE = '%s/api-paste.ini' % NOVA_CONF_DIR
VENDORDATA_FILE = '%s/vendor_data.json' % NOVA_CONF_DIR
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
APACHE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
APACHE_24_CONF = '/etc/apache2/sites-available/openstack_https_frontend.conf'
@ -118,7 +118,6 @@ PACKAGE_NOVA_API_OS_COMPUTE_CONF = \
'/etc/apache2/sites-available/nova-api-os-compute.conf'
WSGI_NOVA_API_OS_COMPUTE_CONF = \
'/etc/apache2/sites-enabled/wsgi-api-os-compute.conf'
VENDORDATA_FILE = '/etc/nova/vendor_data.json'
def resolve_services():
@ -184,13 +183,18 @@ def get_base_resource_map():
nova_cc_context.NeutronAPIContext(),
nova_cc_context.SerialConsoleContext(),
ch_context.MemcacheContext(),
nova_cc_context.NovaMetadataContext()],
nova_cc_context.NovaMetadataContext('nova-common')],
}),
(NOVA_API_PASTE, {
'services': [s for s in resolve_services() if 'api' in s],
'contexts': [nova_cc_context.IdentityServiceContext(),
nova_cc_context.APIRateLimitingContext()],
}),
(VENDORDATA_FILE, {
'services': [],
'contexts': [nova_cc_context.NovaMetadataJSONContext(
'nova-common')],
}),
(HAPROXY_CONF, {
'contexts': [
ch_context.HAProxyContext(singlenode_mode=True),
@ -1577,18 +1581,6 @@ def get_metadata_settings(configs):
return settings
def write_vendordata(vdata):
"""Write supplied vendor data out to a file."""
try:
json_vdata = json.loads(vdata)
except (TypeError, json.decoder.JSONDecodeError) as e:
hookenv.log('Error decoding vendor-data. {}'.format(e),
level=hookenv.ERROR)
return False
with open(VENDORDATA_FILE, 'w') as vdata_file:
vdata_file.write(json.dumps(json_vdata, sort_keys=True, indent=2))
def get_cell_db_context(db_service):
"""Return the database context for the given service name"""
db_rid = hookenv.relation_id(

View File

@ -646,6 +646,69 @@ class NovaCCBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('glance image-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_220_nova_metadata_propagate(self):
"""Verify that the setting vendor_data is propagated to nova-compute"""
os_release = self._get_openstack_release()
expected = {
"vendordata_providers": "StaticJSON,DynamicJSON",
"vendordata_dynamic_targets": "http://example.org/vdata",
"vendordata_jsonfile_path": "/etc/nova/vendor_data.json",
}
u.log.debug('Validating the config does not exist prior to test')
if self._get_openstack_release() < self.bionic_rocky:
sentries = [self.nova_compute_sentry]
else:
sentries = [self.nova_compute_sentry, self.nova_cc_sentry]
for sentry in sentries:
# Validate nova-cc and nova-compute don't have vendor_data set
if u.validate_config_data(
sentry, "/etc/nova/nova.conf", "api", expected) is None:
amulet.raise_status(
amulet.FAIL, msg="Matching config options were found in "
"nova.conf prior to the test.")
content = u.file_contents_safe(
sentry, "/etc/nova/vendor_data.json", max_wait=4,
fatal=False)
if content:
amulet.raise_status(
amulet.FAIL, msg="vendor_data.json exists with content"
"prior to test: {}.".format(content))
config = {
'vendor-data': '{"good": "json"}',
'vendor-data-url': 'http://example.org/vdata',
}
u.log.debug('Setting nova-cloud-controller config {}'.format(config))
self.d.configure('nova-cloud-controller', config)
u.log.debug('Waiting for all units to get ready')
self.d.sentry.wait()
u.log.debug('Validating the config has been applied and propagated')
for sentry in sentries:
# Validate config got propagated to nova-compute
output = u.validate_config_data(sentry, "/etc/nova/nova.conf",
"api", expected)
if output is not None and os_release >= self.xenial_queens:
amulet.raise_status(
amulet.FAIL, msg="Matching config options "
"were not found in nova.conf. "
"Output: {}".format(output))
content = u.file_contents_safe(
sentry, "/etc/nova/vendor_data.json", max_wait=4, fatal=True)
if os_release >= self.xenial_queens:
if not content or content != '{"good": "json"}':
amulet.raise_status(
amulet.FAIL, msg="vendor_data.json content did not "
"match: {}.".format(content))
u.log.debug('Test 220 finished successfully')
def test_302_api_rate_limiting_is_enabled(self):
"""
Check that API rate limiting is enabled.

View File

@ -29,6 +29,7 @@ TO_PATCH = [
'charmhelpers.core.hookenv.log',
'charmhelpers.core.hookenv.related_units',
'charmhelpers.core.hookenv.relation_get',
'charmhelpers.core.hookenv.relation_set',
'charmhelpers.core.hookenv.relation_ids',
'charmhelpers.core.hookenv.relations_for_id',
]
@ -517,49 +518,69 @@ class NovaComputeContextTests(CharmTestCase):
ctxt = context.NeutronAPIContext()()
self.assertEqual(ctxt, expected)
def test_vendordata_static(self):
_vdata = '{"good": "json"}'
@mock.patch('charmhelpers.contrib.openstack.context.'
'NovaVendorMetadataContext.__call__')
def test_vendordata_static_and_dynamic(self, parent):
_vdata = {
'vendor_data': True,
'vendor_data_url': 'http://example.org/vdata',
'vendordata_providers': 'StaticJSON,DynamicJSON',
}
self.relation_ids.return_value = ['nova-compute:1']
self.os_release.return_value = 'rocky'
self.test_config.set('vendor-data', _vdata)
ctxt = context.NovaMetadataContext()()
self.assertTrue(ctxt['vendor_data'])
self.assertEqual(ctxt['vendordata_providers'], 'StaticJSON')
def test_vendordata_dynamic(self):
_vdata_url = 'http://example.org/vdata'
self.os_release.return_value = 'rocky'
self.test_config.set('vendor-data-url', _vdata_url)
ctxt = context.NovaMetadataContext()()
self.assertEqual(ctxt['vendor_data_url'], _vdata_url)
self.assertEqual(ctxt['vendordata_providers'], 'DynamicJSON')
def test_vendordata_static_and_dynamic(self):
self.os_release.return_value = 'rocky'
_vdata = '{"good": "json"}'
_vdata_url = 'http://example.org/vdata'
self.test_config.set('vendor-data', _vdata)
self.test_config.set('vendor-data-url', _vdata_url)
ctxt = context.NovaMetadataContext()()
self.assertTrue(ctxt['vendor_data'])
self.assertEqual(ctxt['vendor_data_url'], _vdata_url)
self.assertEqual(ctxt['vendordata_providers'],
'StaticJSON,DynamicJSON')
def test_vendordata_mitaka(self):
self.os_release.return_value = 'mitaka'
self.leader_get.return_value = 'auuid'
_vdata_url = 'http://example.org/vdata'
parent.return_value = _vdata
ctxt = context.NovaMetadataContext('nova-common')()
self.test_config.set('vendor-data-url', _vdata_url)
ctxt = context.NovaMetadataContext()()
self.assertTrue(ctxt['vendor_data'])
self.assertEqual(_vdata['vendor_data_url'], ctxt['vendor_data_url'])
self.assertEqual('StaticJSON,DynamicJSON',
ctxt['vendordata_providers'])
self.assertTrue(ctxt['enable_metadata'])
self.assertEqual('auuid', ctxt['metadata_proxy_shared_secret'])
self.relation_set.assert_called_with(relation_id=mock.ANY,
vendor_data=json.dumps(_vdata))
self.assertEqual(ctxt, {'enable_metadata': False})
@mock.patch('charmhelpers.contrib.openstack.context.'
'NovaVendorMetadataContext.__call__')
def test_vendordata_pike(self, parent):
_vdata = {
'vendor_data': True,
'vendor_data_url': 'http://example.org/vdata',
'vendordata_providers': 'StaticJSON,DynamicJSON',
}
self.relation_ids.return_value = ['nova-compute:1']
self.os_release.return_value = 'pike'
parent.return_value = _vdata
ctxt = context.NovaMetadataContext('nova-common')()
self.assertEqual({'enable_metadata': False}, ctxt)
self.relation_set.assert_called_with(relation_id=mock.ANY,
vendor_data=json.dumps(_vdata))
@mock.patch('charmhelpers.contrib.openstack.context.'
'NovaVendorMetadataJSONContext.__call__')
def test_vendor_json_valid(self, parent):
self.os_release.return_value = 'rocky'
_vdata = {'vendor_data_json': '{"good": "json"}'}
parent.return_value = _vdata
self.relation_ids.return_value = ['nova-compute:1']
ctxt = context.NovaMetadataJSONContext('nova-common')()
self.assertEqual(_vdata, ctxt)
self.relation_set.assert_called_with(relation_id=mock.ANY,
vendor_json='{"good": "json"}')
@mock.patch('charmhelpers.contrib.openstack.context.'
'NovaVendorMetadataJSONContext.__call__')
def test_vendor_json_prior_rocky(self, parent):
self.os_release.return_value = 'queens'
_vdata = {'vendor_data_json': '{"good": "json"}'}
parent.return_value = _vdata
self.relation_ids.return_value = ['nova-compute:1']
ctxt = context.NovaMetadataJSONContext('nova-common')()
self.assertEqual({'vendor_data_json': '{}'}, ctxt)
self.relation_set.assert_called_with(relation_id=mock.ANY,
vendor_json='{"good": "json"}')
def test_NovaCellV2Context(self):
settings = {'cell-name': 'cell32',

View File

@ -13,7 +13,7 @@
# limitations under the License.
from collections import OrderedDict
from mock import patch, MagicMock, call, mock_open
from mock import patch, MagicMock, call
from unit_tests.test_utils import (
CharmTestCase,
@ -1520,16 +1520,6 @@ class NovaCCUtilsTests(CharmTestCase):
utils.get_metadata_settings('configs'),
{})
def test_write_vendordata(self):
m = mock_open()
with patch.object(utils, 'open', m, create=True):
utils.write_vendordata('{"a": "b"}')
expected_calls = [
call('/etc/nova/vendor_data.json', 'w'),
call().write('{\n "a": "b"\n}')]
for c in expected_calls:
self.assertTrue(c in m.mock_calls)
@patch.object(utils.ch_context, 'SharedDBContext')
@patch('charmhelpers.core.hookenv.relation_id')
def test_get_cell_db_context(self, mock_relation_id, mock_SharedDBContext):