Add QoS support

This patch adds support for reading the 'enable-qos' setting from the
neutron-plugin-api relation and adding 'qos' to the extension_drivers setting
if it is True. This is part of a wider set of changes to support QoS across
the neutron charms.

The amulet tests were missing the neutron-api to neutron-gateway relation;
this has been added. A side-effect of this is that the l2-population setting
is now properly being set to True, so the tests were updated to expect that.

A charmhelper sync was performed to pull in the QoS update to the
NeutronAPIContext.

Note: Amulet tests will fail until the corresponding neutron-api change
lands.

Depends-On: I1beba9bebdb7766fd95d47bf13b6f4ad86e762b5
Change-Id: I6dc71a96b635600b7e528a9acdfd4dc0eded9259
Partial-Bug: #1705358
This commit is contained in:
Liam Young 2017-08-07 17:04:33 +01:00 committed by Liam Young
parent 2d2df3c285
commit a5f92548c0
5 changed files with 66 additions and 26 deletions

View File

@ -41,9 +41,9 @@ from charmhelpers.core.hookenv import (
charm_name, charm_name,
DEBUG, DEBUG,
INFO, INFO,
WARNING,
ERROR, ERROR,
status_set, status_set,
network_get_primary_address
) )
from charmhelpers.core.sysctl import create as sysctl_create from charmhelpers.core.sysctl import create as sysctl_create
@ -80,9 +80,6 @@ from charmhelpers.contrib.openstack.neutron import (
from charmhelpers.contrib.openstack.ip import ( from charmhelpers.contrib.openstack.ip import (
resolve_address, resolve_address,
INTERNAL, INTERNAL,
ADMIN,
PUBLIC,
ADDRESS_MAP,
) )
from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.network.ip import (
get_address_in_network, get_address_in_network,
@ -90,6 +87,7 @@ from charmhelpers.contrib.network.ip import (
get_ipv6_addr, get_ipv6_addr,
get_netmask_for_address, get_netmask_for_address,
format_ipv6_addr, format_ipv6_addr,
is_address_in_network,
is_bridge_member, is_bridge_member,
is_ipv6_disabled, is_ipv6_disabled,
) )
@ -622,6 +620,7 @@ class HAProxyContext(OSContextGenerator):
ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
if config('prefer-ipv6'): if config('prefer-ipv6'):
ctxt['ipv6'] = True
ctxt['local_host'] = 'ip6-localhost' ctxt['local_host'] = 'ip6-localhost'
ctxt['haproxy_host'] = '::' ctxt['haproxy_host'] = '::'
else: else:
@ -759,27 +758,36 @@ class ApacheSSLContext(OSContextGenerator):
...] ...]
""" """
addresses = [] addresses = []
for net_type in [INTERNAL, ADMIN, PUBLIC]: if config('vip'):
net_config = config(ADDRESS_MAP[net_type]['config']) vips = config('vip').split()
# NOTE(jamespage): Fallback must always be private address else:
# as this is used to bind services on the vips = []
# local unit.
fallback = unit_get("private-address") for net_type in ['internal', 'admin', 'public']:
if net_config: net_config = config('os-{}-network'.format(net_type))
addr = get_address_in_network(net_config, addr = get_address_in_network(net_config,
fallback) unit_get('private-address'))
hostname_config = config('os-{}-hostname'.format(net_type))
if hostname_config:
addresses.append((addr, hostname_config))
elif len(vips) > 1 and is_clustered():
if not net_config:
log("Multiple networks configured but net_type "
"is None (%s)." % net_type, level=WARNING)
continue
for vip in vips:
if is_address_in_network(net_config, vip):
addresses.append((addr, vip))
break
elif is_clustered() and config('vip'):
addresses.append((addr, config('vip')))
else: else:
try: addresses.append((addr, addr))
addr = network_get_primary_address(
ADDRESS_MAP[net_type]['binding']
)
except NotImplementedError:
addr = fallback
endpoint = resolve_address(net_type) return sorted(addresses)
addresses.append((addr, endpoint))
return sorted(set(addresses))
def __call__(self): def __call__(self):
if isinstance(self.external_ports, six.string_types): if isinstance(self.external_ports, six.string_types):
@ -806,7 +814,7 @@ class ApacheSSLContext(OSContextGenerator):
self.configure_cert(cn) self.configure_cert(cn)
addresses = self.get_network_addresses() addresses = self.get_network_addresses()
for address, endpoint in addresses: for address, endpoint in sorted(set(addresses)):
for api_port in self.external_ports: for api_port in self.external_ports:
ext_port = determine_apache_port(api_port, ext_port = determine_apache_port(api_port,
singlenode_mode=True) singlenode_mode=True)

View File

@ -94,6 +94,7 @@ class NeutronGatewayContext(NeutronAPIContext):
'l2_population': api_settings['l2_population'], 'l2_population': api_settings['l2_population'],
'enable_dvr': api_settings['enable_dvr'], 'enable_dvr': api_settings['enable_dvr'],
'enable_l3ha': api_settings['enable_l3ha'], 'enable_l3ha': api_settings['enable_l3ha'],
'extension_drivers': api_settings['extension_drivers'],
'dns_domain': api_settings['dns_domain'], 'dns_domain': api_settings['dns_domain'],
'overlay_network_type': 'overlay_network_type':
api_settings['overlay_network_type'], api_settings['overlay_network_type'],

View File

@ -14,6 +14,9 @@ l2_population = {{ l2_population }}
enable_distributed_routing = {{ enable_dvr }} enable_distributed_routing = {{ enable_dvr }}
{% if veth_mtu -%} {% if veth_mtu -%}
veth_mtu = {{ veth_mtu }} veth_mtu = {{ veth_mtu }}
{% endif -%}
{% if extension_drivers -%}
extensions = {{ extension_drivers }}
{% endif %} {% endif %}
[securitygroup] [securitygroup]

View File

@ -90,6 +90,8 @@ class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
'rabbitmq-server:amqp': 'neutron-openvswitch:amqp', 'rabbitmq-server:amqp': 'neutron-openvswitch:amqp',
'nova-compute:image-service': 'glance:image-service', 'nova-compute:image-service': 'glance:image-service',
'nova-cloud-controller:image-service': 'glance:image-service', 'nova-cloud-controller:image-service': 'glance:image-service',
'neutron-api:neutron-plugin-api': 'neutron-gateway:'
'neutron-plugin-api',
} }
super(NeutronGatewayBasicDeployment, self)._add_relations(relations) super(NeutronGatewayBasicDeployment, self)._add_relations(relations)
@ -615,7 +617,7 @@ class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
}, },
'agent': { 'agent': {
'tunnel_types': 'gre', 'tunnel_types': 'gre',
'l2_population': 'False' 'l2_population': 'True'
}, },
'securitygroup': { 'securitygroup': {
'firewall_driver': 'neutron.agent.linux.iptables_firewall.' 'firewall_driver': 'neutron.agent.linux.iptables_firewall.'
@ -631,7 +633,7 @@ class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
}, },
'agent': { 'agent': {
'tunnel_types': 'gre', 'tunnel_types': 'gre',
'l2_population': 'False' 'l2_population': 'True'
}, },
'securitygroup': { 'securitygroup': {
'firewall_driver': 'neutron.agent.linux.iptables_firewall.' 'firewall_driver': 'neutron.agent.linux.iptables_firewall.'
@ -969,6 +971,28 @@ class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
u.log.debug('Deleting neutron network...') u.log.debug('Deleting neutron network...')
self.neutron.delete_network(network['id']) self.neutron.delete_network(network['id'])
def test_401_enable_qos(self):
"""Check qos settings set via neutron-api charm"""
if self._get_openstack_release() >= self.trusty_mitaka:
unit = self.neutron_gateway_sentry
set_default = {'enable-qos': 'False'}
set_alternate = {'enable-qos': 'True'}
self.d.configure('neutron-api', set_alternate)
time.sleep(60)
self._auto_wait_for_status(exclude_services=self.exclude_services)
config = u._get_config(
unit,
'/etc/neutron/plugins/ml2/openvswitch_agent.ini')
extensions = config.get('agent', 'extensions').split(',')
if 'qos' not in extensions:
message = "qos not in extensions"
amulet.raise_status(amulet.FAIL, msg=message)
u.log.debug('Setting QoS back to {}'.format(
set_default['enable-qos']))
self.d.configure('neutron-api', set_default)
u.log.debug('OK')
def test_900_restart_on_config_change(self): def test_900_restart_on_config_change(self):
"""Verify that the specified services are restarted when the """Verify that the specified services are restarted when the
config is changed.""" config is changed."""

View File

@ -154,6 +154,7 @@ class TestNeutronGatewayContext(CharmTestCase):
'enable-dvr': 'True', 'enable-dvr': 'True',
'overlay-network-type': 'gre', 'overlay-network-type': 'gre',
'enable-l3ha': 'True', 'enable-l3ha': 'True',
'enable-qos': 'True',
'network-device-mtu': 9000, 'network-device-mtu': 9000,
'dns-domain': 'openstack.example.'} 'dns-domain': 'openstack.example.'}
self.test_config.set('plugin', 'ovs') self.test_config.set('plugin', 'ovs')
@ -179,6 +180,7 @@ class TestNeutronGatewayContext(CharmTestCase):
'enable_dvr': True, 'enable_dvr': True,
'enable_l3ha': True, 'enable_l3ha': True,
'dns_servers': '8.8.8.8,4.4.4.4', 'dns_servers': '8.8.8.8,4.4.4.4',
'extension_drivers': 'qos',
'dns_domain': 'openstack.example.', 'dns_domain': 'openstack.example.',
'local_ip': '10.5.0.1', 'local_ip': '10.5.0.1',
'instance_mtu': 1420, 'instance_mtu': 1420,
@ -212,6 +214,7 @@ class TestNeutronGatewayContext(CharmTestCase):
'enable-dvr': 'True', 'enable-dvr': 'True',
'overlay-network-type': 'gre', 'overlay-network-type': 'gre',
'enable-l3ha': 'True', 'enable-l3ha': 'True',
'enable-qos': 'True',
'network-device-mtu': 9000, 'network-device-mtu': 9000,
'dns-domain': 'openstack.example.'} 'dns-domain': 'openstack.example.'}
self.test_config.set('plugin', 'ovs') self.test_config.set('plugin', 'ovs')
@ -236,6 +239,7 @@ class TestNeutronGatewayContext(CharmTestCase):
'enable_dvr': True, 'enable_dvr': True,
'enable_l3ha': True, 'enable_l3ha': True,
'dns_servers': None, 'dns_servers': None,
'extension_drivers': 'qos',
'dns_domain': 'openstack.example.', 'dns_domain': 'openstack.example.',
'local_ip': '192.168.20.2', 'local_ip': '192.168.20.2',
'instance_mtu': 1420, 'instance_mtu': 1420,