Support using internal network for clients

OpenStack mostly defaults to using public endpoints for
internal communication between services. This patch adds
a new option use-internal-endpoints which, if set to True,
will configure services to use internal endpoints where
possible.

Closes-Bug: 1456876
Change-Id: I736a0a281ec434067bc92fa70898b16a027f7422
Edward Hope-Morley 2016-03-24 16:00:51 +00:00
parent da4f012a9b
commit 0bf8de365c
13 changed files with 85 additions and 18 deletions

View File

@@ -175,6 +175,13 @@ options:
         nagios_context will be used as the servicegroup.
   # Network configuration options
   # NOTE: by default all access is over 'private-address'
+  use-internal-endpoints:
+    default: False
+    type: boolean
+    description: |
+      OpenStack mostly defaults to using public endpoints for
+      internal communication between services. If set to True this option
+      will configure services to use internal endpoints where possible.
   network-device-mtu:
     type: int
     default:

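For charms that pick up this option, the boolean is read in hook code via
charm-helpers. A minimal hypothetical sketch (the helper name is ours, not
part of this commit):

# Hypothetical hook snippet, not from this commit: reading the new
# boolean with charm-helpers' config().
from charmhelpers.core.hookenv import config

def endpoint_url_type():
    # 'use-internal-endpoints' defaults to False (see config.yaml above)
    if config('use-internal-endpoints'):
        return 'internalURL'
    return 'publicURL'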
View File

@@ -191,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
 get_netmask_for_address = partial(_get_for_address, key='netmask')
 
 
+def resolve_network_cidr(ip_address):
+    '''
+    Resolves the full address cidr of an ip_address based on
+    configured network interfaces
+    '''
+    netmask = get_netmask_for_address(ip_address)
+    return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
+
+
 def format_ipv6_addr(address):
     """If address is IPv6, wrap it in '[]' otherwise return None.

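To illustrate what the new helper computes, assuming illustrative addresses
(in practice get_netmask_for_address() reads the netmask off a configured
interface):

# Illustrative values only: the netaddr arithmetic performed by
# resolve_network_cidr() once the interface netmask is known.
import netaddr

ip_address = '10.5.0.23'   # hypothetical unit address
netmask = '255.255.255.0'  # as get_netmask_for_address() might return

print(str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr))
# -> 10.5.0.0/24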
View File

@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                      'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup']
+                     'cinder-backup', 'nexentaedge-data',
+                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                     'cinder-nexentaedge', 'nexentaedge-mgmt']
 
         if self.openstack:
             for svc in services:

View File

@@ -1479,3 +1479,15 @@ class NetworkServiceContext(OSContextGenerator):
         if self.context_complete(ctxt):
             return ctxt
         return {}
+
+
+class InternalEndpointContext(OSContextGenerator):
+    """Internal endpoint context.
+
+    This context provides the endpoint type used for communication between
+    services e.g. between Nova and Cinder internally. OpenStack uses public
+    endpoints by default so this allows admins to optionally use internal
+    endpoints.
+    """
+    def __call__(self):
+        return {'use_internal_endpoints': config('use-internal-endpoints')}

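The context generator simply surfaces the charm option to templates. A hedged
sketch of calling it (a hook environment is assumed so that config() works):

# Hedged sketch: OSContextGenerator subclasses are callables that return
# a dict merged into the template context.
from charmhelpers.contrib.openstack.context import InternalEndpointContext

ctxt = InternalEndpointContext()()
# -> {'use_internal_endpoints': True} when the option is set to True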
View File

@ -14,16 +14,19 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core.hookenv import (
config,
unit_get,
service_name,
network_get_primary_address,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
resolve_network_cidr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
@@ -33,16 +36,19 @@ ADMIN = 'admin'
 
 ADDRESS_MAP = {
     PUBLIC: {
+        'binding': 'public',
         'config': 'os-public-network',
         'fallback': 'public-address',
         'override': 'os-public-hostname',
     },
     INTERNAL: {
+        'binding': 'internal',
         'config': 'os-internal-network',
         'fallback': 'private-address',
         'override': 'os-internal-hostname',
     },
     ADMIN: {
+        'binding': 'admin',
         'config': 'os-admin-network',
         'fallback': 'private-address',
         'override': 'os-admin-hostname',
@@ -110,7 +116,7 @@ def resolve_address(endpoint_type=PUBLIC):
     correct network. If clustered with no nets defined, return primary vip.
     If not clustered, return unit address ensuring address is on configured net
-    split if one is configured.
+    split if one is configured, or a Juju 2.0 extra-binding has been used.
 
     :param endpoint_type: Network endpoint type
     """
@@ -125,23 +131,45 @@ def resolve_address(endpoint_type=PUBLIC):
     net_type = ADDRESS_MAP[endpoint_type]['config']
     net_addr = config(net_type)
     net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    binding = ADDRESS_MAP[endpoint_type]['binding']
     clustered = is_clustered()
-    if clustered:
-        if not net_addr:
-            # If no net-splits defined, we expect a single vip
-            resolved_address = vips[0]
-        else:
+    if clustered and vips:
+        if net_addr:
             for vip in vips:
                 if is_address_in_network(net_addr, vip):
                     resolved_address = vip
                     break
+        else:
+            # NOTE: endeavour to check vips against network space
+            #       bindings
+            try:
+                bound_cidr = resolve_network_cidr(
+                    network_get_primary_address(binding)
+                )
+                for vip in vips:
+                    if is_address_in_network(bound_cidr, vip):
+                        resolved_address = vip
+                        break
+            except NotImplementedError:
+                # If no net-splits configured and no support for extra
+                # bindings/network spaces so we expect a single vip
+                resolved_address = vips[0]
     else:
         if config('prefer-ipv6'):
             fallback_addr = get_ipv6_addr(exc_list=vips)[0]
         else:
             fallback_addr = unit_get(net_fallback)
 
-        resolved_address = get_address_in_network(net_addr, fallback_addr)
+        if net_addr:
+            resolved_address = get_address_in_network(net_addr, fallback_addr)
+        else:
+            # NOTE: only try to use extra bindings if legacy network
+            #       configuration is not in use
+            try:
+                resolved_address = network_get_primary_address(binding)
+            except NotImplementedError:
+                resolved_address = fallback_addr
 
     if resolved_address is None:
         raise ValueError("Unable to resolve a suitable IP address based on "

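The clustered branch boils down to matching each vip against the CIDR of the
endpoint's network space binding; is_address_in_network() is essentially a
netaddr membership test. With illustrative addresses:

# Illustrative values, not from the commit: pick the vip that falls in
# the CIDR bound to the endpoint's network space (via network-get).
import netaddr

bound_cidr = '10.10.0.0/24'  # resolve_network_cidr(primary binding address)
vips = ['192.168.1.100', '10.10.0.100']

resolved = next((vip for vip in vips
                 if netaddr.IPAddress(vip) in netaddr.IPNetwork(bound_cidr)),
                None)
print(resolved)  # -> 10.10.0.100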
View File

@@ -181,7 +181,8 @@ BASE_RESOURCE_MAP = {
                      MetadataServiceContext(),
                      HostIPContext(),
                      DesignateContext(),
-                     context.LogLevelContext()],
+                     context.LogLevelContext(),
+                     context.InternalEndpointContext()],
     },
 }

View File

@@ -165,5 +165,5 @@ live_migration_uri = {{ live_migration_uri }}
 disk_cachemodes = {{ disk_cachemodes }}
 {% endif %}
 
-{% include "parts/cinder" %}
+{% include "parts/section-cinder" %}

View File

@@ -177,7 +177,7 @@ disk_cachemodes = {{ disk_cachemodes }}
 
 {% include "section-rabbitmq-oslo" %}
 
-{% include "parts/cinder" %}
+{% include "parts/section-cinder" %}
 
 [oslo_concurrency]
 lock_path=/var/lock/nova

View File

@@ -183,6 +183,8 @@ disk_cachemodes = {{ disk_cachemodes }}
 
 {% include "section-rabbitmq-oslo" %}
 
+{% include "parts/section-cinder" %}
+
 [oslo_concurrency]
 lock_path=/var/lock/nova

View File

@@ -182,7 +182,7 @@ disk_cachemodes = {{ disk_cachemodes }}
 
 {% include "section-rabbitmq-oslo" %}
 
-{% include "parts/cinder" %}
+{% include "parts/section-cinder" %}
 
 [oslo_concurrency]
 lock_path=/var/lock/nova

View File

@@ -1,5 +0,0 @@
-{% if volume_service and volume_service == 'cinder' and region -%}
-[cinder]
-os_region_name = {{ region }}
-{% endif -%}

View File

@@ -0,0 +1,9 @@
+{% if volume_service and volume_service == 'cinder' -%}
+[cinder]
+{% if use_internal_endpoints -%}
+catalog_info = volumev2:cinderv2:internalURL
+{% endif %}
+{% if region -%}
+os_region_name = {{ region }}
+{% endif %}
+{% endif -%}

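To see what the new fragment emits, a hedged sketch rendering it with plain
jinja2 (the charm uses its own template loader; this is illustration only):

# Standalone render of the parts/section-cinder fragment above.
from jinja2 import Template

FRAGMENT = """{% if volume_service and volume_service == 'cinder' -%}
[cinder]
{% if use_internal_endpoints -%}
catalog_info = volumev2:cinderv2:internalURL
{% endif %}
{% if region -%}
os_region_name = {{ region }}
{% endif %}
{% endif -%}"""

print(Template(FRAGMENT).render(volume_service='cinder',
                                use_internal_endpoints=True,
                                region='RegionOne'))
# Renders (modulo blank lines):
# [cinder]
# catalog_info = volumev2:cinderv2:internalURL
# os_region_name = RegionOne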
View File

@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                      'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup']
+                     'cinder-backup', 'nexentaedge-data',
+                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                     'cinder-nexentaedge', 'nexentaedge-mgmt']
 
         if self.openstack:
             for svc in services: