Add Juju Network Space support
Juju 2.0 provides support for network spaces, allowing charm authors to support direct binding of relations and extra-bindings onto underlying network spaces. Resync charm-helpers to pick up support in the API endpoint resolution code and add API extra-bindings to the charm metadata.

Change-Id: Ibbbeba5966f799d25d0a40b69b1672deb1476924
parent f434b40b04
commit ffd4644a10
@@ -212,3 +212,28 @@ The following is a full list of current tip repos (may not be up-to-date):
   - {name: cinder,
      repository: 'git://github.com/openstack/cinder',
      branch: master}
+
+Network Space support
+---------------------
+
+This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above.
+
+API endpoints can be bound to distinct network spaces supporting the network separation of public, internal and admin endpoints.
+
+To use this feature, use the --bind option when deploying the charm:
+
+    juju deploy cinder --bind "public=public-space internal=internal-space admin=admin-space"
+
+alternatively these can also be provided as part of a juju native bundle configuration:
+
+    cinder:
+      charm: cs:xenial/cinder
+      num_units: 1
+      bindings:
+        public: public-space
+        admin: admin-space
+        internal: internal-space
+
+NOTE: Spaces must be configured in the underlying provider prior to attempting to use them.
+
+NOTE: Existing deployments using os-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
@@ -191,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
 get_netmask_for_address = partial(_get_for_address, key='netmask')
 
 
+def resolve_network_cidr(ip_address):
+    '''
+    Resolves the full address cidr of an ip_address based on
+    configured network interfaces
+    '''
+    netmask = get_netmask_for_address(ip_address)
+    return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
+
+
 def format_ipv6_addr(address):
     """If address is IPv6, wrap it in '[]' otherwise return None.
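The new resolve_network_cidr helper uses netaddr to collapse a unit address and the netmask of the interface carrying it into the owning network CIDR, which the endpoint resolution code further down uses to match VIPs against a bound network space. A minimal sketch of the same calculation, with a hard-coded netmask standing in for the get_netmask_for_address lookup (the address and mask values are illustrative only):

    import netaddr

    def network_cidr(ip_address, netmask):
        # Combine the address with its netmask and normalise to the
        # network CIDR, e.g. 10.5.0.23/255.255.255.0 -> 10.5.0.0/24.
        return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)

    print(network_cidr("10.5.0.23", "255.255.255.0"))  # 10.5.0.0/24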
@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                      'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup']
+                     'cinder-backup', 'nexentaedge-data',
+                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                     'cinder-nexentaedge', 'nexentaedge-mgmt']
 
         if self.openstack:
             for svc in services:
@@ -14,16 +14,19 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
 
 
 from charmhelpers.core.hookenv import (
     config,
     unit_get,
     service_name,
+    network_get_primary_address,
 )
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     is_address_in_network,
     is_ipv6,
     get_ipv6_addr,
+    resolve_network_cidr,
 )
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
 
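network_get_primary_address and resolve_network_cidr are used together in the clustered code path further down: the primary address of an extra-binding is resolved to its network CIDR so that configured VIPs can be matched against the bound space. A hedged sketch of that pairing (the binding name and VIP list below are illustrative):

    from charmhelpers.core.hookenv import network_get_primary_address
    from charmhelpers.contrib.network.ip import (
        is_address_in_network,
        resolve_network_cidr,
    )

    def vip_for_binding(binding, vips):
        # Resolve the CIDR of the space bound to 'binding', then return
        # the first configured VIP that falls inside that network.
        bound_cidr = resolve_network_cidr(network_get_primary_address(binding))
        for vip in vips:
            if is_address_in_network(bound_cidr, vip):
                return vip
        return None

    # e.g. vip_for_binding('internal', ['10.5.100.1', '172.16.200.1'])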
@@ -33,16 +36,19 @@ ADMIN = 'admin'
 
 ADDRESS_MAP = {
     PUBLIC: {
+        'binding': 'public',
         'config': 'os-public-network',
         'fallback': 'public-address',
         'override': 'os-public-hostname',
     },
     INTERNAL: {
+        'binding': 'internal',
         'config': 'os-internal-network',
         'fallback': 'private-address',
         'override': 'os-internal-hostname',
     },
     ADMIN: {
+        'binding': 'admin',
         'config': 'os-admin-network',
         'fallback': 'private-address',
         'override': 'os-admin-hostname',
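Each endpoint type now carries a 'binding' entry alongside the existing 'config', 'fallback' and 'override' keys, so the resolution code can find the matching Juju extra-binding with the same map lookup it already uses for the legacy options. A trimmed illustration of that lookup (the map below is abbreviated, not the full structure added above):

    ADDRESS_MAP = {
        'public':   {'binding': 'public',   'config': 'os-public-network'},
        'internal': {'binding': 'internal', 'config': 'os-internal-network'},
        'admin':    {'binding': 'admin',    'config': 'os-admin-network'},
    }

    def binding_for(endpoint_type):
        # Extra-binding consulted when the os-*-network option is unset.
        return ADDRESS_MAP[endpoint_type]['binding']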
@@ -110,7 +116,7 @@ def resolve_address(endpoint_type=PUBLIC):
     correct network. If clustered with no nets defined, return primary vip.
 
     If not clustered, return unit address ensuring address is on configured net
-    split if one is configured.
+    split if one is configured, or a Juju 2.0 extra-binding has been used.
 
     :param endpoint_type: Network endpoing type
     """
@@ -125,23 +131,45 @@ def resolve_address(endpoint_type=PUBLIC):
     net_type = ADDRESS_MAP[endpoint_type]['config']
     net_addr = config(net_type)
     net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    binding = ADDRESS_MAP[endpoint_type]['binding']
     clustered = is_clustered()
-    if clustered:
-        if not net_addr:
-            # If no net-splits defined, we expect a single vip
-            resolved_address = vips[0]
-        else:
+
+    if clustered and vips:
+        if net_addr:
             for vip in vips:
                 if is_address_in_network(net_addr, vip):
                     resolved_address = vip
                     break
+        else:
+            # NOTE: endeavour to check vips against network space
+            #       bindings
+            try:
+                bound_cidr = resolve_network_cidr(
+                    network_get_primary_address(binding)
+                )
+                for vip in vips:
+                    if is_address_in_network(bound_cidr, vip):
+                        resolved_address = vip
+                        break
+            except NotImplementedError:
+                # If no net-splits configured and no support for extra
+                # bindings/network spaces so we expect a single vip
+                resolved_address = vips[0]
     else:
         if config('prefer-ipv6'):
             fallback_addr = get_ipv6_addr(exc_list=vips)[0]
         else:
             fallback_addr = unit_get(net_fallback)
 
-        resolved_address = get_address_in_network(net_addr, fallback_addr)
+        if net_addr:
+            resolved_address = get_address_in_network(net_addr, fallback_addr)
+        else:
+            # NOTE: only try to use extra bindings if legacy network
+            #       configuration is not in use
+            try:
+                resolved_address = network_get_primary_address(binding)
+            except NotImplementedError:
+                resolved_address = fallback_addr
 
     if resolved_address is None:
         raise ValueError("Unable to resolve a suitable IP address based on "
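In the non-clustered branch the resolution order is now: an explicitly configured os-*-network option first, then the network space bound to the endpoint's extra-binding, then the traditional fallback address on Juju versions without network-get support. A simplified, self-contained sketch of that precedence (the helper name is illustrative, not part of the charm-helpers API):

    from charmhelpers.core.hookenv import network_get_primary_address
    from charmhelpers.contrib.network.ip import get_address_in_network

    def pick_address(net_addr, binding, fallback_addr):
        # Illustrative helper mirroring the non-clustered branch above.
        if net_addr:
            # Legacy os-*-network configuration wins when it is set.
            return get_address_in_network(net_addr, fallback_addr)
        try:
            # Otherwise defer to the space bound to the extra-binding.
            return network_get_primary_address(binding)
        except NotImplementedError:
            # Juju < 2.0 has no extra-bindings; use the fallback address.
            return fallback_addr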
@@ -7,6 +7,10 @@ tags:
   - openstack
   - storage
   - misc
+extra-bindings:
+  public:
+  admin:
+  internal:
 provides:
   nrpe-external-master:
     interface: nrpe-external-master
@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                      'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup']
+                     'cinder-backup', 'nexentaedge-data',
+                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                     'cinder-nexentaedge', 'nexentaedge-mgmt']
 
         if self.openstack:
             for svc in services: