Merge: Adding fabric/data network support
Commit dc8778f6a3

config.yaml (+18 lines)
@@ -11,6 +11,24 @@ options:
    type: string
    default: 'juju-br0'
    description: The interface connected to the PLUMgrid Management network.
  os-data-network:
    type: string
    default:
    description: |
      The IP address and netmask of the OpenStack Data network (e.g.,
      192.168.0.0/24). This network will be used for tenant network traffic
      in overlay networks.
  fabric-interfaces:
    default: 'MANAGEMENT'
    type: string
    description: |
      Interfaces that will provide fabric connectivity on the director nodes,
      provided as a JSON string. These interfaces have to be connected to the
      os-data-network specified in the config. The default value MANAGEMENT
      configures the management interface as the fabric interface on each
      director.
  network-device-mtu:
    type: string
    default: '1580'

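As a hedged illustration of the fabric-interfaces format (the hostnames and interface names below are hypothetical, not taken from this commit), the value is a JSON object keyed by unit hostname, with an optional DEFAULT entry applied to any node not listed explicitly:

    fabric-interfaces: '{"node0": "eth1", "node1": "eth1", "DEFAULT": "eth2"}'

This per-hostname/DEFAULT lookup mirrors the behaviour of get_fabric_interface() added further down in this commit.
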
@@ -83,8 +83,9 @@ class PGDirContext(context.NeutronContext):
        pg_ctxt['director_ips_string'] = pg_dir_ips_string
        pg_ctxt['virtual_ip'] = conf['plumgrid-virtual-ip']
        pg_ctxt['pg_hostname'] = "pg-director"
        from pg_dir_utils import get_mgmt_interface
        from pg_dir_utils import get_mgmt_interface, get_fabric_interface
        pg_ctxt['interface'] = get_mgmt_interface()
        pg_ctxt['fabric_interface'] = get_fabric_interface()
        pg_ctxt['label'] = get_unit_hostname()
        pg_ctxt['fabric_mode'] = 'host'
        virtual_ip_array = re.split('\.', conf['plumgrid-virtual-ip'])

@@ -30,6 +30,7 @@ from pg_dir_utils import (
    ensure_mtu,
    add_lcm_key,
    post_pg_license,
    fabric_interface_changed
)

hooks = Hooks()

@@ -65,12 +66,22 @@ def config_changed():
    This hook is run when a config parameter is changed.
    It also runs on node reboot.
    '''
    if post_pg_license():
        log("PLUMgrid License Posted")
        return 1
    if add_lcm_key():
        log("PLUMgrid LCM Key added")
        return 1
    charm_config = config()
    if charm_config.changed('plumgrid-license-key'):
        if post_pg_license():
            log("PLUMgrid License Posted")
            return 1
    if charm_config.changed('fabric-interfaces'):
        if not fabric_interface_changed():
            log("Fabric interface already set")
            return 1
    if charm_config.changed('os-data-network'):
        if charm_config['fabric-interfaces'] == 'MANAGEMENT':
            log('Fabric running on management network')
            return 1
    stop_pg()
    configure_sources(update=True)
    pkgs = determine_packages()

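For reference, a minimal sketch of how the changed() helper used in this hook behaves, assuming the standard charmhelpers hookenv API (the key and variable names are illustrative only):

    from charmhelpers.core.hookenv import config

    charm_config = config()
    if charm_config.changed('os-data-network'):
        # changed() compares against the value saved at the end of the
        # previous hook run; previous() returns that earlier value.
        old_network = charm_config.previous('os-data-network')
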
@@ -13,7 +13,9 @@ from charmhelpers.contrib.network.ip import (
    get_iface_from_addr,
    get_bridges,
    get_bridge_nics,
    is_ip
    is_ip,
    is_address_in_network,
    get_iface_addr
)
from charmhelpers.fetch import (
    apt_cache

@@ -29,6 +31,7 @@ from charmhelpers.core.host import (
    service_start,
    service_stop,
)
from socket import gethostname as get_unit_hostname
import pg_dir_context
import subprocess
import time

@@ -169,22 +172,23 @@ def remove_iovisor():
    time.sleep(1)


def interface_exists(interface):
    '''
    Checks if the interface exists on the node.
    '''
    try:
        subprocess.check_call(['ip', 'link', 'show', interface],
                              stdout=open(os.devnull, 'w'),
                              stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return False
    return True


def get_mgmt_interface():
    '''
    Returns the management interface.
    '''
    def interface_exists(interface):
        '''
        Checks if the interface exists on the node.
        '''
        try:
            subprocess.check_call(['ip', 'link', 'show', interface],
                                  stdout=open(os.devnull, 'w'),
                                  stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            return False
        return True

    mgmt_interface = config('mgmt-interface')
    if interface_exists(mgmt_interface):
        return mgmt_interface

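A minimal sketch of the check that interface_exists() wraps (the interface name is hypothetical):

    import subprocess

    # `ip link show <iface>` exits 0 when the kernel knows the interface and
    # non-zero otherwise; interface_exists() turns that exit status into a bool.
    exists = subprocess.call(['ip', 'link', 'show', 'eth1'],
                             stdout=open('/dev/null', 'w'),
                             stderr=subprocess.STDOUT) == 0
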
@@ -194,17 +198,65 @@ def get_mgmt_interface():
    return get_iface_from_addr(unit_get('private-address'))


def fabric_interface_changed():
    '''
    Returns True if the fabric interface for the node has changed.
    '''
    fabric_interface = get_fabric_interface()
    try:
        with open(PG_IFCS_CONF, 'r') as ifcs:
            for line in ifcs:
                if 'fabric_core' in line:
                    if line.split()[0] == fabric_interface:
                        return False
    except IOError:
        return True
    return True


def get_fabric_interface():
    '''
    Returns the fabric interface.
    '''
    fabric_interfaces = config('fabric-interfaces')
    if fabric_interfaces == 'MANAGEMENT':
        return get_mgmt_interface()
    else:
        try:
            all_fabric_interfaces = json.loads(fabric_interfaces)
        except ValueError:
            raise ValueError('Invalid json provided for fabric interfaces')
        hostname = get_unit_hostname()
        if hostname in all_fabric_interfaces:
            node_fabric_interface = all_fabric_interfaces[hostname]
        elif 'DEFAULT' in all_fabric_interfaces:
            node_fabric_interface = all_fabric_interfaces['DEFAULT']
        else:
            raise ValueError('No fabric interface provided for node')
        if interface_exists(node_fabric_interface):
            if is_address_in_network(config('os-data-network'),
                                     get_iface_addr(node_fabric_interface)[0]):
                return node_fabric_interface
            else:
                raise ValueError('Fabric interface not in fabric network')
        else:
            log('Provided fabric interface %s does not exist'
                % node_fabric_interface)
            raise ValueError('Provided fabric interface does not exist')
    return node_fabric_interface


def ensure_mtu():
    '''
    Ensures the required MTU on the underlying network interfaces of the node.
    '''
    interface_mtu = config('network-device-mtu')
    mgmt_interface = get_mgmt_interface()
    if mgmt_interface in get_bridges():
        attached_interfaces = get_bridge_nics(mgmt_interface)
    fabric_interface = get_fabric_interface()
    if fabric_interface in get_bridges():
        attached_interfaces = get_bridge_nics(fabric_interface)
    for interface in attached_interfaces:
        set_nic_mtu(interface, interface_mtu)
    set_nic_mtu(mgmt_interface, interface_mtu)
    set_nic_mtu(fabric_interface, interface_mtu)


def _exec_cmd(cmd=None, error_msg='Command exited with ERRORs', fatal=False):

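A brief worked example of the lookup in get_fabric_interface() above (the hostnames and interface names are hypothetical, not part of the commit):

    import json

    # With fabric-interfaces set to this JSON string...
    all_fabric_interfaces = json.loads('{"node0": "eth1", "DEFAULT": "eth2"}')
    # ...a unit whose hostname is "node0" resolves to "eth1", while any other
    # unit falls back to the "DEFAULT" entry, "eth2". The chosen interface must
    # then exist and hold an address inside the configured os-data-network.
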
@@ -1,2 +1,2 @@
{{ interface }} = fabric_core host
{{ fabric_interface }} = fabric_core host

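For illustration, once the template above is rendered with the fabric_interface value supplied by PGDirContext (interface name hypothetical), the resulting line looks like:

    eth1 = fabric_core host

which is exactly the line format that fabric_interface_changed() scans for when deciding whether the fabric interface needs to be reconfigured.
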
@@ -46,11 +46,12 @@ class PGDirContextTest(CharmTestCase):
    @patch.object(charmhelpers.contrib.openstack.context, 'unit_private_ip')
    @patch.object(context, '_pg_dir_ips')
    @patch.object(utils, 'get_mgmt_interface')
    def test_neutroncc_context_api_rel(self, _mgmt_int, _pg_dir_ips,
                                       _unit_priv_ip, _npa, _ens_pkgs,
                                       _save_ff, _https, _is_clus,
                                       _unit_get, _config, _runits, _rids,
                                       _rget):
    @patch.object(utils, 'get_fabric_interface')
    def test_neutroncc_context_api_rel(self, _fabric_int, _mgmt_int,
                                       _pg_dir_ips, _unit_priv_ip, _npa,
                                       _ens_pkgs, _save_ff, _https,
                                       _is_clus, _unit_get, _config,
                                       _runits, _rids, _rget):
        def mock_npa(plugin, section, manager):
            if section == "driver":
                return "neutron.randomdriver"

@@ -74,6 +75,7 @@ class PGDirContextTest(CharmTestCase):
        self.get_host_ip.return_value = '192.168.100.201'
        _pg_dir_ips.return_value = ['192.168.100.202', '192.168.100.203']
        _mgmt_int.return_value = 'juju-br0'
        _fabric_int.return_value = 'juju-br0'
        napi_ctxt = context.PGDirContext()
        expect = {
            'config': 'neutron.randomconfig',

@@ -86,6 +88,7 @@ class PGDirContextTest(CharmTestCase):
            'virtual_ip': '192.168.100.250',
            'pg_hostname': 'pg-director',
            'interface': 'juju-br0',
            'fabric_interface': 'juju-br0',
            'label': 'node0',
            'fabric_mode': 'host',
            'virtual_router_id': '250',

@@ -9,7 +9,6 @@ _map = utils.restart_map

utils.register_configs = MagicMock()
utils.restart_map = MagicMock()

import pg_dir_hooks as hooks

utils.register_configs = _reg

@@ -61,22 +60,8 @@ class PGDirHooksTests(CharmTestCase):
        self.add_lcm_key.assert_called_with()

    def test_config_changed_hook(self):
        _pkgs = ['plumgrid-lxc', 'iovisor-dkms']
        self.add_lcm_key.return_value = 0
        self.post_pg_license.return_value = 0
        self.determine_packages.return_value = [_pkgs]
        self.add_lcm_key.return_value = 1
        self._call_hook('config-changed')
        self.stop_pg.assert_called_with()
        self.configure_sources.assert_called_with(update=True)
        self.apt_install.assert_has_calls([
            call(_pkgs, fatal=True,
                 options=['--force-yes']),
        ])
        self.load_iovisor.assert_called_with()
        self.ensure_mtu.assert_called_with()

        self.CONFIGS.write_all.assert_called_with()
        self.restart_pg.assert_called_with()

    def test_start(self):
        self._call_hook('start')