Support custom network names with deployed ceph

The cli-deployed-ceph playbook has pre_tasks to define the Ceph
public_network, cluster_network and their names, but those tasks
do not support custom network names, as described in the related bug.

Add a get_ceph_networks utility which extracts the public_network,
cluster_network and their names from the network_data file, and
pass those values directly to the cli-deployed-ceph playbook
via extra_vars.

Related-Bug: #1950178
Change-Id: If96e1d34d9a918d6e99366590b05c8219dc05e1c
Author: John Fulton
Date: 2021-11-09 03:07:13 +00:00
parent cfc642646b
commit 149099c19e
4 changed files with 260 additions and 1 deletions
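
For illustration, a rough sketch of what the change produces end to end,
using values taken from the new unit tests (the roles path below is only an
example; the real merge happens in the last hunk of this diff):

    # What get_ceph_networks() returns for a network_data.yaml that uses
    # customized network names (values from the new unit tests).
    ceph_networks_map = {'public_network': '172.16.11.0/24',
                         'public_network_name': 'storage_cloud_0',
                         'cluster_network': '172.16.12.0/24',
                         'cluster_network_name': 'storage_mgmt_cloud_0'}

    # The map is merged into the extra_vars handed to the cli-deployed-ceph
    # playbook, mirroring the code added in the last file below.
    extra_vars = {'tripleo_roles_path': '/path/to/roles_data.yaml'}
    extra_vars = {**extra_vars, **ceph_networks_map}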


@@ -2452,3 +2452,119 @@ class TestWorkingDirDefaults(base.TestCase):
                         '/rel/path/compute-playbook.yaml')
        self.assertEqual(data[1]['ansible_playbooks'][1]['playbook'],
                         '/abs/path/compute-playbook.yaml')


class TestGetCephNetworks(TestCase):

    fake_network_data_default = []

    fake_network_data = [
        {'name': 'StorageCloud0',
         'name_lower': 'storage',
         'ip_subnet': '172.16.1.0/24',
         'ipv6_subnet': 'fd00:fd00:fd00:3000::/64'},
        {'name': 'StorageMgmtCloud0',
         'name_lower': 'storage_mgmt',
         'ip_subnet': '172.16.3.0/24',
         'ipv6_subnet': 'fd00:fd00:fd00:4000::/64'}]

    fake_network_data_subnet = [
        {'name': 'Storage',
         'name_lower': 'storage_cloud_0',
         'service_net_map_replace': 'storage',
         'subnets':
             {'storage_cloud_0_subnet_0':
                 {'ip_subnet': '172.16.11.0/24'}}},
        {'name': 'Storage',
         'name_lower': 'storage_mgmt_cloud_0',
         'service_net_map_replace': 'storage_mgmt',
         'subnets':
             {'storage_mgmt_cloud_0_subnet_0':
                 {'ip_subnet': '172.16.12.0/24'}}}]

    fake_double_subnet = yaml.safe_load('''
    - name: StorageMgmtCloud0
      name_lower: storage_mgmt_cloud_0
      service_net_map_replace: storage_mgmt
      subnets:
        storage_mgmt_cloud_0_subnet12:
          ip_subnet: '172.16.12.0/24'
        storage_mgmt_cloud_0_subnet13:
          ip_subnet: '172.16.13.0/24'
    - name: StorageCloud0
      name_lower: storage_cloud_0
      service_net_map_replace: storage
      subnets:
        storage_cloud_0_subnet14:
          ip_subnet: '172.16.14.0/24'
        storage_cloud_0_subnet15:
          ip_subnet: '172.16.15.0/24'
    ''')

    def test_network_data_default(self):
        expected = {'cluster_network': '192.168.24.0/24',
                    'cluster_network_name': 'ctlplane',
                    'public_network': '192.168.24.0/24',
                    'public_network_name': 'ctlplane'}
        with tempfile.NamedTemporaryFile(mode='w') as cfgfile:
            yaml.safe_dump(self.fake_network_data_default, cfgfile)
            net_name = utils.get_ceph_networks(cfgfile.name,
                                               'storage', 'storage_mgmt')
        self.assertEqual(expected, net_name)

    def test_network_data(self):
        expected = {'cluster_network': '172.16.3.0/24',
                    'cluster_network_name': 'storage_mgmt',
                    'public_network': '172.16.1.0/24',
                    'public_network_name': 'storage'}
        with tempfile.NamedTemporaryFile(mode='w') as cfgfile:
            yaml.safe_dump(self.fake_network_data, cfgfile)
            net_name = utils.get_ceph_networks(cfgfile.name,
                                               'storage', 'storage_mgmt')
        self.assertEqual(expected, net_name)

    def test_network_data_v6(self):
        expected = {'cluster_network': 'fd00:fd00:fd00:4000::/64',
                    'cluster_network_name': 'storage_mgmt',
                    'public_network': 'fd00:fd00:fd00:3000::/64',
                    'public_network_name': 'storage'}
        [net.setdefault('ipv6', True) for net in self.fake_network_data]
        with tempfile.NamedTemporaryFile(mode='w') as cfgfile:
            yaml.safe_dump(self.fake_network_data, cfgfile)
            net_name = utils.get_ceph_networks(cfgfile.name,
                                               'storage', 'storage_mgmt')
        self.assertEqual(expected, net_name)

    def test_network_data_subnets(self):
        expected = {'cluster_network': '172.16.12.0/24',
                    'cluster_network_name': 'storage_mgmt_cloud_0',
                    'public_network': '172.16.11.0/24',
                    'public_network_name': 'storage_cloud_0'}
        with tempfile.NamedTemporaryFile(mode='w') as cfgfile:
            yaml.safe_dump(self.fake_network_data_subnet, cfgfile)
            net_name = utils.get_ceph_networks(cfgfile.name,
                                               'storage', 'storage_mgmt')
        self.assertEqual(expected, net_name)

    def test_network_data_subnets_override_names(self):
        expected = {'cluster_network': '172.16.12.0/24',
                    'cluster_network_name': 'storage_mgmt_cloud_0',
                    'public_network': '172.16.11.0/24',
                    'public_network_name': 'storage_cloud_0'}
        with tempfile.NamedTemporaryFile(mode='w') as cfgfile:
            yaml.safe_dump(self.fake_network_data_subnet, cfgfile)
            net_name = utils.get_ceph_networks(cfgfile.name,
                                               'storage_cloud_0',
                                               'storage_mgmt_cloud_0')
        self.assertEqual(expected, net_name)

    def test_network_data_subnets_multiple(self):
        expected = {'cluster_network': '172.16.12.0/24,172.16.13.0/24',
                    'cluster_network_name': 'storage_mgmt_cloud_0',
                    'public_network': '172.16.14.0/24,172.16.15.0/24',
                    'public_network_name': 'storage_cloud_0'}
        with tempfile.NamedTemporaryFile(mode='w') as cfgfile:
            yaml.safe_dump(self.fake_double_subnet, cfgfile)
            net_name = utils.get_ceph_networks(cfgfile.name,
                                               'storage', 'storage_mgmt')
        self.assertEqual(expected, net_name)


@@ -33,12 +33,14 @@ class TestOvercloudCephDeploy(fakes.FakePlaybookExecution):
         self.cmd = overcloud_ceph.OvercloudCephDeploy(self.app,
                                                       app_args)

+    @mock.patch('tripleoclient.utils.get_ceph_networks', autospect=True)
     @mock.patch('tripleoclient.utils.TempDirs', autospect=True)
     @mock.patch('os.path.abspath', autospect=True)
     @mock.patch('os.path.exists', autospect=True)
     @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True)
     def test_overcloud_deploy_ceph(self, mock_playbook, mock_abspath,
-                                   mock_path_exists, mock_tempdirs):
+                                   mock_path_exists, mock_tempdirs,
+                                   mock_get_ceph_networks):
         arglist = ['deployed-metal.yaml', '--yes',
                    '--stack', 'overcloud',
                    '--output', 'deployed-ceph.yaml',


@@ -3199,3 +3199,79 @@ def save_stack_outputs(heat, stack, working_dir):
        output_path = os.path.join(outputs_dir, output)
        with open(output_path, 'w') as f:
            f.write(yaml.dump(val))


def get_ceph_networks(network_data_path,
                      public_network_name,
                      cluster_network_name):
    """Get {public,cluster}_network{,_name} from a network_data file

    :param network_data_path: the path to a network_data.yaml file
    :param str public_network_name: name of the public_network, e.g. storage
    :param str cluster_network_name: name of the cluster_network,
                                     e.g. storage_mgmt
    :return: dict mapping the two network names and two CIDRs for the
             cluster and public networks

    The network_data_path file is searched for networks with name_lower
    values of storage and storage_mgmt by default. If none are found, the
    search repeats with service_net_map_replace in place of name_lower.
    The public_network_name and cluster_network_name parameters override
    the name of the searched-for network in place of storage or
    storage_mgmt, so a customized name may be used. The public_network
    and cluster_network values (without '_name') are the subnets of each
    network, e.g. 192.168.24.0/24, as mapped by the ip_subnet key.
    If a found network has more than one subnet, all ip_subnets are
    combined.
    """
    # default to ctlplane if nothing is found in network_data
    storage_net_map = {}
    storage_net_map['public_network_name'] = constants.CTLPLANE_NET_NAME
    storage_net_map['cluster_network_name'] = constants.CTLPLANE_NET_NAME
    storage_net_map['public_network'] = constants.CTLPLANE_CIDR_DEFAULT
    storage_net_map['cluster_network'] = constants.CTLPLANE_CIDR_DEFAULT

    # this dict makes it easier to search for each network type in a loop
    net_type = {}
    net_type['public_network_name'] = public_network_name
    net_type['cluster_network_name'] = cluster_network_name

    def _get_subnet(net, ip_subnet):
        # Return the subnet, e.g. '192.168.24.0/24', as a string.
        # The net dict can either have an ip_subnet as a root element
        # or a dict where multiple subnets are specified. If we have
        # a subnets dict, then parse it looking for the ip_subnet key.
        if ip_subnet in net:
            return net[ip_subnet]
        if 'subnets' in net:
            ip_subnets = list(map(lambda x: x.get(ip_subnet),
                                  net['subnets'].values()))
            return ','.join(ip_subnets)

    with open(network_data_path, 'r') as stream:
        try:
            net_data = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            raise RuntimeError(
                "yaml.safe_load(%s) returned '%s'" % (network_data_path, exc))

    # 'name_lower' is not mandatory in net_data so give it the
    # standard default
    [net.setdefault('name_lower', net['name'].lower()) for net in net_data]

    for net in net_data:
        if net.get('ipv6', False):
            ip_subnet = 'ipv6_subnet'
        else:
            ip_subnet = 'ip_subnet'
        for net_name, net_value in net_type.items():
            for search_tag in ['name_lower', 'service_net_map_replace']:
                if net.get(search_tag, None) == net_value:
                    # if service_net_map_replace matched, we still want
                    # the name_lower value
                    storage_net_map[net_name] = net['name_lower']
                    subnet = _get_subnet(net, ip_subnet)
                    if not subnet:
                        error = ("While searching %s, %s matched %s "
                                 "but that network did not have a %s "
                                 "value set."
                                 % (network_data_path, search_tag,
                                    net_value, ip_subnet))
                        raise RuntimeError(error)
                    else:
                        subnet_key = net_name.replace('_name', '')
                        storage_net_map[subnet_key] = subnet

    return storage_net_map
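
A hedged usage example of get_ceph_networks, assembled from the unit tests
earlier in this change (the temporary file path is made up; only a
storage_mgmt network is defined, so the public side falls back to ctlplane):

    from tripleoclient import utils

    net_data = '''
    - name: StorageMgmtCloud0
      name_lower: storage_mgmt_cloud_0
      service_net_map_replace: storage_mgmt
      subnets:
        storage_mgmt_cloud_0_subnet12:
          ip_subnet: '172.16.12.0/24'
        storage_mgmt_cloud_0_subnet13:
          ip_subnet: '172.16.13.0/24'
    '''
    with open('/tmp/network_data.yaml', 'w') as f:
        f.write(net_data)

    # name_lower does not match 'storage_mgmt', but service_net_map_replace
    # does; both routed subnets are combined into one comma-separated value.
    utils.get_ceph_networks('/tmp/network_data.yaml',
                            'storage', 'storage_mgmt')
    # -> {'public_network_name': 'ctlplane',
    #     'public_network': '192.168.24.0/24',
    #     'cluster_network_name': 'storage_mgmt_cloud_0',
    #     'cluster_network': '172.16.12.0/24,172.16.13.0/24'}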


@@ -66,6 +66,50 @@ class OvercloudCephDeploy(command.Command):
                            default=os.path.join(
                                constants.TRIPLEO_HEAT_TEMPLATES,
                                constants.OVERCLOUD_ROLES_FILE))
        parser.add_argument('--network-data',
                            help=_(
                                "Path to an alternative network_data.yaml. "
                                "Used to define Ceph public_network and "
                                "cluster_network. This file is searched "
                                "for networks with name_lower values of "
                                "storage and storage_mgmt. If none found, "
                                "then search repeats but with "
                                "service_net_map_replace in place of "
                                "name_lower. Use --public-network-name or "
                                "--cluster-network-name options to override "
                                "name of the searched for network from "
                                "storage or storage_mgmt to a customized "
                                "name. If network_data has no storage "
                                "networks, both default to ctlplane. "
                                "If found network has >1 subnet, they are "
                                "all combined (for routed traffic). "
                                "If a network has ipv6 true, then "
                                "the ipv6_subnet is retrieved instead "
                                "of the ip_subnet and --config should be "
                                "used to set the Ceph global ms_bind_ipv4 "
                                "and ms_bind_ipv6 accordingly."),
                            default=os.path.join(
                                constants.TRIPLEO_HEAT_TEMPLATES,
                                constants.OVERCLOUD_NETWORKS_FILE))
        parser.add_argument('--public-network-name',
                            help=_(
                                "Name of the network defined in "
                                "network_data.yaml which should be "
                                "used for the Ceph public_network. "
                                "Defaults to 'storage'."),
                            default='storage')
        parser.add_argument('--cluster-network-name',
                            help=_(
                                "Name of the network defined in "
                                "network_data.yaml which should be "
                                "used for the Ceph cluster_network. "
                                "Defaults to 'storage_mgmt'."),
                            default='storage_mgmt')
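        # Illustrative only (not part of this change): with a
        # network_data.yaml entry such as
        #   - name: StorageCloud0
        #     name_lower: storage_cloud_0
        #     subnets:
        #       storage_cloud_0_subnet_0:
        #         ip_subnet: '172.16.11.0/24'
        # an operator would pass --public-network-name storage_cloud_0 so
        # the Ceph public_network resolves to 172.16.11.0/24.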
        parser.add_argument('--config',
                            help=_(
                                "Path to an existing ceph.conf with settings "
                                "to be assimilated by the new cluster via "
                                "'cephadm bootstrap --config' "))
        spec_group = parser.add_mutually_exclusive_group()
        spec_group.add_argument('--ceph-spec',
                                help=_(
@@ -191,6 +235,27 @@ class OvercloudCephDeploy(command.Command):
                extra_vars['tripleo_roles_path'] = \
                    os.path.abspath(parsed_args.roles_data)

        if parsed_args.config:
            if not os.path.exists(parsed_args.config):
                raise oscexc.CommandError(
                    "Config file not found --config %s."
                    % os.path.abspath(parsed_args.config))
            else:
                extra_vars['tripleo_cephadm_bootstrap_conf'] = \
                    os.path.abspath(parsed_args.config)

        if parsed_args.network_data:
            if not os.path.exists(parsed_args.network_data):
                raise oscexc.CommandError(
                    "Network Data file not found --network-data %s."
                    % os.path.abspath(parsed_args.network_data))
            ceph_networks_map = \
                oooutils.get_ceph_networks(parsed_args.network_data,
                                           parsed_args.public_network_name,
                                           parsed_args.cluster_network_name)
            extra_vars = {**extra_vars, **ceph_networks_map}

        if parsed_args.ceph_spec:
            if not os.path.exists(parsed_args.ceph_spec):
                raise oscexc.CommandError(