Deploy multirack environment with shared networks

Refactor tests for the multiple cluster networks feature so that
all tests use a fuel-devops environment with 3 nodegroups and a
shared L2 network, defined via a template. Also allow tests to
configure networks for more than 1 custom nodegroup. Add an
additional check for the shared storage network (Ceph health).
Create a new test case for adding a new nodegroup to an
operational environment.

Co-Authored-By: Artem Panchenko <apanchenko@mirantis.com>

Closes-Bug: #1521316
Implements blueprint test-nodegroup-add
Implements blueprint test-nodegroups-share-networks
Change-Id: I066248a7b96a6f16b1e24763892234fe48803983
Maksim Strukov 2015-11-30 22:38:10 +02:00 committed by Maksym Strukov
parent 1d579725f9
commit 41405c6415
8 changed files with 741 additions and 105 deletions


@@ -29,7 +29,8 @@ from fuelweb_test.helpers.ssh_manager import SSHManager
def configure_second_admin_dhcp(ip, interface):
dhcp_conf_file = '/etc/cobbler/dnsmasq.template'
docker_start_file = '/usr/local/bin/start.sh'
cmd = ("dockerctl shell cobbler sed '/^interface/a interface={0}' -i {1};"
cmd = ("dockerctl shell cobbler sed "
"'0,/^interface.*/s//\\0\\ninterface={0}/' -i {1};"
"dockerctl shell cobbler sed \"/^puppet apply/a "
"sed '/^interface/a interface={0}' -i {1}\" -i {2};"
"dockerctl shell cobbler cobbler sync").format(interface,
@@ -50,7 +51,7 @@ def configure_second_admin_firewall(ip, network, netmask, interface,
# enable source NAT for UDP (tftp) and HTTP (proxy server) traffic
# on master node
rules = [
('-I INPUT -i {0} -m comment --comment "input from 2nd admin network" '
('-I INPUT -i {0} -m comment --comment "input from admin network" '
'-j ACCEPT').format(interface),
('-t nat -I POSTROUTING -s {0}/{1} -o e+ -m comment --comment '
'"004 forward_admin_net2" -j MASQUERADE').
@@ -72,8 +73,8 @@ def configure_second_admin_firewall(ip, network, netmask, interface,
cmd=cmd
)
assert_equal(result['exit_code'], 0,
('Failed to add firewall rule for second admin net '
'on master node: {0}, {1}').format(rule, result))
('Failed to add firewall rule for admin net on'
' master node: {0}, {1}').format(rule, result))
# Save new firewall configuration
cmd = 'service iptables save'
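For reference, a sketch of how the rule templates above render, with hypothetical values (the exact wrapper command is part of the helper and assumed here):

    interface, network, netmask = 'enp0s4', '10.109.1.0', '255.255.255.0'
    rules = [
        ('-I INPUT -i {0} -m comment --comment "input from admin network" '
         '-j ACCEPT').format(interface),
        ('-t nat -I POSTROUTING -s {0}/{1} -o e+ -m comment --comment '
         '"004 forward_admin_net2" -j MASQUERADE').format(network, netmask),
    ]
    for rule in rules:
        print('iptables ' + rule)  # assumed invocation on the master node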


@@ -487,7 +487,7 @@ class EnvironmentModel(object):
)
self.admin_install_updates()
if settings.MULTIPLE_NETWORKS:
self.describe_second_admin_interface()
self.describe_other_admin_interfaces(admin)
if not MASTER_IS_CENTOS7:
self.nailgun_actions.set_collector_address(
settings.FUEL_STATS_HOST,
@@ -729,34 +729,51 @@ class EnvironmentModel(object):
return result['stdout']
@logwrap
def describe_second_admin_interface(self):
admin_net2_object = self.d_env.get_network(name=self.d_env.admin_net2)
second_admin_network = admin_net2_object.ip.network
second_admin_netmask = admin_net2_object.ip.netmask
second_admin_if = settings.INTERFACES.get(self.d_env.admin_net2)
second_admin_ip = str(self.d_env.nodes(
).admin.get_ip_address_by_network_name(self.d_env.admin_net2))
logger.info(('Parameters for second admin interface configuration: '
def describe_other_admin_interfaces(self, admin):
admin_networks = [iface.network.name for iface in admin.interfaces]
iface_name = None
for i, network_name in enumerate(admin_networks):
if 'admin' in network_name and 'admin' != network_name:
# This will be replaced with actual interface labels
# from fuel-devops
iface_name = 'enp0s' + str(i + 3)
logger.info("Describe Fuel admin node interface {0} for "
"network {1}".format(iface_name, network_name))
self.describe_admin_interface(iface_name, network_name)
if iface_name:
return self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
cmd="dockerctl shell cobbler cobbler sync")
@logwrap
def describe_admin_interface(self, admin_if, network_name):
admin_net_object = self.d_env.get_network(name=network_name)
admin_network = admin_net_object.ip.network
admin_netmask = admin_net_object.ip.netmask
admin_ip = str(self.d_env.nodes(
).admin.get_ip_address_by_network_name(network_name))
logger.info(('Parameters for admin interface configuration: '
'Network - {0}, Netmask - {1}, Interface - {2}, '
'IP Address - {3}').format(second_admin_network,
second_admin_netmask,
second_admin_if,
second_admin_ip))
add_second_admin_ip = ('DEVICE={0}\\n'
'ONBOOT=yes\\n'
'NM_CONTROLLED=no\\n'
'USERCTL=no\\n'
'PEERDNS=no\\n'
'BOOTPROTO=static\\n'
'IPADDR={1}\\n'
'NETMASK={2}\\n').format(second_admin_if,
second_admin_ip,
second_admin_netmask)
'IP Address - {3}').format(admin_network,
admin_netmask,
admin_if,
admin_ip))
add_admin_ip = ('DEVICE={0}\\n'
'ONBOOT=yes\\n'
'NM_CONTROLLED=no\\n'
'USERCTL=no\\n'
'PEERDNS=no\\n'
'BOOTPROTO=static\\n'
'IPADDR={1}\\n'
'NETMASK={2}\\n').format(admin_if,
admin_ip,
admin_netmask)
cmd = ('echo -e "{0}" > /etc/sysconfig/network-scripts/ifcfg-{1};'
'ifup {1}; ip -o -4 a s {1} | grep -w {2}').format(
add_second_admin_ip, second_admin_if, second_admin_ip)
add_admin_ip, admin_if, admin_ip)
logger.debug('Trying to assign {0} IP to the {1} on master node...'.
format(second_admin_ip, second_admin_if))
format(admin_ip, admin_if))
result = self.ssh_manager.execute(
ip=self.ssh_manager.admin_ip,
@@ -769,13 +786,13 @@ class EnvironmentModel(object):
# TODO for ssh manager
multiple_networks_hacks.configure_second_admin_dhcp(
self.ssh_manager.admin_ip,
second_admin_if
admin_if
)
multiple_networks_hacks.configure_second_admin_firewall(
self.ssh_manager.admin_ip,
second_admin_network,
second_admin_netmask,
second_admin_if,
admin_network,
admin_netmask,
admin_if,
self.get_admin_node_ip()
)
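With hypothetical values (enp0s4, 10.109.9.2, 255.255.255.0), the add_admin_ip template above renders to an ifcfg file like:

    DEVICE=enp0s4
    ONBOOT=yes
    NM_CONTROLLED=no
    USERCTL=no
    PEERDNS=no
    BOOTPROTO=static
    IPADDR=10.109.9.2
    NETMASK=255.255.255.0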


@@ -407,7 +407,7 @@ class FuelWebClient(object):
mode=DEPLOYMENT_MODE_HA,
port=514,
release_id=None,
configure_ssl=True, ):
configure_ssl=True):
"""Creates a cluster
:param name:
:param release_name:
@@ -551,9 +551,11 @@ class FuelWebClient(object):
self.client.update_cluster_attributes(cluster_id, attributes)
if MULTIPLE_NETWORKS:
node_groups = {n['name']: [] for n in NODEGROUPS}
self.update_nodegroups(cluster_id, node_groups)
self.update_nodegroups_network_configuration(cluster_id)
ng = {rack['name']: [] for rack in NODEGROUPS}
self.update_nodegroups(cluster_id=cluster_id,
node_groups=ng)
self.update_nodegroups_network_configuration(cluster_id,
NODEGROUPS)
logger.debug("Try to update cluster "
"with next attributes {0}".format(attributes))
@@ -1525,10 +1527,12 @@ class FuelWebClient(object):
_release['id'], net_settings)
@logwrap
def update_nodegroups_network_configuration(self, cluster_id):
def update_nodegroups_network_configuration(self, cluster_id,
nodegroups=None):
net_config = self.client.get_networks(cluster_id)
new_settings = net_config
for nodegroup in NODEGROUPS:
for nodegroup in nodegroups:
logger.info('Update network settings of cluster %s, '
'nodegroup %s', cluster_id, nodegroup['name'])
new_settings = self.update_nodegroup_net_settings(new_settings,
@@ -1544,7 +1548,7 @@ class FuelWebClient(object):
"""Find a devops network name in net_pools"""
for net in net_pools:
if name in net:
return net
return {name: net_pools[net]}
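The helper now returns a one-entry mapping instead of a bare devops network name; for example (values hypothetical):

    net_pools = {'fuelweb_admin': 'admin2', 'public': 'public2'}
    # Substring match: 'admin' is contained in the key 'fuelweb_admin'.
    # _get_true_net_name('admin', net_pools) -> {'admin': 'admin2'}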
def update_nodegroup_net_settings(self, network_configuration, nodegroup,
cluster_id=None):
@@ -1559,17 +1563,33 @@ class FuelWebClient(object):
continue
self.set_network(net_config=net,
net_name=net['name'],
net_pools=nodegroup['pools'],
net_devices=nodegroup['networks'],
seg_type=seg_type)
# For all admin/pxe networks except default use master
# node as router
# TODO(mstrukov): find way to get admin node networks only
if net['name'] != 'fuelweb_admin':
continue
for devops_network in self.environment.d_env.get_networks():
if str(devops_network.ip_network) == net['cidr']:
net['gateway'] = \
self.environment.d_env.nodes().\
admin.get_ip_address_by_network_name(
devops_network.name)
logger.info('Set master node ({0}) as '
'router for admin network '
'in nodegroup {1}.'.format(
net['gateway'], nodegroup_id))
return network_configuration
def set_network(self, net_config, net_name, net_pools=None, seg_type=None):
def set_network(self, net_config, net_name, net_devices=None,
seg_type=None):
nets_wo_floating = ['public', 'management', 'storage', 'baremetal']
if (seg_type == NEUTRON_SEGMENT['tun'] or
seg_type == NEUTRON_SEGMENT['gre']):
nets_wo_floating.append('private')
if not net_pools:
if not net_devices:
if not BONDING:
if 'floating' == net_name:
self.net_settings(net_config, 'public', floating=True)
@@ -1585,20 +1605,14 @@ class FuelWebClient(object):
i = nets_wo_floating.index(net_name)
self.net_settings(net_config, pub_subnets[i], jbond=True)
else:
public_net = self._get_true_net_name('public', net_pools)
admin_net = self._get_true_net_name('admin', net_pools)
if not BONDING:
if 'floating' == net_name:
self.net_settings(net_config, public_net, floating=True)
elif net_name in nets_wo_floating:
self.net_settings(net_config,
self._get_true_net_name(net_name,
net_pools))
elif net_name in 'fuelweb_admin':
self.net_settings(net_config, admin_net)
self.net_settings(net_config, net_devices['public'],
floating=True)
self.net_settings(net_config, net_devices[net_name])
else:
ip_obj = self.environment.d_env.get_network(name=public_net).ip
ip_obj = self.environment.d_env.get_network(
name=net_devices['public']).ip
pub_subnets = list(ip_obj.subnet(new_prefix=27))
if "floating" == net_name:
@@ -1608,7 +1622,7 @@ class FuelWebClient(object):
i = nets_wo_floating.index(net_name)
self.net_settings(net_config, pub_subnets[i], jbond=True)
elif net_name in 'fuelweb_admin':
self.net_settings(net_config, admin_net)
self.net_settings(net_config, net_devices['fuelweb_admin'])
if 'ip_ranges' in net_config:
if net_config['ip_ranges']:
net_config['meta']['notation'] = 'ip_ranges'
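With the new settings layout, net_devices is the per-nodegroup mapping taken from NODEGROUPS; e.g. for group-custom-1 (values from the settings change later in this commit):

    net_devices = {
        'fuelweb_admin': 'admin2',
        'public': 'public2',
        'management': 'management2',
        'storage': 'storage',  # the shared L2 storage network
        'private': 'private2',
    }
    # set_network(net_config, 'public', net_devices=net_devices, seg_type='tun')
    # configures the Nailgun 'public' network from devops network 'public2'.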


@@ -95,18 +95,42 @@ NODE_VOLUME_SIZE = int(os.environ.get('NODE_VOLUME_SIZE', 50))
NODES_COUNT = os.environ.get('NODES_COUNT', 10)
MULTIPLE_NETWORKS = get_var_as_bool('MULTIPLE_NETWORKS', False)
MULTIPLE_NETWORKS_TEMPLATE = os.environ.get(
'MULTIPLE_NETWORKS_TEMPLATE',
os.path.join(os.getcwd(),
'system_test/tests_templates/tests_configs/multirack.yaml'))
if MULTIPLE_NETWORKS:
NODEGROUPS = (
{
'name': 'default',
'pools': ['admin', 'public', 'management', 'private',
'storage']
'networks': {
'fuelweb_admin': 'admin',
'public': 'public',
'management': 'management',
'storage': 'storage',
'private': 'private'
}
},
{
'name': 'group-custom-1',
'pools': ['admin2', 'public2', 'management2', 'private2',
'storage2']
'networks': {
'fuelweb_admin': 'admin2',
'public': 'public2',
'management': 'management2',
'storage': 'storage',
'private': 'private2'
}
},
{
'name': 'group-custom-2',
'networks': {
'fuelweb_admin': 'admin3',
'public': 'public3',
'management': 'management3',
'storage': 'storage',
'private': 'private3'
}
}
)
FORWARD_DEFAULT = os.environ.get('FORWARD_DEFAULT', 'route')
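A minimal shell setup for running the multirack tests, using the variables defined above (the template path shown is the default; override it as needed):

    export MULTIPLE_NETWORKS=true
    export MULTIPLE_NETWORKS_TEMPLATE=$(pwd)/system_test/tests_templates/tests_configs/multirack.yaml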


@ -22,6 +22,8 @@ from fuelweb_test.helpers.utils import get_test_method_name
from fuelweb_test.helpers.utils import TimeStat
from fuelweb_test.helpers.ssh_manager import SSHManager
from fuelweb_test.models.environment import EnvironmentModel
from fuelweb_test.settings import MULTIPLE_NETWORKS
from fuelweb_test.settings import MULTIPLE_NETWORKS_TEMPLATE
from fuelweb_test.settings import REPLACE_DEFAULT_REPOS
from fuelweb_test.settings import REPLACE_DEFAULT_REPOS_ONLY_ONCE
@@ -151,7 +153,16 @@ class SetupEnvironment(TestBasic):
Snapshot: empty
"""
# TODO: remove this code when fuel-devops will be ready to
# describe all required network parameters (gateway, CIDR, IP range)
# inside 'address_pool', so we can use 'network_pools' section
# for L3 configuration in tests for multi racks
if MULTIPLE_NETWORKS:
from system_test.helpers.utils import load_yaml
self._devops_config = load_yaml(MULTIPLE_NETWORKS_TEMPLATE)
self.check_run("empty")
with TimeStat("setup_environment", is_uniq=True):
self.env.setup_environment()
self.env.make_snapshot("empty", is_make=True)


@@ -41,6 +41,10 @@ from fuelweb_test import logger
class TestMultipleClusterNets(TestBasic):
"""TestMultipleClusterNets.""" # TODO documentation
def __init__(self):
self.netconf_all_groups = None
super(TestMultipleClusterNets, self).__init__()
@staticmethod
def get_modified_ranges(net_dict, net_name, group_id):
for net in net_dict['networks']:
@@ -100,8 +104,7 @@ class TestMultipleClusterNets(TestBasic):
net_name in net['name'] and group_id == net['group_id']][0]
@test(depends_on=[SetupEnvironment.prepare_release],
groups=["multiple_cluster_networks",
"deploy_neutron_tun_ha_nodegroups", "thread_7"])
groups=["deploy_neutron_tun_ha_nodegroups"])
@log_snapshot_after_test
@check_fuel_statistics
def deploy_neutron_tun_ha_nodegroups(self):
@@ -110,18 +113,19 @@ class TestMultipleClusterNets(TestBasic):
Scenario:
1. Revert snapshot with ready master node
2. Bootstrap slaves from default nodegroup
3. Create cluster with Neutron VXLAN and custom nodegroup
4. Bootstrap slave nodes from custom nodegroup
5. Download network configuration
6. Update network.json with customized ip ranges
7. Put new json on master node and update network data
8. Verify that new IP ranges are applied for network config
9. Add 3 controller nodes from default nodegroup
10. Add 2 compute nodes from custom nodegroup
11. Deploy cluster
12. Run network verification
13. Verify that excluded ip is not used for nodes or VIP
14. Run health checks (OSTF)
3. Create cluster with Neutron VXLAN and custom nodegroups
4. Remove 2nd custom nodegroup which is added automatically
5. Bootstrap slave nodes from custom nodegroup
6. Download network configuration
7. Update network.json with customized ip ranges
8. Put new json on master node and update network data
9. Verify that new IP ranges are applied for network config
10. Add 3 controller nodes from default nodegroup
11. Add 2 compute nodes from custom nodegroup
12. Deploy cluster
13. Run network verification
14. Verify that excluded ip is not used for nodes or VIP
15. Run health checks (OSTF)
Duration 110m
Snapshot deploy_neutron_tun_ha_nodegroups
@@ -135,7 +139,7 @@ class TestMultipleClusterNets(TestBasic):
self.env.revert_snapshot("ready")
self.show_step(2)
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:5:2])
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])
self.show_step(3)
cluster_id = self.fuel_web.create_cluster(
@@ -151,9 +155,19 @@ class TestMultipleClusterNets(TestBasic):
)
self.show_step(4)
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[1:5:2])
self.netconf_all_groups = self.fuel_web.client.get_networks(cluster_id)
custom_group2 = self.fuel_web.get_nodegroup(
cluster_id, name=NODEGROUPS[2]['name'])
wait(lambda: not self.is_update_dnsmasq_running(
self.fuel_web.client.get_tasks()), timeout=60,
timeout_msg="Timeout exceeded while waiting for task "
"'update_dnsmasq' is finished!")
self.fuel_web.client.delete_nodegroup(custom_group2['id'])
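is_update_dnsmasq_running (used above) is assumed to scan the Nailgun task list for an unfinished 'update_dnsmasq' task; a minimal sketch, assuming the task dicts carry 'name' and 'status' keys:

    @staticmethod
    def is_update_dnsmasq_running(tasks):
        return any(task['name'] == 'update_dnsmasq' and
                   task['status'] == 'running' for task in tasks)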
self.show_step(5)
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])
self.show_step(6)
with self.env.d_env.get_admin_remote() as remote:
check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
@@ -165,7 +179,7 @@ class TestMultipleClusterNets(TestBasic):
custom_group_id = self.fuel_web.get_nodegroup(
cluster_id, name=NODEGROUPS[1]['name'])['id']
self.show_step(6)
self.show_step(7)
with self.env.d_env.get_admin_remote() as remote:
current_net = json.loads(remote.open(
'/var/log/network_1.json').read())
@@ -200,14 +214,14 @@ class TestMultipleClusterNets(TestBasic):
management_ranges_custom))
# need to push to remote
self.show_step(7)
self.show_step(8)
utils.put_json_on_remote_from_dict(
remote, updated_network, cluster_id)
check_update_network_data_over_cli(remote, cluster_id,
'/var/log/')
self.show_step(8)
self.show_step(9)
with self.env.d_env.get_admin_remote() as remote:
check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
latest_net = json.loads(remote.open(
@@ -246,26 +260,28 @@ class TestMultipleClusterNets(TestBasic):
'not updated. Expected {0}, Actual: {1}'.format(
management_ranges_custom, updated_mgmt_custom))
self.show_step(10)
self.show_step(11)
nodegroup_default = NODEGROUPS[0]['name']
nodegroup_custom = NODEGROUPS[1]['name']
nodegroup_custom1 = NODEGROUPS[1]['name']
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': [['controller'], nodegroup_default],
'slave-05': [['controller'], nodegroup_default],
'slave-02': [['controller'], nodegroup_default],
'slave-03': [['controller'], nodegroup_default],
'slave-02': [['compute', 'cinder'], nodegroup_custom],
'slave-04': [['compute', 'cinder'], nodegroup_custom],
'slave-04': [['compute', 'cinder'], nodegroup_custom1],
'slave-05': [['compute', 'cinder'], nodegroup_custom1],
}
)
self.show_step(11)
self.show_step(12)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(12)
self.show_step(13)
self.fuel_web.verify_network(cluster_id)
self.show_step(13)
self.show_step(14)
net_data_default_group = [
data['network_data'] for data
in self.fuel_web.client.list_cluster_nodes(
@@ -319,14 +335,114 @@ class TestMultipleClusterNets(TestBasic):
asserts.assert_true(self.is_ip_in_range(mgmt_vip.split('/')[0],
updated_mgmt_default[0][0],
updated_mgmt_default[0][-1]))
self.show_step(14)
self.show_step(15)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_tun_ha_nodegroups")
self.env.make_snapshot("deploy_neutron_tun_ha_nodegroups",
is_make=True)
@test(depends_on_groups=['deploy_neutron_tun_ha_nodegroups'],
groups=["add_custom_nodegroup"])
@log_snapshot_after_test
def add_custom_nodegroup(self):
"""Add new nodegroup to operational environment
Scenario:
1. Revert snapshot with operational cluster
2. Create new nodegroup for the environment and configure its networks
3. Bootstrap slave node from custom-2 nodegroup
4. Add node from new nodegroup to the environment with compute role
5. Run network verification
6. Deploy changes
7. Run network verification
8. Run OSTF
9. Check that nodes from 'default' nodegroup can reach nodes
from new nodegroup via management and storage networks
Duration 50m
Snapshot add_custom_nodegroup
"""
self.show_step(1, initialize=True)
self.env.revert_snapshot('deploy_neutron_tun_ha_nodegroups')
cluster_id = self.fuel_web.get_last_created_cluster()
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
asserts.assert_true(not any(ng['name'] == NODEGROUPS[2]['name'] for ng
in self.fuel_web.client.get_nodegroups()),
'Custom nodegroup {0} already '
'exists!'.format(NODEGROUPS[2]['name']))
self.show_step(2)
new_nodegroup = self.fuel_web.client.create_nodegroup(
cluster_id, NODEGROUPS[2]['name'])
logger.debug('Updating custom nodegroup ID in network configuration..')
network_config_new = self.fuel_web.client.get_networks(cluster_id)
asserts.assert_true(self.netconf_all_groups is not None,
'Network configuration for nodegroups is empty!')
for network in self.netconf_all_groups['networks']:
if network['group_id'] is not None and \
not any(network['group_id'] == ng['id']
for ng in self.fuel_web.client.get_nodegroups()):
network['group_id'] = new_nodegroup['id']
for new_network in network_config_new['networks']:
if new_network['name'] == network['name'] and \
new_network['group_id'] == network['group_id']:
network['id'] = new_network['id']
self.fuel_web.client.update_network(
cluster_id,
self.netconf_all_groups['networking_parameters'],
self.netconf_all_groups['networks'])
self.show_step(3)
self.env.bootstrap_nodes([self.env.d_env.nodes().slaves[6]])
self.show_step(4)
self.fuel_web.update_nodes(
cluster_id,
{'slave-07': [['compute'], new_nodegroup['name']]},
True, False
)
self.show_step(5)
self.fuel_web.verify_network(cluster_id)
self.show_step(6)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(7)
self.fuel_web.verify_network(cluster_id)
self.show_step(8)
self.fuel_web.run_ostf(cluster_id=cluster_id)
self.show_step(9)
primary_ctrl = self.fuel_web.get_nailgun_node_by_devops_node(
self.fuel_web.get_nailgun_primary_node(
slave=self.env.d_env.nodes().slaves[0]))
with self.fuel_web.get_ssh_for_node('slave-07') as remote:
new_node_networks = utils.get_net_settings(remote)
for interface in ('br-storage', 'br-mgmt'):
if interface in new_node_networks:
logger.info("Checking new node is accessible from primary "
"controller via {0} interface.".format(interface))
for ip in new_node_networks[interface]['ip_addresses']:
address = ip.split('/')[0]
result = self.ssh_manager.execute(ip=primary_ctrl['ip'],
cmd='ping -q -c 1 -w 3 {'
'0}'.format(address))
asserts.assert_equal(result['exit_code'], 0,
"New node isn't accessible from "
"primary controller via {0} interface"
": {1}.".format(interface, result))
self.env.make_snapshot("add_custom_nodegroup")
@test(depends_on=[SetupEnvironment.prepare_release],
groups=["multiple_cluster_networks",
"deploy_ceph_ha_nodegroups", "thread_7"])
groups=["deploy_ceph_ha_nodegroups"])
@log_snapshot_after_test
def deploy_ceph_ha_nodegroups(self):
"""Deploy HA environment with Neutron VXLAN, Ceph and 2 nodegroups
@@ -343,6 +459,7 @@ class TestMultipleClusterNets(TestBasic):
9. Run network verification
10. Run health checks (OSTF)
11. Check that excluded IPs aren't allocated to deployed nodes
12. Check Ceph health
Duration 110m
Snapshot deploy_ceph_ha_nodegroups
@@ -362,6 +479,7 @@ class TestMultipleClusterNets(TestBasic):
settings={
'volumes_ceph': True,
'images_ceph': True,
'ephemeral_ceph': True,
'volumes_lvm': False,
"net_provider": 'neutron',
"net_segment_type": NEUTRON_SEGMENT['tun'],
@@ -388,7 +506,7 @@ class TestMultipleClusterNets(TestBasic):
self.show_step(5)
default_ng_nodes = [self.fuel_web.get_nailgun_node_by_devops_node(node)
for node in self.env.d_env.nodes().slaves[0:5:2]]
for node in self.env.d_env.nodes().slaves[0:3]]
for node in default_ng_nodes:
asserts.assert_true(
self.is_ip_in_range(node['ip'], *new_admin_range),
@@ -404,10 +522,10 @@ class TestMultipleClusterNets(TestBasic):
cluster_id,
{
'slave-01': [['controller', 'ceph-osd'], nodegroup_default],
'slave-05': [['controller', 'ceph-osd'], nodegroup_default],
'slave-02': [['controller', 'ceph-osd'], nodegroup_default],
'slave-03': [['controller', 'ceph-osd'], nodegroup_default],
'slave-02': [['compute', 'ceph-osd'], nodegroup_custom],
'slave-04': [['compute', 'ceph-osd'], nodegroup_custom],
'slave-05': [['compute', 'ceph-osd'], nodegroup_custom],
}
)
@@ -430,10 +548,13 @@ class TestMultipleClusterNets(TestBasic):
"is not from defined IP addresses range:"
" {2}!".format(node['fqdn'], node['ip'], new_admin_range))
self.show_step(12)
self.fuel_web.check_ceph_status(cluster_id)
self.env.make_snapshot("deploy_ceph_ha_nodegroups")
@test(depends_on=[SetupEnvironment.prepare_release],
groups=["deploy_controllers_from_custom_nodegroup", "thread_7",
groups=["deploy_controllers_from_custom_nodegroup",
"multiple_cluster_networks"])
@log_snapshot_after_test
def deploy_controllers_from_custom_nodegroup(self):
@@ -488,12 +609,12 @@ class TestMultipleClusterNets(TestBasic):
self.fuel_web.client.update_network(cluster_id, new_settings_float)
self.show_step(4)
custom_nodes = self.env.d_env.nodes().slaves[1:6:2]
self.env.bootstrap_nodes(custom_nodes) # nodes 2, 4 and 6
custom_nodes = self.env.d_env.nodes().slaves[3:6]
self.env.bootstrap_nodes(custom_nodes) # nodes 4, 5 and 6
self.show_step(5)
default_nodes = self.env.d_env.nodes().slaves[0:3:2]
self.env.bootstrap_nodes(default_nodes) # nodes 1 and 3
default_nodes = self.env.d_env.nodes().slaves[0:2]
self.env.bootstrap_nodes(default_nodes) # nodes 1 and 2
self.show_step(6)
@@ -502,11 +623,11 @@ class TestMultipleClusterNets(TestBasic):
self.fuel_web.update_nodes(
cluster_id,
{
'slave-02': [['controller'], custom_nodegroup],
'slave-04': [['controller'], custom_nodegroup],
'slave-05': [['controller'], custom_nodegroup],
'slave-06': [['controller'], custom_nodegroup],
'slave-01': [['compute'], default_nodegroup],
'slave-03': [['cinder'], default_nodegroup]
'slave-02': [['cinder'], default_nodegroup]
}
)
@@ -546,8 +667,7 @@ class TestMultipleClusterNets(TestBasic):
is_make=True)
@test(depends_on=[deploy_controllers_from_custom_nodegroup],
groups=["delete_cluster_with_custom_nodegroup", "thread_7",
"multiple_cluster_networks"],
groups=["delete_cluster_with_custom_nodegroup"],
# TODO: enable this test when bug #1521682 is fixed
enabled=False)
@log_snapshot_after_test
@@ -570,7 +690,7 @@ class TestMultipleClusterNets(TestBasic):
self.fuel_web.assert_nodes_in_ready_state(cluster_id)
self.show_step(2)
custom_nodes = self.env.d_env.nodes().slaves[1:6:2]
custom_nodes = self.env.d_env.nodes().slaves[3:6]
self.fuel_web.delete_env_wait(cluster_id)
@@ -611,8 +731,7 @@ class TestMultipleClusterNets(TestBasic):
self.env.make_snapshot("delete_cluster_with_custom_nodegroup")
@test(depends_on=[deploy_controllers_from_custom_nodegroup],
groups=["delete_custom_nodegroup", "thread_7",
"multiple_cluster_networks"])
groups=["delete_custom_nodegroup"])
@log_snapshot_after_test
def delete_custom_nodegroup(self):
"""Delete nodegroup, check its nodes are marked as 'error'
@@ -639,7 +758,7 @@ class TestMultipleClusterNets(TestBasic):
network_config = self.fuel_web.client.get_networks(cluster_id)
self.show_step(3)
custom_nodes = self.env.d_env.nodes().slaves[1:6:2]
custom_nodes = self.env.d_env.nodes().slaves[3:6]
self.fuel_web.stop_reset_env_wait(cluster_id)
logger.info('Waiting for all nodes online for 900 seconds...')
wait(lambda: all(n['online'] for n in


@@ -0,0 +1,406 @@
---
aliases:
dynamic_address_pool:
- &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24
default-slave-interfaces: &default-slave-interfaces
- label: enp0s3
l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network
- label: enp0s4
l2_network_device: public
- label: enp0s5
l2_network_device: management
- label: enp0s6
l2_network_device: private
- label: enp0s7
l2_network_device: storage
rack-02-slave-interfaces: &rack-02-slave-interfaces
- label: enp0s3
l2_network_device: admin2 # Libvirt bridge name. It is *NOT* a Nailgun network
- label: enp0s4
l2_network_device: public2
- label: enp0s5
l2_network_device: management2
- label: enp0s6
l2_network_device: private2
- label: enp0s7
l2_network_device: storage
rack-03-slave-interfaces: &rack-03-slave-interfaces
- label: enp0s3
l2_network_device: admin3 # Libvirt bridge name. It is *NOT* a Nailgun network
- label: enp0s4
l2_network_device: public3
- label: enp0s5
l2_network_device: management3
- label: enp0s6
l2_network_device: private3
- label: enp0s7
l2_network_device: storage
default-slave-network_config: &default-slave-network_config
enp0s3:
networks:
- fuelweb_admin
enp0s4:
networks:
- public
enp0s5:
networks:
- management
enp0s6:
networks:
- private
enp0s7:
networks:
- storage
rack-02-slave-network_config: &rack-02-slave-network_config
enp0s3:
networks:
- fuelweb_admin2
enp0s4:
networks:
- public2
enp0s5:
networks:
- management2
enp0s6:
networks:
- private2
enp0s7:
networks:
- storage
rack-03-slave-network_config: &rack-03-slave-network_config
enp0s3:
networks:
- fuelweb_admin3
enp0s4:
networks:
- public3
enp0s5:
networks:
- management3
enp0s6:
networks:
- private3
enp0s7:
networks:
- storage
default-slave-node-params: &default-slave-node-params
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: cinder
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: swift
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
interfaces: *default-slave-interfaces
network_config: *default-slave-network_config
rack-02-slave-node-params: &rack-02-slave-node-params
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: cinder
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: swift
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
interfaces: *rack-02-slave-interfaces
network_config: *rack-02-slave-network_config
rack-03-slave-node-params: &rack-03-slave-node-params
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: cinder
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
- name: swift
capacity: !os_env NODE_VOLUME_SIZE, 55
format: qcow2
interfaces: *rack-03-slave-interfaces
network_config: *rack-03-slave-network_config
env_name: !os_env ENV_NAME
address_pools:
# Network pools used by the environment
fuelweb_admin-pool01:
net: *pool_default
params:
tag: 0
public-pool01:
net: *pool_default
params:
tag: 0
storage-pool01:
net: *pool_default
params:
tag: 101
management-pool01:
net: *pool_default
params:
tag: 102
private-pool01:
net: *pool_default
params:
tag: 103
fuelweb_admin-pool02:
net: *pool_default
params:
tag: 0
public-pool02:
net: *pool_default
params:
tag: 0
management-pool02:
net: *pool_default
params:
tag: 102
private-pool02:
net: *pool_default
params:
tag: 103
fuelweb_admin-pool03:
net: *pool_default
params:
tag: 0
public-pool03:
net: *pool_default
params:
tag: 0
management-pool03:
net: *pool_default
params:
tag: 102
private-pool03:
net: *pool_default
params:
tag: 103
groups:
- name: default
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool01
public: public-pool01
storage: storage-pool01
management: management-pool01
private: private-pool01
l2_network_devices: # Libvirt bridges. These are *NOT* Nailgun networks
admin:
address_pool: fuelweb_admin-pool01
dhcp: false
forward:
mode: nat
public:
address_pool: public-pool01
dhcp: false
forward:
mode: nat
storage:
address_pool: storage-pool01
dhcp: false
forward:
mode: route
management:
address_pool: management-pool01
dhcp: false
forward:
mode: route
private:
address_pool: private-pool01
dhcp: false
forward:
mode: route
nodes:
- name: admin # Custom name of VM for Fuel admin node
role: fuel_master # Fixed role for Fuel master node properties
params:
vcpu: !os_env ADMIN_NODE_CPU, 2
memory: !os_env ADMIN_NODE_MEMORY, 3072
boot:
- hd
- cdrom # for boot from usb - without 'cdrom'
volumes:
- name: system
capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 80
format: qcow2
- name: iso
source_image: !os_env ISO_PATH # if 'source_image' is set, volume capacity is calculated from its size
format: raw
device: cdrom # for boot from usb - 'disk'
bus: ide # for boot from usb - 'usb'
interfaces:
- label: enp0s3
l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network
- label: enp0s4
l2_network_device: admin2
- label: enp0s5
l2_network_device: admin3
network_config:
enp0s3:
networks:
- fuelweb_admin
- name: slave-01
role: fuel_slave
params: *default-slave-node-params
- name: slave-02
role: fuel_slave
params: *default-slave-node-params
- name: slave-03
role: fuel_slave
params: *default-slave-node-params
- name: rack-02
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool02
public: public-pool02
storage: storage-pool01
management: management-pool02
private: private-pool02
l2_network_devices: # Libvirt bridges. These are *NOT* Nailgun networks
admin2:
address_pool: fuelweb_admin-pool02
dhcp: false
forward:
mode: nat
public2:
address_pool: public-pool02
dhcp: false
forward:
mode: nat
management2:
address_pool: management-pool02
dhcp: false
forward:
mode: route
private2:
address_pool: private-pool02
dhcp: false
forward:
mode: route
nodes:
- name: slave-04
role: fuel_slave
params: *rack-02-slave-node-params
- name: slave-05
role: fuel_slave
params: *rack-02-slave-node-params
- name: slave-06
role: fuel_slave
params: *rack-02-slave-node-params
- name: rack-03
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool03
public: public-pool03
storage: storage-pool01
management: management-pool03
private: private-pool03
l2_network_devices: # Libvirt bridges. These are *NOT* Nailgun networks
admin3:
address_pool: fuelweb_admin-pool03
dhcp: false
forward:
mode: nat
public3:
address_pool: public-pool03
dhcp: false
forward:
mode: nat
management3:
address_pool: management-pool03
dhcp: false
forward:
mode: route
private3:
address_pool: private-pool03
dhcp: false
forward:
mode: route
nodes:
- name: slave-07
role: fuel_slave
params: *rack-03-slave-node-params
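Assuming the loader sketched earlier (with the !os_env constructor registered), the template can be sanity-checked outside a test run:

    config = load_yaml(
        'system_test/tests_templates/devops_configs/multirack.yaml')
    print([g['name'] for g in config['groups']])
    # ['default', 'rack-02', 'rack-03']
    print(sum(len(g['nodes']) for g in config['groups']))
    # 8 (the admin node plus 7 slaves)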


@@ -0,0 +1,44 @@
---
network-config: &network-config
provider: neutron
segment-type: tun
pubip-to-all: false
storages-config: &storages-config
volume-lvm: false
volume-ceph: true
image-ceph: true
rados-ceph: true
ephemeral-ceph: false
replica-ceph: 2
nodes: &nodes
- roles:
- controller
count: 3
nodegroup: default
- roles:
- compute
count: 1
nodegroup: rack-02
- roles:
- ceph-osd
count: 2
nodegroup: rack-03
template:
name: 3 Controller, 1 Compute, 2 Ceph on Neutron/TUN
slaves: 6
devops_settings: !include devops_configs/multirack.yaml
cluster_template: &environment-config
name: env1
release: ubuntu
settings:
components:
sahara: false
murano: false
ceilometer: false
storages: *storages-config
network: *network-config
nodes: *nodes
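As a quick consistency check, the per-nodegroup counts above should add up to the declared slaves value:

    # Counts copied from the 'nodes' section of this config.
    node_counts = {'default': 3, 'rack-02': 1, 'rack-03': 2}
    assert sum(node_counts.values()) == 6  # matches 'slaves: 6'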