Boot slaves after creation of custom nodegroup

Since the DHCP server for additional admin/pxe networks is configured
only after nodegroup creation, slaves can't be bootstrapped until the
cluster has been created and its networks have been set up.

Also align the 'hacks' for the multiple cluster networks tests with
the dynamic dnsmasq feature.

Change-Id: I46b203d65a2fe2f0bb41288aaf8fb738d2bda495
Implements: blueprint align-nodegroups-tests
Related-bug: #1495593
Artem Panchenko 2015-10-29 11:05:20 +02:00
parent 5624764478
commit 672ce759ed
3 changed files with 108 additions and 87 deletions
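The reordering described in the commit message boils down to the flow sketched below. This is a condensed illustration only, not code from the change: the class name is hypothetical, cluster settings are omitted, and the imports are assumed to match those used elsewhere in fuelweb_test.

# Condensed sketch of the new boot ordering (illustrative only; assumes the
# fuelweb_test framework: TestBasic provides self.env and self.fuel_web).
from fuelweb_test.settings import DEPLOYMENT_MODE_HA
from fuelweb_test.tests.base_test_case import TestBasic


class NodegroupBootOrderSketch(TestBasic):  # hypothetical class name

    def deploy_with_custom_nodegroup(self):
        # Start from the master node only: the second admin/pxe network has
        # no DHCP yet, so only slaves on the default admin network can boot.
        self.env.revert_snapshot("ready")
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:5:2])

        # Creating the cluster registers the custom nodegroup; with dynamic
        # dnsmasq this also brings up DHCP for its admin/pxe network.
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_HA)  # the real tests also pass settings

        # Only now can the slaves wired to the custom nodegroup be booted.
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[1:5:2])
        return cluster_id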

View File

@@ -21,30 +21,51 @@
 from proboscis.asserts import assert_equal
 
-from fuelweb_test import settings
 from fuelweb_test import logwrap
 
 
 @logwrap
-def configure_second_admin_firewall(self, network, netmask):
-    # Allow input/forwarding for nodes from the second admin network
+def configure_second_admin_dhcp(remote, interface):
+    dhcp_conf_file = '/etc/cobbler/dnsmasq.template'
+    docker_start_file = '/usr/local/bin/start.sh'
+    cmd = ("dockerctl shell cobbler sed '/^interface/a interface={0}' -i {1};"
+           "dockerctl shell cobbler sed \"/^puppet apply/a "
+           "sed '/^interface/a interface={0}' -i {1}\" -i {2};"
+           "dockerctl shell cobbler cobbler sync").format(interface,
+                                                          dhcp_conf_file,
+                                                          docker_start_file)
+    result = remote.execute(cmd)
+    assert_equal(result['exit_code'], 0, ('Failed to add second admin '
+                 'network to DHCP server: {0}').format(result))
+
+
+@logwrap
+def configure_second_admin_firewall(remote, network, netmask, interface,
+                                    master_ip):
+    # Allow input/forwarding for nodes from the second admin network and
+    # enable source NAT for UDP (tftp) traffic on master node
     rules = [
         ('-I INPUT -i {0} -m comment --comment "input from 2nd admin network" '
-         '-j ACCEPT').format(settings.INTERFACES.get(self.d_env.admin_net2)),
+         '-j ACCEPT').format(interface),
         ('-t nat -I POSTROUTING -s {0}/{1} -o eth+ -m comment --comment '
          '"004 forward_admin_net2" -j MASQUERADE').
-        format(network, netmask)
+        format(network, netmask),
+        ("-t nat -I POSTROUTING -o {0} -d {1}/{2} -p udp -m addrtype "
+         "--src-type LOCAL -j SNAT --to-source {3}").format(interface,
+                                                            network, netmask,
+                                                            master_ip)
     ]
 
-    with self.d_env.get_admin_remote() as remote:
-        for rule in rules:
-            cmd = 'iptables {0}'.format(rule)
-            result = remote.execute(cmd)
-            assert_equal(result['exit_code'], 0,
-                         ('Failed to add firewall rule for second admin net'
-                          'on master node: {0}, {1}').format(rule, result))
-        # Save new firewall configuration
-        cmd = 'service iptables save'
-        result = remote.execute(cmd)
-        assert_equal(result['exit_code'], 0,
-                     ('Failed to save firewall configuration on master node:'
+    for rule in rules:
+        cmd = 'iptables {0}'.format(rule)
+        result = remote.execute(cmd)
+        assert_equal(result['exit_code'], 0,
+                     ('Failed to add firewall rule for second admin net '
+                      'on master node: {0}, {1}').format(rule, result))
+
+    # Save new firewall configuration
+    cmd = 'service iptables save'
+    result = remote.execute(cmd)
+    assert_equal(result['exit_code'], 0,
+                 ('Failed to save firewall configuration on master node:'

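For reference, the new third rule enables source NAT for UDP (tftp) traffic towards the second admin network. With purely hypothetical example values (not taken from the change) it renders like this:

# Example rendering of the new SNAT rule; all values here are hypothetical.
rule = ("-t nat -I POSTROUTING -o {0} -d {1}/{2} -p udp -m addrtype "
        "--src-type LOCAL -j SNAT --to-source {3}").format(
            'enp0s4', '10.109.9.0', '255.255.255.0', '10.109.0.2')
print('iptables ' + rule)
# Output (wrapped for readability):
# iptables -t nat -I POSTROUTING -o enp0s4 -d 10.109.9.0/255.255.255.0 -p udp
#     -m addrtype --src-type LOCAL -j SNAT --to-source 10.109.0.2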
View File

@@ -656,10 +656,12 @@ class EnvironmentModel(object):
         assert_equal(result['exit_code'], 0, ('Failed to assign second admin '
                      'IP address on master node: {0}').format(result))
         logger.debug('Done: {0}'.format(result['stdout']))
-        multiple_networks_hacks.configure_second_admin_firewall(
-            self,
-            second_admin_network,
-            second_admin_netmask)
+        with self.d_env.get_admin_remote() as remote:
+            multiple_networks_hacks.configure_second_admin_dhcp(
+                remote, second_admin_if)
+            multiple_networks_hacks.configure_second_admin_firewall(
+                remote, second_admin_network, second_admin_netmask,
+                second_admin_if, self.get_admin_node_ip())
 
     @logwrap
     def get_masternode_uuid(self):

View File

@@ -14,7 +14,6 @@
 from proboscis import SkipTest
 from proboscis import test
-from proboscis.asserts import assert_equal
 
 from fuelweb_test.helpers.decorators import check_fuel_statistics
 from fuelweb_test.helpers.decorators import log_snapshot_after_test
@@ -30,57 +29,24 @@ from fuelweb_test.tests.base_test_case import SetupEnvironment
 class TestMultipleClusterNets(TestBasic):
     """TestMultipleClusterNets."""  # TODO documentation
 
-    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
-          groups=["multiple_cluster_networks", "multiple_cluster_net_setup"])
-    @log_snapshot_after_test
-    def multiple_cluster_net_setup(self):
-        """Check master node deployment and configuration with 2 sets of nets
-
-        Scenario:
-            1. Revert snapshot with 5 slaves
-            2. Check that slaves got IPs via DHCP from both admin/pxe networks
-            3. Make environment snapshot
-
-        Duration 6m
-        Snapshot multiple_cluster_net_setup
-        """
-        if not MULTIPLE_NETWORKS:
-            raise SkipTest()
-
-        self.env.revert_snapshot("ready_with_5_slaves")
-
-        # Get network parts of IP addresses with /24 netmask
-        admin_net = self.env.d_env.admin_net
-        admin_net2 = self.env.d_env.admin_net2
-        get_network = lambda x: self.env.d_env.get_network(name=x).ip_network
-
-        # This should be refactored
-        networks = ['.'.join(get_network(n).split('.')[0:-1])
-                    for n in [admin_net, admin_net2]]
-        nodes_addresses = ['.'.join(node['ip'].split('.')[0:-1]) for node in
-                           self.fuel_web.client.list_nodes()]
-
-        assert_equal(set(networks), set(nodes_addresses),
-                     "Only one admin network is used for discovering slaves:"
-                     " '{0}'".format(set(nodes_addresses)))
-
-        self.env.make_snapshot("multiple_cluster_net_setup", is_make=True)
-
-    @test(depends_on=[multiple_cluster_net_setup],
+    @test(depends_on=[SetupEnvironment.prepare_release],
           groups=["multiple_cluster_networks",
-                  "multiple_cluster_net_neutron_tun_ha", "thread_7"])
+                  "deploy_neutron_tun_ha_nodegroups", "thread_7"])
     @log_snapshot_after_test
     @check_fuel_statistics
     def deploy_neutron_tun_ha_nodegroups(self):
         """Deploy HA environment with NeutronVXLAN and 2 nodegroups
 
         Scenario:
-            1. Revert snapshot with 2 networks sets for slaves
-            2. Create cluster (HA) with Neutron VXLAN
-            3. Add 3 controller nodes from default nodegroup
-            4. Add 2 compute nodes from custom nodegroup
-            5. Deploy cluster
-            6. Run health checks (OSTF)
+            1. Revert snapshot with ready master node
+            2. Bootstrap slaves from default nodegroup
+            3. Create cluster with Neutron VXLAN and custom nodegroup
+            4. Bootstrap slave nodes from custom nodegroup
+            5. Add 3 controller nodes from default nodegroup
+            6. Add 2 compute nodes from custom nodegroup
+            7. Deploy cluster
+            8. Run network verification
+            9. Run health checks (OSTF)
 
         Duration 110m
         Snapshot deploy_neutron_tun_ha_nodegroups
@@ -89,8 +55,14 @@ class TestMultipleClusterNets(TestBasic):
         if not MULTIPLE_NETWORKS:
             raise SkipTest()
 
-        self.env.revert_snapshot("multiple_cluster_net_setup")
+        self.show_step(1)
+        self.env.revert_snapshot("ready")
+        self.show_step(2)
+        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:5:2])
+        self.show_step(3)
 
         cluster_id = self.fuel_web.create_cluster(
             name=self.__class__.__name__,
             mode=DEPLOYMENT_MODE_HA,
@@ -103,39 +75,52 @@ class TestMultipleClusterNets(TestBasic):
             }
         )
 
-        nodegroup1 = NODEGROUPS[0]['name']
-        nodegroup2 = NODEGROUPS[1]['name']
+        self.show_step(4)
+        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[1:5:2])
+
+        self.show_step(5)
+        self.show_step(6)
+        nodegroup_default = NODEGROUPS[0]['name']
+        nodegroup_custom = NODEGROUPS[1]['name']
         self.fuel_web.update_nodes(
             cluster_id,
             {
-                'slave-01': [['controller'], nodegroup1],
-                'slave-05': [['controller'], nodegroup1],
-                'slave-03': [['controller'], nodegroup1],
-                'slave-02': [['compute', 'cinder'], nodegroup2],
-                'slave-04': [['compute', 'cinder'], nodegroup2],
+                'slave-01': [['controller'], nodegroup_default],
+                'slave-05': [['controller'], nodegroup_default],
+                'slave-03': [['controller'], nodegroup_default],
+                'slave-02': [['compute', 'cinder'], nodegroup_custom],
+                'slave-04': [['compute', 'cinder'], nodegroup_custom],
             }
         )
 
+        self.show_step(7)
         self.fuel_web.deploy_cluster_wait(cluster_id)
+        self.show_step(8)
         self.fuel_web.verify_network(cluster_id)
+        self.show_step(9)
         self.fuel_web.run_ostf(cluster_id=cluster_id)
 
         self.env.make_snapshot("deploy_neutron_tun_ha_nodegroups")
 
-    @test(depends_on=[multiple_cluster_net_setup],
+    @test(depends_on=[SetupEnvironment.prepare_release],
           groups=["multiple_cluster_networks",
-                  "multiple_cluster_net_ceph_ha", "thread_7"])
+                  "deploy_ceph_ha_nodegroups", "thread_7"])
     @log_snapshot_after_test
     def deploy_ceph_ha_nodegroups(self):
         """Deploy HA environment with Neutron VXLAN, Ceph and 2 nodegroups
 
         Scenario:
-            1. Revert snapshot with 2 networks sets for slaves
-            2. Create cluster (HA) with Neutron VXLAN and Ceph
-            3. Add 3 controller + ceph nodes from default nodegroup
-            4. Add 2 compute + ceph nodes from custom nodegroup
-            5. Deploy cluster
-            6. Run health checks (OSTF)
+            1. Revert snapshot with ready master node
+            2. Bootstrap slaves from default nodegroup
+            3. Create cluster with Neutron VXLAN, Ceph and custom nodegroup
+            4. Bootstrap slave nodes from custom nodegroup
+            5. Add 3 controller + ceph nodes from default nodegroup
+            6. Add 2 compute + ceph nodes from custom nodegroup
+            7. Deploy cluster
+            8. Run network verification
+            9. Run health checks (OSTF)
 
         Duration 110m
         Snapshot deploy_ceph_ha_nodegroups
@@ -144,8 +129,14 @@ class TestMultipleClusterNets(TestBasic):
         if not MULTIPLE_NETWORKS:
             raise SkipTest()
 
-        self.env.revert_snapshot("multiple_cluster_net_setup")
+        self.show_step(1)
+        self.env.revert_snapshot("ready")
+        self.show_step(2)
+        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:5:2])
+        self.show_step(3)
 
         cluster_id = self.fuel_web.create_cluster(
             name=self.__class__.__name__,
             mode=DEPLOYMENT_MODE_HA,
@@ -161,21 +152,28 @@ class TestMultipleClusterNets(TestBasic):
             }
         )
 
-        nodegroup1 = NODEGROUPS[0]['name']
-        nodegroup2 = NODEGROUPS[1]['name']
+        self.show_step(4)
+        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[1:5:2])
+
+        self.show_step(5)
+        self.show_step(6)
+        nodegroup_default = NODEGROUPS[0]['name']
+        nodegroup_custom = NODEGROUPS[1]['name']
         self.fuel_web.update_nodes(
             cluster_id,
             {
-                'slave-01': [['controller', 'ceph-osd'], nodegroup1],
-                'slave-05': [['controller', 'ceph-osd'], nodegroup1],
-                'slave-03': [['controller', 'ceph-osd'], nodegroup1],
-                'slave-02': [['compute', 'ceph-osd'], nodegroup2],
-                'slave-04': [['compute', 'ceph-osd'], nodegroup2],
+                'slave-01': [['controller', 'ceph-osd'], nodegroup_default],
+                'slave-05': [['controller', 'ceph-osd'], nodegroup_default],
+                'slave-03': [['controller', 'ceph-osd'], nodegroup_default],
+                'slave-02': [['compute', 'ceph-osd'], nodegroup_custom],
+                'slave-04': [['compute', 'ceph-osd'], nodegroup_custom],
             }
         )
 
+        self.show_step(7)
         self.fuel_web.deploy_cluster_wait(cluster_id, timeout=150 * 60)
+        self.show_step(8)
         self.fuel_web.verify_network(cluster_id)
+        self.show_step(9)
         self.fuel_web.run_ostf(cluster_id=cluster_id)
 
         self.env.make_snapshot("deploy_ceph_ha_nodegroups")
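A note on the slave slicing used in both tests: slaves[0:5:2] picks the odd-numbered slaves (slave-01, slave-03, slave-05), which sit on the default admin network and are bootstrapped before the cluster exists, while slaves[1:5:2] picks slave-02 and slave-04, which belong to the custom nodegroup and can only be bootstrapped after it is created. A minimal standalone check:

# Minimal illustration of the slicing used above; plain names stand in for
# the devops node objects returned by self.env.d_env.nodes().slaves.
slaves = ['slave-01', 'slave-02', 'slave-03', 'slave-04', 'slave-05']

default_group = slaves[0:5:2]  # bootstrapped before cluster creation
custom_group = slaves[1:5:2]   # bootstrapped after the custom nodegroup exists

assert default_group == ['slave-01', 'slave-03', 'slave-05']
assert custom_group == ['slave-02', 'slave-04']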