Add a test to the network templates test suite

Add a test that verifies template-based network
config consistency after a node reboot.

Change-Id: I7ebe1d70355a3cc18b793c371036adb194bf94a5
Closes-bug: #1517890
Author: Dmitriy Kruglov  2015-11-19 13:00:35 +00:00
Commit: 04ad1a5e47 (parent 37c05e38a6)
2 changed files with 253 additions and 0 deletions


@ -0,0 +1,147 @@
adv_net_template:
  default:
    nic_mapping:
      default:
        if1: eth0  # admin
        if2: eth1  # public
        if3: eth4  # management
        if4: eth2  # private
        if5: eth3  # storage
    templates_for_node_role:
      controller:
        - public
        - private
        - storage
        - common
      compute:
        - common
        - private
        - storage
      cinder:
        - common
        - storage
    network_assignments:
      storage:
        ep: br-storage
      private:
        ep: br-prv
      public:
        ep: br-ex
      management:
        ep: br-mgmt
      fuelweb_admin:
        ep: br-fw-admin
    network_scheme:
      storage:
        transformations:
          - action: add-br
            name: br-storage
            provider: ovs
          - action: add-port
            bridge: br-storage
            name: <% if5 %>
        endpoints:
          - br-storage
        roles:
          cinder/iscsi: br-storage
          swift/replication: br-storage
          ceph/replication: br-storage
          storage: br-storage
      private:
        transformations:
          - action: add-br
            name: br-prv
            provider: ovs
          - action: add-br
            name: br-aux
            provider: ovs
          - action: add-patch
            bridges:
              - br-prv
              - br-aux
            provider: ovs
            mtu: 65000
          - action: add-port
            bridge: br-aux
            name: <% if4 %>
        endpoints:
          - br-prv
        roles:
          neutron/private: br-prv
      public:
        transformations:
          - action: add-br
            name: br-ex
            provider: ovs
          - action: add-br
            name: br-floating
            provider: ovs
          - action: add-patch
            bridges:
              - br-floating
              - br-ex
            provider: ovs
            mtu: 65000
          - action: add-port
            bridge: br-ex
            name: <% if2 %>
        endpoints:
          - br-ex
        roles:
          public/vip: br-ex
          neutron/floating: br-floating
          ceph/radosgw: br-ex
          ex: br-ex
      common:
        transformations:
          - action: add-br
            name: br-fw-admin
            provider: ovs
          - action: add-br
            name: br-test
            provider: ovs
          - action: add-patch
            bridges:
              - br-fw-admin
              - br-test
            provider: ovs
            mtu: 65000
          - action: add-port
            bridge: br-fw-admin
            name: <% if1 %>
          - action: add-br
            name: br-mgmt
            provider: ovs
          - action: add-port
            bridge: br-mgmt
            name: <% if3 %>
        endpoints:
          - br-fw-admin
          - br-mgmt
        roles:
          admin/pxe: br-fw-admin
          fw-admin: br-fw-admin
          mongo/db: br-mgmt
          management: br-mgmt
          keystone/api: br-mgmt
          neutron/api: br-mgmt
          neutron/mesh: br-mgmt
          swift/api: br-mgmt
          sahara/api: br-mgmt
          ceilometer/api: br-mgmt
          cinder/api: br-mgmt
          glance/api: br-mgmt
          heat/api: br-mgmt
          nova/api: br-mgmt
          nova/migration: br-mgmt
          murano/api: br-mgmt
          horizon: br-mgmt
          mgmt/api: br-mgmt
          mgmt/memcache: br-mgmt
          mgmt/database: br-mgmt
          mgmt/messaging: br-mgmt
          mgmt/corosync: br-mgmt
          mgmt/vip: br-mgmt
          ceph/public: br-mgmt
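
The <% ifN %> placeholders above are resolved through nic_mapping when the scheme is applied on a node. As a rough standalone illustration of that resolution (not part of this change; the file name default_ovs.yaml, the use of PyYAML, and the resolver below are assumptions):

# Illustrative sketch only: resolve <% ifN %> placeholders against nic_mapping.
# Assumes the template above is saved locally as 'default_ovs.yaml'.
import re

import yaml  # PyYAML

with open('default_ovs.yaml') as f:
    template = yaml.safe_load(f)['adv_net_template']['default']

nic_mapping = template['nic_mapping']['default']  # e.g. {'if1': 'eth0', ...}
placeholder = re.compile(r'<%\s*(\w+)\s*%>')

for scheme_name, scheme in template['network_scheme'].items():
    for action in scheme['transformations']:
        port = action.get('name', '')
        match = placeholder.search(port)
        if match:
            nic = nic_mapping[match.group(1)]  # KeyError means an undefined ifN
            print('{0}: {1} -> {2}'.format(scheme_name, port, nic))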


@ -378,3 +378,109 @@ class TestNetworkTemplates(TestNetworkTemplatesBase):
        self.check_services_networks(cluster_id, network_template)

        self.env.make_snapshot('two_nodegroups_network_templates')

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=["network_config_consistency_on_reboot"])
    @log_snapshot_after_test
    def network_config_consistency_on_reboot(self):
        """Deploy HA environment with Cinder, Neutron and network template

        Scenario:
            1. Revert snapshot with 5 slaves
            2. Create cluster (HA) with Neutron VLAN
            3. Add 3 controller and 1 compute + cinder nodes
            4. Upload 'default_ovs' network template
            5. Create custom network groups based
               on template endpoint assignments
            6. Run network verification
            7. Deploy cluster and run basic health checks
            8. Run network verification
            9. Check L3 network configuration on slaves
            10. Check that services are listening on their networks only
            11. Reboot a node
            12. Run network verification
            13. Check L3 network configuration on slaves
            14. Check that services are listening on their networks only
            15. Run OSTF

        Duration 180m
        Snapshot network_config_consistency_on_reboot
        """
        self.show_step(1)
        self.env.revert_snapshot("ready_with_5_slaves")

        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT[NEUTRON_SEGMENT_TYPE],
                'tenant': 'netTemplate',
                'user': 'netTemplate',
                'password': 'netTemplate',
            }
        )

        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute', 'cinder'],
            },
            update_interfaces=False
        )

        self.show_step(4)
        network_template = get_network_template('default_ovs')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)

        self.show_step(5)
        networks = self.generate_networks_for_template(
            template=network_template,
            ip_nets={'default': '10.200.0.0/16'},
            ip_prefixlen='24')
        existing_networks = self.fuel_web.client.get_network_groups()
        networks = self.create_custom_networks(networks, existing_networks)
        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.show_step(6)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)

        self.show_step(8)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(9)
        self.check_ipconfig_for_template(
            cluster_id, network_template, networks)

        self.show_step(10)
        self.check_services_networks(cluster_id, network_template)

        self.show_step(11)
        self.fuel_web.warm_restart_nodes([self.env.d_env.nodes().slaves[0]])
        self.fuel_web.assert_ha_services_ready(cluster_id)
        self.fuel_web.assert_os_services_ready(cluster_id)

        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(13)
        self.check_ipconfig_for_template(
            cluster_id, network_template, networks)

        self.show_step(14)
        self.check_services_networks(cluster_id, network_template)

        self.show_step(15)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['smoke', 'sanity', 'ha'])

        self.env.make_snapshot("network_config_consistency_on_reboot",
                               is_make=self.is_make_snapshot())
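
The actual verification above is delegated to check_ipconfig_for_template and check_services_networks. As a rough standalone sketch of the underlying idea, the L3 layout of a node can be captured before the reboot and compared afterwards; everything below (host name, SSH access, parsing) is an illustrative assumption, not code from this change:

# Illustrative sketch only: compare a node's L3 layout across a warm restart.
import subprocess


def bridge_addresses(host):
    """Return {interface: sorted CIDR addresses} for a remote node via SSH."""
    out = subprocess.check_output(
        ['ssh', host, 'ip', '-o', '-4', 'addr', 'show']).decode()
    layout = {}
    for line in out.splitlines():
        if not line.strip():
            continue
        fields = line.split()  # e.g. '5: br-mgmt    inet 10.200.1.2/24 ...'
        layout.setdefault(fields[1], []).append(fields[3])
    return {iface: sorted(addrs) for iface, addrs in layout.items()}


before = bridge_addresses('slave-01')
# ... warm restart of the node happens here ...
after = bridge_addresses('slave-01')
assert before == after, 'L3 configuration changed across the reboot'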