Add test cases for multiple networks.
- contrail_multiple_nodegroups_add_controller
- contrail_multiple_nodegroups_delete_controller
- contrail_multiple_nodegroups_delete_compute
- contrail_multiple_nodegroups_add_compute
- add method vsrx_multiple_networks
- fix flake8

Change-Id: Ic05831901dc064920ac0f6a1ae6cd3e367cb959e
@@ -12,39 +12,36 @@ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from __future__ import division

from ipaddr import IPAddress
from ipaddr import summarize_address_range
import json
import netaddr
import os
import os.path

from devops.helpers.helpers import wait
from proboscis import asserts
from proboscis import SkipTest
from proboscis import test

from devops.helpers.helpers import wait

from fuelweb_test import logger
from fuelweb_test.helpers.checkers import check_get_network_data_over_cli
from fuelweb_test.helpers.checkers import check_update_network_data_over_cli
from fuelweb_test.helpers.decorators import check_fuel_statistics
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.helpers import utils

from fuelweb_test.settings import CONTRAIL_PLUGIN_PACK_UB_PATH
from fuelweb_test.settings import MULTIPLE_NETWORKS
from fuelweb_test.settings import NODEGROUPS
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.test_net_templates_base import TestNetworkTemplatesBase
from fuelweb_test import logger
from fuelweb_test.tests.test_multiple_networks import TestMultipleClusterNets

from helpers import plugin
from helpers import openstack
from helpers import settings


@test(groups=["plugins"])
class TestMultipleNets(TestNetworkTemplatesBase):
@test(groups=["contrail_multiple_networks"])
class TestMultipleNets(TestMultipleClusterNets):
"""IntegrationTests."""

pack_copy_path = '/var/www/nailgun/plugins/contrail-4.0'
@@ -59,63 +56,100 @@ class TestMultipleNets(TestNetworkTemplatesBase):

CONTRAIL_DISTRIBUTION = os.environ.get('CONTRAIL_DISTRIBUTION')

def get_modified_ranges(self, net_dict, net_name, group_id):
"""Get modified ip range for network."""
for net in net_dict['networks']:
if net_name in net['name'] and net['group_id'] == group_id:
cidr = net['cidr']
sliced_list = list(netaddr.IPNetwork(cidr))[5:-5]
return [str(sliced_list[0]), str(sliced_list[-1])]

def update_network_config(self, cluster_id):
"""Update network configuration."""
with self.env.d_env.get_admin_remote() as remote:
check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
management_ranges_default = []
management_ranges_custom = []
storage_ranges_default = []
storage_ranges_custom = []
default_group_id = self.fuel_web.get_nodegroup(cluster_id)['id']
custom_group_id = self.fuel_web.get_nodegroup(
cluster_id, name=NODEGROUPS[1]['name'])['id']

self.show_step(9)
with self.env.d_env.get_admin_remote() as remote:
current_net = json.loads(remote.open(
'/var/log/network_1.json').read())
# Get storage ranges for default and custom groups
storage_ranges_default.append(self.get_modified_ranges(
current_net, 'storage', group_id=default_group_id))

storage_ranges_custom.append(self.get_modified_ranges(
current_net, 'storage', group_id=custom_group_id))

management_ranges_default.append(self.get_modified_ranges(
current_net, 'management', group_id=default_group_id))

management_ranges_custom.append(self.get_modified_ranges(
current_net, 'management', group_id=custom_group_id))

update_data = {
default_group_id: {'storage': storage_ranges_default,
'management': management_ranges_default},
custom_group_id: {'storage': storage_ranges_custom,
'management': management_ranges_custom}}

updated_network = self.update_network_ranges(
current_net, update_data)

logger.debug(
'Plan to update ranges for default group to {0} for storage '
'and {1} for management and for custom group storage {2},'
' management {3}'.format(storage_ranges_default,
management_ranges_default,
storage_ranges_custom,
management_ranges_custom))

self.show_step(10)
utils.put_json_on_remote_from_dict(
remote, updated_network, cluster_id)

check_update_network_data_over_cli(remote, cluster_id,
'/var/log/')

self.show_step(11)
with self.env.d_env.get_admin_remote() as remote:
check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
latest_net = json.loads(remote.open(
'/var/log/network_1.json').read())
updated_storage_default = self.get_ranges(latest_net, 'storage',
default_group_id)

updated_storage_custom = self.get_ranges(latest_net, 'storage',
custom_group_id)
updated_mgmt_default = self.get_ranges(latest_net, 'management',
default_group_id)
updated_mgmt_custom = self.get_ranges(latest_net, 'management',
custom_group_id)

asserts.assert_equal(
updated_storage_default, storage_ranges_default,
'Looks like storage range for default nodegroup '
'was not updated. Expected {0}, Actual: {1}'.format(
storage_ranges_default, updated_storage_default))

asserts.assert_equal(
updated_storage_custom, storage_ranges_custom,
'Looks like storage range for custom nodegroup '
'was not updated. Expected {0}, Actual: {1}'.format(
storage_ranges_custom, updated_storage_custom))

asserts.assert_equal(
updated_mgmt_default, management_ranges_default,
'Looks like management range for default nodegroup was '
'not updated. Expected {0}, Actual: {1}'.format(
management_ranges_default, updated_mgmt_default))

asserts.assert_equal(
updated_mgmt_custom, management_ranges_custom,
'Looks like management range for custom nodegroup was '
'not updated. Expected {0}, Actual: {1}'.format(
management_ranges_custom, updated_mgmt_custom))

return updated_storage_default, updated_storage_custom, \
updated_mgmt_default, updated_mgmt_custom

def change_default_admin_range(self, networks, number_excluded_ips):
"""Change IP range for admin net by excluding N of first addresses.

:param networks: list, environment networks configuration
:param number_excluded_ips: int, number of IPs to remove from range
"""
default_admin_network = [n for n in networks
if (n['name'] == "fuelweb_admin" and
n['group_id'] is None)]
asserts.assert_true(len(default_admin_network) == 1,
"Default 'admin/pxe' network not found "
"in cluster network configuration!")
default_admin_range = [IPAddress(ip) for ip
in default_admin_network[0]["ip_ranges"][0]]
new_admin_range = [default_admin_range[0] + number_excluded_ips,
default_admin_range[1]]
default_admin_network[0]["ip_ranges"][0] = [str(ip)
for ip in new_admin_range]
return default_admin_network[0]["ip_ranges"][0]

def is_ip_in_range(self, ip_addr, ip_range_start, ip_range_end):
"""Get ip range."""
ip_addr_ranges = summarize_address_range(IPAddress(ip_range_start),
IPAddress(ip_range_end))
return any(IPAddress(ip_addr) in iprange for iprange in ip_addr_ranges)

def is_update_dnsmasq_running(self, tasks):
"""Check update dnsmasq is running."""
for task in tasks:
if task['name'] == "update_dnsmasq" and \
task["status"] == "running":
return True
return False

def update_network_ranges(self, net_data, update_data):
"""Check network range."""
for net in net_data['networks']:
for group in update_data:
for net_name in update_data[group]:
if net_name in net['name'] and net['group_id'] == group:
net['ip_ranges'] = update_data[group][net_name]
net['meta']['notation'] = 'ip_ranges'
return net_data

def get_ranges(self, net_data, net_name, group_id):
"""Get range."""
return [
net['ip_ranges'] for net in net_data['networks'] if
net_name in net['name'] and group_id == net['group_id']][0]

@test(depends_on=[SetupEnvironment.prepare_release],
groups=["contrail_ha_multiple_nodegroups"])
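For reference, a minimal standalone sketch (not part of this change) of the range arithmetic the helpers above rely on; the CIDR and addresses are illustrative only, and netaddr.IPRange is used here just to keep the sketch short (the is_ip_in_range helper itself uses ipaddr.summarize_address_range):

    import netaddr

    # get_modified_ranges: shrink a network's range by dropping the first
    # and last five addresses of its CIDR
    cidr = '10.109.1.0/24'
    hosts = list(netaddr.IPNetwork(cidr))[5:-5]
    modified_range = [str(hosts[0]), str(hosts[-1])]  # ['10.109.1.5', '10.109.1.250']

    # is_ip_in_range boils down to a membership test over that start/end pair
    assert netaddr.IPAddress('10.109.1.100') in netaddr.IPRange(*modified_range)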
@@ -167,6 +201,7 @@ class TestMultipleNets(TestNetworkTemplatesBase):
plugin.activate_plugin(self)
# activate vSRX image
vsrx_setup_result = plugin.activate_vsrx()
plugin.vsrx_multiple_networks(self)

self.show_step(6)
self.netconf_all_groups = self.fuel_web.client.get_networks(cluster_id)
@@ -182,101 +217,17 @@ class TestMultipleNets(TestNetworkTemplatesBase):
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])

self.show_step(8)
with self.env.d_env.get_admin_remote() as remote:
check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
management_ranges_default = []
management_ranges_custom = []
storage_ranges_default = []
storage_ranges_custom = []
default_group_id = self.fuel_web.get_nodegroup(cluster_id)['id']
custom_group_id = self.fuel_web.get_nodegroup(
cluster_id, name=NODEGROUPS[1]['name'])['id']

self.show_step(9)
with self.env.d_env.get_admin_remote() as remote:
current_net = json.loads(remote.open(
'/var/log/network_1.json').read())
# Get storage ranges for default and custom groups
storage_ranges_default.append(self.get_modified_ranges(
current_net, 'storage', group_id=default_group_id))

storage_ranges_custom.append(self.get_modified_ranges(
current_net, 'storage', group_id=custom_group_id))

management_ranges_default.append(self.get_modified_ranges(
current_net, 'management', group_id=default_group_id))

management_ranges_custom.append(self.get_modified_ranges(
current_net, 'management', group_id=custom_group_id))

update_data = {
default_group_id: {'storage': storage_ranges_default,
'management': management_ranges_default},
custom_group_id: {'storage': storage_ranges_custom,
'management': management_ranges_custom}}

updated_network = self.update_network_ranges(
current_net, update_data)

logger.debug(
'Plan to update ranges for default group to {0} for storage '
'and {1} for management and for custom group storage {2},'
' management {3}'.format(storage_ranges_default,
management_ranges_default,
storage_ranges_custom,
management_ranges_custom))

# need to push to remote
self.show_step(10)
utils.put_json_on_remote_from_dict(
remote, updated_network, cluster_id)

check_update_network_data_over_cli(remote, cluster_id,
'/var/log/')

self.show_step(11)
with self.env.d_env.get_admin_remote() as remote:
check_get_network_data_over_cli(remote, cluster_id, '/var/log/')
latest_net = json.loads(remote.open(
'/var/log/network_1.json').read())
updated_storage_default = self.get_ranges(latest_net, 'storage',
default_group_id)

updated_storage_custom = self.get_ranges(latest_net, 'storage',
custom_group_id)
updated_mgmt_default = self.get_ranges(latest_net, 'management',
default_group_id)
updated_mgmt_custom = self.get_ranges(latest_net, 'management',
custom_group_id)

asserts.assert_equal(
updated_storage_default, storage_ranges_default,
'Looks like storage range for default nodegroup '
'was not updated. Expected {0}, Actual: {1}'.format(
storage_ranges_default, updated_storage_default))

asserts.assert_equal(
updated_storage_custom, storage_ranges_custom,
'Looks like storage range for custom nodegroup '
'was not updated. Expected {0}, Actual: {1}'.format(
storage_ranges_custom, updated_storage_custom))

asserts.assert_equal(
updated_mgmt_default, management_ranges_default,
'Looks like management range for default nodegroup was '
'not updated. Expected {0}, Actual: {1}'.format(
management_ranges_default, updated_mgmt_default))

asserts.assert_equal(
updated_mgmt_custom, management_ranges_custom,
'Looks like management range for custom nodegroup was '
'not updated. Expected {0}, Actual: {1}'.format(
management_ranges_custom, updated_mgmt_custom))
updated_storage_default, updated_storage_custom, \
updated_mgmt_default, updated_mgmt_custom = \
self.update_network_config(cluster_id)

self.show_step(12)
self.show_step(13)
nodegroup_default = NODEGROUPS[0]['name']
nodegroup_custom1 = NODEGROUPS[1]['name']
default_group_id = self.fuel_web.get_nodegroup(cluster_id)['id']
custom_group_id = self.fuel_web.get_nodegroup(
cluster_id, name=NODEGROUPS[1]['name'])['id']
self.fuel_web.update_nodes(
cluster_id,
{
@@ -355,3 +306,437 @@ class TestMultipleNets(TestNetworkTemplatesBase):
self.show_step(17)
if vsrx_setup_result:
self.fuel_web.run_ostf(cluster_id=cluster_id)

@test(depends_on=[SetupEnvironment.prepare_release],
groups=["contrail_multiple_nodegroups_add_controller"])
@log_snapshot_after_test
@check_fuel_statistics
def contrail_multiple_nodegroups_add_controller(self):
"""Deploy HA environment with Neutron GRE and 2 nodegroups.

Scenario:
1. Revert snapshot with ready master node
2. Install contrail plugin
3. Bootstrap slaves from default nodegroup
4. Create cluster with Neutron GRE and custom nodegroups
5. Activate plugin and configure plugin settings
6. Remove 2nd custom nodegroup which is added automatically
7. Bootstrap slave nodes from custom nodegroup
8. Download network configuration
9. Update network.json with customized ip ranges
10. Put new json on master node and update network data
11. Verify that new IP ranges are applied for network config
12. Add following nodes to custom nodegroup:
* 1 controller+mongo
13. Add following nodes to default nodegroup:
* 1 compute
* 1 contrail-config+contrail-control+contrail-db
* 1 cinder
14. Deploy cluster
15. Run health checks (OSTF)
16. Add 1 controller node
17. Redeploy cluster
18. Run health checks (OSTF)

Duration 2.5 hours

"""
if not MULTIPLE_NETWORKS:
raise SkipTest()
self.show_step(1, initialize=True)
self.env.revert_snapshot("ready")
self.show_step(2)
plugin.prepare_contrail_plugin(self, snapshot_name="ready",
options={'ceilometer': True})

cluster_id = self.fuel_web.get_last_created_cluster()
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])

plugin.activate_plugin(self)
# activate vSRX image
vsrx_setup_result = plugin.activate_vsrx()
plugin.vsrx_multiple_networks(self)

self.show_step(6)
self.netconf_all_groups = self.fuel_web.client.get_networks(cluster_id)
custom_group2 = self.fuel_web.get_nodegroup(
cluster_id, name=NODEGROUPS[2]['name'])
wait(lambda: not self.is_update_dnsmasq_running(
self.fuel_web.client.get_tasks()), timeout=60,
timeout_msg="Timeout exceeded while waiting for task "
"'update_dnsmasq' is finished!")
self.fuel_web.client.delete_nodegroup(custom_group2['id'])

self.show_step(7)
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])

self.show_step(8)
updated_storage_default, updated_storage_custom, \
updated_mgmt_default, updated_mgmt_custom = \
self.update_network_config(cluster_id)

self.show_step(12)
self.show_step(13)
nodegroup_default = NODEGROUPS[0]['name']
nodegroup_custom1 = NODEGROUPS[1]['name']
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': [
['contrail-config', 'contrail-control', 'contrail-db'],
nodegroup_default],
'slave-02': [['compute'], nodegroup_default],
'slave-03': [['cinder'], nodegroup_default],
'slave-04': [['controller', 'mongo'], nodegroup_custom1],
}
)

self.show_step(14)
openstack.deploy_cluster(self)

self.show_step(15)
if vsrx_setup_result:
self.fuel_web.run_ostf(
cluster_id=self.cluster_id,
test_sets=['smoke', 'sanity', 'tests_platform'],
timeout=settings.OSTF_RUN_TIMEOUT
)

self.show_step(16)
self.fuel_web.update_nodes(
cluster_id,
{'slave-05': [['controller'], nodegroup_custom1], }
)

self.show_step(17)
openstack.deploy_cluster(self)

self.show_step(18)
if vsrx_setup_result:
self.fuel_web.run_ostf(
cluster_id=self.cluster_id,
test_sets=['smoke', 'sanity', 'tests_platform'],
timeout=settings.OSTF_RUN_TIMEOUT
)

@test(depends_on=[SetupEnvironment.prepare_release],
groups=["contrail_multiple_nodegroups_delete_controller"])
@log_snapshot_after_test
@check_fuel_statistics
def contrail_multiple_nodegroups_delete_controller(self):
"""Deploy HA environment with Neutron GRE and 2 nodegroups.

Scenario:
1. Revert snapshot with ready master node
2. Install contrail plugin
3. Bootstrap slaves from default nodegroup
4. Create cluster with Neutron GRE and custom nodegroups
5. Activate plugin and configure plugin settings
6. Remove 2nd custom nodegroup which is added automatically
7. Bootstrap slave nodes from custom nodegroup
8. Download network configuration
9. Update network.json with customized ip ranges
10. Put new json on master node and update network data
11. Verify that new IP ranges are applied for network config
12. Add following nodes to default nodegroup:
* 3 controller
13. Add following nodes to custom nodegroup:
* 1 compute
* 1 contrail-config+contrail-control+contrail-db
14. Deploy cluster
15. Run health checks (OSTF)
16. Remove 1 controller node
17. Redeploy cluster
18. Run health checks (OSTF)

Duration 2.5 hours

"""
if not MULTIPLE_NETWORKS:
raise SkipTest()
self.show_step(1, initialize=True)
self.env.revert_snapshot("ready")
self.show_step(2)
plugin.prepare_contrail_plugin(self, snapshot_name="ready")

cluster_id = self.fuel_web.get_last_created_cluster()
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])

plugin.activate_plugin(self)
# activate vSRX image
vsrx_setup_result = plugin.activate_vsrx()
plugin.vsrx_multiple_networks(self)

self.show_step(6)
self.netconf_all_groups = self.fuel_web.client.get_networks(cluster_id)
custom_group2 = self.fuel_web.get_nodegroup(
cluster_id, name=NODEGROUPS[2]['name'])
wait(lambda: not self.is_update_dnsmasq_running(
self.fuel_web.client.get_tasks()), timeout=60,
timeout_msg="Timeout exceeded while waiting for task "
"'update_dnsmasq' is finished!")
self.fuel_web.client.delete_nodegroup(custom_group2['id'])

self.show_step(7)
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])

self.show_step(8)
updated_storage_default, updated_storage_custom, \
updated_mgmt_default, updated_mgmt_custom = \
self.update_network_config(cluster_id)

self.show_step(12)
self.show_step(13)
nodegroup_default = NODEGROUPS[0]['name']
nodegroup_custom1 = NODEGROUPS[1]['name']
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': [['controller'], nodegroup_custom1],
'slave-02': [['controller'], nodegroup_custom1],
'slave-03': [['controller'], nodegroup_custom1],
'slave-04': [
['contrail-config', 'contrail-control', 'contrail-db'],
nodegroup_default],
'slave-05': [['compute'], nodegroup_default],
}
)
self.show_step(14)
openstack.deploy_cluster(self)

self.show_step(15)
if vsrx_setup_result:
self.fuel_web.run_ostf(
cluster_id=self.cluster_id,
test_sets=['smoke', 'sanity', 'ha'],
timeout=settings.OSTF_RUN_TIMEOUT
)

self.show_step(16)
conf_control = {'slave-03': [['controller'], nodegroup_custom1]}

openstack.update_deploy_check(self,
conf_control, delete=True,
is_vsrx=vsrx_setup_result)

self.show_step(17)
openstack.deploy_cluster(self)

self.show_step(18)
if vsrx_setup_result:
self.fuel_web.run_ostf(
cluster_id=self.cluster_id,
test_sets=['smoke', 'sanity'],
should_fail=1,
failed_test_name=['Check that required services are running']
)

@test(depends_on=[SetupEnvironment.prepare_release],
groups=["contrail_multiple_nodegroups_delete_compute"])
@log_snapshot_after_test
@check_fuel_statistics
def contrail_multiple_nodegroups_delete_compute(self):
"""Deploy HA environment with Neutron GRE and 2 nodegroups.

Scenario:
1. Revert snapshot with ready master node
2. Install contrail plugin
3. Bootstrap slaves from default nodegroup
4. Create cluster with Neutron GRE and custom nodegroups
5. Activate plugin and configure plugin settings
6. Remove 2nd custom nodegroup which is added automatically
7. Bootstrap slave nodes from custom nodegroup
8. Download network configuration
9. Update network.json with customized ip ranges
10. Put new json on master node and update network data
11. Verify that new IP ranges are applied for network config
12. Add following nodes to default nodegroup:
* 3 controller
13. Add following nodes to custom nodegroup:
* 2 compute
* 1 contrail-config+contrail-control+contrail-db
14. Deploy cluster
15. Run health checks (OSTF)
16. Remove 1 compute node
17. Redeploy cluster
18. Run health checks (OSTF)

Duration 2.5 hours

"""
if not MULTIPLE_NETWORKS:
raise SkipTest()
self.show_step(1, initialize=True)
self.env.revert_snapshot("ready")
self.show_step(2)
plugin.prepare_contrail_plugin(self, snapshot_name="ready")

cluster_id = self.fuel_web.get_last_created_cluster()
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])

plugin.activate_plugin(self)
# activate vSRX image
vsrx_setup_result = plugin.activate_vsrx()
plugin.vsrx_multiple_networks(self)

self.show_step(6)
self.netconf_all_groups = self.fuel_web.client.get_networks(cluster_id)
custom_group2 = self.fuel_web.get_nodegroup(
cluster_id, name=NODEGROUPS[2]['name'])
wait(lambda: not self.is_update_dnsmasq_running(
self.fuel_web.client.get_tasks()), timeout=60,
timeout_msg="Timeout exceeded while waiting for task "
"'update_dnsmasq' is finished!")
self.fuel_web.client.delete_nodegroup(custom_group2['id'])

self.show_step(7)
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:6])

self.show_step(8)
updated_storage_default, updated_storage_custom, \
updated_mgmt_default, updated_mgmt_custom = \
self.update_network_config(cluster_id)

self.show_step(12)
self.show_step(13)
nodegroup_default = NODEGROUPS[0]['name']
nodegroup_custom1 = NODEGROUPS[1]['name']
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': [['controller'], nodegroup_default],
'slave-02': [['controller'], nodegroup_default],
'slave-03': [['controller'], nodegroup_default],
'slave-04': [
['contrail-config', 'contrail-control', 'contrail-db'],
nodegroup_custom1],
'slave-05': [['compute'], nodegroup_custom1],
'slave-06': [['compute'], nodegroup_custom1],
}
)
self.show_step(14)
openstack.deploy_cluster(self)

self.show_step(15)
if vsrx_setup_result:
self.fuel_web.run_ostf(cluster_id=cluster_id)

self.show_step(16)
conf_compute = {'slave-06': [['compute'], nodegroup_custom1], }

openstack.update_deploy_check(self,
conf_compute, delete=True,
is_vsrx=vsrx_setup_result)

self.show_step(17)
openstack.deploy_cluster(self)

self.show_step(18)
if vsrx_setup_result:
self.fuel_web.run_ostf(
cluster_id=self.cluster_id,
test_sets=['smoke', 'sanity'],
should_fail=1,
failed_test_name=['Check that required services are running']
)

@test(depends_on=[SetupEnvironment.prepare_release],
groups=["contrail_multiple_nodegroups_add_compute"])
@log_snapshot_after_test
@check_fuel_statistics
def contrail_multiple_nodegroups_add_compute(self):
"""Deploy HA environment with Neutron GRE and 2 nodegroups.

Scenario:
1. Revert snapshot with ready master node
2. Install contrail plugin
3. Bootstrap slaves from default nodegroup
4. Create cluster with Neutron GRE and custom nodegroups
5. Activate plugin and configure plugin settings
6. Remove 2nd custom nodegroup which is added automatically
7. Bootstrap slave nodes from custom nodegroup
8. Download network configuration
9. Update network.json with customized ip ranges
10. Put new json on master node and update network data
11. Verify that new IP ranges are applied for network config
12. Add following nodes to default nodegroup:
* 3 controller
13. Add following nodes to custom nodegroup:
* 1 compute
* 1 contrail-config+contrail-control+contrail-db
14. Deploy cluster
15. Run health checks (OSTF)
16. Add 1 compute node
17. Redeploy cluster
18. Run health checks (OSTF)

Duration 2.5 hours

"""
if not MULTIPLE_NETWORKS:
raise SkipTest()
self.show_step(1, initialize=True)
self.env.revert_snapshot("ready")
self.show_step(2)
plugin.prepare_contrail_plugin(self, snapshot_name="ready")

cluster_id = self.fuel_web.get_last_created_cluster()
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])

plugin.activate_plugin(self)
# activate vSRX image
vsrx_setup_result = plugin.activate_vsrx()
plugin.vsrx_multiple_networks(self)

self.show_step(6)
self.netconf_all_groups = self.fuel_web.client.get_networks(cluster_id)
custom_group2 = self.fuel_web.get_nodegroup(
cluster_id, name=NODEGROUPS[2]['name'])
wait(lambda: not self.is_update_dnsmasq_running(
self.fuel_web.client.get_tasks()), timeout=60,
timeout_msg="Timeout exceeded while waiting for task "
"'update_dnsmasq' is finished!")
self.fuel_web.client.delete_nodegroup(custom_group2['id'])

self.show_step(7)
self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:6])

self.show_step(8)
updated_storage_default, updated_storage_custom, \
updated_mgmt_default, updated_mgmt_custom = \
self.update_network_config(cluster_id)

self.show_step(12)
self.show_step(13)
nodegroup_default = NODEGROUPS[0]['name']
nodegroup_custom1 = NODEGROUPS[1]['name']
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': [['controller'], nodegroup_default],
'slave-02': [['controller'], nodegroup_default],
'slave-03': [['controller'], nodegroup_default],
'slave-04': [
['contrail-config', 'contrail-control', 'contrail-db'],
nodegroup_custom1],
'slave-05': [['compute'], nodegroup_custom1],
}
)
self.show_step(14)
openstack.deploy_cluster(self)

self.show_step(15)
if vsrx_setup_result:
self.fuel_web.run_ostf(cluster_id=cluster_id)

self.show_step(16)
conf_compute = {'slave-06': [['compute'], nodegroup_custom1]}

self.fuel_web.update_nodes(cluster_id, conf_compute)

self.show_step(17)
openstack.deploy_cluster(self)

self.show_step(18)
if vsrx_setup_result:
self.fuel_web.run_ostf(cluster_id=cluster_id)

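All four new tests gate the nodegroup deletion on the same polling pattern (wait plus is_update_dnsmasq_running). A condensed, self-contained sketch of that predicate, using a hypothetical task payload instead of the Fuel client:

    def any_task_running(tasks, name='update_dnsmasq'):
        # True while a task with the given name is still in 'running' state
        return any(t['name'] == name and t['status'] == 'running' for t in tasks)

    tasks = [{'name': 'update_dnsmasq', 'status': 'running'}]  # sample payload
    assert any_task_running(tasks)
    tasks[0]['status'] = 'ready'
    assert not any_task_running(tasks)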
@@ -587,7 +587,6 @@ class DPDKTests(TestBasic):
'services are running']
)


@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["contrail_add_to_dpdk_sriov"])
@log_snapshot_after_test
@@ -595,7 +594,8 @@ class DPDKTests(TestBasic):
"""Verify that Contrail controller role can be added after deploying.

Scenario:
1. Create an environment with "Neutron with tunneling segmentation" as a network configuration
1. Create an environment with "Neutron with tunneling segmentation"
as a network configuration
2. Enable and configure Contrail plugin
3. Enable dpdk and sriov
4. Deploy cluster with following node configuration:
@@ -663,4 +663,3 @@ class DPDKTests(TestBasic):
if vsrx_setup_result:
self.show_step(9)
self.fuel_web.run_ostf(cluster_id=self.cluster_id)

@@ -478,7 +478,7 @@ class SRIOVTests(TestBasic):
conf_nodes = {
'slave-01': ['controller', 'ceph-osd'],
'slave-02': ['contrail-config',
'contrail-control',],
'contrail-control'],
'slave-03': ['contrail-db'],
'slave-04': ['compute', 'ceph-osd'],
'slave-05': ['compute', 'ceph-osd'],