diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..c52ce8f
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,4 @@
+[submodule "plugin_test/fuel-qa"]
+    path = plugin_test/fuel-qa
+    url = https://github.com/openstack/fuel-qa.git
+    branch = stable/7.0
diff --git a/plugin_test/fuel-qa b/plugin_test/fuel-qa
new file mode 160000
index 0000000..36318fb
--- /dev/null
+++ b/plugin_test/fuel-qa
@@ -0,0 +1 @@
+Subproject commit 36318fba9e3c67abb3abd5ff146b246d6c5ee248
diff --git a/plugin_test/run_tests.py b/plugin_test/run_tests.py
new file mode 100644
index 0000000..062ad18
--- /dev/null
+++ b/plugin_test/run_tests.py
@@ -0,0 +1,72 @@
+# Copyright 2015 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import re
+import sys
+
+from nose.plugins import Plugin
+from paramiko.transport import _join_lingering_threads
+
+
+class CloseSSHConnectionsPlugin(Plugin):
+    """Close all of paramiko's ssh connections after each test case.
+
+    The plugin works around proboscis' inability to run cleanups of any
+    kind: 'afterTest' calls the _join_lingering_threads function from
+    paramiko, which stops all threads (sets their state to inactive and
+    joins them for 10s).
+    """
+    name = 'closesshconnections'
+
+    def options(self, parser, env=os.environ):
+        super(CloseSSHConnectionsPlugin, self).options(parser, env=env)
+
+    def configure(self, options, conf):
+        super(CloseSSHConnectionsPlugin, self).configure(options, conf)
+        self.enabled = True
+
+    def afterTest(self, *args, **kwargs):
+        _join_lingering_threads()
+
+
+def import_tests():
+    from tests import test_plugin_vmware_dvs
+
+
+def run_tests():
+    from proboscis import TestProgram  # noqa
+
+    # Check if the specified test group starts any test case
+    if not TestProgram().cases:
+        from fuelweb_test import logger
+        logger.fatal('No test cases matched provided groups')
+        sys.exit(1)
+
+    # Run Proboscis and exit.
+    TestProgram(
+        addplugins=[CloseSSHConnectionsPlugin()]
+    ).run_and_exit()
+
+
+if __name__ == '__main__':
+    sys.path.append(sys.path[0] + "/fuel-qa")
+    import_tests()
+    from fuelweb_test.helpers.patching import map_test
+    if any(re.search(r'--group=patching_master_tests', arg)
+           for arg in sys.argv):
+        map_test('master')
+    elif any(re.search(r'--group=patching.*', arg) for arg in sys.argv):
+        map_test('environment')
+    run_tests()
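+
+# A typical invocation, for reference (hypothetical values; fuel-qa reads
+# the rest of its settings, such as ISO_PATH and ENV_NAME, from environment
+# variables):
+#
+#   export DVS_PLUGIN_PATH=/tmp/fuel-plugin-vmware-dvs-1.1-1.1.0-1.noarch.rpm
+#   python plugin_test/run_tests.py -q --nologcapture --with-xunit \
+#       --group=dvs_vcenter_smoke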
diff --git a/plugin_test/tests/__init__.py b/plugin_test/tests/__init__.py
new file mode 100644
index 0000000..40a96af
--- /dev/null
+++ b/plugin_test/tests/__init__.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/plugin_test/tests/test_plugin_vmware_dvs.py b/plugin_test/tests/test_plugin_vmware_dvs.py
new file mode 100644
index 0000000..8fc3ee8
--- /dev/null
+++ b/plugin_test/tests/test_plugin_vmware_dvs.py
@@ -0,0 +1,1178 @@
+# Copyright 2014 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+
+from proboscis import test
+from proboscis.asserts import assert_true
+from fuelweb_test.helpers import checkers
+from devops.helpers.helpers import wait
+from devops.error import TimeoutError
+
+from fuelweb_test.helpers.decorators import log_snapshot_after_test
+from fuelweb_test import logger
+from fuelweb_test.settings import DEPLOYMENT_MODE
+from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
+from fuelweb_test.settings import SERVTEST_USERNAME
+from fuelweb_test.settings import SERVTEST_PASSWORD
+from fuelweb_test.settings import SERVTEST_TENANT
+from fuelweb_test.tests.base_test_case import SetupEnvironment
+from fuelweb_test.tests.base_test_case import TestBasic
+from fuelweb_test.helpers import os_actions
+
+
+@test(groups=["plugins", 'dvs_vcenter_plugin', 'dvs_vcenter_system'])
+class TestDVSPlugin(TestBasic):
+
+    # constants
+    DVS_PLUGIN_PATH = os.environ.get('DVS_PLUGIN_PATH')
+    plugin_name = 'fuel-plugin-vmware-dvs'
+    msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
+    dvs_switch_name = ['dvSwitch']
+    cluster_settings = {'images_vcenter': True,
+                        'images_ceph': False,
+                        'net_provider': 'neutron',
+                        'net_segment_type': NEUTRON_SEGMENT_TYPE,
+                        }
+    node_name = lambda self, name_node: self.fuel_web. \
+        get_nailgun_node_by_name(name_node)['hostname']
+
+    net_data = [{'net_1': '192.168.112.0/24'},
+                {'net_2': '192.168.113.0/24'}]
+
+    def install_dvs_plugin(self):
+        # Copy the plugin to the master node
+        checkers.upload_tarball(
+            self.env.d_env.get_admin_remote(),
+            self.DVS_PLUGIN_PATH, "/var")
+
+        # Install the plugin
+        checkers.install_plugin_check_code(
+            self.env.d_env.get_admin_remote(),
+            plugin=os.path.basename(self.DVS_PLUGIN_PATH))
+
+    def enable_plugin(self, cluster_id=None):
+        assert_true(
+            self.fuel_web.check_plugin_exists(cluster_id, self.plugin_name),
+            self.msg)
+        options = {'metadata/enabled': True,
+                   'vmware_dvs_net_maps/value': self.dvs_switch_name[0]}
+        self.fuel_web.update_plugin_data(cluster_id, self.plugin_name, options)
+
+        logger.info("cluster is {}".format(cluster_id))
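+
+    # For reference, the two helpers above are used at the start of every
+    # test in this class, along the lines of (sketch):
+    #
+    #   self.install_dvs_plugin()
+    #   cluster_id = self.fuel_web.create_cluster(...)
+    #   self.enable_plugin(cluster_id=cluster_id)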
+
+    def create_instances(self, os_conn=None, vm_count=None, nics=None,
+                         security_group=None):
+        """Create VMs on all available hypervisors.
+
+        :param os_conn: type object, openstack
+        :param vm_count: type integer, count of VMs to create
+        :param nics: type dictionary, neutron networks
+                     to assign to instance
+        :param security_group: type dictionary, security group to assign to
+                               instances
+        """
+        # Get lists of available images, flavors and hypervisors
+        images_list = os_conn.nova.images.list()
+        flavors_list = os_conn.nova.flavors.list()
+
+        for image in images_list:
+            if image.name == 'TestVM-VMDK':
+                os_conn.nova.servers.create(
+                    flavor=flavors_list[0],
+                    name='test_{0}'.format(image.name),
+                    image=image, min_count=vm_count,
+                    availability_zone='vcenter',
+                    nics=nics
+                )
+            else:
+                os_conn.nova.servers.create(
+                    flavor=flavors_list[0],
+                    name='test_{0}'.format(image.name),
+                    image=image, min_count=vm_count,
+                    availability_zone='nova',
+                    nics=nics
+                )
+
+        # Verify that the current state of each VM is Active
+        srv_list = os_conn.get_servers()
+        for srv in srv_list:
+            assert_true(os_conn.get_instance_detail(srv).status != 'ERROR',
+                        "Current state of VM {0} is {1}".format(
+                            srv.name, os_conn.get_instance_detail(srv).status))
+            try:
+                wait(
+                    lambda:
+                    os_conn.get_instance_detail(srv).status == "ACTIVE",
+                    timeout=500)
+            except TimeoutError:
+                logger.error(
+                    "Timeout is reached. Current state of VM {0} is "
+                    "{1}".format(
+                        srv.name, os_conn.get_instance_detail(srv).status))
+            # Assign the security group, if given
+            if security_group:
+                srv.add_security_group(security_group)
+
+    def check_connection_vms(self, os_conn=None, srv_list=None,
+                             packets=3, remote=None, ip=None):
+        """Check network connectivity between VMs with ping.
+
+        :param os_conn: type object, openstack
+        :param srv_list: type list, instances
+        :param packets: type int, packets count of icmp reply
+        :param remote: SSHClient
+        :param ip: type list, remote ip to check by ping
+        """
+        for srv in srv_list:
+            # VMs on different hypervisors should communicate with
+            # each other
+            if not remote:
+                primary_controller = self.fuel_web.get_nailgun_primary_node(
+                    self.env.d_env.nodes().slaves[0]
+                )
+                remote = self.fuel_web.get_ssh_for_node(
+                    primary_controller.name)
+
+            addresses = srv.addresses[srv.addresses.keys()[0]]
+            fip = [add['addr'] for add in addresses
+                   if add['OS-EXT-IPS:type'] == 'floating'][0]
+            logger.info("Connect to VM {0}".format(fip))
+
+            # Ping either the given addresses or the fixed ips of all
+            # other instances
+            if not ip:
+                ips_to_ping = [s.networks[s.networks.keys()[0]][0]
+                               for s in srv_list if s != srv]
+            else:
+                ips_to_ping = [ip_2 for ip_2 in ip
+                               if ip_2 != srv.networks[
+                                   srv.networks.keys()[0]][0]]
+
+            for ip_2 in ips_to_ping:
+                res = os_conn.execute_through_host(
+                    remote, fip,
+                    "ping -q -c3 {}"
+                    "| grep -o '[0-9] packets received' | cut"
+                    " -f1 -d ' '".format(ip_2))
+                assert_true(
+                    int(res) == packets,
+                    "Ping of VM {0} from VM {1} received {2} icmp replies"
+                    " instead of {3}".format(ip_2, fip, res, packets)
+                )
+
+    def check_service(self, ssh=None, commands=None):
+        """Check that required nova services are running on the controller.
+
+        :param ssh: SSHClient
+        :param commands: type list, nova commands to execute on controller,
+                         for example:
+                         ['nova-manage service list | grep vcenter-vmcluster1']
+        """
+        ssh.execute('source openrc')
+        for cmd in commands:
+            wait(
+                lambda:
+                ':-)' in list(ssh.execute(cmd)['stdout'])[-1].split(' '),
+                timeout=200)
+
+    def create_and_assign_floating_ip(self, os_conn, srv_list=None,
+                                      ext_net=None, tenant_id=None):
+        """Create and assign floating ips to instances.
+
+        :param os_conn: type object, openstack
+        :param srv_list: type list, objects of created instances
+        :param ext_net: type object, neutron external network
+        :param tenant_id: type string, tenant id
+        """
+        if not ext_net:
+            ext_net = [net for net
+                       in os_conn.neutron.list_networks()["networks"]
+                       if net['name'] == "net04_ext"][0]
+        if not tenant_id:
+            tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id
+        if not srv_list:
+            srv_list = os_conn.get_servers()
+        for srv in srv_list:
+            fip = os_conn.neutron.create_floatingip(
+                {'floatingip': {
+                    'floating_network_id': ext_net['id'],
+                    'tenant_id': tenant_id}})
+            os_conn.nova.servers.add_floating_ip(
+                srv, fip['floatingip']['floating_ip_address']
+            )
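+
+    # Typical use of the two checks above (sketch, mirroring the system
+    # tests below): assign floating ips, then ping an outside address from
+    # every instance through the primary controller:
+    #
+    #   self.create_and_assign_floating_ip(os_conn=os_conn)
+    #   srv_list = os_conn.get_servers()
+    #   self.check_connection_vms(os_conn=os_conn, srv_list=srv_list,
+    #                             ip=['8.8.8.8'])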
+
+    def add_router(self, os_conn, router_name, ext_net_name="net04_ext",
+                   tenant_name='admin'):
+        """Create a router with gateway.
+
+        :param os_conn: type object, openstack
+        :param router_name: type string
+        :param ext_net_name: type string
+        :param tenant_name: type string
+        """
+        ext_net = [net for net
+                   in os_conn.neutron.list_networks()["networks"]
+                   if net['name'] == ext_net_name][0]
+
+        gateway = {"network_id": ext_net["id"],
+                   "enable_snat": True
+                   }
+        tenant_id = os_conn.get_tenant(tenant_name).id
+        router_param = {'router': {'name': router_name,
+                                   'external_gateway_info': gateway,
+                                   'tenant_id': tenant_id}}
+        router = os_conn.neutron.create_router(body=router_param)['router']
+        return router
+
+    def add_subnet_to_router(self, os_conn, router_id, sub_id):
+        os_conn.neutron.add_interface_router(
+            router_id,
+            {'subnet_id': sub_id}
+        )
+
+    def create_network(self, os_conn, name,
+                       tenant_name='admin'):
+        tenant_id = os_conn.get_tenant(tenant_name).id
+
+        net_body = {"network": {"name": name,
+                                "tenant_id": tenant_id
+                                }
+                    }
+        network = os_conn.neutron.create_network(net_body)['network']
+        return network
+
+    def create_subnet(self, os_conn, network,
+                      cidr, tenant_name='admin'):
+        tenant_id = os_conn.get_tenant(tenant_name).id
+        subnet_body = {"subnet": {"network_id": network['id'],
+                                  "ip_version": 4,
+                                  "cidr": cidr,
+                                  "name": 'subnet_{}'.format(
+                                      network['name'][-1]),
+                                  "tenant_id": tenant_id
+                                  }
+                       }
+        subnet = os_conn.neutron.create_subnet(subnet_body)['subnet']
+        return subnet
+
+    def get_role(self, os_conn, role_name):
+        role_list = os_conn.keystone.roles.list()
+        for role in role_list:
+            if role.name == role_name:
+                return role
+        return None
+
+    def add_role_to_user(self, os_conn, user_name, role_name, tenant_name):
+        tenant_id = os_conn.get_tenant(tenant_name).id
+        user_id = os_conn.get_user(user_name).id
+        role_id = self.get_role(os_conn, role_name).id
+        os_conn.keystone.roles.add_user_role(user_id, role_id, tenant_id)
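+
+    # The network helpers above chain together like this (sketch;
+    # 'os_conn' is an os_actions.OpenStackActions object):
+    #
+    #   network = self.create_network(os_conn, 'net_1',
+    #                                 tenant_name=SERVTEST_TENANT)
+    #   subnet = self.create_subnet(os_conn, network, '192.168.112.0/24',
+    #                               tenant_name=SERVTEST_TENANT)
+    #   router = self.add_router(os_conn, 'router_1')
+    #   self.add_subnet_to_router(os_conn, router['id'], subnet['id'])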
+
+    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
+          groups=["dvs_vcenter_smoke", "dvs_vcenter_plugin"])
+    @log_snapshot_after_test
+    def dvs_vcenter_smoke(self):
+        """Deploy cluster with plugin and vmware datastore backend
+
+        Scenario:
+            1. Upload plugins to the master node.
+            2. Install plugin.
+            3. Create cluster with vcenter.
+            4. Add 1 node with controller role.
+            5. Deploy the cluster.
+            6. Run OSTF.
+
+        Duration 1.8 hours
+
+        """
+        self.env.revert_snapshot("ready_with_3_slaves")
+
+        self.install_dvs_plugin()
+
+        # Configure cluster
+        cluster_id = self.fuel_web.create_cluster(
+            name=self.__class__.__name__,
+            mode=DEPLOYMENT_MODE,
+            settings={
+                "net_provider": 'neutron',
+                "net_segment_type": NEUTRON_SEGMENT_TYPE
+            }
+        )
+        self.enable_plugin(cluster_id=cluster_id)
+
+        # Assign role to node
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-01': ['controller']}
+        )
+
+        # Configure VMWare vCenter settings
+        self.fuel_web.vcenter_configure(cluster_id)
+
+        self.fuel_web.deploy_cluster_wait(cluster_id)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id, test_sets=['smoke'])
+
+    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
+          groups=["dvs_vcenter_bvt", "dvs_vcenter_plugin"])
+    @log_snapshot_after_test
+    def dvs_vcenter_bvt(self):
+        """Deploy cluster with plugin and vmware datastore backend
+
+        Scenario:
+            1. Upload plugins to the master node.
+            2. Install plugin.
+            3. Create cluster with vcenter.
+            4. Add 1 node with controller role.
+            5. Add 1 node with compute role.
+            6. Add 1 node with compute-vmware role.
+            7. Deploy the cluster.
+            8. Run OSTF.
+
+        Duration 1.8 hours
+
+        """
+        self.env.revert_snapshot("ready_with_3_slaves")
+
+        self.install_dvs_plugin()
+
+        # Configure cluster with 2 vcenter clusters and vcenter glance
+        cluster_id = self.fuel_web.create_cluster(
+            name=self.__class__.__name__,
+            mode=DEPLOYMENT_MODE,
+            settings={
+                "net_provider": 'neutron',
+                "net_segment_type": NEUTRON_SEGMENT_TYPE
+            }
+        )
+        self.enable_plugin(cluster_id=cluster_id)
+
+        # Assign roles to nodes
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-01': ['controller'],
+             'slave-02': ['compute'],
+             'slave-03': ['compute-vmware'], }
+        )
+
+        # Configure VMWare vCenter settings
+        self.fuel_web.vcenter_configure(cluster_id)
+
+        self.fuel_web.deploy_cluster_wait(cluster_id)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id, test_sets=['smoke'])
+
+    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
+          groups=["dvs_vcenter_bvt_2", "dvs_vcenter_plugin"])
+    @log_snapshot_after_test
+    def dvs_vcenter_bvt_2(self):
+        """Deploy cluster with plugin and vmware datastore backend
+
+        Scenario:
+            1. Upload plugins to the master node.
+            2. Install plugin.
+            3. Create cluster with vcenter.
+            4. Add 1 node with controller role.
+            5. Add 1 node with compute-vmware role.
+            6. Add 1 node with compute role.
+            7. Deploy the cluster.
+            8. Run OSTF.
+
+        Duration 1.8 hours
+
+        """
+        self.env.revert_snapshot("ready_with_3_slaves")
+
+        self.install_dvs_plugin()
+
+        # Configure cluster with 2 vcenter clusters and vcenter glance
+        cluster_id = self.fuel_web.create_cluster(
+            name=self.__class__.__name__,
+            mode=DEPLOYMENT_MODE,
+            settings={
+                "net_provider": 'neutron',
+                "net_segment_type": NEUTRON_SEGMENT_TYPE
+            }
+        )
+        self.enable_plugin(cluster_id=cluster_id)
+
+        # Assign roles to nodes
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-01': ['controller'],
+             'slave-02': ['compute-vmware'],
+             'slave-03': ['compute'], }
+        )
+
+        # Configure VMWare vCenter settings
+        target_node_2 = self.node_name('slave-02')
+        self.fuel_web.vcenter_configure(
+            cluster_id,
+            target_node_2=target_node_2,
+            multiclusters=True
+        )
+
+        self.fuel_web.deploy_cluster_wait(cluster_id)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id, test_sets=['smoke'])
+
+        self.env.make_snapshot("dvs_vcenter_bvt_2", is_make=True)
+
+    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
+          groups=["dvs_vcenter_ha_mode", "dvs_vcenter_plugin"])
+    @log_snapshot_after_test
+    def dvs_vcenter_ha_mode(self):
+        """Deploy cluster with plugin in HA mode
+
+        Scenario:
+            1. Upload plugins to the master node.
+            2. Install plugin.
+            3. Create cluster with vcenter.
+            4. Add 3 nodes with controller role.
+            5. Add 1 node with compute-vmware role.
+            6. Add 1 node with compute role.
+            7. Deploy the cluster.
+            8. Run OSTF.
+
+        Duration 2.5 hours
+
+        """
+        self.env.revert_snapshot("ready_with_5_slaves")
+
+        self.install_dvs_plugin()
+
+        # Configure cluster
+        cluster_id = self.fuel_web.create_cluster(
+            name=self.__class__.__name__,
+            mode=DEPLOYMENT_MODE,
+            settings={
+                "net_provider": 'neutron',
+                "net_segment_type": NEUTRON_SEGMENT_TYPE
+            }
+        )
+
+        self.enable_plugin(cluster_id=cluster_id)
+
+        # Assign roles to nodes
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-01': ['controller'],
+             'slave-02': ['controller'],
+             'slave-03': ['controller'],
+             'slave-04': ['compute-vmware'],
+             'slave-05': ['compute']}
+        )
+
+        # Configure VMWare vCenter settings
+        target_node_1 = self.node_name('slave-04')
+        self.fuel_web.vcenter_configure(
+            cluster_id, multiclusters=True,
+            target_node_1=target_node_1
+        )
+
+        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=70 * 60)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id, test_sets=['smoke'])
+
+    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
+          groups=["dvs_vcenter_ceph", "dvs_vcenter_plugin"])
+    @log_snapshot_after_test
+    def dvs_vcenter_ceph(self):
+        """Deploy cluster with plugin and ceph backend
+
+        Scenario:
+            1. Upload plugins to the master node.
+            2. Install plugin.
+            3. Create cluster with vcenter.
+            4. Add 1 node with controller role.
+            5. Add 2 nodes with controller + ceph-osd roles.
+            6. Add 1 node with compute role.
+            7. Add 1 node with cinder-vmware role.
+            8. Deploy the cluster.
+            9. Run OSTF.
+
+        Duration 2.5 hours
+
+        """
+        self.env.revert_snapshot("ready_with_5_slaves")
+
+        self.install_dvs_plugin()
+
+        # Configure cluster
+        cluster_id = self.fuel_web.create_cluster(
+            name=self.__class__.__name__,
+            mode=DEPLOYMENT_MODE,
+            settings={
+                "net_provider": 'neutron',
+                "net_segment_type": NEUTRON_SEGMENT_TYPE,
+                'images_ceph': True,
+                'volumes_ceph': True,
+                'objects_ceph': True,
+                'volumes_lvm': False})
+
+        self.enable_plugin(cluster_id=cluster_id)
+
+        # Assign roles to nodes
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-01': ['controller'],
+             'slave-02': ['controller', 'ceph-osd'],
+             'slave-03': ['controller', 'ceph-osd'],
+             'slave-04': ['compute'],
+             'slave-05': ['cinder-vmware']}
+        )
+
+        # Configure VMWare vCenter settings
+        self.fuel_web.vcenter_configure(cluster_id, multiclusters=True)
+        self.fuel_web.deploy_cluster_wait(cluster_id)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id, test_sets=['smoke'])
+
+    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
+          groups=["dvs_vcenter_ceph_2", "dvs_vcenter_plugin"])
+    @log_snapshot_after_test
+    def dvs_vcenter_ceph_2(self):
+        """Deploy cluster with plugin and ceph backend
+
+        Scenario:
+            1. Upload plugins to the master node.
+            2. Install plugin.
+            3. Create cluster with vcenter.
+            4. Set Ceph as the backend for Glance and Cinder.
+            5. Add nodes with the following roles:
+                controller
+                compute-vmware
+                compute-vmware
+                compute
+                2 ceph-osd
+            6. Deploy the cluster.
+            7. Run OSTF.
+
+        Duration 2.5 hours
+
+        """
+        self.env.revert_snapshot("ready_with_9_slaves")
+
+        self.install_dvs_plugin()
+
+        # Configure cluster
+        cluster_id = self.fuel_web.create_cluster(
+            name=self.__class__.__name__,
+            mode=DEPLOYMENT_MODE,
+            settings={
+                "net_provider": 'neutron',
+                "net_segment_type": NEUTRON_SEGMENT_TYPE,
+                'images_ceph': True,
+                'volumes_ceph': True,
+                'objects_ceph': True,
+                'volumes_lvm': False})
+
+        self.enable_plugin(cluster_id=cluster_id)
+
+        # Assign roles to nodes
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {
+                'slave-01': ['controller'],
+                'slave-02': ['compute-vmware'],
+                'slave-03': ['compute-vmware'],
+                'slave-04': ['compute'],
+                'slave-05': ['ceph-osd'],
+                'slave-06': ['ceph-osd']
+            }
+        )
+
+        # Configure VMWare vCenter settings
+        target_node_1 = self.node_name('slave-02')
+        target_node_2 = self.node_name('slave-03')
+        self.fuel_web.vcenter_configure(
+            cluster_id,
+            target_node_1=target_node_1,
+            target_node_2=target_node_2,
+            multiclusters=True
+        )
+
+        self.fuel_web.deploy_cluster_wait(cluster_id)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id,
+            test_sets=['smoke', 'tests_platform'])
+
+    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
+          groups=["dvs_vcenter_ceilometer", "dvs_vcenter_plugin"])
+    @log_snapshot_after_test
+    def dvs_vcenter_ceilometer(self):
+        """Deploy cluster with plugin and ceilometer
+
+        Scenario:
+            1. Upload plugins to the master node.
+            2. Install plugin.
+            3. Create cluster with vcenter.
+            4. Add 3 nodes with controller + mongo roles.
+            5. Add 2 nodes with compute role.
+            6. Deploy the cluster.
+            7. Run OSTF.
+
+        Duration 3 hours
+
+        """
+        self.env.revert_snapshot("ready_with_5_slaves")
+
+        self.install_dvs_plugin()
+
+        # Configure cluster
+        cluster_id = self.fuel_web.create_cluster(
+            name=self.__class__.__name__,
+            mode=DEPLOYMENT_MODE,
+            settings={
+                "net_provider": 'neutron',
+                "net_segment_type": NEUTRON_SEGMENT_TYPE,
+                'ceilometer': True})
+
+        self.enable_plugin(cluster_id=cluster_id)
+
+        # Assign roles to nodes
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-01': ['controller', 'mongo'],
+             'slave-02': ['controller', 'mongo'],
+             'slave-03': ['controller', 'mongo'],
+             'slave-04': ['compute'],
+             'slave-05': ['compute'],
+             }
+        )
+
+        # Configure VMWare vCenter settings
+        self.fuel_web.vcenter_configure(cluster_id, multiclusters=True)
+
+        self.fuel_web.deploy_cluster_wait(cluster_id)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id,
+            test_sets=['smoke', 'tests_platform'])
+
+    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
+          groups=["dvs_vcenter_ceilometer_2", "dvs_vcenter_plugin"])
+    @log_snapshot_after_test
+    def dvs_vcenter_ceilometer_2(self):
+        """Deploy cluster with plugin and ceilometer
+
+        Scenario:
+            1. Upload plugins to the master node.
+            2. Install plugin.
+            3. Create cluster with vcenter and ceilometer.
+            4. Add nodes with the following roles:
+                controller
+                compute + cinder
+                cinder-vmware
+                compute-vmware
+                compute-vmware
+                mongo
+            5. Assign vCenter cluster(s) to:
+                compute-vmware
+            6. Deploy the cluster.
+            7. Run OSTF.
+
+        Duration 3 hours
+
+        """
+        self.env.revert_snapshot("ready_with_9_slaves")
+
+        self.install_dvs_plugin()
+
+        # Configure cluster
+        cluster_id = self.fuel_web.create_cluster(
+            name=self.__class__.__name__,
+            mode=DEPLOYMENT_MODE,
+            settings={
+                "net_provider": 'neutron',
+                "net_segment_type": NEUTRON_SEGMENT_TYPE,
+                'ceilometer': True})
+
+        self.enable_plugin(cluster_id=cluster_id)
+
+        # Assign roles to nodes
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-01': ['controller'],
+             'slave-02': ['compute', 'cinder'],
+             'slave-03': ['cinder-vmware'],
+             'slave-04': ['compute-vmware'],
+             'slave-05': ['compute-vmware'],
+             'slave-06': ['mongo']
+             }
+        )
+
+        # Configure VMWare vCenter settings
+        target_node_1 = self.node_name('slave-04')
+        target_node_2 = self.node_name('slave-05')
+        self.fuel_web.vcenter_configure(
+            cluster_id,
+            target_node_1=target_node_1,
+            target_node_2=target_node_2,
+            multiclusters=True
+        )
+
+        self.fuel_web.deploy_cluster_wait(cluster_id)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id,
+            test_sets=['smoke', 'tests_platform'])
+
+    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
+          groups=["dvs_vcenter_add_delete_nodes", "dvs_vcenter_plugin"])
+    @log_snapshot_after_test
+    def dvs_vcenter_add_delete_nodes(self):
+        """Deploy cluster with plugin and vmware datastore backend
+
+        Scenario:
+            1. Upload plugins to the master node.
+            2. Install plugin.
+            3. Create cluster with vcenter.
+            4. Add 3 nodes with controller role.
+            5. Add 1 node with cinder-vmware role.
+            6. Add 2 nodes with compute role.
+            7. Add 1 node with compute-vmware role.
+            8. Deploy the cluster.
+            9. Run OSTF.
+            10. Remove node with cinder-vmware role.
+            11. Add node with cinder role.
+            12. Redeploy cluster.
+            13. Run OSTF.
+            14. Remove node with compute role.
+            15. Add node with cinder-vmware role.
+            16. Redeploy cluster.
+            17. Run OSTF.
+
+        Duration 3 hours
+
+        """
+        self.env.revert_snapshot("ready_with_9_slaves")
+
+        self.install_dvs_plugin()
+
+        # Configure cluster
+        cluster_id = self.fuel_web.create_cluster(
+            name=self.__class__.__name__,
+            mode=DEPLOYMENT_MODE,
+            settings={
+                "net_provider": 'neutron',
+                "net_segment_type": NEUTRON_SEGMENT_TYPE
+            }
+        )
+
+        self.enable_plugin(cluster_id=cluster_id)
+
+        # Assign roles to nodes
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-01': ['controller'],
+             'slave-02': ['controller'],
+             'slave-03': ['controller'],
+             'slave-04': ['cinder-vmware'],
+             'slave-05': ['compute'],
+             'slave-06': ['compute'],
+             'slave-07': ['compute-vmware'], })
+
+        # Configure VMWare vCenter settings
+        target_node_1 = self.node_name('slave-07')
+        self.fuel_web.vcenter_configure(
+            cluster_id,
+            target_node_1=target_node_1,
+            multiclusters=True
+        )
+        self.fuel_web.deploy_cluster_wait(cluster_id)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id, test_sets=['smoke'])
+
+        # Remove node with cinder-vmware role
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-04': ['cinder-vmware'], }, False, True)
+
+        # Add 1 node with cinder role and redeploy cluster
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {
+                'slave-08': ['cinder'],
+            }
+        )
+
+        self.fuel_web.deploy_cluster_wait(cluster_id)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id, test_sets=['smoke'])
+
+        # Remove node with compute role
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-05': ['compute'], }, False, True)
+
+        # Add 1 node with cinder-vmware role and redeploy cluster
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {
+                'slave-04': ['cinder-vmware'],
+            }
+        )
+
+        self.fuel_web.deploy_cluster_wait(cluster_id)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id, test_sets=['smoke'])
+
+    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
+          groups=["dvs_vcenter_add_delete_controller", "dvs_vcenter_plugin"])
+    @log_snapshot_after_test
+    def dvs_vcenter_add_delete_controller(self):
+        """Deploy cluster with plugin, adding and deleting a controller node.
+
+        Scenario:
+            1. Upload plugins to the master node.
+            2. Install plugin.
+            3. Create cluster with vcenter.
+            4. Add 4 nodes with controller role.
+            5. Add 1 node with cinder-vmware role.
+            6. Add 1 node with compute role.
+            7. Deploy cluster.
+            8. Run OSTF.
+            9. Remove node with controller role.
+            10. Redeploy cluster.
+            11. Run OSTF.
+            12. Add node with controller role.
+            13. Redeploy cluster.
+            14. Run OSTF.
+
+        Duration 3.5 hours
+
+        """
+        self.env.revert_snapshot("ready_with_9_slaves")
+
+        self.install_dvs_plugin()
+
+        # Configure cluster
+        cluster_id = self.fuel_web.create_cluster(
+            name=self.__class__.__name__,
+            mode=DEPLOYMENT_MODE,
+            settings={
+                "net_provider": 'neutron',
+                "net_segment_type": NEUTRON_SEGMENT_TYPE
+            }
+        )
+
+        self.enable_plugin(cluster_id=cluster_id)
+
+        # Assign roles to nodes
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-01': ['controller'],
+             'slave-02': ['controller'],
+             'slave-03': ['controller'],
+             'slave-04': ['controller'],
+             'slave-05': ['cinder-vmware'],
+             'slave-06': ['compute'], })
+
+        # Configure VMWare vCenter settings
+        self.fuel_web.vcenter_configure(cluster_id)
+
+        self.fuel_web.deploy_cluster_wait(cluster_id)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id, test_sets=['smoke'])
+
+        logger.info("Connect to primary controller")
+
+        primary_controller = self.fuel_web.get_nailgun_primary_node(
+            self.env.d_env.nodes().slaves[0]
+        )
+        remote = self.fuel_web.get_ssh_for_node(primary_controller.name)
+        # Remove networks before redeployment
+        command = '/etc/fuel/plugins/' + \
+                  'fuel-plugin-vmware-dvs-1.0/del_predefined_networks.sh'
+        result = remote.execute(command)
+        for output in result['stdout']:
+            logger.info(" {0}".format(output))
+
+        # Remove node with controller role
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {'slave-04': ['controller'], }, False, True)
+
+        self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id, test_sets=['smoke'])
+
+        # Add node with controller role
+
+        logger.info("Connect to primary controller")
+
+        primary_controller = self.fuel_web.get_nailgun_primary_node(
+            self.env.d_env.nodes().slaves[0]
+        )
+        remote = self.fuel_web.get_ssh_for_node(primary_controller.name)
+
+        # Remove networks before redeployment
+        result = remote.execute(command)
+        for output in result['stdout']:
+            logger.info(" {0}".format(output))
+
+        self.fuel_web.update_nodes(
+            cluster_id,
+            {
+                'slave-04': ['controller'],
+            }
+        )
+
+        self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)
+
+        self.fuel_web.run_ostf(
+            cluster_id=cluster_id, test_sets=['smoke'])
+
+    @test(depends_on=[dvs_vcenter_bvt_2],
+          groups=["dvs_vcenter_networks", 'dvs_vcenter_system'])
+    @log_snapshot_after_test
+    def dvs_vcenter_networks(self):
+        """Check abilities to create and terminate networks on DVS.
+
+        Scenario:
+            1. Revert snapshot to dvs_vcenter_bvt_2.
+            2. Add 2 private networks net_1 and net_2.
+            3. Check that networks are created.
+            4. Delete net_1.
+            5. Check that net_1 is deleted.
+            6. Add net_1 again.
+
+        Duration 1.8 hours
+
+        """
+        self.env.revert_snapshot("dvs_vcenter_bvt_2")
+
+        cluster_id = self.fuel_web.get_last_created_cluster()
+
+        # Create OpenStack connection
+        os_ip = self.fuel_web.get_public_vip(cluster_id)
+        os_conn = os_actions.OpenStackActions(
+            os_ip, SERVTEST_USERNAME,
+            SERVTEST_PASSWORD,
+            SERVTEST_TENANT)
+
+        subnets = []
+        networks = []
+
+        for net in self.net_data:
+            logger.info('Create network {}'.format(net.keys()[0]))
+            network = self.create_network(
+                os_conn,
+                net.keys()[0], tenant_name=SERVTEST_TENANT
+            )
+
+            logger.info('Create subnet {}'.format(net.keys()[0]))
+            subnet = self.create_subnet(
+                os_conn,
+                network,
+                net[net.keys()[0]], tenant_name=SERVTEST_TENANT
+            )
+
+            subnets.append(subnet)
+            networks.append(network)
+
+        # Check that networks are created.
+        for network in networks:
+            assert_true(
+                os_conn.get_network(network['name'])['id'] == network['id']
+            )
+
+        # Delete net_1.
+        logger.info('Delete network net_1')
+        os_conn.neutron.delete_subnet(subnets[0]['id'])
+        os_conn.neutron.delete_network(networks[0]['id'])
+
+        # Check that net_1 is deleted.
+        assert_true(
+            os_conn.get_network(networks[0]['name']) is None
+        )
+        logger.info('Network net_1 is removed.')
+        logger.info('Create net_1 again.')
+        network = self.create_network(
+            os_conn,
+            self.net_data[0].keys()[0])
+        subnet = self.create_subnet(
+            os_conn,
+            network,
+            self.net_data[0][self.net_data[0].keys()[0]],
+            tenant_name=SERVTEST_TENANT
+        )
+        assert_true(
+            os_conn.get_network(network['name'])['id'] == network['id']
+        )
+        logger.info('Network net_1 is created again.')
+
+    @test(depends_on=[dvs_vcenter_bvt_2],
+          groups=["dvs_vcenter_ping_public", 'dvs_vcenter_system'])
+    @log_snapshot_after_test
+    def dvs_vcenter_ping_public(self):
+        """Check connectivity of VMs to the public network with floating ip.
+
+        Scenario:
+            1. Revert snapshot to dvs_vcenter_bvt_2.
+            2. Create private network net_1 with subnet
+               (192.168.112.0/24).
+            3. Attach net_1 to the default router with external
+               gateway.
+            4. Launch instances VM_1 and VM_2 in net_1.
+            5. Send ping from instances VM_1 and VM_2 to 8.8.8.8
+               or other outside ip.
+
+        Duration 1.5 hours
+
+        """
+        self.env.revert_snapshot("dvs_vcenter_bvt_2")
+
+        cluster_id = self.fuel_web.get_last_created_cluster()
+
+        # Create OpenStack connection
+        os_ip = self.fuel_web.get_public_vip(cluster_id)
+        os_conn = os_actions.OpenStackActions(
+            os_ip, SERVTEST_USERNAME,
+            SERVTEST_PASSWORD,
+            SERVTEST_TENANT)
+
+        # Create a non-default network with a subnet.
+        logger.info('Create network {}'.format(self.net_data[0].keys()[0]))
+        network = self.create_network(
+            os_conn,
+            self.net_data[0].keys()[0], tenant_name=SERVTEST_TENANT
+        )
+
+        logger.info('Create subnet {}'.format(self.net_data[0].keys()[0]))
+        subnet = self.create_subnet(
+            os_conn,
+            network,
+            self.net_data[0][self.net_data[0].keys()[0]],
+            tenant_name=SERVTEST_TENANT
+        )
+
+        # Check that the network is created.
+        assert_true(
+            os_conn.get_network(network['name'])['id'] == network['id']
+        )
+
+        # Create security group with rules for ssh and ping
+        security_group = os_conn.create_sec_group_for_ssh().id
+
+        # Launch instances VM_1 and VM_2 in the tenant network net_1
+        self.create_instances(
+            os_conn=os_conn, vm_count=1,
+            nics=[{'net-id': network['id']}], security_group=security_group
+        )
+
+        # Add net_1 to the default router
+        router = os_conn.get_router(os_conn.get_network('net04_ext'))
+        self.add_subnet_to_router(
+            os_conn,
+            router['id'], subnet['id'])
+
+        self.create_and_assign_floating_ip(os_conn=os_conn)
+
+        # Send ping from instances VM_1 and VM_2 to 8.8.8.8
+        # or other outside ip.
+        srv_list = os_conn.get_servers()
+        self.check_connection_vms(
+            os_conn=os_conn, srv_list=srv_list,
+            ip=['8.8.8.8']
+        )
+
+    @test(depends_on=[dvs_vcenter_bvt_2],
+          groups=["dvs_vcenter_5_instances", 'dvs_vcenter_system'])
+    @log_snapshot_after_test
+    def dvs_vcenter_5_instances(self):
+        """Check creation of instances in one group simultaneously
+
+        Scenario:
+            1. Revert snapshot to dvs_vcenter_bvt_2.
+            2. Create 5 instances of vcenter and 5 of nova simultaneously.
+
+        Duration 1.8 hours
+
+        """
+        self.env.revert_snapshot("dvs_vcenter_bvt_2")
+
+        cluster_id = self.fuel_web.get_last_created_cluster()
+
+        # Create 5 instances of vcenter and 5 of nova simultaneously.
+        os_ip = self.fuel_web.get_public_vip(cluster_id)
+        os_conn = os_actions.OpenStackActions(
+            os_ip, SERVTEST_USERNAME,
+            SERVTEST_PASSWORD,
+            SERVTEST_TENANT)
+
+        network = os_conn.nova.networks.find(label='net04')
+        self.create_instances(
+            os_conn=os_conn, vm_count=5,
+            nics=[{'net-id': network.id}])
+
+    @test(depends_on=[dvs_vcenter_bvt_2],
+          groups=["dvs_vcenter_uninstall", 'dvs_vcenter_system'])
+    @log_snapshot_after_test
+    def dvs_vcenter_uninstall(self):
+        """Verify that the Fuel DVS plugin cannot be uninstalled
+        while the environment it is deployed in exists.
+
+        Scenario:
+            1. Revert snapshot to dvs_vcenter_bvt_2.
+            2. Try to uninstall dvs plugin.
+            3. Check that the plugin is still installed.
+
+        Duration 1.8 hours
+
+        """
+        self.env.revert_snapshot("dvs_vcenter_bvt_2")
+
+        # Try to uninstall dvs plugin
+        cmd = 'fuel plugins --remove {}==1.1.0'.format(self.plugin_name)
+        assert_true(
+            self.env.d_env.get_admin_remote().execute(
+                cmd)['exit_code'] == 1,
+            "Plugin {} was removed from a deployed "
+            "environment".format(self.plugin_name))
+
+        # Check that the plugin is not removed
+        output = list(self.env.d_env.get_admin_remote().execute(
+            'fuel plugins list')['stdout'])
+
+        assert_true(
+            self.plugin_name in output[-1].split(' '),
+            "Plugin {} is not in the plugin list".format(self.plugin_name)
+        )
diff --git a/plugin_test/utils/jenkins/system_tests.sh b/plugin_test/utils/jenkins/system_tests.sh
new file mode 100644
index 0000000..b6e6202
--- /dev/null
+++ b/plugin_test/utils/jenkins/system_tests.sh
@@ -0,0 +1,494 @@
+#!/bin/sh
+PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+# functions
+
+INVALIDOPTS_ERR=100
+NOJOBNAME_ERR=101
+NOISOPATH_ERR=102
+NOTASKNAME_ERR=103
+NOWORKSPACE_ERR=104
+DEEPCLEAN_ERR=105
+MAKEISO_ERR=106
+NOISOFOUND_ERR=107
+COPYISO_ERR=108
+SYMLINKISO_ERR=109
+CDWORKSPACE_ERR=110
+ISODOWNLOAD_ERR=111
+INVALIDTASK_ERR=112
+
+# Defaults
+
+export REBOOT_TIMEOUT=${REBOOT_TIMEOUT:-5000}
+export ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT=${ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT:-true}
+
+# Export settings
+
+ShowHelp() {
+cat << EOF
+System Tests Script
+
+It can perform several actions depending on the Jenkins JOB_NAME it is run from,
+or it can take names from exported environment variables or command line options
+if you need to override them.
+
+-w (dir)    - Path to workspace where the fuelweb git repository was checked out.
+              Uses Jenkins' WORKSPACE if not set
+-e (name)   - Directly specify environment name used in tests.
+              Uses the ENV_NAME variable if set.
+-j (name)   - Name of this job. Determines the ISO name and task name, and is used by tests.
+              Uses Jenkins' JOB_NAME if not set
+-v          - Do not use a virtual environment
+-V (dir)    - Path to python virtual environment
+-i (file)   - Full path to ISO file to build or use for tests.
+              Made from iso dir and name if not set.
+-t (name)   - Name of the task this script should perform. Should be one of the defined ones.
+              Taken from the Jenkins job's suffix if not set.
+-o (str)    - Allows you to pass any extra command line option to the test job if you
+              want to use some parameters.
+-a (str)    - Allows you to pass NOSE_ATTR to the test job if you want
+              to use some parameters.
+-A (str)    - Allows you to pass NOSE_EVAL_ATTR if you want to enter attributes
+              as python expressions.
+-m (name)   - Use this mirror to build the ISO from.
+              Uses 'srt' if not set.
+-U          - ISO URL for tests.
+              Null by default.
+-r (yes/no) - Should the built ISO file be placed with a build number tag and
+              symlinked to the last build, or just copied over the last file.
+-b (num)    - Allows you to override Jenkins' build number if you need to.
+-l (dir)    - Path to logs directory. Can be set by the LOGS_DIR environment variable.
+              Uses WORKSPACE/logs if not set.
+-L          - Disable the fuel_logs tool that extracts the useful lines from Astute and Puppet logs
+              within the Fuel log snapshot or on the live Fuel Master node.
+-d          - Dry run mode. Only show what would be done and do nothing.
+              Useful for debugging.
+-k          - Keep previously created test environment before tests run
+-K          - Keep test environment after tests are finished
+-h          - Show this help page
+
+Most variables are guessed from the Jenkins job name, but they can be overridden
+by an exported variable before the script is run or by one of the command line options.
+
+You can override the following variables using export VARNAME="value" before running this script
+WORKSPACE  - path to the directory where the Fuelweb repository was checked out by Jenkins or manually
+JOB_NAME   - name of the Jenkins job that determines which task should be done and the ISO file name.
+
+If the task name is "iso" it will make an ISO file
+Other defined names will run Nose tests using the previously built ISO file.
+
+The ISO file name is taken from the job name prefix
+The task name is taken from the job name suffix
+The separator is one dot '.'
+
+For example, if JOB_NAME is:
+mytest.somestring.iso
+ISO name: mytest.somestring.iso
+Task name: iso
+If run with such a JOB_NAME, an iso file with the name mytest.somestring.iso will be created
+
+If JOB_NAME is:
+mytest.somestring.node
+ISO name: mytest.somestring.iso
+Task name: node
+If the script is run with this JOB_NAME, node tests will use the ISO file mytest.somestring.iso.
+
+First you should run the mytest.somestring.iso job to create mytest.somestring.iso.
+Then you can run the mytest.somestring.node job to start tests using mytest.somestring.iso, and other tests too.
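+
+# For reference, the name guessing in GlobalVariables below uses POSIX
+# parameter expansion; with a hypothetical JOB_NAME="mytest.somestring.node":
+#   ${JOB_NAME%.*}.iso  ->  "mytest.somestring.iso"  (ISO name)
+#   ${JOB_NAME##*.}     ->  "node"                   (task name)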
+EOF
+}
+
+GlobalVariables() {
+    # where built ISOs should be placed
+    # use hardcoded default if not set before by export
+    ISO_DIR="${ISO_DIR:=/var/www/fuelweb-iso}"
+
+    # name of iso file
+    # taken from jenkins job prefix
+    # if not set before by variable export
+    if [ -z "${ISO_NAME}" ]; then
+        ISO_NAME="${JOB_NAME%.*}.iso"
+    fi
+
+    # full path where iso file should be placed
+    # made from iso name and path to iso shared directory
+    # if it was not overridden by options or export
+    if [ -z "${ISO_PATH}" ]; then
+        ISO_PATH="${ISO_DIR}/${ISO_NAME}"
+    fi
+
+    # what task should be run
+    # it's taken from the jenkins job name suffix if not set by options
+    if [ -z "${TASK_NAME}" ]; then
+        TASK_NAME="${JOB_NAME##*.}"
+    fi
+
+    # do we want to keep ISOs for each build or just copy over a single file
+    ROTATE_ISO="${ROTATE_ISO:=yes}"
+
+    # choose mirror to build iso from. Default is 'srt' for Saratov's mirror
+    # you can change the mirror by exporting the USE_MIRROR variable before running this script
+    USE_MIRROR="${USE_MIRROR:=srt}"
+
+    # only show what commands would be executed but do nothing
+    # this feature is useful if you want to debug this script's behaviour
+    DRY_RUN="${DRY_RUN:=no}"
+
+    VENV="${VENV:=yes}"
+}
+
+GetoptsVariables() {
+    while getopts ":w:j:i:t:o:a:A:m:U:r:b:V:l:LdkKe:vh" opt; do
+        case $opt in
+            w)
+                WORKSPACE="${OPTARG}"
+                ;;
+            j)
+                JOB_NAME="${OPTARG}"
+                ;;
+            i)
+                ISO_PATH="${OPTARG}"
+                ;;
+            t)
+                TASK_NAME="${OPTARG}"
+                ;;
+            o)
+                TEST_OPTIONS="${TEST_OPTIONS} ${OPTARG}"
+                ;;
+            a)
+                NOSE_ATTR="${OPTARG}"
+                ;;
+            A)
+                NOSE_EVAL_ATTR="${OPTARG}"
+                ;;
+            m)
+                USE_MIRROR="${OPTARG}"
+                ;;
+            U)
+                ISO_URL="${OPTARG}"
+                ;;
+            r)
+                ROTATE_ISO="${OPTARG}"
+                ;;
+            b)
+                BUILD_NUMBER="${OPTARG}"
+                ;;
+            V)
+                VENV_PATH="${OPTARG}"
+                ;;
+            l)
+                LOGS_DIR="${OPTARG}"
+                ;;
+            L)
+                FUELLOGS_TOOL="no"
+                ;;
+            k)
+                KEEP_BEFORE="yes"
+                ;;
+            K)
+                KEEP_AFTER="yes"
+                ;;
+            e)
+                ENV_NAME="${OPTARG}"
+                ;;
+            d)
+                DRY_RUN="yes"
+                ;;
+            v)
+                VENV="no"
+                ;;
+            h)
+                ShowHelp
+                exit 0
+                ;;
+            \?)
+                echo "Invalid option: -$OPTARG"
+                ShowHelp
+                exit $INVALIDOPTS_ERR
+                ;;
+            :)
+                echo "Option -$OPTARG requires an argument."
+                ShowHelp
+                exit $INVALIDOPTS_ERR
+                ;;
+        esac
+    done
+}
+
+CheckVariables() {
+
+    if [ -z "${JOB_NAME}" ]; then
+        echo "Error! JOB_NAME is not set!"
+        exit $NOJOBNAME_ERR
+    fi
+
+    if [ -z "${ISO_PATH}" ]; then
+        echo "Error! ISO_PATH is not set!"
+        exit $NOISOPATH_ERR
+    fi
+
+    if [ -z "${TASK_NAME}" ]; then
+        echo "Error! TASK_NAME is not set!"
+        exit $NOTASKNAME_ERR
+    fi
+
+    if [ -z "${WORKSPACE}" ]; then
+        echo "Error! WORKSPACE is not set!"
+        exit $NOWORKSPACE_ERR
+    fi
+}
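+
+# Note on ISO rotation in MakeISO below (hypothetical values): with
+# ROTATE_ISO=yes, BUILD_NUMBER=42 and ISO_PATH=/var/www/fuelweb-iso/mytest.iso,
+# the image is stored as /var/www/fuelweb-iso/mytest_42.iso and mytest.iso is
+# symlinked to it; with ROTATE_ISO=no the file is just copied over mytest.iso.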
+
+MakeISO() {
+    # Create iso file to be used in tests
+
+    # clean previous garbage
+    if [ "${DRY_RUN}" = "yes" ]; then
+        echo make deep_clean
+    else
+        make deep_clean
+    fi
+    ec="${?}"
+
+    if [ "${ec}" -gt "0" ]; then
+        echo "Error! Deep clean failed!"
+        exit $DEEPCLEAN_ERR
+    fi
+
+    # create ISO file
+    export USE_MIRROR
+    if [ "${DRY_RUN}" = "yes" ]; then
+        echo make iso
+    else
+        make iso
+    fi
+    ec=$?
+
+    if [ "${ec}" -gt "0" ]; then
+        echo "Error making ISO!"
+        exit $MAKEISO_ERR
+    fi
+
+    if [ "${DRY_RUN}" = "yes" ]; then
+        ISO="${WORKSPACE}/build/iso/fuel.iso"
+    else
+        ISO="`ls ${WORKSPACE}/build/iso/*.iso | head -n 1`"
+        # check that the ISO file exists
+        if [ ! -f "${ISO}" ]; then
+            echo "Error! ISO file not found!"
+            exit $NOISOFOUND_ERR
+        fi
+    fi
+
+    # copy ISO file to storage dir
+    # if rotation is enabled and a build number is available,
+    # save the iso to a tagged file and symlink to the last build;
+    # if rotation is not enabled just copy the iso to iso_dir
+
+    if [ "${ROTATE_ISO}" = "yes" -a "${BUILD_NUMBER}" != "" ]; then
+        # copy iso file to shared dir with revision tagged name
+        NEW_BUILD_ISO_PATH="${ISO_PATH%.iso}_${BUILD_NUMBER}.iso"
+        if [ "${DRY_RUN}" = "yes" ]; then
+            echo cp "${ISO}" "${NEW_BUILD_ISO_PATH}"
+        else
+            cp "${ISO}" "${NEW_BUILD_ISO_PATH}"
+        fi
+        ec=$?
+
+        if [ "${ec}" -gt "0" ]; then
+            echo "Error! Copy ${ISO} to ${NEW_BUILD_ISO_PATH} failed!"
+            exit $COPYISO_ERR
+        fi
+
+        # create symlink to the last built ISO file
+        if [ "${DRY_RUN}" = "yes" ]; then
+            echo ln -sf "${NEW_BUILD_ISO_PATH}" "${ISO_PATH}"
+        else
+            ln -sf "${NEW_BUILD_ISO_PATH}" "${ISO_PATH}"
+        fi
+        ec=$?
+
+        if [ "${ec}" -gt "0" ]; then
+            echo "Error! Create symlink from ${NEW_BUILD_ISO_PATH} to ${ISO_PATH} failed!"
+            exit $SYMLINKISO_ERR
+        fi
+    else
+        # just copy file to shared dir
+        if [ "${DRY_RUN}" = "yes" ]; then
+            echo cp "${ISO}" "${ISO_PATH}"
+        else
+            cp "${ISO}" "${ISO_PATH}"
+        fi
+        ec=$?
+
+        if [ "${ec}" -gt "0" ]; then
+            echo "Error! Copy ${ISO} to ${ISO_PATH} failed!"
+            exit $COPYISO_ERR
+        fi
+    fi
+
+    echo "Finished building ISO: ${ISO_PATH}"
+    exit 0
+}
+
+CdWorkSpace() {
+    # chdir into workspace or fail if we could not
+    if [ "${DRY_RUN}" != "yes" ]; then
+        cd "${WORKSPACE}"
+        ec=$?
+
+        if [ "${ec}" -gt "0" ]; then
+            echo "Error! Cannot cd to WORKSPACE!"
+            exit $CDWORKSPACE_ERR
+        fi
+    else
+        echo cd "${WORKSPACE}"
+    fi
+}
+
+RunTest() {
+    # Run the test selected by task name
+
+    # check if the iso file exists
+    if [ ! -f "${ISO_PATH}" ]; then
+        if [ -z "${ISO_URL}" -a "${DRY_RUN}" != "yes" ]; then
+            echo "Error! File ${ISO_PATH} not found and no ISO_URL (-U key) for downloading!"
+            exit $NOISOFOUND_ERR
+        else
+            if [ "${DRY_RUN}" = "yes" ]; then
+                echo wget -c ${ISO_URL} -O ${ISO_PATH}
+            else
+                echo "No ${ISO_PATH} found. Trying to download file."
+                wget -c ${ISO_URL} -O ${ISO_PATH}
+                rc=$?
+                if [ $rc -ne 0 ]; then
+                    echo "Failed to fetch ISO from ${ISO_URL}"
+                    exit $ISODOWNLOAD_ERR
+                fi
+            fi
+        fi
+    fi
+
+    if [ -z "${VENV_PATH}" ]; then
+        VENV_PATH="/home/jenkins/venv-nailgun-tests"
+    fi
+
+    # activate the python virtualenv
+    if [ "${VENV}" = "yes" ]; then
+        if [ "${DRY_RUN}" = "yes" ]; then
+            echo . $VENV_PATH/bin/activate
+        else
+            . $VENV_PATH/bin/activate
+        fi
+    fi
+
+    if [ "${ENV_NAME}" = "" ]; then
+        ENV_NAME="${JOB_NAME}_system_test"
+    fi
+
+    if [ "${LOGS_DIR}" = "" ]; then
+        LOGS_DIR="${WORKSPACE}/logs"
+    fi
+
+    if [ ! -d "${LOGS_DIR}" ]; then
+        mkdir -p "${LOGS_DIR}"
+    fi
+
+    export ENV_NAME
+    export LOGS_DIR
+    export ISO_PATH
+
+    if [ "${KEEP_BEFORE}" != "yes" ]; then
+        # remove the previous environment
+        if [ "${DRY_RUN}" = "yes" ]; then
+            echo dos.py erase "${ENV_NAME}"
+        else
+            if [ -n "$(dos.py list | grep "^${ENV_NAME}\$")" ]; then
+                dos.py erase "${ENV_NAME}"
+            fi
+        fi
+    fi
+
+    # gather additional options for this nose test run
+    OPTS=""
+    if [ -n "${NOSE_ATTR}" ]; then
+        OPTS="${OPTS} -a ${NOSE_ATTR}"
+    fi
+    if [ -n "${NOSE_EVAL_ATTR}" ]; then
+        OPTS="${OPTS} -A ${NOSE_EVAL_ATTR}"
+    fi
+    if [ -n "${TEST_OPTIONS}" ]; then
+        OPTS="${OPTS} ${TEST_OPTIONS}"
+    fi
+
+    # run the python test set to create environments, deploy and test the product
+    if [ "${DRY_RUN}" = "yes" ]; then
+        echo export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}"
+        echo python run_tests.py -q --nologcapture --with-xunit ${OPTS}
+    else
+        export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}"
+        echo ${PYTHONPATH}
+        python run_tests.py -q --nologcapture --with-xunit ${OPTS}
+    fi
+    ec=$?
+
+    # Extract logs using the fuel_logs utility
+    if [ "${FUELLOGS_TOOL}" != "no" ]; then
+        for logfile in $(find "${LOGS_DIR}" -name "fail*.tar.xz" -type f);
+        do
+            ./utils/jenkins/fuel_logs.py "${logfile}" > "${logfile}.filtered.log"
+        done
+    fi
+
+    if [ "${KEEP_AFTER}" != "yes" ]; then
+        # remove the environment after tests
+        if [ "${DRY_RUN}" = "yes" ]; then
+            echo dos.py destroy "${ENV_NAME}"
+        else
+            dos.py destroy "${ENV_NAME}"
+        fi
+    fi
+
+    exit "${ec}"
+}
+
+RouteTasks() {
+    # this selector defines the task names that are recognised by this script
+    # and runs the corresponding jobs for them;
+    # running any job exits this script
+
+    case "${TASK_NAME}" in
+        test)
+            RunTest
+            ;;
+        iso)
+            MakeISO
+            ;;
+        *)
+            echo "Unknown task: ${TASK_NAME}!"
+            exit $INVALIDTASK_ERR
+            ;;
+    esac
+    exit 0
+}
+
+# MAIN
+
+# first we want to get variables from command line options
+GetoptsVariables "${@}"
+
+# then we define global variables and their defaults when needed
+GlobalVariables
+
+# check that we have all critical variables set
+CheckVariables
+
+# then we chdir into our working directory unless we dry run
+CdWorkSpace
+
+# finally we can choose what to do according to TASK_NAME
+RouteTasks
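+
+# Example invocation outside Jenkins (hypothetical values):
+#   ./plugin_test/utils/jenkins/system_tests.sh -t test \
+#       -w "${WORKSPACE}" -j fuel_plugin.7.0.test \
+#       -i /var/www/fuelweb-iso/fuel-7.0.iso \
+#       -o --group=dvs_vcenter_smoke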