Browse Source

Merge "Add scale and integration autotests from Test Plan"

Jenkins 2 years ago
parent
commit
042d03e583

+ 1
- 1
doc/test/source/test_suite_failover.rst View File

@@ -65,7 +65,7 @@ core
65 65
 Steps
66 66
 #####
67 67
 
68
-    1. Log in to the Fuel with preinstalled plugin and deployed enviroment with 3 controllers.
68
+    1. Log in to the Fuel with preinstalled plugin and deployed environment with 3 controllers and 1 compute.
69 69
     2. Log in to Horizon.
70 70
     3. Create vcenter VM and check connectivity to outside world from VM.
71 71
     4. Shutdown primary controller.

+ 1
- 1
doc/test/source/test_suite_integration.rst View File

@@ -42,7 +42,7 @@ Steps
42 42
     4. Configure interfaces on nodes.
43 43
     5. Configure network settings.
44 44
     6. Enable and configure NSX-T plugin.
45
-    7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on conrollers and compute-vmware.
45
+    7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware.
46 46
     8. Verify networks.
47 47
     9. Deploy cluster.
48 48
     10. Run OSTF.

+ 8
- 10
doc/test/source/test_suite_scale.rst View File

@@ -36,21 +36,19 @@ Steps
36 36
         * Controller
37 37
         * Controller
38 38
         * Controller
39
-        * Controller
40
-        * Cinder-vmware
41
-        * Compute-vmware
39
+        * Compute
42 40
     4. Configure interfaces on nodes.
43 41
     5. Configure network settings.
44 42
     6. Enable and configure NSX-T plugin.
45
-    7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on conrollers and compute-vmware.
43
+    7. Configure VMware vCenter Settings. Add vSphere clusters and configure Nova Compute instance on controllers.
46 44
     8. Deploy cluster.
47 45
     9. Run OSTF.
48
-    10. Launch 1 KVM and 1 vcenter VMs.
49
-    11. Remove node with controller role.
46
+    10. Launch 1 vcenter instance and 1 nova instance.
47
+    11. Add 2 controller nodes.
50 48
     12. Redeploy cluster.
51 49
     13. Check that all instances are in place.
52 50
     14. Run OSTF.
53
-    15. Add controller.
51
+    15. Delete 2 controller nodes.
54 52
     16. Redeploy cluster.
55 53
     17. Check that all instances are in place.
56 54
     18. Run OSTF.
@@ -103,7 +101,7 @@ Steps
103 101
     6. Enable and configure NSX-T plugin.
104 102
     7. Deploy cluster.
105 103
     8. Run OSTF.
106
-    9. Launch KVM vm.
104
+    9. Launch instance.
107 105
     10. Add node with compute role.
108 106
     11. Redeploy cluster.
109 107
     12. Check that all instances are in place.
@@ -163,12 +161,12 @@ Steps
163 161
     8. Deploy cluster.
164 162
     9. Run OSTF.
165 163
     10. Launch vcenter vm.
166
-    11. Remove node with compute-vmware role.
164
+    11. Add node with compute-vmware role.
167 165
     12. Reconfigure vcenter compute clusters.
168 166
     13. Redeploy cluster.
169 167
     14. Check vm instance has been removed.
170 168
     15. Run OSTF.
171
-    16. Add node with compute-vmware role.
169
+    16. Remove node with compute-vmware role from base installation.
172 170
     17. Reconfigure vcenter compute clusters.
173 171
     18. Redeploy cluster.
174 172
     19. Run OSTF.

+ 1
- 1
doc/test/source/test_suite_system.rst View File

@@ -41,7 +41,7 @@ Steps
41 41
     4. Configure interfaces on nodes.
42 42
     5. Configure network settings.
43 43
     6. Enable and configure NSX-T plugin.
44
-    7. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instance on conrollers.
44
+    7. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instance on controllers.
45 45
     8. Verify networks.
46 46
     9. Deploy cluster.
47 47
     10. Run OSTF.

+ 394
- 0
plugin_test/helpers/openstack.py View File

@@ -0,0 +1,394 @@
1
+"""Copyright 2016 Mirantis, Inc.
2
+Licensed under the Apache License, Version 2.0 (the "License"); you may
3
+not use this file except in compliance with the License. You may obtain
4
+a copy of the License at
5
+http://www.apache.org/licenses/LICENSE-2.0
6
+Unless required by applicable law or agreed to in writing, software
7
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
8
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
9
+License for the specific language governing permissions and limitations
10
+under the License.
11
+"""
12
+import time
13
+
14
+import paramiko
15
+from proboscis.asserts import assert_true
16
+from devops.helpers.helpers import icmp_ping
17
+from devops.helpers.helpers import tcp_ping
18
+from devops.helpers.helpers import wait
19
+
20
+from fuelweb_test import logger
21
+from fuelweb_test.helpers.ssh_manager import SSHManager
22
+from fuelweb_test.helpers.utils import pretty_log
23
+from helpers import settings
24
+
25
+
26
+# Defaults
27
+external_net_name = settings.ADMIN_NET
28
+zone_image_maps = {
29
+    'vcenter': 'TestVM-VMDK',
30
+    'nova': 'TestVM',
31
+    'vcenter-cinder': 'TestVM-VMDK'
32
+}
33
+instance_creds = (settings.VM_USER, settings.VM_PASS)
34
+
35
+
36
+def create_instance(os_conn, net=None, az='nova', sg_names=None,
37
+                    flavor_name='m1.micro', timeout=180, **kwargs):
38
+    """Create instance with specified az and flavor.
39
+
40
+    :param os_conn: OpenStack
41
+    :param net: network object (default is private net)
42
+    :param az: availability zone name
43
+    :param sg_names: list of security group names
44
+    :param flavor_name: name of flavor
45
+    :param timeout: seconds to wait creation
46
+    :return: vm
47
+    """
48
+    sg_names = sg_names if sg_names else ['default']
49
+
50
+    def find_by_name(objects, name):
51
+        for obj in objects:
52
+            if obj.name == name:
53
+                return obj
54
+
55
+    image = find_by_name(os_conn.nova.images.list(), zone_image_maps[az])
56
+    flavor = find_by_name(os_conn.nova.flavors.list(), flavor_name)
57
+
58
+    net = net if net else os_conn.get_network(settings.PRIVATE_NET)
59
+    sg = [os_conn.get_security_group(name) for name in sg_names]
60
+
61
+    vm = os_conn.create_server(availability_zone=az,
62
+                               timeout=timeout,
63
+                               image=image,
64
+                               net_id=net['id'],
65
+                               security_groups=sg,
66
+                               flavor_id=flavor.id,
67
+                               **kwargs)
68
+    return vm
69
+
70
+
71
+def check_instances_state(os_conn):
72
+    """Check that instances were not deleted and have 'active' status."""
73
+    instances = os_conn.nova.servers.list()
74
+    for inst in instances:
75
+        assert_true(not os_conn.is_srv_deleted(inst))
76
+        assert_true(os_conn.get_instance_detail(inst).status == 'ACTIVE')
77
+
78
+
79
+def check_connection_vms(ip_pair, command='pingv4', result_of_command=0,
80
+                         timeout=30, interval=5):
81
+    """Check network connectivity between instances.
82
+
83
+    :param ip_pair: type dict, {ip_from: [ip_to1, ip_to2, etc.]}
84
+    :param command: type string, key 'pingv4', 'pingv6' or 'arping'
85
+    :param result_of_command: type integer, exit code of command execution
86
+    :param timeout: wait to get expected result
87
+    :param interval: interval of executing command
88
+    """
89
+    commands = {
90
+        'pingv4': 'ping -c 5 {}',
91
+        'pingv6': 'ping6 -c 5 {}',
92
+        'arping': 'sudo arping -I eth0 {}'
93
+    }
94
+
95
+    msg = 'Command "{0}", Actual exit code is NOT {1}'
96
+    for ip_from in ip_pair:
97
+        with get_ssh_connection(ip_from, instance_creds[0],
98
+                                instance_creds[1]) as ssh:
99
+            for ip_to in ip_pair[ip_from]:
100
+                logger.info('Check connection from {0} to {1}'.format(
101
+                    ip_from, ip_to))
102
+                cmd = commands[command].format(ip_to)
103
+
104
+                wait(lambda:
105
+                     execute(ssh, cmd)['exit_code'] == result_of_command,
106
+                     interval=interval,
107
+                     timeout=timeout,
108
+                     timeout_msg=msg.format(cmd, result_of_command))
109
+
110
+
111
+def check_connection_through_host(remote, ip_pair, command='pingv4',
112
+                                  result_of_command=0, timeout=30,
113
+                                  interval=5):
114
+    """Check network connectivity between instances.
115
+
116
+    :param ip_pair: type list, ips of instances
117
+    :param remote: access point IP
118
+    :param command: type string, key 'pingv4', 'pingv6' or 'arping'
119
+    :param result_of_command: type integer, exit code of command execution
120
+    :param timeout: wait to get expected result
121
+    :param interval: interval of executing command
122
+    """
123
+    commands = {
124
+        'pingv4': 'ping -c 5 {}',
125
+        'pingv6': 'ping6 -c 5 {}',
126
+        'arping': 'sudo arping -I eth0 {}'
127
+    }
128
+
129
+    msg = 'Command "{0}", Actual exit code is NOT {1}'
130
+
131
+    for ip_from in ip_pair:
132
+        for ip_to in ip_pair[ip_from]:
133
+            logger.info('Check ping from {0} to {1}'.format(ip_from, ip_to))
134
+            cmd = commands[command].format(ip_to)
135
+            wait(lambda:
136
+                 remote_execute_command(
137
+                     remote,
138
+                     ip_from,
139
+                     cmd,
140
+                     wait=timeout)['exit_code'] == result_of_command,
141
+                 interval=interval,
142
+                 timeout=timeout,
143
+                 timeout_msg=msg.format(cmd, result_of_command))
144
+
145
+
146
+def ping_each_other(ips, command='pingv4', expected_ec=0,
147
+                    timeout=30, interval=5, access_point_ip=None):
148
+    """Check network connectivity between instances.
149
+
150
+    :param ips: list, list of ips
151
+    :param command: type string, key 'pingv4', 'pingv6' or 'arping'
152
+    :param expected_ec: type integer, exit code of command execution
153
+    :param timeout: wait to get expected result
154
+    :param interval: interval of executing command
155
+    :param access_point_ip: It is used if check via host
156
+    """
157
+    ip_pair = {key: [ip for ip in ips if ip != key] for key in ips}
158
+    if access_point_ip:
159
+        check_connection_through_host(remote=access_point_ip,
160
+                                      ip_pair=ip_pair,
161
+                                      command=command,
162
+                                      result_of_command=expected_ec,
163
+                                      timeout=timeout,
164
+                                      interval=interval)
165
+    else:
166
+        check_connection_vms(ip_pair=ip_pair,
167
+                             command=command,
168
+                             result_of_command=expected_ec,
169
+                             timeout=timeout,
170
+                             interval=interval)
171
+
172
+
173
+def create_and_assign_floating_ips(os_conn, instances):
174
+    """Associate floating ips with specified instances.
175
+
176
+    :param os_conn: type object, openstack
177
+    :param instances: type list, instances
178
+    """
179
+    fips = []
180
+    for instance in instances:
181
+        ip = os_conn.assign_floating_ip(instance).ip
182
+        fips.append(ip)
183
+        wait(lambda: icmp_ping(ip), timeout=60 * 5, interval=5)
184
+    return fips
185
+
186
+
187
+def get_ssh_connection(ip, username, userpassword, timeout=30, port=22):
188
+    """Get ssh to host.
189
+
190
+    :param ip: string, host ip to connect to
191
+    :param username: string, a username to use for authentication
192
+    :param userpassword: string, a password to use for authentication
193
+    :param timeout: timeout (in seconds) for the TCP connection
194
+    :param port: host port to connect to
195
+    """
196
+    ssh = paramiko.SSHClient()
197
+    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
198
+    ssh.connect(ip, port=port, username=username,
199
+                password=userpassword, timeout=timeout)
200
+    return ssh
201
+
202
+
203
+def execute(ssh_client, command):
204
+    """Execute command on remote host.
205
+
206
+    :param ssh_client: SSHClient to instance
207
+    :param command: type string, command to execute
208
+    """
209
+    channel = ssh_client.get_transport().open_session()
210
+    channel.exec_command(command)
211
+    result = {
212
+        'stdout': channel.recv(1024),
213
+        'stderr': channel.recv_stderr(1024),
214
+        'exit_code': channel.recv_exit_status()
215
+    }
216
+    return result
217
+
218
+
219
+def remote_execute_command(instance1_ip, instance2_ip, command, wait=30):
220
+    """Check execute remote command.
221
+
222
+    :param instance1_ip: string, instance ip connect from
223
+    :param instance2_ip: string, instance ip connect to
224
+    :param command: string, remote command
225
+    :param wait: integer, time to wait available ip of instances
226
+    """
227
+    with get_ssh_connection(instance1_ip, *instance_creds) as ssh:
228
+        interm_transp = ssh.get_transport()
229
+        try:
230
+            logger.info('Opening channel between VMs {0} and {1}'.format(
231
+                instance1_ip, instance2_ip))
232
+            interm_chan = interm_transp.open_channel('direct-tcpip',
233
+                                                     (instance2_ip, 22),
234
+                                                     (instance1_ip, 0))
235
+        except Exception as e:
236
+            message = '{} Wait to update sg rules. Try to open channel again'
237
+            logger.info(message.format(e))
238
+            time.sleep(wait)
239
+            interm_chan = interm_transp.open_channel('direct-tcpip',
240
+                                                     (instance2_ip, 22),
241
+                                                     (instance1_ip, 0))
242
+        transport = paramiko.Transport(interm_chan)
243
+        transport.start_client()
244
+
245
+        logger.info("Passing authentication to VM")
246
+        transport.auth_password(
247
+            instance_creds[0], instance_creds[1])
248
+        channel = transport.open_session()
249
+        channel.get_pty()
250
+        channel.fileno()
251
+        channel.exec_command(command)
252
+
253
+        logger.debug("Receiving exit_code, stdout, stderr")
254
+        result = {
255
+            'stdout': channel.recv(1024),
256
+            'stderr': channel.recv_stderr(1024),
257
+            'exit_code': channel.recv_exit_status()
258
+        }
259
+        logger.debug('Command: {}'.format(command))
260
+        logger.debug(pretty_log(result))
261
+
262
+        logger.debug('Closing channel')
263
+        channel.close()
264
+
265
+        return result
266
+
267
+
268
+def get_role(os_conn, role_name):
269
+    """Get role by name."""
270
+    role_list = os_conn.keystone.roles.list()
271
+    for role in role_list:
272
+        if role.name == role_name:
273
+            return role
274
+
275
+
276
+def add_role_to_user(os_conn, user_name, role_name, tenant_name):
277
+    """Assign role to user.
278
+
279
+    :param os_conn: type object
280
+    :param user_name: type string,
281
+    :param role_name: type string
282
+    :param tenant_name: type string
283
+    """
284
+    tenant_id = os_conn.get_tenant(tenant_name).id
285
+    user_id = os_conn.get_user(user_name).id
286
+    role_id = get_role(os_conn, role_name).id
287
+    os_conn.keystone.roles.add_user_role(user_id, role_id, tenant_id)
288
+
289
+
290
+def check_service(ip, commands):
291
+    """Check that required nova services are running on controller.
292
+
293
+    :param ip: ip address of node
294
+    :param commands: type list, nova commands to execute on controller,
295
+                     example of commands:
296
+                     ['nova-manage service list | grep vcenter-vmcluster1']
297
+    """
298
+    ssh_manager = SSHManager()
299
+    ssh_manager.check_call(ip=ip, command='source openrc')
300
+
301
+    for cmd in commands:
302
+        wait(lambda:
303
+             ':-)' in ssh_manager.check_call(ip=ip, command=cmd).stdout[-1],
304
+             timeout=200)
305
+
306
+
307
+def create_instances(os_conn, nics, vm_count=1,
308
+                     security_groups=None, available_hosts=None,
309
+                     flavor_name='m1.micro'):
310
+    """Create VMs on available hypervisors.
311
+
312
+    :param os_conn: type object, openstack
313
+    :param vm_count: type integer, count of VMs to create
314
+    :param nics: type dictionary, neutron networks to assign to instance
315
+    :param security_groups: list of security group names
316
+    :param available_hosts: available hosts for creating instances
317
+    :param flavor_name: name of flavor
318
+    """
319
+    def find_by_name(objects, name):
320
+        for obj in objects:
321
+            if obj.name == name:
322
+                return obj
323
+
324
+    # Get list of available images, flavors and hypervisors
325
+    instances = []
326
+    images = os_conn.nova.images.list()
327
+    flavor = find_by_name(os_conn.nova.flavors.list(), flavor_name)
328
+
329
+    if not available_hosts:
330
+        available_hosts = os_conn.nova.services.list(binary='nova-compute')
331
+
332
+    for host in available_hosts:
333
+        image = find_by_name(images, zone_image_maps[host.zone])
334
+
335
+        instance = os_conn.nova.servers.create(
336
+            flavor=flavor,
337
+            name='test_{0}'.format(image.name),
338
+            image=image,
339
+            min_count=vm_count,
340
+            availability_zone='{0}:{1}'.format(host.zone, host.host),
341
+            nics=nics, security_groups=security_groups)
342
+
343
+        instances.append(instance)
344
+    return instances
345
+
346
+
347
+def verify_instance_state(os_conn, instances=None, expected_state='ACTIVE',
348
+                          boot_timeout=300):
349
+    """Verify that current state of each instance/s is expected.
350
+
351
+    :param os_conn: type object, openstack
352
+    :param instances: type list, list of created instances
353
+    :param expected_state: type string, expected state of instance
354
+    :param boot_timeout: type int, time in seconds to build instance
355
+    """
356
+    if not instances:
357
+        instances = os_conn.nova.servers.list()
358
+    for instance in instances:
359
+        wait(lambda:
360
+             os_conn.get_instance_detail(instance).status == expected_state,
361
+             timeout=boot_timeout,
362
+             timeout_msg='Timeout is reached. '
363
+                         'Current state of VM {0} is {1}.'
364
+                         'Expected state is {2}'.format(
365
+                             instance.name,
366
+                             os_conn.get_instance_detail(instance).status,
367
+                             expected_state))
368
+
369
+
370
+def create_access_point(os_conn, nics, security_groups):
371
+    """Create access point.
372
+
373
+    Creating instance with floating ip as access point to instances
374
+    with private ip in the same network.
375
+
376
+    :param os_conn: type object, openstack
377
+    :param nics: type dictionary, neutron networks to assign to instance
378
+    :param security_groups: A list of security group names
379
+    """
380
+    # Get any available host
381
+    host = os_conn.nova.services.list(binary='nova-compute')[0]
382
+
383
+    access_point = create_instances(  # create access point server
384
+        os_conn=os_conn, nics=nics,
385
+        vm_count=1,
386
+        security_groups=security_groups,
387
+        available_hosts=[host]).pop()
388
+
389
+    verify_instance_state(os_conn)
390
+
391
+    access_point_ip = os_conn.assign_floating_ip(
392
+        access_point, use_neutron=True)['floating_ip_address']
393
+    wait(lambda: tcp_ping(access_point_ip, 22), timeout=60 * 5, interval=5)
394
+    return access_point, access_point_ip

+ 2
- 0
plugin_test/run_tests.py View File

@@ -43,6 +43,8 @@ class CloseSSHConnectionsPlugin(Plugin):
43 43
 
44 44
 def import_tests():
45 45
     from tests import test_plugin_nsxt  # noqa
46
+    from tests import test_plugin_integration  # noqa
47
+    from tests import test_plugin_scale  # noqa
46 48
 
47 49
 
48 50
 def run_tests():

+ 175
- 0
plugin_test/tests/test_plugin_integration.py View File

@@ -0,0 +1,175 @@
1
+"""Copyright 2016 Mirantis, Inc.
2
+
3
+Licensed under the Apache License, Version 2.0 (the "License"); you may
4
+not use this file except in compliance with the License. You may obtain
5
+a copy of the License at
6
+
7
+http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+Unless required by applicable law or agreed to in writing, software
10
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12
+License for the specific language governing permissions and limitations
13
+under the License.
14
+"""
15
+
16
+from proboscis import test
17
+
18
+from fuelweb_test.helpers.decorators import log_snapshot_after_test
19
+from fuelweb_test.settings import DEPLOYMENT_MODE
20
+from fuelweb_test.tests.base_test_case import SetupEnvironment
21
+from tests.base_plugin_test import TestNSXtBase
22
+
23
+
24
+@test(groups=['nsxt_plugin', 'nsxt_integration'])
25
+class TestNSXtIntegration(TestNSXtBase):
26
+    """Tests from test plan that have been marked as 'Automated'."""
27
+
28
+    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
29
+          groups=['nsxt_ceilometer'])
30
+    @log_snapshot_after_test
31
+    def nsxt_ceilometer(self):
32
+        """Check environment deployment with Fuel NSX-T plugin and Ceilometer.
33
+
34
+        Scenario:
35
+            1. Install NSX-T plugin to Fuel Master node with 5 slaves.
36
+            2. Create new environment with the following parameters:
37
+                * Compute: KVM/QEMU with vCenter
38
+                * Networking: Neutron with NSX-T plugin
39
+                * Storage: default
40
+                * Additional services: Ceilometer
41
+            3. Add nodes with the following roles:
42
+                * Controller + Mongo
43
+                * Controller + Mongo
44
+                * Controller + Mongo
45
+                * Compute-vmware
46
+                * Compute
47
+            4. Configure interfaces on nodes.
48
+            5. Enable plugin and configure network settings.
49
+            6. Configure VMware vCenter Settings.
50
+               Add 2 vSphere clusters and configure Nova Compute instances on
51
+               controllers and compute-vmware.
52
+            7. Verify networks.
53
+            8. Deploy cluster.
54
+            9. Run OSTF.
55
+
56
+        Duration: 180
57
+        """
58
+        # Install NSX-T plugin to Fuel Master node with 5 slaves
59
+        self.show_step(1)
60
+        self.env.revert_snapshot('ready_with_5_slaves')
61
+        self.install_nsxt_plugin()
62
+
63
+        self.show_step(2)  # Create new environment with Ceilometer
64
+        settings = self.default.cluster_settings
65
+        settings['ceilometer'] = True
66
+
67
+        cluster_id = self.fuel_web.create_cluster(
68
+            name=self.__class__.__name__,
69
+            mode=DEPLOYMENT_MODE,
70
+            settings=settings)
71
+
72
+        self.show_step(3)  # Add nodes
73
+        self.fuel_web.update_nodes(cluster_id,
74
+                                   {'slave-01': ['controller', 'mongo'],
75
+                                    'slave-02': ['controller', 'mongo'],
76
+                                    'slave-03': ['controller', 'mongo'],
77
+                                    'slave-04': ['compute-vmware'],
78
+                                    'slave-05': ['compute']})
79
+
80
+        self.show_step(4)  # Configure interfaces on nodes
81
+        self.reconfigure_cluster_interfaces(cluster_id)
82
+
83
+        self.show_step(5)  # Enable plugin and configure network settings
84
+        self.enable_plugin(cluster_id)
85
+
86
+        # Configure VMware settings. 2 clusters, 2 Nova Compute instances:
87
+        # 1 on controllers and 1 on compute-vmware
88
+        self.show_step(6)
89
+        target_node = self.fuel_web.get_nailgun_node_by_name('slave-04')
90
+        self.fuel_web.vcenter_configure(cluster_id,
91
+                                        target_node_2=target_node['hostname'],
92
+                                        multiclusters=True)
93
+        self.show_step(7)  # Verify networks
94
+        self.fuel_web.verify_network(cluster_id)
95
+
96
+        self.show_step(8)  # Deploy cluster
97
+        self.fuel_web.deploy_cluster_wait(cluster_id)
98
+
99
+        self.show_step(9)  # Run OSTF
100
+        self.fuel_web.run_ostf(cluster_id, timeout=3600,
101
+                               test_sets=['smoke', 'sanity', 'ha',
102
+                                          'tests_platform'])
103
+
104
+    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
105
+          groups=['nsxt_ceph'])
106
+    @log_snapshot_after_test
107
+    def nsxt_ceph(self):
108
+        """Check environment deployment with Fuel NSX-T plugin and Ceph.
109
+
110
+        Scenario:
111
+            1. Install NSX-T plugin to Fuel Master node with 5 slaves.
112
+            2. Create new environment with the following parameters:
113
+                * Compute: KVM/QEMU with vCenter
114
+                * Networking: Neutron with NSX-T plugin
115
+                * Storage: Ceph
116
+                * Additional services: default
117
+            3. Add nodes with the following roles:
118
+                * Controller
119
+                * Ceph-OSD
120
+                * Ceph-OSD
121
+                * Ceph-OSD
122
+                * Compute
123
+            4. Configure interfaces on nodes.
124
+            5. Enable plugin and configure network settings.
125
+            6. Configure VMware vCenter Settings. Add 1 vSphere cluster and
126
+               configure Nova Compute instance on controller.
127
+            7. Verify networks.
128
+            8. Deploy cluster.
129
+            9. Run OSTF.
130
+
131
+        Duration: 180
132
+        """
133
+        # Install NSX-T plugin to Fuel Master node with 5 slaves
134
+        self.show_step(1)
135
+        self.env.revert_snapshot('ready_with_5_slaves')
136
+        self.install_nsxt_plugin()
137
+
138
+        self.show_step(2)  # Create new environment with Ceph
139
+        settings = self.default.cluster_settings
140
+        settings['volumes_lvm'] = False
141
+        settings['volumes_ceph'] = True
142
+        settings['images_ceph'] = True
143
+        settings['ephemeral_ceph'] = True
144
+        settings['objects_ceph'] = True
145
+        cluster_id = self.fuel_web.create_cluster(
146
+            name=self.__class__.__name__,
147
+            mode=DEPLOYMENT_MODE,
148
+            settings=settings)
149
+
150
+        self.show_step(3)  # Add nodes
151
+        self.fuel_web.update_nodes(cluster_id,
152
+                                   {'slave-01': ['controller'],
153
+                                    'slave-02': ['ceph-osd'],
154
+                                    'slave-03': ['ceph-osd'],
155
+                                    'slave-04': ['ceph-osd'],
156
+                                    'slave-05': ['compute']})
157
+
158
+        self.show_step(4)  # Configure interfaces on nodes
159
+        self.reconfigure_cluster_interfaces(cluster_id)
160
+
161
+        self.show_step(5)  # Enable plugin and configure network settings
162
+        self.enable_plugin(cluster_id)
163
+
164
+        # Configure VMware settings. 1 cluster, 1 Compute instance: controller
165
+        self.show_step(6)
166
+        self.fuel_web.vcenter_configure(cluster_id)
167
+
168
+        self.show_step(7)  # Verify networks
169
+        self.fuel_web.verify_network(cluster_id)
170
+
171
+        self.show_step(8)  # Deploy cluster
172
+        self.fuel_web.deploy_cluster_wait(cluster_id)
173
+
174
+        self.show_step(9)  # Run OSTF
175
+        self.fuel_web.run_ostf(cluster_id)

+ 359
- 0
plugin_test/tests/test_plugin_scale.py View File

@@ -0,0 +1,359 @@
1
+"""Copyright 2016 Mirantis, Inc.
2
+
3
+Licensed under the Apache License, Version 2.0 (the "License"); you may
4
+not use this file except in compliance with the License. You may obtain
5
+a copy of the License at
6
+
7
+http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+Unless required by applicable law or agreed to in writing, software
10
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12
+License for the specific language governing permissions and limitations
13
+under the License.
14
+"""
15
+
16
+from proboscis import test
17
+from proboscis.asserts import assert_true
18
+
19
+from fuelweb_test.helpers import os_actions
20
+from fuelweb_test.helpers.decorators import log_snapshot_after_test
21
+from fuelweb_test.settings import DEPLOYMENT_MODE
22
+from fuelweb_test.settings import SERVTEST_PASSWORD
23
+from fuelweb_test.settings import SERVTEST_TENANT
24
+from fuelweb_test.settings import SERVTEST_USERNAME
25
+from fuelweb_test.tests.base_test_case import SetupEnvironment
26
+from helpers import openstack as os_help
27
+from tests.base_plugin_test import TestNSXtBase
28
+
29
+
30
@test(groups=['nsxt_plugin', 'nsxt_scale'])
class TestNSXtScale(TestNSXtBase):
    """Scale tests from the NSX-T test plan that are marked as 'Automated'.

    Each test deploys an environment with the NSX-T plugin enabled, then
    adds/removes nodes of a given role, redeploys and verifies that running
    instances survive (or are evacuated) and OSTF still passes.
    """

    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
          groups=['nsxt_add_delete_controller'])
    @log_snapshot_after_test
    def nsxt_add_delete_controller(self):
        """Check functionality when controller has been removed or added.

        Scenario:
            1. Install NSX-T plugin to Fuel Master node with 9 slaves.
            2. Create new environment with the following parameters:
                * Compute: KVM/QEMU with vCenter
                * Networking: Neutron with NSX-T plugin
                * Storage: default
            3. Add nodes with the following roles:
                * Controller
                * Controller
                * Controller
                * Compute
            4. Configure interfaces on nodes.
            5. Enable plugin and configure network settings.
            6. Configure VMware vCenter Settings. Add vSphere cluster and
               configure Nova Compute instance on controllers.
            7. Deploy cluster.
            8. Run OSTF.
            9. Launch 1 vcenter instance and 1 KVM instance.
            10. Add 2 controller nodes.
            11. Redeploy cluster.
            12. Check that all instances are in place.
            13. Run OSTF.
            14. Remove 2 controller nodes.
            15. Redeploy cluster.
            16. Check that all instances are in place.
            17. Run OSTF.

        Duration: 180 min
        """
        # Install NSX-T plugin to Fuel Master node with 9 slaves
        self.show_step(1)
        self.env.revert_snapshot('ready_with_9_slaves')
        self.install_nsxt_plugin()

        self.show_step(2)  # Create new environment
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=self.default.cluster_settings,
            configure_ssl=False)

        self.show_step(3)  # Add nodes
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['controller'],
                                    'slave-02': ['controller'],
                                    'slave-03': ['controller'],
                                    'slave-04': ['compute']})

        self.show_step(4)  # Configure interfaces on nodes
        self.reconfigure_cluster_interfaces(cluster_id)

        self.show_step(5)  # Enable plugin and configure network settings
        self.enable_plugin(cluster_id)

        # Configure VMware settings. 1 cluster, 1 Nova Compute on controllers
        self.show_step(6)
        self.fuel_web.vcenter_configure(cluster_id)

        self.show_step(7)  # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(8)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        # Launch 1 vcenter instance and 1 KVM instance
        self.show_step(9)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)

        os_help.create_instance(os_conn)
        os_help.create_instance(os_conn, az='vcenter')

        self.show_step(10)  # Add 2 controller nodes
        self.fuel_web.update_nodes(cluster_id, {'slave-05': ['controller'],
                                                'slave-06': ['controller']})

        self.show_step(11)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(12)  # Check that all instances are in place
        os_help.check_instances_state(os_conn)

        self.show_step(13)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(14)  # Remove 2 controller nodes
        # Trailing (False, True) args: do not add the listed nodes,
        # remove them from the cluster instead (pending_addition=False,
        # pending_deletion=True) -- same pattern as the sibling tests.
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['controller'],
                                    'slave-02': ['controller']},
                                   False, True)

        self.show_step(15)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(16)  # Check that all instances are in place
        os_help.check_instances_state(os_conn)

        self.show_step(17)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=['nsxt_add_delete_compute_node'])
    @log_snapshot_after_test
    def nsxt_add_delete_compute_node(self):
        """Verify functionality when compute node has been removed or added.

        Scenario:
            1. Install NSX-T plugin to Fuel Master node with 5 slaves.
            2. Create new environment with the following parameters:
                * Compute: KVM/QEMU
                * Networking: Neutron with NSX-T plugin
                * Storage: default
                * Additional services: default
            3. Add nodes with the following roles:
                * Controller
                * Controller
                * Controller
                * Compute
            4. Configure interfaces on nodes.
            5. Enable plugin and configure network settings.
            6. Deploy cluster.
            7. Run OSTF.
            8. Launch KVM vm.
            9. Add node with compute role.
            10. Redeploy cluster.
            11. Check that instance is in place.
            12. Run OSTF.
            13. Remove node with compute role.
            14. Redeploy cluster.
            15. Check that instance is in place.
            16. Run OSTF.

        Duration: 180min
        """
        # Install NSX-T plugin to Fuel Master node with 5 slaves
        self.show_step(1)
        self.env.revert_snapshot('ready_with_5_slaves')
        self.install_nsxt_plugin()

        self.show_step(2)  # Create new environment
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=self.default.cluster_settings,
            configure_ssl=False)

        self.show_step(3)  # Add nodes
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['controller'],
                                    'slave-02': ['controller'],
                                    'slave-03': ['controller'],
                                    'slave-04': ['compute']})

        self.show_step(4)  # Configure interfaces on nodes
        self.reconfigure_cluster_interfaces(cluster_id)

        self.show_step(5)  # Enable plugin and configure network settings
        self.enable_plugin(cluster_id)

        self.show_step(6)  # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(7)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(8)  # Launch KVM vm
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)

        os_help.create_instance(os_conn)

        self.show_step(9)  # Add node with compute role
        self.fuel_web.update_nodes(cluster_id, {'slave-05': ['compute']})

        self.show_step(10)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(11)  # Check that instance is in place
        os_help.check_instances_state(os_conn)

        self.show_step(12)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(13)  # Remove node with compute role
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-04': ['compute']},
                                   False, True)

        self.show_step(14)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(15)  # Check that instance is in place
        os_help.check_instances_state(os_conn)

        self.show_step(16)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=['nsxt_add_delete_compute_vmware_node'])
    @log_snapshot_after_test
    def nsxt_add_delete_compute_vmware_node(self):
        """Verify functionality when compute-vmware has been removed or added.

        Scenario:
            1. Install NSX-T plugin to Fuel Master node with 5 slaves.
            2. Create new environment with the following parameters:
                * Compute: KVM/QEMU with vCenter
                * Networking: Neutron with NSX-T plugin
                * Storage: default
                * Additional services: default
            3. Add nodes with the following roles:
                * Controller
                * Controller
                * Controller
                * Compute-vmware
            4. Configure interfaces on nodes.
            5. Enable plugin and configure network settings.
            6. Configure VMware vCenter Settings. Add 1 vSphere cluster and
               configure Nova Compute instance on compute-vmware.
            7. Deploy cluster.
            8. Run OSTF.
            9. Launch vcenter vm.
            10. Add node with compute-vmware role.
            11. Reconfigure vcenter compute clusters.
            12. Redeploy cluster.
            13. Check that instance has been removed.
            14. Run OSTF.
            15. Remove node with compute-vmware role.
            16. Reconfigure vcenter compute clusters.
            17. Redeploy cluster.
            18. Run OSTF.

        Duration: 240 min
        """
        # Install NSX-T plugin to Fuel Master node with 5 slaves
        self.show_step(1)
        self.env.revert_snapshot('ready_with_5_slaves')
        self.install_nsxt_plugin()

        self.show_step(2)  # Create new environment
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings=self.default.cluster_settings,
            configure_ssl=False)

        self.show_step(3)  # Add nodes
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['controller'],
                                    'slave-02': ['controller'],
                                    'slave-03': ['controller'],
                                    'slave-04': ['compute-vmware']})

        self.show_step(4)  # Configure interfaces on nodes
        self.reconfigure_cluster_interfaces(cluster_id)

        self.show_step(5)  # Enable plugin and configure network settings
        # Fix: call with cluster_id only, consistently with the other tests
        # in this class (the original passed a spurious self.fuel_web arg).
        self.enable_plugin(cluster_id)

        # Configure VMware settings. 1 cluster, 1 Nova Compute: compute-vmware
        self.show_step(6)
        target_node1 = self.fuel_web.get_nailgun_node_by_name('slave-04')
        self.fuel_web.vcenter_configure(cluster_id,
                                        target_node_1=target_node1['hostname'])

        self.show_step(7)  # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(8)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(9)  # Launch vcenter vm
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            SERVTEST_USERNAME,
            SERVTEST_PASSWORD,
            SERVTEST_TENANT)

        vcenter_vm = os_help.create_instance(os_conn, az='vcenter')

        self.show_step(10)  # Add node with compute-vmware role
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-05': ['compute-vmware']})

        self.show_step(11)  # Reconfigure vcenter compute clusters
        target_node2 = self.fuel_web.get_nailgun_node_by_name('slave-05')
        self.fuel_web.vcenter_configure(cluster_id,
                                        target_node_1=target_node1['hostname'],
                                        target_node_2=target_node2['hostname'])

        self.show_step(12)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # Moving the vSphere cluster between compute-vmware nodes is expected
        # to drop the instance that was running on the old Nova Compute.
        self.show_step(13)  # Check that instance has been removed
        assert_true(os_conn.is_srv_deleted(vcenter_vm))

        self.show_step(14)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(15)  # Remove node with compute-vmware role
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-04': ['compute-vmware']},
                                   False, True)

        self.show_step(16)  # Reconfigure vcenter compute clusters
        target_node2 = self.fuel_web.get_nailgun_node_by_name('slave-04')
        self.fuel_web.vcenter_configure(cluster_id,
                                        target_node_1=target_node2['hostname'])

        self.show_step(17)  # Redeploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(18)  # Run OSTF
        self.fuel_web.run_ostf(cluster_id)

Loading…
Cancel
Save