OpenStack Testing (Tempest) of an existing cloud — tempest scenario manager (1451 lines, 63KB; this excerpt covers the beginning of the file).
  1. # Copyright 2012 OpenStack Foundation
  2. # Copyright 2013 IBM Corp.
  3. # All Rights Reserved.
  4. #
  5. # Licensed under the Apache License, Version 2.0 (the "License"); you may
  6. # not use this file except in compliance with the License. You may obtain
  7. # a copy of the License at
  8. #
  9. # http://www.apache.org/licenses/LICENSE-2.0
  10. #
  11. # Unless required by applicable law or agreed to in writing, software
  12. # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  13. # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  14. # License for the specific language governing permissions and limitations
  15. # under the License.
import subprocess

import netaddr
from oslo_log import log
from oslo_serialization import jsonutils as json
from oslo_utils import netutils

from tempest.common import compute
from tempest.common import image as common_image
from tempest.common.utils.linux import remote_client
from tempest.common.utils import net_utils
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)

# Sentinel asking the microversion machinery for the newest version
# supported by both tempest and the cloud under test.
LATEST_MICROVERSION = 'latest'
class ScenarioTest(tempest.test.BaseTestCase):
    """Base class for scenario tests. Uses tempest own clients. """

    credentials = ['primary']

    # Per-service microversion bounds requested by the test class.
    # None means "no minimum"; LATEST_MICROVERSION means "no maximum".
    # They are validated against the deployment's configured range in
    # skip_checks() below.
    compute_min_microversion = None
    compute_max_microversion = LATEST_MICROVERSION
    volume_min_microversion = None
    volume_max_microversion = LATEST_MICROVERSION
    placement_min_microversion = None
    placement_max_microversion = LATEST_MICROVERSION
    @classmethod
    def skip_checks(cls):
        """Skip the class if its microversion bounds cannot be satisfied.

        Each check compares the class-level bounds against the
        deployment's configured compute/volume/placement microversion
        range and raises a skip exception on mismatch.
        """
        super(ScenarioTest, cls).skip_checks()
        api_version_utils.check_skip_with_microversion(
            cls.compute_min_microversion, cls.compute_max_microversion,
            CONF.compute.min_microversion, CONF.compute.max_microversion)
        api_version_utils.check_skip_with_microversion(
            cls.volume_min_microversion, cls.volume_max_microversion,
            CONF.volume.min_microversion, CONF.volume.max_microversion)
        api_version_utils.check_skip_with_microversion(
            cls.placement_min_microversion, cls.placement_max_microversion,
            CONF.placement.min_microversion, CONF.placement.max_microversion)
    @classmethod
    def resource_setup(cls):
        """Select the request microversion for each service.

        The value is resolved from the class minimum and the configured
        deployment minimum via select_request_microversion(), and is
        later pinned on every request by the fixture in setUp().
        """
        super(ScenarioTest, cls).resource_setup()
        cls.compute_request_microversion = (
            api_version_utils.select_request_microversion(
                cls.compute_min_microversion,
                CONF.compute.min_microversion))
        cls.volume_request_microversion = (
            api_version_utils.select_request_microversion(
                cls.volume_min_microversion,
                CONF.volume.min_microversion))
        cls.placement_request_microversion = (
            api_version_utils.select_request_microversion(
                cls.placement_min_microversion,
                CONF.placement.min_microversion))
    def setUp(self):
        super(ScenarioTest, self).setUp()
        # Pin every API request issued during this test to the
        # microversions chosen in resource_setup().
        self.useFixture(api_microversion_fixture.APIMicroversionFixture(
            compute_microversion=self.compute_request_microversion,
            volume_microversion=self.volume_request_microversion,
            placement_microversion=self.placement_request_microversion))
    @classmethod
    def setup_clients(cls):
        """Create the client aliases used by the scenario helpers.

        All clients are taken from the primary credentials. Image and
        volume clients are only wired when the respective service is
        deployed.
        """
        super(ScenarioTest, cls).setup_clients()
        # Clients (in alphabetical order)
        cls.flavors_client = cls.os_primary.flavors_client
        cls.compute_floating_ips_client = (
            cls.os_primary.compute_floating_ips_client)
        if CONF.service_available.glance:
            # Check if glance v1 is available to determine which client to use.
            if CONF.image_feature_enabled.api_v1:
                cls.image_client = cls.os_primary.image_client
            elif CONF.image_feature_enabled.api_v2:
                cls.image_client = cls.os_primary.image_client_v2
            else:
                raise lib_exc.InvalidConfiguration(
                    'Either api_v1 or api_v2 must be True in '
                    '[image-feature-enabled].')
        # Compute image client
        cls.compute_images_client = cls.os_primary.compute_images_client
        cls.keypairs_client = cls.os_primary.keypairs_client
        # Nova security groups client
        cls.compute_security_groups_client = (
            cls.os_primary.compute_security_groups_client)
        cls.compute_security_group_rules_client = (
            cls.os_primary.compute_security_group_rules_client)
        cls.servers_client = cls.os_primary.servers_client
        cls.interface_client = cls.os_primary.interfaces_client
        # Neutron network client
        cls.networks_client = cls.os_primary.networks_client
        cls.ports_client = cls.os_primary.ports_client
        cls.routers_client = cls.os_primary.routers_client
        cls.subnets_client = cls.os_primary.subnets_client
        cls.floating_ips_client = cls.os_primary.floating_ips_client
        cls.security_groups_client = cls.os_primary.security_groups_client
        cls.security_group_rules_client = (
            cls.os_primary.security_group_rules_client)
        # Use the latest available volume clients
        if CONF.service_available.cinder:
            cls.volumes_client = cls.os_primary.volumes_client_latest
            cls.snapshots_client = cls.os_primary.snapshots_client_latest
            cls.backups_client = cls.os_primary.backups_client_latest
  120. # ## Test functions library
  121. #
  122. # The create_[resource] functions only return body and discard the
  123. # resp part which is not used in scenario tests
  124. def create_port(self, network_id, client=None, **kwargs):
  125. if not client:
  126. client = self.ports_client
  127. name = data_utils.rand_name(self.__class__.__name__)
  128. if CONF.network.port_vnic_type and 'binding:vnic_type' not in kwargs:
  129. kwargs['binding:vnic_type'] = CONF.network.port_vnic_type
  130. if CONF.network.port_profile and 'binding:profile' not in kwargs:
  131. kwargs['binding:profile'] = CONF.network.port_profile
  132. result = client.create_port(
  133. name=name,
  134. network_id=network_id,
  135. **kwargs)
  136. port = result['port']
  137. self.addCleanup(test_utils.call_and_ignore_notfound_exc,
  138. client.delete_port, port['id'])
  139. return port
  140. def create_keypair(self, client=None):
  141. if not client:
  142. client = self.keypairs_client
  143. name = data_utils.rand_name(self.__class__.__name__)
  144. # We don't need to create a keypair by pubkey in scenario
  145. body = client.create_keypair(name=name)
  146. self.addCleanup(client.delete_keypair, name)
  147. return body['keypair']
    def create_server(self, name=None, image_id=None, flavor=None,
                      validatable=False, wait_until='ACTIVE',
                      clients=None, **kwargs):
        """Wrapper utility that returns a test server.

        This wrapper utility calls the common create test server and
        returns a test server. The purpose of this wrapper is to minimize
        the impact on the code of the tests already using this
        function.

        :param **kwargs:
            See extra parameters below

        :Keyword Arguments:
            * *vnic_type* (``string``) --
              used when launching instances with pre-configured ports.
              Examples:
                normal: a traditional virtual port that is either attached
                        to a linux bridge or an openvswitch bridge on a
                        compute node.
                direct: an SR-IOV port that is directly attached to a VM
                macvtap: an SR-IOV port that is attached to a VM via a macvtap
                         device.
              Defaults to ``CONF.network.port_vnic_type``.
            * *port_profile* (``dict``) --
              This attribute is a dictionary that can be used (with admin
              credentials) to supply information influencing the binding of
              the port.
              example: port_profile = "capabilities:[switchdev]"
              Defaults to ``CONF.network.port_profile``.
        """
        # NOTE(jlanoux): As a first step, ssh checks in the scenario
        # tests need to be run regardless of the run_validation and
        # validatable parameters and thus until the ssh validation job
        # becomes voting in CI. The test resources management and IP
        # association are taken care of in the scenario tests.
        # Therefore, the validatable parameter is set to false in all
        # those tests. In this way create_server just return a standard
        # server and the scenario tests always perform ssh checks.

        # Needed for the cross_tenant_traffic test:
        if clients is None:
            clients = self.os_primary

        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + "-server")

        vnic_type = kwargs.pop('vnic_type', CONF.network.port_vnic_type)
        profile = kwargs.pop('port_profile', CONF.network.port_profile)

        # If vnic_type or profile are configured create port for
        # every network
        if vnic_type or profile:
            ports = []
            create_port_body = {}

            if vnic_type:
                create_port_body['binding:vnic_type'] = vnic_type

            if profile:
                create_port_body['binding:profile'] = profile

            if kwargs:
                # Convert security group names to security group ids
                # to pass to create_port
                if 'security_groups' in kwargs:
                    security_groups = \
                        clients.security_groups_client.list_security_groups(
                        ).get('security_groups')
                    sec_dict = dict([(s['name'], s['id'])
                                     for s in security_groups])

                    sec_groups_names = [s['name'] for s in kwargs.pop(
                        'security_groups')]
                    security_groups_ids = [sec_dict[s]
                                           for s in sec_groups_names]

                    if security_groups_ids:
                        create_port_body[
                            'security_groups'] = security_groups_ids
                networks = kwargs.pop('networks', [])
            else:
                networks = []

            # If there are no networks passed to us we look up
            # for the project's private networks and create a port.
            # The same behaviour as we would expect when passing
            # the call to the clients with no networks
            if not networks:
                networks = clients.networks_client.list_networks(
                    **{'router:external': False, 'fields': 'id'})['networks']

            # It's net['uuid'] if networks come from kwargs
            # and net['id'] if they come from
            # clients.networks_client.list_networks
            for net in networks:
                net_id = net.get('uuid', net.get('id'))
                if 'port' not in net:
                    port = self.create_port(network_id=net_id,
                                            client=clients.ports_client,
                                            **create_port_body)
                    ports.append({'port': port['id']})
                else:
                    ports.append({'port': net['port']})
            if ports:
                kwargs['networks'] = ports
            self.ports = ports

        tenant_network = self.get_tenant_network()

        if CONF.compute.compute_volume_common_az:
            kwargs.setdefault('availability_zone',
                              CONF.compute.compute_volume_common_az)

        body, _ = compute.create_test_server(
            clients,
            tenant_network=tenant_network,
            wait_until=wait_until,
            name=name, flavor=flavor,
            image_id=image_id, **kwargs)

        # Cleanups run LIFO: delete first, then wait for termination.
        self.addCleanup(waiters.wait_for_server_termination,
                        clients.servers_client, body['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        clients.servers_client.delete_server, body['id'])
        server = clients.servers_client.show_server(body['id'])['server']
        return server
    def create_volume(self, size=None, name=None, snapshot_id=None,
                      imageRef=None, volume_type=None):
        """Create a volume, optionally from an image, and wait for it.

        Deletion and wait-for-deletion are registered as cleanups.
        :return: the volume body, re-fetched after reaching 'available'.
        """
        if size is None:
            size = CONF.volume.volume_size
        if imageRef:
            # The volume must be at least as large as the source image's
            # min_disk requirement.
            if CONF.image_feature_enabled.api_v1:
                resp = self.image_client.check_image(imageRef)
                image = common_image.get_image_meta_from_headers(resp)
            else:
                image = self.image_client.show_image(imageRef)
            min_disk = image.get('min_disk')
            size = max(size, min_disk)
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + "-volume")
        kwargs = {'display_name': name,
                  'snapshot_id': snapshot_id,
                  'imageRef': imageRef,
                  'volume_type': volume_type,
                  'size': size}

        if CONF.compute.compute_volume_common_az:
            kwargs.setdefault('availability_zone',
                              CONF.compute.compute_volume_common_az)

        volume = self.volumes_client.create_volume(**kwargs)['volume']

        # Cleanups run LIFO: delete first, then wait for the deletion.
        self.addCleanup(self.volumes_client.wait_for_resource_deletion,
                        volume['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.volumes_client.delete_volume, volume['id'])
        self.assertEqual(name, volume['name'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')
        # The volume retrieved on creation has a non-up-to-date status.
        # Retrieval after it becomes active ensures correct details.
        volume = self.volumes_client.show_volume(volume['id'])['volume']
        return volume
  291. def create_backup(self, volume_id, name=None, description=None,
  292. force=False, snapshot_id=None, incremental=False,
  293. container=None):
  294. name = name or data_utils.rand_name(
  295. self.__class__.__name__ + "-backup")
  296. kwargs = {'name': name,
  297. 'description': description,
  298. 'force': force,
  299. 'snapshot_id': snapshot_id,
  300. 'incremental': incremental,
  301. 'container': container}
  302. backup = self.backups_client.create_backup(volume_id=volume_id,
  303. **kwargs)['backup']
  304. self.addCleanup(self.backups_client.delete_backup, backup['id'])
  305. waiters.wait_for_volume_resource_status(self.backups_client,
  306. backup['id'], 'available')
  307. return backup
  308. def restore_backup(self, backup_id):
  309. restore = self.backups_client.restore_backup(backup_id)['restore']
  310. self.addCleanup(self.volumes_client.delete_volume,
  311. restore['volume_id'])
  312. waiters.wait_for_volume_resource_status(self.backups_client,
  313. backup_id, 'available')
  314. waiters.wait_for_volume_resource_status(self.volumes_client,
  315. restore['volume_id'],
  316. 'available')
  317. self.assertEqual(backup_id, restore['backup_id'])
  318. return restore
    def create_volume_snapshot(self, volume_id, name=None, description=None,
                               metadata=None, force=False):
        """Snapshot *volume_id* and wait until the snapshot is available.

        Deletion and wait-for-deletion are registered as cleanups.
        :return: the snapshot body, re-fetched after it becomes available.
        """
        name = name or data_utils.rand_name(
            self.__class__.__name__ + '-snapshot')
        snapshot = self.snapshots_client.create_snapshot(
            volume_id=volume_id,
            force=force,
            display_name=name,
            description=description,
            metadata=metadata)['snapshot']
        # Cleanups run LIFO: delete first, then wait for the deletion.
        self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                        snapshot['id'])
        self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
        waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                snapshot['id'], 'available')
        snapshot = self.snapshots_client.show_snapshot(
            snapshot['id'])['snapshot']
        return snapshot
  337. def _cleanup_volume_type(self, volume_type):
  338. """Clean up a given volume type.
  339. Ensuring all volumes associated to a type are first removed before
  340. attempting to remove the type itself. This includes any image volume
  341. cache volumes stored in a separate tenant to the original volumes
  342. created from the type.
  343. """
  344. admin_volume_type_client = self.os_admin.volume_types_client_latest
  345. admin_volumes_client = self.os_admin.volumes_client_latest
  346. volumes = admin_volumes_client.list_volumes(
  347. detail=True, params={'all_tenants': 1})['volumes']
  348. type_name = volume_type['name']
  349. for volume in [v for v in volumes if v['volume_type'] == type_name]:
  350. test_utils.call_and_ignore_notfound_exc(
  351. admin_volumes_client.delete_volume, volume['id'])
  352. admin_volumes_client.wait_for_resource_deletion(volume['id'])
  353. admin_volume_type_client.delete_volume_type(volume_type['id'])
  354. def create_volume_type(self, client=None, name=None, backend_name=None):
  355. if not client:
  356. client = self.os_admin.volume_types_client_latest
  357. if not name:
  358. class_name = self.__class__.__name__
  359. name = data_utils.rand_name(class_name + '-volume-type')
  360. randomized_name = data_utils.rand_name('scenario-type-' + name)
  361. LOG.debug("Creating a volume type: %s on backend %s",
  362. randomized_name, backend_name)
  363. extra_specs = {}
  364. if backend_name:
  365. extra_specs = {"volume_backend_name": backend_name}
  366. volume_type = client.create_volume_type(
  367. name=randomized_name, extra_specs=extra_specs)['volume_type']
  368. self.addCleanup(self._cleanup_volume_type, volume_type)
  369. return volume_type
  370. def _create_loginable_secgroup_rule(self, secgroup_id=None):
  371. _client = self.compute_security_groups_client
  372. _client_rules = self.compute_security_group_rules_client
  373. if secgroup_id is None:
  374. sgs = _client.list_security_groups()['security_groups']
  375. for sg in sgs:
  376. if sg['name'] == 'default':
  377. secgroup_id = sg['id']
  378. # These rules are intended to permit inbound ssh and icmp
  379. # traffic from all sources, so no group_id is provided.
  380. # Setting a group_id would only permit traffic from ports
  381. # belonging to the same security group.
  382. rulesets = [
  383. {
  384. # ssh
  385. 'ip_protocol': 'tcp',
  386. 'from_port': 22,
  387. 'to_port': 22,
  388. 'cidr': '0.0.0.0/0',
  389. },
  390. {
  391. # ping
  392. 'ip_protocol': 'icmp',
  393. 'from_port': -1,
  394. 'to_port': -1,
  395. 'cidr': '0.0.0.0/0',
  396. }
  397. ]
  398. rules = list()
  399. for ruleset in rulesets:
  400. sg_rule = _client_rules.create_security_group_rule(
  401. parent_group_id=secgroup_id, **ruleset)['security_group_rule']
  402. rules.append(sg_rule)
  403. return rules
  404. def _create_security_group(self):
  405. # Create security group
  406. sg_name = data_utils.rand_name(self.__class__.__name__)
  407. sg_desc = sg_name + " description"
  408. secgroup = self.compute_security_groups_client.create_security_group(
  409. name=sg_name, description=sg_desc)['security_group']
  410. self.assertEqual(secgroup['name'], sg_name)
  411. self.assertEqual(secgroup['description'], sg_desc)
  412. self.addCleanup(
  413. test_utils.call_and_ignore_notfound_exc,
  414. self.compute_security_groups_client.delete_security_group,
  415. secgroup['id'])
  416. # Add rules to the security group
  417. self._create_loginable_secgroup_rule(secgroup['id'])
  418. return secgroup
    def get_remote_client(self, ip_address, username=None, private_key=None,
                          server=None):
        """Get a SSH client to a remote server

        :param ip_address: the server floating or fixed IP address to use
                           for ssh validation
        :param username: name of the Linux account on the remote server
        :param private_key: the SSH private key to use
        :param server: server dict, used for debugging purposes
        :return: a RemoteClient object
        """
        if username is None:
            username = CONF.validation.image_ssh_user
        # Set this with 'keypair' or others to log in with keypair or
        # username/password.
        if CONF.validation.auth_method == 'keypair':
            password = None
            if private_key is None:
                # Falls back to the keypair the test created earlier;
                # assumes self.keypair exists in that case.
                private_key = self.keypair['private_key']
        else:
            password = CONF.validation.image_ssh_password
            private_key = None
        linux_client = remote_client.RemoteClient(
            ip_address, username, pkey=private_key, password=password,
            server=server, servers_client=self.servers_client)
        # Fail fast if the credentials do not actually work.
        linux_client.validate_authentication()
        return linux_client
    def _image_create(self, name, fmt, path,
                      disk_format=None, properties=None):
        """Upload a glance image from a local file and register cleanup.

        Handles both the v1 API (metadata passed as headers) and the v2
        API (properties flattened into the image body).
        :return: the new image's id.
        """
        if properties is None:
            properties = {}
        name = data_utils.rand_name('%s-' % name)
        params = {
            'name': name,
            'container_format': fmt,
            'disk_format': disk_format or fmt,
        }
        if CONF.image_feature_enabled.api_v1:
            params['is_public'] = 'False'
            params['properties'] = properties
            params = {'headers': common_image.image_meta_to_headers(**params)}
        else:
            params['visibility'] = 'private'
            # Additional properties are flattened out in the v2 API.
            params.update(properties)
        body = self.image_client.create_image(**params)
        # v1 wraps the image in an 'image' key; v2 returns it directly.
        image = body['image'] if 'image' in body else body
        self.addCleanup(self.image_client.delete_image, image['id'])
        self.assertEqual("queued", image['status'])
        with open(path, 'rb') as image_file:
            if CONF.image_feature_enabled.api_v1:
                self.image_client.update_image(image['id'], data=image_file)
            else:
                self.image_client.store_image_file(image['id'], image_file)
        return image['id']
    def glance_image_create(self):
        """Upload the configured scenario image to glance.

        Falls back to a three-part UEC image (aki kernel, ari ramdisk,
        ami machine image) when the single-file image cannot be read.
        :return: the id of the image servers should boot from.
        """
        img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
        aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
        ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
        ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
        img_container_format = CONF.scenario.img_container_format
        img_disk_format = CONF.scenario.img_disk_format
        img_properties = CONF.scenario.img_properties
        LOG.debug("paths: img: %s, container_format: %s, disk_format: %s, "
                  "properties: %s, ami: %s, ari: %s, aki: %s",
                  img_path, img_container_format, img_disk_format,
                  img_properties, ami_img_path, ari_img_path, aki_img_path)
        try:
            image = self._image_create('scenario-img',
                                       img_container_format,
                                       img_path,
                                       disk_format=img_disk_format,
                                       properties=img_properties)
        except IOError:
            LOG.warning(
                "A(n) %s image was not found. Retrying with uec image.",
                img_disk_format)
            kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
            ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
            properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
            image = self._image_create('scenario-ami', 'ami',
                                       path=ami_img_path,
                                       properties=properties)
        LOG.debug("image:%s", image)
        return image
  503. def _log_console_output(self, servers=None, client=None):
  504. if not CONF.compute_feature_enabled.console_output:
  505. LOG.debug('Console output not supported, cannot log')
  506. return
  507. client = client or self.servers_client
  508. if not servers:
  509. servers = client.list_servers()
  510. servers = servers['servers']
  511. for server in servers:
  512. try:
  513. console_output = client.get_console_output(
  514. server['id'])['output']
  515. LOG.debug('Console output for %s\nbody=\n%s',
  516. server['id'], console_output)
  517. except lib_exc.NotFound:
  518. LOG.debug("Server %s disappeared(deleted) while looking "
  519. "for the console log", server['id'])
  520. def _log_net_info(self, exc):
  521. # network debug is called as part of ssh init
  522. if not isinstance(exc, lib_exc.SSHTimeout):
  523. LOG.debug('Network information on a devstack host')
    def create_server_snapshot(self, server, name=None):
        """Snapshot *server* into a glance image and wait until active.

        Any volume snapshot referenced by the image's block device
        mapping is also scheduled for cleanup.
        :return: the snapshot image (v1: metadata dict, v2: image body).
        """
        # Glance client
        _image_client = self.image_client
        # Compute client
        _images_client = self.compute_images_client
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
        LOG.debug("Creating a snapshot image for server: %s", server['name'])
        image = _images_client.create_image(server['id'], name=name)
        # The image id is only available from the Location response header.
        image_id = image.response['location'].split('images/')[1]
        waiters.wait_for_image_status(_image_client, image_id, 'active')

        # Cleanups run LIFO: delete first, then wait for the deletion.
        self.addCleanup(_image_client.wait_for_resource_deletion,
                        image_id)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        _image_client.delete_image, image_id)

        if CONF.image_feature_enabled.api_v1:
            # In glance v1 the additional properties are stored in the headers.
            resp = _image_client.check_image(image_id)
            snapshot_image = common_image.get_image_meta_from_headers(resp)
            image_props = snapshot_image.get('properties', {})
        else:
            # In glance v2 the additional properties are flattened.
            snapshot_image = _image_client.show_image(image_id)
            image_props = snapshot_image

        bdm = image_props.get('block_device_mapping')
        if bdm:
            bdm = json.loads(bdm)
            if bdm and 'snapshot_id' in bdm[0]:
                snapshot_id = bdm[0]['snapshot_id']
                self.addCleanup(
                    self.snapshots_client.wait_for_resource_deletion,
                    snapshot_id)
                self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                self.snapshots_client.delete_snapshot,
                                snapshot_id)
                waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                        snapshot_id,
                                                        'available')

        image_name = snapshot_image['name']
        self.assertEqual(name, image_name)
        LOG.debug("Created snapshot image %s for server %s",
                  image_name, server['name'])
        return snapshot_image
  567. def nova_volume_attach(self, server, volume_to_attach):
  568. volume = self.servers_client.attach_volume(
  569. server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
  570. % CONF.compute.volume_device_name)['volumeAttachment']
  571. self.assertEqual(volume_to_attach['id'], volume['id'])
  572. waiters.wait_for_volume_resource_status(self.volumes_client,
  573. volume['id'], 'in-use')
  574. # Return the updated volume after the attachment
  575. return self.volumes_client.show_volume(volume['id'])['volume']
  576. def nova_volume_detach(self, server, volume):
  577. self.servers_client.detach_volume(server['id'], volume['id'])
  578. waiters.wait_for_volume_resource_status(self.volumes_client,
  579. volume['id'], 'available')
    def ping_ip_address(self, ip_address, should_succeed=True,
                        ping_timeout=None, mtu=None, server=None):
        """Ping *ip_address* from the local host until the timeout.

        :param should_succeed: whether the ping is expected to work.
        :param mtu: if set, disable fragmentation and size the ICMP
            payload so the whole frame matches this MTU.
        :param server: optional server dict whose console log is dumped
            for debugging.
        :return: True if the observed reachability matched
            should_succeed within the timeout, False otherwise.
        """
        timeout = ping_timeout or CONF.validation.ping_timeout
        cmd = ['ping', '-c1', '-w1']

        if mtu:
            cmd += [
                # don't fragment
                '-M', 'do',
                # ping receives just the size of ICMP payload
                '-s', str(net_utils.get_ping_payload_size(mtu, 4))
            ]
        cmd.append(ip_address)

        def ping():
            # One ping attempt; success is the match between the exit
            # status and the expected reachability.
            proc = subprocess.Popen(cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.communicate()

            return (proc.returncode == 0) == should_succeed

        caller = test_utils.find_test_caller()
        LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
                  ' expected result is %(should_succeed)s', {
                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
                      'should_succeed':
                      'reachable' if should_succeed else 'unreachable'
                  })
        result = test_utils.call_until_true(ping, timeout, 1)
        LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
                  'ping result is %(result)s', {
                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
                      'result': 'expected' if result else 'unexpected'
                  })
        if server:
            self._log_console_output([server])
        return result
    def check_vm_connectivity(self, ip_address,
                              username=None,
                              private_key=None,
                              should_connect=True,
                              extra_msg="",
                              server=None,
                              mtu=None):
        """Check server connectivity

        :param ip_address: server to test against
        :param username: server's ssh username
        :param private_key: server's ssh private key to be used
        :param should_connect: True/False indicates positive/negative test
            positive - attempt ping and ssh
            negative - attempt ping and fail if succeed
        :param extra_msg: Message to help with debugging if ``ping_ip_address``
            fails
        :param server: The server whose console to log for debugging
        :param mtu: network MTU to use for connectivity validation
        :raises: AssertError if the result of the connectivity check does
            not match the value of the should_connect param
        """
        LOG.debug('checking network connections to IP %s with user: %s',
                  ip_address, username)
        if should_connect:
            msg = "Timed out waiting for %s to become reachable" % ip_address
        else:
            msg = "ip address %s is reachable" % ip_address
        if extra_msg:
            msg = "%s\n%s" % (extra_msg, msg)
        self.assertTrue(self.ping_ip_address(ip_address,
                                             should_succeed=should_connect,
                                             mtu=mtu, server=server),
                        msg=msg)
        if should_connect:
            # no need to check ssh for negative connectivity
            try:
                self.get_remote_client(ip_address, username, private_key,
                                       server=server)
            except Exception:
                if not extra_msg:
                    extra_msg = 'Failed to ssh to %s' % ip_address
                LOG.exception(extra_msg)
                raise
  657. def create_floating_ip(self, thing, pool_name=None):
  658. """Create a floating IP and associates to a server on Nova"""
  659. if not pool_name:
  660. pool_name = CONF.network.floating_network_name
  661. floating_ip = (self.compute_floating_ips_client.
  662. create_floating_ip(pool=pool_name)['floating_ip'])
  663. self.addCleanup(test_utils.call_and_ignore_notfound_exc,
  664. self.compute_floating_ips_client.delete_floating_ip,
  665. floating_ip['id'])
  666. self.compute_floating_ips_client.associate_floating_ip_to_server(
  667. floating_ip['ip'], thing['id'])
  668. return floating_ip
    def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
                         private_key=None, server=None):
        """Write the current date to a timestamp file on the guest.

        When *dev_name* is given, a filesystem is first created on that
        device and mounted on *mount_path* (and unmounted afterwards),
        so the timestamp persists on the attached volume.
        :return: the timestamp string, as read back from the guest.
        """
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key,
                                            server=server)
        if dev_name is not None:
            ssh_client.make_fs(dev_name)
            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
                                                               mount_path))
        cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
        ssh_client.exec_command(cmd_timestamp)
        timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
                                            % mount_path)
        if dev_name is not None:
            ssh_client.exec_command('sudo umount %s' % mount_path)
        return timestamp
  685. def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
  686. private_key=None, server=None):
  687. ssh_client = self.get_remote_client(ip_address,
  688. private_key=private_key,
  689. server=server)
  690. if dev_name is not None:
  691. ssh_client.mount(dev_name, mount_path)
  692. timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
  693. % mount_path)
  694. if dev_name is not None:
  695. ssh_client.exec_command('sudo umount %s' % mount_path)
  696. return timestamp
    def get_server_ip(self, server):
        """Get the server fixed or floating IP.

        Based on the configuration we're in, return a correct ip
        address for validating that a guest is up.
        """
        if CONF.validation.connect_method == 'floating':
            # The tests calling this method don't have a floating IP
            # and can't make use of the validation resources. So the
            # method is creating the floating IP there.
            return self.create_floating_ip(server)['ip']
        elif CONF.validation.connect_method == 'fixed':
            # Determine the network name to look for based on config or creds
            # provider network resources.
            if CONF.validation.network_for_ssh:
                addresses = server['addresses'][
                    CONF.validation.network_for_ssh]
            else:
                network = self.get_tenant_network()
                addresses = (server['addresses'][network['name']]
                             if network else [])
            # Pick the first fixed address of the configured IP version.
            for address in addresses:
                if (address['version'] == CONF.validation.ip_version_for_ssh and  # noqa
                        address['OS-EXT-IPS:type'] == 'fixed'):
                    return address['addr']
            raise exceptions.ServerUnreachable(server_id=server['id'])
        else:
            raise lib_exc.InvalidConfiguration()
  724. @classmethod
  725. def get_host_for_server(cls, server_id):
  726. server_details = cls.os_admin.servers_client.show_server(server_id)
  727. return server_details['server']['OS-EXT-SRV-ATTR:host']
  728. class NetworkScenarioTest(ScenarioTest):
  729. """Base class for network scenario tests.
  730. This class provide helpers for network scenario tests, using the neutron
  731. API. Helpers from ancestor which use the nova network API are overridden
  732. with the neutron API.
  733. This Class also enforces using Neutron instead of novanetwork.
  734. Subclassed tests will be skipped if Neutron is not enabled
  735. """
  736. credentials = ['primary', 'admin']
  737. @classmethod
  738. def skip_checks(cls):
  739. super(NetworkScenarioTest, cls).skip_checks()
  740. if not CONF.service_available.neutron:
  741. raise cls.skipException('Neutron not available')
  742. def _create_network(self, networks_client=None,
  743. tenant_id=None,
  744. namestart='network-smoke-',
  745. port_security_enabled=True, **net_dict):
  746. if not networks_client:
  747. networks_client = self.networks_client
  748. if not tenant_id:
  749. tenant_id = networks_client.tenant_id
  750. name = data_utils.rand_name(namestart)
  751. network_kwargs = dict(name=name, tenant_id=tenant_id)
  752. if net_dict:
  753. network_kwargs.update(net_dict)
  754. # Neutron disables port security by default so we have to check the
  755. # config before trying to create the network with port_security_enabled
  756. if CONF.network_feature_enabled.port_security:
  757. network_kwargs['port_security_enabled'] = port_security_enabled
  758. result = networks_client.create_network(**network_kwargs)
  759. network = result['network']
  760. self.assertEqual(network['name'], name)
  761. self.addCleanup(test_utils.call_and_ignore_notfound_exc,
  762. networks_client.delete_network,
  763. network['id'])
  764. return network
    def create_subnet(self, network, subnets_client=None,
                      namestart='subnet-smoke', **kwargs):
        """Create a subnet for the given network

        within the cidr block configured for tenant networks.

        :param network: network dict the subnet is attached to
        :param subnets_client: client to use; defaults to the primary one
        :param namestart: prefix for the randomized subnet name
        :param kwargs: extra subnet attributes; ``ip_version`` (default 4)
            selects the configured v4 or v6 tenant CIDR pool
        :returns: the created subnet dict
        """
        if not subnets_client:
            subnets_client = self.subnets_client

        def cidr_in_use(cidr, tenant_id):
            """Check cidr existence

            :returns: True if subnet with cidr already exist in tenant
                False else
            """
            # Admin credentials are used so subnets of all tenants are
            # visible when checking for collisions.
            cidr_in_use = self.os_admin.subnets_client.list_subnets(
                tenant_id=tenant_id, cidr=cidr)['subnets']
            return len(cidr_in_use) != 0

        ip_version = kwargs.pop('ip_version', 4)

        # Pick the CIDR pool and subnet size matching the IP version.
        if ip_version == 6:
            tenant_cidr = netaddr.IPNetwork(
                CONF.network.project_network_v6_cidr)
            num_bits = CONF.network.project_network_v6_mask_bits
        else:
            tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
            num_bits = CONF.network.project_network_mask_bits

        result = None
        str_cidr = None
        # Repeatedly attempt subnet creation with sequential cidr
        # blocks until an unallocated block is found.
        for subnet_cidr in tenant_cidr.subnet(num_bits):
            str_cidr = str(subnet_cidr)
            if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
                continue

            subnet = dict(
                name=data_utils.rand_name(namestart),
                network_id=network['id'],
                tenant_id=network['tenant_id'],
                cidr=str_cidr,
                ip_version=ip_version,
                **kwargs
            )
            try:
                result = subnets_client.create_subnet(**subnet)
                break
            except lib_exc.Conflict as e:
                # A parallel test may have raced us to this block; only
                # the "overlapping subnet" conflict moves on to the next
                # candidate CIDR, anything else is a real failure.
                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
                if not is_overlapping_cidr:
                    raise
        self.assertIsNotNone(result, 'Unable to allocate tenant network')

        subnet = result['subnet']
        self.assertEqual(subnet['cidr'], str_cidr)

        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnets_client.delete_subnet, subnet['id'])

        return subnet
    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
        """Return (port_id, ip) for the server's single usable IPv4 port.

        :param server: server dict whose ports are listed via the admin
            ports client
        :param ip_addr: optional fixed IP used to narrow the port lookup
        :returns: tuple of (port id, IPv4 address)
        :raises AssertionError: if zero, or more than one, acceptable
            IPv4 address is found
        """
        if ip_addr:
            ports = self.os_admin.ports_client.list_ports(
                device_id=server['id'],
                fixed_ips='ip_address=%s' % ip_addr)['ports']
        else:
            ports = self.os_admin.ports_client.list_ports(
                device_id=server['id'])['ports']
        # A port can have more than one IP address in some cases.
        # If the network is dual-stack (IPv4 + IPv6), this port is associated
        # with 2 subnets
        p_status = ['ACTIVE']
        # NOTE(vsaienko) With Ironic, instances live on separate hardware
        # servers. Neutron does not bind ports for Ironic instances, as a
        # result the port remains in the DOWN state.
        # TODO(vsaienko) remove once bug: #1599836 is resolved.
        if getattr(CONF.service_available, 'ironic', False):
            p_status.append('DOWN')
        # Keep only IPv4 fixed IPs that sit on a port in an accepted state.
        port_map = [(p["id"], fxip["ip_address"])
                    for p in ports
                    for fxip in p["fixed_ips"]
                    if (netutils.is_valid_ipv4(fxip["ip_address"]) and
                        p['status'] in p_status)]
        inactive = [p for p in ports if p['status'] != 'ACTIVE']
        if inactive:
            LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)

        self.assertNotEmpty(port_map,
                            "No IPv4 addresses found in: %s" % ports)
        self.assertEqual(len(port_map), 1,
                         "Found multiple IPv4 addresses: %s. "
                         "Unable to determine which port to target."
                         % port_map)
        return port_map[0]
  850. def _get_network_by_name(self, network_name):
  851. net = self.os_admin.networks_client.list_networks(
  852. name=network_name)['networks']
  853. self.assertNotEmpty(net,
  854. "Unable to get network by name: %s" % network_name)
  855. return net[0]
  856. def create_floating_ip(self, thing, external_network_id=None,
  857. port_id=None, client=None):
  858. """Create a floating IP and associates to a resource/port on Neutron"""
  859. if not external_network_id:
  860. external_network_id = CONF.network.public_network_id
  861. if not client:
  862. client = self.floating_ips_client
  863. if not port_id:
  864. port_id, ip4 = self._get_server_port_id_and_ip4(thing)
  865. else:
  866. ip4 = None
  867. result = client.create_floatingip(
  868. floating_network_id=external_network_id,
  869. port_id=port_id,
  870. tenant_id=thing['tenant_id'],
  871. fixed_ip_address=ip4
  872. )
  873. floating_ip = result['floatingip']
  874. self.addCleanup(test_utils.call_and_ignore_notfound_exc,
  875. client.delete_floatingip,
  876. floating_ip['id'])
  877. return floating_ip
  878. def check_floating_ip_status(self, floating_ip, status):
  879. """Verifies floatingip reaches the given status
  880. :param dict floating_ip: floating IP dict to check status
  881. :param status: target status
  882. :raises: AssertionError if status doesn't match
  883. """
  884. floatingip_id = floating_ip['id']
  885. def refresh():
  886. result = (self.floating_ips_client.
  887. show_floatingip(floatingip_id)['floatingip'])
  888. return status == result['status']
  889. if not test_utils.call_until_true(refresh,
  890. CONF.network.build_timeout,
  891. CONF.network.build_interval):
  892. floating_ip = self.floating_ips_client.show_floatingip(
  893. floatingip_id)['floatingip']
  894. self.assertEqual(status, floating_ip['status'],
  895. message="FloatingIP: {fp} is at status: {cst}. "
  896. "failed to reach status: {st}"
  897. .format(fp=floating_ip, cst=floating_ip['status'],
  898. st=status))
  899. LOG.info("FloatingIP: {fp} is at status: {st}"
  900. .format(fp=floating_ip, st=status))
  901. def check_tenant_network_connectivity(self, server,
  902. username,
  903. private_key,
  904. should_connect=True,
  905. servers_for_debug=None):
  906. if not CONF.network.project_networks_reachable:
  907. msg = 'Tenant networks not configured to be reachable.'
  908. LOG.info(msg)
  909. return
  910. # The target login is assumed to have been configured for
  911. # key-based authentication by cloud-init.
  912. try:
  913. for ip_addresses in server['addresses'].values():
  914. for ip_address in ip_addresses:
  915. self.check_vm_connectivity(ip_address['addr'],
  916. username,
  917. private_key,
  918. should_connect=should_connect)
  919. except Exception as e:
  920. LOG.exception('Tenant network connectivity check failed')
  921. self._log_console_output(servers_for_debug)
  922. self._log_net_info(e)
  923. raise
  924. def check_remote_connectivity(self, source, dest, should_succeed=True,
  925. nic=None, protocol='icmp'):
  926. """check server connectivity via source ssh connection
  927. :param source: RemoteClient: an ssh connection from which to execute
  928. the check
  929. :param dest: an IP to check connectivity against
  930. :param should_succeed: boolean should connection succeed or not
  931. :param nic: specific network interface to test connectivity from
  932. :param protocol: the protocol used to test connectivity with.
  933. :returns: True, if the connection succeeded and it was expected to
  934. succeed. False otherwise.
  935. """
  936. method_name = '%s_check' % protocol
  937. connectivity_checker = getattr(source, method_name)
  938. def connect_remote():
  939. try:
  940. connectivity_checker(dest, nic=nic)
  941. except lib_exc.SSHExecCommandFailed:
  942. LOG.warning('Failed to check %(protocol)s connectivity for '
  943. 'IP %(dest)s via a ssh connection from: %(src)s.',
  944. dict(protocol=protocol, dest=dest,
  945. src=source.ssh_client.host))
  946. return not should_succeed
  947. return should_succeed
  948. result = test_utils.call_until_true(connect_remote,
  949. CONF.validation.ping_timeout, 1)
  950. if result:
  951. return
  952. source_host = source.ssh_client.host
  953. if should_succeed:
  954. msg = "Timed out waiting for %s to become reachable from %s" \
  955. % (dest, source_host)
  956. else:
  957. msg = "%s is reachable from %s" % (dest, source_host)
  958. self._log_console_output()
  959. self.fail(msg)
  960. def _create_security_group(self, security_group_rules_client=None,
  961. tenant_id=None,
  962. namestart='secgroup-smoke',
  963. security_groups_client=None):
  964. if security_group_rules_client is None:
  965. security_group_rules_client = self.security_group_rules_client
  966. if security_groups_client is None:
  967. security_groups_client = self.security_groups_client
  968. if tenant_id is None:
  969. tenant_id = security_groups_client.tenant_id
  970. secgroup = self._create_empty_security_group(
  971. namestart=namestart, client=security_groups_client,
  972. tenant_id=tenant_id)
  973. # Add rules to the security group
  974. rules = self._create_loginable_secgroup_rule(
  975. security_group_rules_client=security_group_rules_client,
  976. secgroup=secgroup,
  977. security_groups_client=security_groups_client)
  978. for rule in rules:
  979. self.assertEqual(tenant_id, rule['tenant_id'])
  980. self.assertEqual(secgroup['id'], rule['security_group_id'])
  981. return secgroup
  982. def _create_empty_security_group(self, client=None, tenant_id=None,
  983. namestart='secgroup-smoke'):
  984. """Create a security group without rules.
  985. Default rules will be created:
  986. - IPv4 egress to any
  987. - IPv6 egress to any
  988. :param tenant_id: secgroup will be created in this tenant
  989. :returns: the created security group
  990. """
  991. if client is None:
  992. client = self.security_groups_client
  993. if not tenant_id:
  994. tenant_id = client.tenant_id
  995. sg_name = data_utils.rand_name(namestart)
  996. sg_desc = sg_name + " description"
  997. sg_dict = dict(name=sg_name,
  998. description=sg_desc)
  999. sg_dict['tenant_id'] = tenant_id
  1000. result = client.create_security_group(**sg_dict)
  1001. secgroup = result['security_group']
  1002. self.assertEqual(secgroup['name'], sg_name)
  1003. self.assertEqual(tenant_id, secgroup['tenant_id'])
  1004. self.assertEqual(secgroup['description'], sg_desc)
  1005. self.addCleanup(test_utils.call_and_ignore_notfound_exc,
  1006. client.delete_security_group, secgroup['id'])
  1007. return secgroup
    def _create_security_group_rule(self, secgroup=None,
                                    sec_group_rules_client=None,
                                    tenant_id=None,
                                    security_groups_client=None, **kwargs):
        """Create a rule from a dictionary of rule parameters.

        Create a rule in a secgroup. if secgroup not defined will search for
        default secgroup in tenant_id.

        :param secgroup: the security group.
        :param tenant_id: if secgroup not passed -- the tenant in which to
            search for default secgroup
        :param kwargs: a dictionary containing rule parameters:
            for example, to allow incoming ssh:
            rule = {
                direction: 'ingress'
                protocol:'tcp',
                port_range_min: 22,
                port_range_max: 22
                }
        :returns: the created security group rule dict
        :raises AssertionError: if no default security group exists for
            ``tenant_id`` when ``secgroup`` is not supplied
        """
        if sec_group_rules_client is None:
            sec_group_rules_client = self.security_group_rules_client
        if security_groups_client is None:
            security_groups_client = self.security_groups_client
        if not tenant_id:
            tenant_id = security_groups_client.tenant_id
        if secgroup is None:
            # Get default secgroup for tenant_id
            default_secgroups = security_groups_client.list_security_groups(
                name='default', tenant_id=tenant_id)['security_groups']
            msg = "No default security group for tenant %s." % (tenant_id)
            self.assertNotEmpty(default_secgroups, msg)
            secgroup = default_secgroups[0]

        # Caller-supplied parameters take precedence over the base fields.
        ruleset = dict(security_group_id=secgroup['id'],
                       tenant_id=secgroup['tenant_id'])
        ruleset.update(kwargs)

        sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
        sg_rule = sg_rule['security_group_rule']

        # The new rule must land in the expected tenant and group.
        self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
        self.assertEqual(secgroup['id'], sg_rule['security_group_id'])

        return sg_rule
    def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
                                        secgroup=None,
                                        security_groups_client=None):
        """Create loginable security group rule

        This function will create:
        1. egress and ingress tcp port 22 allow rule in order to allow ssh
        access for ipv4.
        2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
        3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.

        :returns: list of the rules that were actually created; rules that
            already existed are skipped silently
        """
        if security_group_rules_client is None:
            security_group_rules_client = self.security_group_rules_client
        if security_groups_client is None:
            security_groups_client = self.security_groups_client
        rules = []
        rulesets = [
            dict(
                # ssh
                protocol='tcp',
                port_range_min=22,
                port_range_max=22,
            ),
            dict(
                # ping
                protocol='icmp',
            ),
            dict(
                # ipv6-icmp for ping6
                protocol='icmp',
                ethertype='IPv6',
            )
        ]
        sec_group_rules_client = security_group_rules_client
        for ruleset in rulesets:
            # The same ruleset dict is reused for both directions; only
            # its 'direction' key is mutated between iterations.
            for r_direction in ['ingress', 'egress']:
                ruleset['direction'] = r_direction
                try:
                    sg_rule = self._create_security_group_rule(
                        sec_group_rules_client=sec_group_rules_client,
                        secgroup=secgroup,
                        security_groups_client=security_groups_client,
                        **ruleset)
                except lib_exc.Conflict as ex:
                    # if rule already exist - skip rule and continue
                    # NOTE(review): matches against the private
                    # _error_string attribute of the exception; confirm
                    # whether str(ex) would be a safer public alternative.
                    msg = 'Security group rule already exists'
                    if msg not in ex._error_string:
                        raise ex
                else:
                    self.assertEqual(r_direction, sg_rule['direction'])
                    rules.append(sg_rule)
        return rules
  1099. def _get_router(self, client=None, tenant_id=None):
  1100. """Retrieve a router for the given tenant id.
  1101. If a public router has been configured, it will be returned.
  1102. If a public router has not been configured, but a public
  1103. network has, a tenant router will be created and returned that
  1104. routes traffic to the public network.
  1105. """
  1106. if not client:
  1107. client = self.routers_client
  1108. if not tenant_id:
  1109. tenant_id = client.tenant_id
  1110. router_id = CONF.network.public_router_id
  1111. network_id = CONF.network.public_network_id
  1112. if router_id:
  1113. body = client.show_router(router_id)
  1114. return body['router']
  1115. elif network_id:
  1116. router = client.create_router(
  1117. name=data_utils.rand_name(self.__class__.__name__ + '-router'),
  1118. admin_state_up=True,
  1119. tenant_id=tenant_id,
  1120. external_gateway_info=dict(network_id=network_id))['router']
  1121. self.addCleanup(test_utils.call_and_ignore_notfound_exc,
  1122. client.delete_router, router['id'])
  1123. return router
  1124. else:
  1125. raise Exception("Neither of 'public_router_id' or "
  1126. "'public_network_id' has been defined.")
    def create_networks(self, networks_client=None,
                        routers_client=None, subnets_client=None,
                        tenant_id=None, dns_nameservers=None,
                        port_security_enabled=True, **net_dict):
        """Create a network with a subnet connected to a router.

        The baremetal driver is a special case since all nodes are
        on the same shared network.

        :param tenant_id: id of tenant to create resources in.
        :param dns_nameservers: list of dns servers to send to subnet.
        :param port_security_enabled: whether or not port_security is enabled
        :param net_dict: a dict containing experimental network information in
            a form like this: {'provider:network_type': 'vlan',
                               'provider:physical_network': 'foo',
                               'provider:segmentation_id': '42'}
        :returns: network, subnet, router (subnet and router are None on a
            shared physical network)
        """
        if CONF.network.shared_physical_network:
            # NOTE(Shrews): This exception is for environments where tenant
            # credential isolation is available, but network separation is
            # not (the current baremetal case). Likely can be removed when
            # test account mgmt is reworked:
            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
            if not CONF.compute.fixed_network_name:
                m = 'fixed_network_name must be specified in config'
                raise lib_exc.InvalidConfiguration(m)
            network = self._get_network_by_name(
                CONF.compute.fixed_network_name)
            router = None
            subnet = None
        else:
            network = self._create_network(
                networks_client=networks_client,
                tenant_id=tenant_id,
                port_security_enabled=port_security_enabled,
                **net_dict)
            router = self._get_router(client=routers_client,
                                      tenant_id=tenant_id)
            subnet_kwargs = dict(network=network,
                                 subnets_client=subnets_client)
            # use explicit check because empty list is a valid option
            if dns_nameservers is not None:
                subnet_kwargs['dns_nameservers'] = dns_nameservers
            subnet = self.create_subnet(**subnet_kwargs)
            if not routers_client:
                routers_client = self.routers_client
            router_id = router['id']
            routers_client.add_router_interface(router_id,
                                                subnet_id=subnet['id'])

            # save a cleanup job to remove this association between
            # router and subnet
            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                            routers_client.remove_router_interface, router_id,
                            subnet_id=subnet['id'])
        return network, subnet, router
  1181. class EncryptionScenarioTest(ScenarioTest):
  1182. """Base class for encryption scenario tests"""
  1183. credentials = ['primary', 'admin']
  1184. @classmethod
  1185. def setup_clients(cls):
  1186. super(EncryptionScenarioTest, cls).setup_clients()
  1187. cls.admin_volume_types_client = cls.os_admin.volume_types_client_latest
  1188. cls.admin_encryption_types_client =\
  1189. cls.os_admin.encryption_types_client_latest
  1190. def create_encryption_type(self, client=None, type_id=None, provider=None,
  1191. key_size=None, cipher=None,
  1192. control_location=None):
  1193. if not client:
  1194. client = self.admin_encryption_types_client
  1195. if not type_id:
  1196. volume_type = self.create_volume_type()
  1197. type_id = volume_type['id']
  1198. LOG.debug("Creating an encryption type for volume type: %s", type_id)
  1199. client.create_encryption_type(
  1200. type_id, provider=provider, key_size=key_size, cipher=cipher,
  1201. control_location=control_location)
  1202. def create_encrypted_volume(self, encryption_provider, volume_type,
  1203. key_size=256, cipher='aes-xts-plain64',
  1204. control_location='front-end'):
  1205. volume_type = self.create_volume_type(name=volume_type)
  1206. self.create_encryption_type(type_id=volume_type['id'],
  1207. provider=encryption_provider,
  1208. key_size=key_size,
  1209. cipher=cipher,
  1210. control_location=control_location)
  1211. return self.create_volume(volume_type=volume_type['name'])
  1212. class ObjectStorageScenarioTest(ScenarioTest):
  1213. """Provide harness to do Object Storage scenario tests.
  1214. Subclasses implement the tests that use the methods provided by this
  1215. class.
  1216. """
  1217. @classmethod
  1218. def skip_checks(cls):
  1219. super(ObjectStorageScenarioTest, cls).skip_checks()
  1220. if not CONF.service_available.swift:
  1221. skip_msg = ("%s skipped as swift is not available" %
  1222. cls.__name__)
  1223. raise cls.skipException(skip_msg)
  1224. @classmethod
  1225. def setup_credentials(cls):
  1226. cls.set_network_resources()
  1227. super(ObjectStorageScenarioTest, cls).setup_credentials()
  1228. operator_role = CONF.object_storage.operator_role
  1229. cls.os_operator = cls.get_client_manager(roles=[operator_role])
  1230. @classmethod
  1231. def setup_clients(cls):
  1232. super(ObjectStorageScenarioTest, cls).setup_clients()
  1233. # Clients for Swift
  1234. cls.account_client = cls.os_operator.account_client
  1235. cls.container_client = cls.os_operator.container_client
  1236. cls.object_client = cls.os_operator.object_client
  1237. def get_swift_stat(self):
  1238. """get swift status for our user account."""
  1239. self.account_client.list_account_containers()
  1240. LOG.debug('Swift status information obtained successfully')
  1241. def create_container(self, container_name=None):
  1242. name = container_name or data_utils.rand_name(
  1243. 'swift-scenario-container')
  1244. self.container_client.update_container(name)
  1245. # look for the container to assure it is created
  1246. self.list_and_check_container_objects(name)
  1247. LOG.debug('Container %s created', name)
  1248. self.addCleanup(test_utils.call_and_ignore_notfound_exc,
  1249. self.container_client.delete_container,
  1250. name)
  1251. return name
  1252. def delete_container(self, container_name):
  1253. self.container_client.delete_container(container_name)
  1254. LOG.debug('Container %s deleted', container_name)
  1255. def upload_object_to_container(self, container_name, obj_name=None):
  1256. obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
  1257. obj_data = data_utils.random_bytes()
  1258. self.object_client.create_object(container_name, obj_name, obj_data)
  1259. self.addCleanup(test_utils.call_and_ignore_notfound_exc,
  1260. self.object_client.delete_object,
  1261. container_name,
  1262. obj_name)
  1263. return obj_name, obj_data
  1264. def delete_object(self, container_name, filename):
  1265. self.object_client.delete_object(container_name, filename)
  1266. self.list_and_check_container_objects(container_name,
  1267. not_present_obj=[filename])
  1268. def list_and_check_container_objects(self, container_name,
  1269. present_obj=None,
  1270. not_present_obj=None):
  1271. # List objects for a given container and assert which are present and
  1272. # which are not.
  1273. if present_obj is None:
  1274. present_obj = []
  1275. if not_present_obj is None:
  1276. not_present_obj = []
  1277. _, object_list = self.container_client.list_container_objects(
  1278. container_name)
  1279. if present_obj:
  1280. for obj in present_obj:
  1281. self.assertIn(obj, object_list)
  1282. if not_present_obj:
  1283. for obj in not_present_obj:
  1284. self.assertNotIn(obj, object_list)
  1285. def download_and_verify(self, container_name, obj_name, expected_data):
  1286. _, obj = self.object_client.get_object(container_name, obj_name)
  1287. self.assertEqual(obj, expected_data)