# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Provides common functionality for integrated unit tests
"""

import collections
import random
import string
import time

import os_traits
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
import six

from nova.compute import instance_actions
from nova.compute import utils as compute_utils
import nova.conf
from nova import context
from nova.db import api as db
import nova.image.glance
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client as api_client
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.unit import cast_as_call
from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova import utils


CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)


def generate_random_alphanumeric(length):
    """Creates a random alphanumeric string of specified length."""
    return ''.join(random.choice(string.ascii_uppercase + string.digits)
                   for _x in range(length))


def generate_random_numeric(length):
    """Creates a random numeric string of specified length."""
    return ''.join(random.choice(string.digits)
                   for _x in range(length))


def generate_new_element(items, prefix, numeric=False):
    """Creates a random string with prefix, that is not in 'items' list."""
    while True:
        if numeric:
            candidate = prefix + generate_random_numeric(8)
        else:
            candidate = prefix + generate_random_alphanumeric(8)
        if candidate not in items:
            return candidate
        LOG.debug("Random collision on %s", candidate)
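
# Illustrative usage (a sketch, not part of the original module): pick a
# server name that is guaranteed not to collide with any existing name.
#
#     existing = ['server-A1B2C3D4']
#     name = generate_new_element(existing, 'server')
#     # e.g. 'server-7Q2XK9ZP', never a member of `existing`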


class InstanceHelperMixin(object):

    def _wait_for_server_parameter(self, admin_api, server, expected_params,
                                   max_retries=10):
        retry_count = 0
        while True:
            server = admin_api.get_server(server['id'])
            if all([server[attr] == expected_params[attr]
                    for attr in expected_params]):
                break
            retry_count += 1
            if retry_count == max_retries:
                self.fail('Wait for state change failed, '
                          'expected_params=%s, server=%s' % (
                              expected_params, server))
            time.sleep(0.5)
        return server

    def _wait_for_state_change(self, admin_api, server, expected_status,
                               max_retries=10):
        return self._wait_for_server_parameter(
            admin_api, server, {'status': expected_status}, max_retries)
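
    # Illustrative usage (a sketch, not part of the original module):
    #
    #     created = self.api.post_server({'server': server_req})
    #     server = self._wait_for_state_change(self.api, created, 'ACTIVE')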

    def _build_minimal_create_server_request(self, api, name, image_uuid=None,
                                             flavor_id=None, networks=None,
                                             az=None, host=None):
        server = {}

        # We now have a valid imageId
        server['imageRef'] = image_uuid or api.get_images()[0]['id']

        if not flavor_id:
            # Set a valid flavorId
            flavor_id = api.get_flavors()[1]['id']
        server['flavorRef'] = ('http://fake.server/%s' % flavor_id)
        server['name'] = name

        if networks is not None:
            server['networks'] = networks

        if az is not None:
            server['availability_zone'] = az

        # This requires at least microversion 2.74 to work
        if host is not None:
            server['host'] = host

        return server
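
    # An illustrative request body this helper could produce (all values
    # hypothetical):
    #
    #     {'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
    #      'flavorRef': 'http://fake.server/1',
    #      'name': 'some-server',
    #      'networks': 'none'}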

    def _wait_until_deleted(self, server):
        initially_in_error = (server['status'] == 'ERROR')
        try:
            for i in range(40):
                server = self.api.get_server(server['id'])
                if not initially_in_error and server['status'] == 'ERROR':
                    self.fail('Server went to error state instead of '
                              'disappearing.')
                time.sleep(0.5)
            self.fail('Server failed to delete.')
        except api_client.OpenStackApiNotFoundException:
            return

    def _wait_for_action_fail_completion(
            self, server, expected_action, event_name, api=None):
        """Polls instance action events for the given instance, action and
        action event name until it finds the action event with an error
        result.
        """
        if api is None:
            api = self.api
        return self._wait_for_instance_action_event(
            api, server, expected_action, event_name, event_result='error')

    def _wait_for_instance_action_event(
            self, api, server, action_name, event_name, event_result):
        """Polls the instance action events for the given instance, action,
        event, and event result until it finds the event.
        """
        actions = []
        events = []
        for attempt in range(10):
            actions = api.get_instance_actions(server['id'])
            # The API returns the newest event first.
            for action in actions:
                if action['action'] == action_name:
                    events = (
                        api.api_get(
                            '/servers/%s/os-instance-actions/%s' %
                            (server['id'], action['request_id'])
                        ).body['instanceAction']['events'])
                    # Look for the action event with the expected result.
                    for event in events:
                        result = event['result']
                        if (event['event'] == event_name and
                                result is not None and
                                result.lower() == event_result.lower()):
                            return event
            # We didn't find the completion event yet, so wait a bit.
            time.sleep(0.5)

        self.fail(
            'Timed out waiting for %s instance action event. Current '
            'instance actions: %s. Events in the last matching action: %s'
            % (event_name, actions, events))

    def _assert_resize_migrate_action_fail(self, server, action, error_in_tb):
        """Waits for the conductor_migrate_server action event to fail for
        the given action and asserts the error is in the event traceback.

        :param server: API response dict of the server being resized/migrated
        :param action: Either "resize" or "migrate" instance action.
        :param error_in_tb: Some expected part of the error event traceback.
        """
        api = self.admin_api if hasattr(self, 'admin_api') else self.api
        event = self._wait_for_action_fail_completion(
            server, action, 'conductor_migrate_server', api=api)
        self.assertIn(error_in_tb, event['traceback'])

    def _wait_for_migration_status(self, server, expected_statuses):
        """Waits for a migration record with the given statuses to be found
        for the given server, else the test fails. The migration record, if
        found, is returned.
        """
        api = getattr(self, 'admin_api', None)
        if api is None:
            api = self.api

        statuses = [status.lower() for status in expected_statuses]
        for attempt in range(10):
            migrations = api.api_get('/os-migrations').body['migrations']
            for migration in migrations:
                if (migration['instance_uuid'] == server['id'] and
                        migration['status'].lower() in statuses):
                    return migration
            time.sleep(0.5)
        self.fail('Timed out waiting for migration with status "%s" for '
                  'instance: %s' % (expected_statuses, server['id']))

    def _wait_for_log(self, log_line):
        for i in range(10):
            if log_line in self.stdlog.logger.output:
                return
            time.sleep(0.5)
        self.fail('The line "%(log_line)s" did not appear in the log'
                  % {'log_line': log_line})


class _IntegratedTestBase(test.TestCase):
    REQUIRES_LOCKING = True
    ADMIN_API = False
    # Override this in subclasses which use the legacy nova-network service.
    # New tests should rely on Neutron, and old ones should be migrated to
    # it, since nova-network is deprecated.
    USE_NEUTRON = True
    # This indicates whether to include the project ID in the URL for API
    # requests through OSAPIFixture. Overridden by subclasses.
    _use_project_id = False

    def setUp(self):
        super(_IntegratedTestBase, self).setUp()
        # TODO(mriedem): Fix the functional tests to work with Neutron.
        self.flags(use_neutron=self.USE_NEUTRON)

        # NOTE(mikal): this is used to stub away privsep helpers
        def fake_noop(*args, **kwargs):
            return None
        self.stub_out('nova.privsep.linux_net.bind_ip', fake_noop)

        nova.tests.unit.image.fake.stub_out_image_service(self)

        self.useFixture(cast_as_call.CastAsCall(self))

        placement = self.useFixture(func_fixtures.PlacementFixture())
        self.placement_api = placement.api
        if self.USE_NEUTRON:
            self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))

        self._setup_services()

        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

    def _setup_compute_service(self):
        return self.start_service('compute')

    def _setup_scheduler_service(self):
        return self.start_service('scheduler')

    def _setup_services(self):
        # NOTE(danms): Set the global MQ connection to that of our first cell
        # for any cells-ignorant code. Normally this is defaulted in the tests
        # which will result in us not doing the right thing.
        if 'cell1' in self.cell_mappings:
            self.flags(
                transport_url=self.cell_mappings['cell1'].transport_url)
        self.conductor = self.start_service('conductor')

        if not self.USE_NEUTRON:
            self.network = self.start_service('network',
                                              manager=CONF.network_manager)

        self.scheduler = self._setup_scheduler_service()
        self.compute = self._setup_compute_service()
        self.api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(
                api_version=self.api_major_version,
                use_project_id_in_urls=self._use_project_id))

        # if the class needs to run as admin, make the api endpoint
        # the admin, otherwise it's safer to run as non admin user.
        if self.ADMIN_API:
            self.api = self.api_fixture.admin_api
        else:
            self.api = self.api_fixture.api

        if hasattr(self, 'microversion'):
            self.api.microversion = self.microversion

    def get_unused_server_name(self):
        servers = self.api.get_servers()
        server_names = [server['name'] for server in servers]
        return generate_new_element(server_names, 'server')

    def get_unused_flavor_name_id(self):
        flavors = self.api.get_flavors()
        # Collect the existing names and ids so we can generate new ones
        # that do not collide.
        flavor_names = [flavor['name'] for flavor in flavors]
        flavor_ids = [flavor['id'] for flavor in flavors]
        return (generate_new_element(flavor_names, 'flavor'),
                int(generate_new_element(flavor_ids, '', True)))

    def get_invalid_image(self):
        return uuids.fake

    def _build_minimal_create_server_request(self, image_uuid=None):
        server = {}

        # NOTE(takashin): In API version 2.36, image APIs were deprecated.
        # In API version 2.36 or greater, self.api.get_images() returns
        # a 404 error. In that case, 'image_uuid' should be specified.
        server[self._image_ref_parameter] = (image_uuid or
                                             self.api.get_images()[0]['id'])

        # Set a valid flavorId
        flavor = self.api.get_flavors()[0]
        LOG.debug("Using flavor: %s", flavor)
        server[self._flavor_ref_parameter] = ('http://fake.server/%s'
                                              % flavor['id'])

        # Set a valid server name
        server_name = self.get_unused_server_name()
        server['name'] = server_name
        return server

    def _create_flavor_body(self, name, ram, vcpus, disk, ephemeral, id, swap,
                            rxtx_factor, is_public):
        return {
            "flavor": {
                "name": name,
                "ram": ram,
                "vcpus": vcpus,
                "disk": disk,
                "OS-FLV-EXT-DATA:ephemeral": ephemeral,
                "id": id,
                "swap": swap,
                "rxtx_factor": rxtx_factor,
                "os-flavor-access:is_public": is_public,
            }
        }

    def _create_flavor(self, memory_mb=2048, vcpu=2, disk=10, ephemeral=10,
                       swap=0, rxtx_factor=1.0, is_public=True,
                       extra_spec=None):
        flv_name, flv_id = self.get_unused_flavor_name_id()
        body = self._create_flavor_body(flv_name, memory_mb, vcpu, disk,
                                        ephemeral, flv_id, swap, rxtx_factor,
                                        is_public)
        self.api_fixture.admin_api.post_flavor(body)
        if extra_spec is not None:
            spec = {"extra_specs": extra_spec}
            self.api_fixture.admin_api.post_extra_spec(flv_id, spec)
        return flv_id
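
    # Illustrative usage (a sketch; the extra spec key is hypothetical):
    #
    #     flavor_id = self._create_flavor(
    #         memory_mb=512, vcpu=1, disk=1,
    #         extra_spec={'hw:cpu_policy': 'shared'})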

    def _build_server(self, flavor_id, image=None):
        server = {}
        if image is None:
            # TODO(stephenfin): We need to stop relying on this API
            with utils.temporary_mutation(self.api, microversion='2.35'):
                image = self.api.get_images()[0]
            LOG.debug("Image: %s", image)

            # We now have a valid imageId
            server[self._image_ref_parameter] = image['id']
        else:
            server[self._image_ref_parameter] = image

        # Set a valid flavorId
        flavor = self.api.get_flavor(flavor_id)
        LOG.debug("Using flavor: %s", flavor)
        server[self._flavor_ref_parameter] = ('http://fake.server/%s'
                                              % flavor['id'])

        # Set a valid server name
        server_name = self.get_unused_server_name()
        server['name'] = server_name
        return server

    def _check_api_endpoint(self, endpoint, expected_middleware):
        app = self.api_fixture.app().get((None, '/v2'))

        while getattr(app, 'application', False):
            for middleware in expected_middleware:
                if isinstance(app.application, middleware):
                    expected_middleware.remove(middleware)
                    break
            app = app.application

        self.assertEqual([],
                         expected_middleware,
                         ("The expected wsgi middlewares %s were not "
                          "found") % expected_middleware)


class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
    """Base test class for functional tests that check provider usage
    and consumer allocations in Placement during various operations.

    Subclasses must define a **compute_driver** attribute for the virt driver
    to use.

    This class sets up standard fixtures and controller services but does not
    start any compute services; that is left to the subclass.
    """

    microversion = 'latest'

    # These must match the capabilities in
    # nova.virt.libvirt.driver.LibvirtDriver.capabilities
    expected_libvirt_driver_capability_traits = set([
        six.u(trait) for trait in [
            os_traits.COMPUTE_DEVICE_TAGGING,
            os_traits.COMPUTE_NET_ATTACH_INTERFACE,
            os_traits.COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,
            os_traits.COMPUTE_VOLUME_ATTACH_WITH_TAG,
            os_traits.COMPUTE_VOLUME_EXTEND,
            os_traits.COMPUTE_TRUSTED_CERTS,
            os_traits.COMPUTE_IMAGE_TYPE_AKI,
            os_traits.COMPUTE_IMAGE_TYPE_AMI,
            os_traits.COMPUTE_IMAGE_TYPE_ARI,
            os_traits.COMPUTE_IMAGE_TYPE_ISO,
            os_traits.COMPUTE_IMAGE_TYPE_QCOW2,
            os_traits.COMPUTE_IMAGE_TYPE_RAW,
        ]
    ])

    # These must match the capabilities in
    # nova.virt.fake.FakeDriver.capabilities
    expected_fake_driver_capability_traits = set([
        six.u(trait) for trait in [
            os_traits.COMPUTE_IMAGE_TYPE_RAW,
            os_traits.COMPUTE_DEVICE_TAGGING,
            os_traits.COMPUTE_NET_ATTACH_INTERFACE,
            os_traits.COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG,
            os_traits.COMPUTE_VOLUME_ATTACH_WITH_TAG,
            os_traits.COMPUTE_VOLUME_EXTEND,
            os_traits.COMPUTE_VOLUME_MULTI_ATTACH,
            os_traits.COMPUTE_TRUSTED_CERTS,
        ]
    ])

    def setUp(self):
        self.flags(compute_driver=self.compute_driver)
        super(ProviderUsageBaseTestCase, self).setUp()

        self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
        self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.AllServicesCurrent())

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        placement = self.useFixture(func_fixtures.PlacementFixture())
        self.placement_api = placement.api
        self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.admin_api = self.api_fixture.admin_api
        self.admin_api.microversion = self.microversion
        self.api = self.admin_api

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)

        self.start_service('conductor')
        self.scheduler_service = self.start_service('scheduler')

        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.computes = {}

    def _start_compute(self, host, cell_name=None):
        """Start a nova compute service on the given host

        :param host: the name of the host that will be associated to the
                     compute service.
        :param cell_name: optional name of the cell in which to start the
                          compute service (defaults to cell1)
        :return: the nova compute service object
        """
        compute = self.start_service('compute', host=host, cell=cell_name)
        self.computes[host] = compute
        return compute
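
    # Illustrative usage in a subclass (host and cell names hypothetical):
    #
    #     self.compute1 = self._start_compute('host1')
    #     self.compute2 = self._start_compute('host2', cell_name='cell2')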

    def _get_provider_uuid_by_host(self, host):
        # NOTE(gibi): the compute node id is the same as the compute node
        # provider uuid on that compute
        resp = self.admin_api.api_get(
            'os-hypervisors?hypervisor_hostname_pattern=%s' % host).body
        return resp['hypervisors'][0]['id']

    def _get_provider_usages(self, provider_uuid):
        return self.placement_api.get(
            '/resource_providers/%s/usages' % provider_uuid).body['usages']

    def _get_allocations_by_server_uuid(self, server_uuid):
        return self.placement_api.get(
            '/allocations/%s' % server_uuid).body['allocations']

    def _get_allocations_by_provider_uuid(self, rp_uuid):
        return self.placement_api.get(
            '/resource_providers/%s/allocations' % rp_uuid
        ).body['allocations']

    def _get_all_providers(self):
        return self.placement_api.get(
            '/resource_providers', version='1.14').body['resource_providers']

    def _create_trait(self, trait):
        return self.placement_api.put('/traits/%s' % trait, {}, version='1.6')

    def _delete_trait(self, trait):
        return self.placement_api.delete('/traits/%s' % trait, version='1.6')

    def _get_provider_traits(self, provider_uuid):
        return self.placement_api.get(
            '/resource_providers/%s/traits' % provider_uuid,
            version='1.6').body['traits']

    def _set_provider_traits(self, rp_uuid, traits):
        """This will overwrite any existing traits.

        :param rp_uuid: UUID of the resource provider to update
        :param traits: list of trait strings to set on the provider
        :returns: APIResponse object with the results
        """
        provider = self.placement_api.get(
            '/resource_providers/%s' % rp_uuid).body
        put_traits_req = {
            'resource_provider_generation': provider['generation'],
            'traits': traits
        }
        return self.placement_api.put(
            '/resource_providers/%s/traits' % rp_uuid,
            put_traits_req, version='1.6')
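
    # Illustrative usage (the first trait is a standard os-traits symbol;
    # the custom one is hypothetical):
    #
    #     self._set_provider_traits(
    #         rp_uuid, ['COMPUTE_VOLUME_EXTEND', 'CUSTOM_MY_TRAIT'])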

    def _get_all_resource_classes(self):
        dicts = self.placement_api.get(
            '/resource_classes', version='1.2').body['resource_classes']
        return [d['name'] for d in dicts]

    def _get_all_traits(self):
        return self.placement_api.get('/traits', version='1.6').body['traits']

    def _get_provider_inventory(self, rp_uuid):
        return self.placement_api.get(
            '/resource_providers/%s/inventories' % rp_uuid
        ).body['inventories']

    def _get_provider_aggregates(self, rp_uuid):
        return self.placement_api.get(
            '/resource_providers/%s/aggregates' % rp_uuid,
            version='1.1').body['aggregates']

    def _post_resource_provider(self, rp_name):
        return self.placement_api.post(
            url='/resource_providers',
            version='1.20', body={'name': rp_name}).body

    def _set_inventory(self, rp_uuid, inv_body):
        """This will set the inventory for a given resource provider.

        :param rp_uuid: UUID of the resource provider to update
        :param inv_body: inventory to set on the provider
        :returns: APIResponse object with the results
        """
        return self.placement_api.post(
            url=('/resource_providers/%s/inventories' % rp_uuid),
            version='1.15', body=inv_body).body

    def _update_inventory(self, rp_uuid, inv_body):
        """This will update the inventory for a given resource provider.

        :param rp_uuid: UUID of the resource provider to update
        :param inv_body: inventory to set on the provider
        :returns: APIResponse object with the results
        """
        return self.placement_api.put(
            url=('/resource_providers/%s/inventories' % rp_uuid),
            body=inv_body).body
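
    # An illustrative inventory body for _update_inventory (the generation
    # and totals are hypothetical):
    #
    #     inv_body = {
    #         'resource_provider_generation': 0,
    #         'inventories': {
    #             'VCPU': {'total': 8},
    #             'MEMORY_MB': {'total': 8192},
    #         },
    #     }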

    def _get_resource_provider_by_uuid(self, rp_uuid):
        return self.placement_api.get(
            '/resource_providers/%s' % rp_uuid, version='1.15').body

    def _set_aggregate(self, rp_uuid, agg_id):
        provider = self.placement_api.get(
            '/resource_providers/%s' % rp_uuid).body
        post_agg_req = {"aggregates": [agg_id],
                        "resource_provider_generation":
                            provider['generation']}
        return self.placement_api.put(
            '/resource_providers/%s/aggregates' % rp_uuid, version='1.19',
            body=post_agg_req).body

    def _get_all_rp_uuids_in_a_tree(self, in_tree_rp_uuid):
        rps = self.placement_api.get(
            '/resource_providers?in_tree=%s' % in_tree_rp_uuid,
            version='1.20').body['resource_providers']
        return [rp['uuid'] for rp in rps]

    def assertRequestMatchesUsage(self, requested_resources, root_rp_uuid):
        # It matches the usages of the whole tree against the request
        rp_uuids = self._get_all_rp_uuids_in_a_tree(root_rp_uuid)
        # NOTE(gibi): flattening the placement usages means we cannot
        # verify the structure here. However I don't see any way to define
        # this function for nested and non-nested trees in a generic way.
        total_usage = collections.defaultdict(int)
        for rp in rp_uuids:
            usage = self._get_provider_usages(rp)
            for rc, amount in usage.items():
                total_usage[rc] += amount
        # Cannot simply do an assertEqual(expected, actual) as usages always
        # contain every RC even if the usage is 0 and the flavor could also
        # contain an explicit 0 request for some resources.
        # So if the flavor contains an explicit 0 resource request (e.g. in
        # case of ironic resources:VCPU=0) then this code needs to assert
        # that such a resource has 0 usage in the tree. On the other hand,
        # if the usage contains a 0 value for some resource that the flavor
        # does not request then that is totally fine.
        for rc, value in requested_resources.items():
            self.assertIn(
                rc, total_usage,
                'The requested resource class was not found in the '
                'total_usage of the RP tree')
            self.assertEqual(
                value,
                total_usage[rc],
                'The requested resource amount does not match the total '
                'resource usage of the RP tree')
        for rc, value in total_usage.items():
            if value != 0:
                self.assertEqual(
                    requested_resources[rc],
                    value,
                    'The requested resource amount does not match the '
                    'total resource usage of the RP tree')

    def assertFlavorMatchesUsage(self, root_rp_uuid, *flavors):
        resources = collections.defaultdict(int)
        for flavor in flavors:
            res = self._resources_from_flavor(flavor)
            for rc, value in res.items():
                resources[rc] += value
        self.assertRequestMatchesUsage(resources, root_rp_uuid)

    def _resources_from_flavor(self, flavor):
        resources = collections.defaultdict(int)
        resources['VCPU'] = flavor['vcpus']
        resources['MEMORY_MB'] = flavor['ram']
        resources['DISK_GB'] = flavor['disk']
        for key, value in flavor['extra_specs'].items():
            if key.startswith('resources'):
                # Extra spec values come back from the API as strings, so
                # coerce them before accumulating.
                resources[key.split(':')[1]] += int(value)
        return resources
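
    # Illustrative example: a flavor with vcpus=2, ram=2048, disk=10 and an
    # extra spec of 'resources:CUSTOM_MAGIC': '512' maps to
    # {'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 10, 'CUSTOM_MAGIC': 512}.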

    def assertFlavorMatchesAllocation(self, flavor, consumer_uuid,
                                      root_rp_uuid):
        # NOTE(gibi): This function does not handle sharing RPs today.
        expected_rps = self._get_all_rp_uuids_in_a_tree(root_rp_uuid)
        allocations = self._get_allocations_by_server_uuid(consumer_uuid)
        # NOTE(gibi): flattening the placement allocation means we cannot
        # verify the structure here. However I don't see any way to define
        # this function for nested and non-nested trees in a generic way.
        total_allocation = collections.defaultdict(int)
        for rp, alloc in allocations.items():
            self.assertIn(rp, expected_rps, 'Unexpected, out of tree RP in '
                          'the allocation')
            for rc, value in alloc['resources'].items():
                total_allocation[rc] += value

        self.assertEqual(
            self._resources_from_flavor(flavor),
            total_allocation,
            'The resources requested in the flavor do not match the total '
            'allocation in the RP tree')

    def get_migration_uuid_for_instance(self, instance_uuid):
        # NOTE(danms): This is too much introspection for a test like this,
        # but we can't see the migration uuid from the API, so we just
        # encapsulate the peek behind the curtains here to keep it out of
        # the tests.
        # TODO(danms): Get the migration uuid from the API once it is exposed
        ctxt = context.get_admin_context()
        migrations = db.migration_get_all_by_filters(
            ctxt, {'instance_uuid': instance_uuid})
        self.assertEqual(1, len(migrations),
                         'Test expected a single migration, '
                         'but found %i' % len(migrations))
        return migrations[0].uuid

    def _boot_and_check_allocations(self, flavor, source_hostname):
        """Boot an instance and check that the resource allocation is correct

        After booting an instance on the given host with a given flavor it
        asserts that both the providers usages and resource allocations match
        with the resources requested in the flavor. It also asserts that
        running the periodic update_available_resource call does not change
        the resource state.

        :param flavor: the flavor the instance will be booted with
        :param source_hostname: the name of the host the instance will be
                                booted on
        :return: the API representation of the booted instance
        """
        server_req = self._build_minimal_create_server_request(
            self.api, 'some-server', flavor_id=flavor['id'],
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            networks='none')
        server_req['availability_zone'] = 'nova:%s' % source_hostname
        LOG.info('booting on %s', source_hostname)
        created_server = self.api.post_server({'server': server_req})
        server = self._wait_for_state_change(
            self.admin_api, created_server, 'ACTIVE')

        # Verify that our source host is what the server ended up on
        self.assertEqual(source_hostname, server['OS-EXT-SRV-ATTR:host'])

        source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)

        # Before we run periodics, make sure that we have allocations/usages
        # only on the source host
        self.assertFlavorMatchesUsage(source_rp_uuid, flavor)

        # Check that the other providers have no usage
        for rp_uuid in [self._get_provider_uuid_by_host(hostname)
                        for hostname in self.computes.keys()
                        if hostname != source_hostname]:
            self.assertRequestMatchesUsage({'VCPU': 0,
                                            'MEMORY_MB': 0,
                                            'DISK_GB': 0}, rp_uuid)

        # Check that the server only allocates resource from the host it is
        # booted on
        self.assertFlavorMatchesAllocation(flavor, server['id'],
                                           source_rp_uuid)

        self._run_periodics()

        # After running the periodics but before we start any other
        # operation, we should have exactly the same allocation/usage
        # information as before running the periodics

        # Check usages on the selected host after boot
        self.assertFlavorMatchesUsage(source_rp_uuid, flavor)

        # Check that the server only allocates resource from the host it is
        # booted on
        self.assertFlavorMatchesAllocation(flavor, server['id'],
                                           source_rp_uuid)

        # Check that the other providers have no usage
        for rp_uuid in [self._get_provider_uuid_by_host(hostname)
                        for hostname in self.computes.keys()
                        if hostname != source_hostname]:
            self.assertRequestMatchesUsage({'VCPU': 0,
                                            'MEMORY_MB': 0,
                                            'DISK_GB': 0}, rp_uuid)
        return server
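
    # Illustrative usage (the flavor choice and host name are hypothetical):
    #
    #     flavor = self.api.get_flavors()[0]
    #     server = self._boot_and_check_allocations(flavor, 'host1')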

    def _delete_and_check_allocations(self, server):
        """Delete the instance and assert that the allocations are cleaned up.

        If the server was moved (resized or live migrated), also checks that
        migration-based allocations are cleaned up.

        :param server: The API representation of the instance to be deleted
        """
        # First check to see if there is a related migration record so we can
        # assert its allocations (if any) are not leaked.
        with utils.temporary_mutation(self.admin_api, microversion='2.59'):
            migrations = self.admin_api.api_get(
                '/os-migrations?instance_uuid=%s' %
                server['id']).body['migrations']
        if migrations:
            # If there is more than one migration, they are sorted by
            # created_at in descending order so we'll get the last one
            # which is probably what we'd always want anyway.
            migration_uuid = migrations[0]['uuid']
        else:
            migration_uuid = None

        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)
        # NOTE(gibi): The resource allocation is deleted after the instance
        # is destroyed in the db so wait_until_deleted might return before
        # the resources are deleted in placement. So we need to wait for the
        # instance.delete.end notification as that is emitted after the
        # resources are freed.
        fake_notifier.wait_for_versioned_notifications('instance.delete.end')

        for rp_uuid in [self._get_provider_uuid_by_host(hostname)
                        for hostname in self.computes.keys()]:
            self.assertRequestMatchesUsage({'VCPU': 0,
                                            'MEMORY_MB': 0,
                                            'DISK_GB': 0}, rp_uuid)

        # and no allocations for the deleted server
        allocations = self._get_allocations_by_server_uuid(server['id'])
        self.assertEqual(0, len(allocations))

        if migration_uuid:
            # and no allocations for the deleted migration
            allocations = self._get_allocations_by_server_uuid(migration_uuid)
            self.assertEqual(0, len(allocations))

    def _run_periodics(self):
        """Run the update_available_resource task on every compute manager

        This runs periodics on the computes in an undefined order; some child
        classes redefine this function to force a specific order.
        """
        ctx = context.get_admin_context()
        for host, compute in self.computes.items():
            LOG.info('Running periodic for compute (%s)', host)
            # Make sure the context is targeted to the proper cell database
            # for multi-cell tests.
            with context.target_cell(
                    ctx, self.host_mappings[host].cell_mapping) as cctxt:
                compute.manager.update_available_resource(cctxt)
        LOG.info('Finished with periodics')

    def _move_and_check_allocations(self, server, request, old_flavor,
                                    new_flavor, source_rp_uuid, dest_rp_uuid):
        self.api.post_server_action(server['id'], request)
        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')

        def _check_allocation():
            self.assertFlavorMatchesUsage(source_rp_uuid, old_flavor)
            self.assertFlavorMatchesUsage(dest_rp_uuid, new_flavor)

            # The instance should own the new_flavor allocation against the
            # destination host created by the scheduler
            self.assertFlavorMatchesAllocation(new_flavor, server['id'],
                                               dest_rp_uuid)

            # The migration should own the old_flavor allocation against the
            # source host created by conductor
            migration_uuid = self.get_migration_uuid_for_instance(
                server['id'])
            self.assertFlavorMatchesAllocation(old_flavor, migration_uuid,
                                               source_rp_uuid)

        # OK, so the move operation has run, but we have not yet confirmed or
        # reverted the move operation. Before we run periodics, make sure
        # that we have allocations/usages on BOTH the source and the
        # destination hosts.
        _check_allocation()

        self._run_periodics()

        _check_allocation()

        # Make sure the RequestSpec.flavor matches the new_flavor.
        ctxt = context.get_admin_context()
        reqspec = objects.RequestSpec.get_by_instance_uuid(ctxt, server['id'])
        self.assertEqual(new_flavor['id'], reqspec.flavor.flavorid)

    def _migrate_and_check_allocations(self, server, flavor, source_rp_uuid,
                                       dest_rp_uuid):
        request = {
            'migrate': None
        }
        self._move_and_check_allocations(
            server, request=request, old_flavor=flavor, new_flavor=flavor,
            source_rp_uuid=source_rp_uuid, dest_rp_uuid=dest_rp_uuid)

    def _resize_and_check_allocations(self, server, old_flavor, new_flavor,
                                      source_rp_uuid, dest_rp_uuid):
        request = {
            'resize': {
                'flavorRef': new_flavor['id']
            }
        }
        self._move_and_check_allocations(
            server, request=request, old_flavor=old_flavor,
            new_flavor=new_flavor, source_rp_uuid=source_rp_uuid,
            dest_rp_uuid=dest_rp_uuid)
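
    # Illustrative usage (host names are hypothetical):
    #
    #     source_rp_uuid = self._get_provider_uuid_by_host('host1')
    #     dest_rp_uuid = self._get_provider_uuid_by_host('host2')
    #     self._migrate_and_check_allocations(
    #         server, flavor, source_rp_uuid, dest_rp_uuid)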

    def _resize_to_same_host_and_check_allocations(self, server, old_flavor,
                                                   new_flavor, rp_uuid):
        # Resize the server to the same host and check usages in
        # VERIFY_RESIZE state
        self.flags(allow_resize_to_same_host=True)
        resize_req = {
            'resize': {
                'flavorRef': new_flavor['id']
            }
        }
        self.api.post_server_action(server['id'], resize_req)
        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')

        self.assertFlavorMatchesUsage(rp_uuid, old_flavor, new_flavor)

        # The instance should hold a new_flavor allocation
        self.assertFlavorMatchesAllocation(new_flavor, server['id'],
                                           rp_uuid)

        # The migration should hold an old_flavor allocation
        migration_uuid = self.get_migration_uuid_for_instance(server['id'])
        self.assertFlavorMatchesAllocation(old_flavor, migration_uuid,
                                           rp_uuid)

        # We've resized to the same host and have doubled allocations for
        # both the old and new flavor on the same host. Run the periodic on
        # the compute to see if it tramples on what the scheduler did.
        self._run_periodics()

        # In terms of usage, it's still double on the host because the
        # instance and the migration each hold an allocation for the new
        # and old flavors respectively.
        self.assertFlavorMatchesUsage(rp_uuid, old_flavor, new_flavor)

        # The instance should hold a new_flavor allocation
        self.assertFlavorMatchesAllocation(new_flavor, server['id'],
                                           rp_uuid)

        # The migration should hold an old_flavor allocation
        self.assertFlavorMatchesAllocation(old_flavor, migration_uuid,
                                           rp_uuid)

    def _check_allocation_during_evacuate(
            self, flavor, server_uuid, source_root_rp_uuid,
            dest_root_rp_uuid):
        allocations = self._get_allocations_by_server_uuid(server_uuid)
        self.assertEqual(2, len(allocations))
        self.assertFlavorMatchesUsage(source_root_rp_uuid, flavor)
        self.assertFlavorMatchesUsage(dest_root_rp_uuid, flavor)

    def assert_hypervisor_usage(self, compute_node_uuid, flavor,
                                volume_backed):
        """Asserts the given hypervisor's resource usage matches the
        given flavor (assumes a single instance on the hypervisor).

        :param compute_node_uuid: UUID of the ComputeNode to check.
        :param flavor: "flavor" entry dict from GET /flavors/{flavor_id}
        :param volume_backed: True if the flavor is used with a volume-backed
                              server, False otherwise.
        """
        # GET /os-hypervisors/{uuid} requires at least microversion 2.53
        with utils.temporary_mutation(self.admin_api, microversion='2.53'):
            hypervisor = self.admin_api.api_get(
                '/os-hypervisors/%s' % compute_node_uuid).body['hypervisor']
        if volume_backed:
            expected_disk_usage = 0
        else:
            expected_disk_usage = flavor['disk']
        # Account for reserved_host_disk_mb.
        expected_disk_usage += compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        self.assertEqual(expected_disk_usage, hypervisor['local_gb_used'])
        # Account for reserved_host_memory_mb.
        expected_ram_usage = CONF.reserved_host_memory_mb + flavor['ram']
        self.assertEqual(expected_ram_usage, hypervisor['memory_mb_used'])
        # Account for reserved_host_cpus.
        expected_vcpu_usage = CONF.reserved_host_cpus + flavor['vcpus']
        self.assertEqual(expected_vcpu_usage, hypervisor['vcpus_used'])

    def _confirm_resize(self, server):
        self.api.post_server_action(server['id'], {'confirmResize': None})
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        self._wait_for_instance_action_event(
            self.api, server, instance_actions.CONFIRM_RESIZE,
            'compute_confirm_resize', 'success')
        return server

    def _revert_resize(self, server):
        self.api.post_server_action(server['id'], {'revertResize': None})
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        self._wait_for_migration_status(server, ['reverted'])
        # Note that the migration status is changed to "reverted" in the
        # dest host revert_resize method but the allocations are cleaned up
        # in the source host finish_revert_resize method so we need to wait
        # for the finish_revert_resize method to complete.
        fake_notifier.wait_for_versioned_notifications(
            'instance.resize_revert.end')
        return server

    def get_unused_flavor_name_id(self):
        flavors = self.api.get_flavors()
        # Collect the existing names and ids so we can generate new ones
        # that do not collide.
        flavor_names = [flavor['name'] for flavor in flavors]
        flavor_ids = [flavor['id'] for flavor in flavors]
        return (generate_new_element(flavor_names, 'flavor'),
                int(generate_new_element(flavor_ids, '', True)))