OpenStack Compute (Nova)

# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import datetime
import time
import zlib

import mock
from oslo_log import log as logging
from oslo_serialization import base64
from oslo_utils import timeutils
import six

from nova.compute import api as compute_api
from nova.compute import instance_actions
from nova.compute import manager as compute_manager
from nova.compute import rpcapi
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.scheduler import weights
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.tests import uuidsentinel as uuids
from nova.virt import fake
from nova import volume

LOG = logging.getLogger(__name__)

class AltHostWeigher(weights.BaseHostWeigher):
    """Used in the alternate host tests to return a pre-determined list of
    hosts.
    """
    def _weigh_object(self, host_state, weight_properties):
        """Return a defined order of hosts."""
        weights = {"selection": 999, "alt_host1": 888, "alt_host2": 777,
                   "alt_host3": 666, "host1": 0, "host2": 0}
        return weights.get(host_state.host, 0)
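
# NOTE: a sketch of how a weigher like this is wired up -- in this era of
# Nova, tests typically enable a custom weigher through the
# [filter_scheduler] weight_classes config option, e.g.
# self.flags(weight_classes=[__name__ + '.AltHostWeigher'],
#            group='filter_scheduler'), after which the scheduler sorts
# candidate hosts by the hard-coded weights above (highest first).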


class ServersTestBase(integrated_helpers._IntegratedTestBase):
    api_major_version = 'v2'
    _force_delete_parameter = 'forceDelete'
    _image_ref_parameter = 'imageRef'
    _flavor_ref_parameter = 'flavorRef'
    _access_ipv4_parameter = 'accessIPv4'
    _access_ipv6_parameter = 'accessIPv6'
    _return_resv_id_parameter = 'return_reservation_id'
    _min_count_parameter = 'min_count'

    def setUp(self):
        self.computes = {}
        super(ServersTestBase, self).setUp()
        # The network service is called as part of server creates but no
        # networks have been populated in the db, so stub the methods.
        # The networks aren't relevant to what is being tested.
        fake_network.set_stub_network_methods(self)
        self.conductor = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')

    def _wait_for_state_change(self, server, from_status):
        for i in range(0, 50):
            server = self.api.get_server(server['id'])
            if server['status'] != from_status:
                break
            time.sleep(.1)

        return server

    def _wait_for_deletion(self, server_id):
        # Wait (briefly) for deletion
        for _retries in range(50):
            try:
                found_server = self.api.get_server(server_id)
            except client.OpenStackApiNotFoundException:
                found_server = None
                LOG.debug("Got 404, proceeding")
                break

            LOG.debug("Found_server=%s", found_server)

            # TODO(justinsb): Mock doesn't yet do accurate state changes
            # if found_server['status'] != 'deleting':
            #    break
            time.sleep(.1)

        # Should be gone
        self.assertFalse(found_server)

    def _delete_server(self, server_id):
        # Delete the server
        self.api.delete_server(server_id)
        self._wait_for_deletion(server_id)

    def _get_access_ips_params(self):
        return {self._access_ipv4_parameter: "172.19.0.2",
                self._access_ipv6_parameter: "fe80::2"}

    def _verify_access_ips(self, server):
        self.assertEqual('172.19.0.2',
                         server[self._access_ipv4_parameter])
        self.assertEqual('fe80::2', server[self._access_ipv6_parameter])


class ServersTest(ServersTestBase):

    def test_get_servers(self):
        # Simple check that listing servers works.
        servers = self.api.get_servers()
        for server in servers:
            LOG.debug("server: %s", server)

    def _get_node_build_failures(self):
        ctxt = context.get_admin_context()
        computes = objects.ComputeNodeList.get_all(ctxt)
        return {
            node.hypervisor_hostname: int(node.stats.get('failed_builds', 0))
            for node in computes}

    def _run_periodics(self):
        """Run the update_available_resource task on every compute manager.

        This runs periodics on the computes in an undefined order; some child
        classes redefine this function to force a specific order.
        """
        if self.compute.host not in self.computes:
            self.computes[self.compute.host] = self.compute
        ctx = context.get_admin_context()
        for compute in self.computes.values():
            LOG.info('Running periodic for compute (%s)',
                     compute.manager.host)
            compute.manager.update_available_resource(ctx)
        LOG.info('Finished with periodics')
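
    # NOTE: update_available_resource is the periodic task through which the
    # resource tracker persists compute-node stats (including
    # 'failed_builds') to the database, which is why the tests below only
    # expect persisted build failures after calling _run_periodics().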

    def test_create_server_with_error(self):
        # Create a server which will enter error state.

        def throw_error(*args, **kwargs):
            raise exception.BuildAbortException(reason='',
                                                instance_uuid='fake')

        self.stub_out('nova.virt.fake.FakeDriver.spawn', throw_error)

        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        created_server_id = created_server['id']

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        found_server = self._wait_for_state_change(found_server, 'BUILD')

        self.assertEqual('ERROR', found_server['status'])
        self._delete_server(created_server_id)

        # We should have no (persisted) build failures until we update
        # resources, after which we should have one
        self.assertEqual([0], list(self._get_node_build_failures().values()))
        self._run_periodics()
        self.assertEqual([1], list(self._get_node_build_failures().values()))

    def _test_create_server_with_error_with_retries(self):
        # Create a server which will enter error state.
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        self.flags(host='host2')
        self.compute2 = self.start_service('compute', host='host2')
        self.computes['compute2'] = self.compute2

        fails = []

        def throw_error(*args, **kwargs):
            fails.append('one')
            raise test.TestingException('Please retry me')

        self.stub_out('nova.virt.fake.FakeDriver.spawn', throw_error)

        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        created_server_id = created_server['id']

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        found_server = self._wait_for_state_change(found_server, 'BUILD')

        self.assertEqual('ERROR', found_server['status'])
        self._delete_server(created_server_id)

        return len(fails)

    def test_create_server_with_error_with_retries(self):
        self.flags(max_attempts=2, group='scheduler')
        fails = self._test_create_server_with_error_with_retries()
        self.assertEqual(2, fails)
        self._run_periodics()
        self.assertEqual(
            [1, 1], list(self._get_node_build_failures().values()))

    def test_create_server_with_error_with_no_retries(self):
        self.flags(max_attempts=1, group='scheduler')
        fails = self._test_create_server_with_error_with_retries()
        self.assertEqual(1, fails)
        self._run_periodics()
        self.assertEqual(
            [0, 1], list(sorted(self._get_node_build_failures().values())))

    def test_create_and_delete_server(self):
        # Creates and deletes a server.

        # Create server
        # Build the server data gradually, checking errors along the way
        server = {}
        good_server = self._build_minimal_create_server_request()

        post = {'server': server}

        # Without an imageRef, this throws 500.
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)

        # With an invalid imageRef, this throws 500.
        server[self._image_ref_parameter] = self.get_invalid_image()
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)

        # Add a valid imageRef
        server[self._image_ref_parameter] = good_server.get(
            self._image_ref_parameter)

        # Without flavorRef, this throws 500
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)

        server[self._flavor_ref_parameter] = good_server.get(
            self._flavor_ref_parameter)

        # Without a name, this throws 500
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)

        # Set a valid server name
        server['name'] = good_server['name']

        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s", created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Check it's there
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        # It should also be in the all-servers list
        servers = self.api.get_servers()
        server_ids = [s['id'] for s in servers]
        self.assertIn(created_server_id, server_ids)

        found_server = self._wait_for_state_change(found_server, 'BUILD')
        # It should be available...
        # TODO(justinsb): Mock doesn't yet do this...
        self.assertEqual('ACTIVE', found_server['status'])
        servers = self.api.get_servers(detail=True)
        for server in servers:
            self.assertIn("image", server)
            self.assertIn("flavor", server)

        self._delete_server(created_server_id)
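
    # NOTE: under v2.1 request schema validation, most of the malformed
    # requests above are rejected with a 400 rather than the legacy 500
    # described in the comments; assertRaises(client.OpenStackApiException,
    # ...) is agnostic to the exact status code, so the checks hold either
    # way.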

    def _force_reclaim(self):
        # Make sure that compute manager thinks the instance is
        # old enough to be expired
        the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
        timeutils.set_time_override(override_time=the_past)
        self.addCleanup(timeutils.clear_time_override)
        ctxt = context.get_admin_context()
        self.compute._reclaim_queued_deletes(ctxt)

    def test_deferred_delete(self):
        # Creates, deletes and waits for server to be reclaimed.
        self.flags(reclaim_instance_interval=1)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s", created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])

        # Cannot restore unless instance is deleted
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server_action, created_server_id,
                          {'restore': {}})

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SOFT_DELETED', found_server['status'])

        self._force_reclaim()

        # Wait for real deletion
        self._wait_for_deletion(created_server_id)

    def test_deferred_delete_restore(self):
        # Creates, deletes and restores a server.
        self.flags(reclaim_instance_interval=3600)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s", created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SOFT_DELETED', found_server['status'])

        # Restore server
        self.api.post_server_action(created_server_id, {'restore': {}})

        # Wait for server to become active again
        found_server = self._wait_for_state_change(found_server, 'DELETED')
        self.assertEqual('ACTIVE', found_server['status'])

    def test_deferred_delete_restore_overquota(self):
        # Test that a restore that would put the user over quota fails
        self.flags(instances=1, group='quota')
        # Creates, deletes and restores a server.
        self.flags(reclaim_instance_interval=3600)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server1 = self.api.post_server({'server': server})
        LOG.debug("created_server: %s", created_server1)
        self.assertTrue(created_server1['id'])
        created_server_id1 = created_server1['id']

        # Wait for it to finish being created
        found_server1 = self._wait_for_state_change(created_server1, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server1['status'])

        # Delete the server
        self.api.delete_server(created_server_id1)

        # Wait for queued deletion
        found_server1 = self._wait_for_state_change(found_server1, 'ACTIVE')
        self.assertEqual('SOFT_DELETED', found_server1['status'])

        # Create a second server
        server = self._build_minimal_create_server_request()

        created_server2 = self.api.post_server({'server': server})
        LOG.debug("created_server: %s", created_server2)
        self.assertTrue(created_server2['id'])

        # Wait for it to finish being created
        found_server2 = self._wait_for_state_change(created_server2, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server2['status'])

        # Try to restore the first server, it should fail
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_action,
                               created_server_id1, {'restore': {}})
        self.assertEqual(403, ex.response.status_code)
        self.assertEqual('SOFT_DELETED', found_server1['status'])

    def test_deferred_delete_force(self):
        # Creates, deletes and force deletes a server.
        self.flags(reclaim_instance_interval=3600)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s", created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SOFT_DELETED', found_server['status'])

        # Force delete server
        self.api.post_server_action(created_server_id,
                                    {self._force_delete_parameter: {}})

        # Wait for real deletion
        self._wait_for_deletion(created_server_id)

    def test_create_server_with_metadata(self):
        # Creates a server with metadata.

        # Build the server data gradually, checking errors along the way
        server = self._build_minimal_create_server_request()

        metadata = {}
        for i in range(30):
            metadata['key_%s' % i] = 'value_%s' % i

        server['metadata'] = metadata

        post = {'server': server}
        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s", created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual(metadata, found_server.get('metadata'))

        # The server should also be in the all-servers details list
        servers = self.api.get_servers(detail=True)
        server_map = {server['id']: server for server in servers}
        found_server = server_map.get(created_server_id)
        self.assertTrue(found_server)
        # Details do include metadata
        self.assertEqual(metadata, found_server.get('metadata'))

        # The server should also be in the all-servers summary list
        servers = self.api.get_servers(detail=False)
        server_map = {server['id']: server for server in servers}
        found_server = server_map.get(created_server_id)
        self.assertTrue(found_server)
        # Summary should not include metadata
        self.assertFalse(found_server.get('metadata'))

        # Cleanup
        self._delete_server(created_server_id)

    def test_server_metadata_actions_negative_invalid_state(self):
        # Create server with metadata
        server = self._build_minimal_create_server_request()

        metadata = {'key_1': 'value_1'}
        server['metadata'] = metadata

        post = {'server': server}
        created_server = self.api.post_server(post)

        found_server = self._wait_for_state_change(created_server, 'BUILD')
        self.assertEqual('ACTIVE', found_server['status'])
        self.assertEqual(metadata, found_server.get('metadata'))
        server_id = found_server['id']

        # Change status from ACTIVE to SHELVED for negative test
        self.flags(shelved_offload_time=-1)
        self.api.post_server_action(server_id, {'shelve': {}})
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SHELVED', found_server['status'])

        metadata = {'key_2': 'value_2'}

        # Update Metadata item in SHELVED (not ACTIVE, etc.)
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_metadata,
                               server_id, metadata)
        self.assertEqual(409, ex.response.status_code)
        self.assertEqual('SHELVED', found_server['status'])

        # Delete Metadata item in SHELVED (not ACTIVE, etc.)
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.delete_server_metadata,
                               server_id, 'key_1')
        self.assertEqual(409, ex.response.status_code)
        self.assertEqual('SHELVED', found_server['status'])

        # Cleanup
        self._delete_server(server_id)
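
    # NOTE: shelved_offload_time=-1 (set above) means a shelved instance is
    # never offloaded from its compute host, so the server stays in SHELVED
    # rather than transitioning to SHELVED_OFFLOADED.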

    def test_create_and_rebuild_server(self):
        # Rebuild a server with metadata.

        # create a server which initially has no metadata
        server = self._build_minimal_create_server_request()
        server_post = {'server': server}

        metadata = {}
        for i in range(30):
            metadata['key_%s' % i] = 'value_%s' % i

        server_post['server']['metadata'] = metadata

        created_server = self.api.post_server(server_post)
        LOG.debug("created_server: %s", created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        created_server = self._wait_for_state_change(created_server, 'BUILD')

        # rebuild the server with metadata and other server attributes
        post = {}
        post['rebuild'] = {
            self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            "name": "blah",
            self._access_ipv4_parameter: "172.19.0.2",
            self._access_ipv6_parameter: "fe80::2",
            "metadata": {'some': 'thing'},
        }
        post['rebuild'].update(self._get_access_ips_params())

        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s", created_server)
        self.assertTrue(created_server['id'])

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
        self.assertEqual('blah', found_server.get('name'))
        self.assertEqual(post['rebuild'][self._image_ref_parameter],
                         found_server.get('image')['id'])
        self._verify_access_ips(found_server)

        # rebuild the server with empty metadata and nothing else
        post = {}
        post['rebuild'] = {
            self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            "metadata": {},
        }

        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s", created_server)
        self.assertTrue(created_server['id'])

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual({}, found_server.get('metadata'))
        self.assertEqual('blah', found_server.get('name'))
        self.assertEqual(post['rebuild'][self._image_ref_parameter],
                         found_server.get('image')['id'])
        self._verify_access_ips(found_server)

        # Cleanup
        self._delete_server(created_server_id)

    def test_rename_server(self):
        # Test building and renaming a server.

        # Create a server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s", created_server)
        server_id = created_server['id']
        self.assertTrue(server_id)

        # Rename the server to 'new-name'
        self.api.put_server(server_id, {'server': {'name': 'new-name'}})

        # Check the name of the server
        created_server = self.api.get_server(server_id)
        self.assertEqual(created_server['name'], 'new-name')

        # Cleanup
        self._delete_server(server_id)

    def test_create_multiple_servers(self):
        # Creates multiple servers and checks for reservation_id.

        # Create 2 servers, setting 'return_reservation_id', which should
        # return a reservation_id
        server = self._build_minimal_create_server_request()
        server[self._min_count_parameter] = 2
        server[self._return_resv_id_parameter] = True
        post = {'server': server}
        response = self.api.post_server(post)
        self.assertIn('reservation_id', response)
        reservation_id = response['reservation_id']
        self.assertNotIn(reservation_id, ['', None])

        # Create 1 more server, which should not return a reservation_id
        server = self._build_minimal_create_server_request()
        post = {'server': server}
        created_server = self.api.post_server(post)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # lookup servers created by the first request.
        servers = self.api.get_servers(detail=True,
                                       search_opts={'reservation_id':
                                                    reservation_id})
        server_map = {server['id']: server for server in servers}
        found_server = server_map.get(created_server_id)
        # The server from the 2nd request should not be there.
        self.assertIsNone(found_server)
        # Should have found 2 servers.
        self.assertEqual(len(server_map), 2)

        # Cleanup
        self._delete_server(created_server_id)
        for server_id in server_map:
            self._delete_server(server_id)

    def test_create_server_with_injected_files(self):
        # Creates a server with injected_files.
        personality = []

        # Inject a text file
        data = 'Hello, World!'
        personality.append({
            'path': '/helloworld.txt',
            'contents': base64.encode_as_bytes(data),
        })

        # Inject a binary file
        data = zlib.compress(b'Hello, World!')
        personality.append({
            'path': '/helloworld.zip',
            'contents': base64.encode_as_bytes(data),
        })

        # Create server
        server = self._build_minimal_create_server_request()
        server['personality'] = personality
        post = {'server': server}

        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s", created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Check it's there
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        found_server = self._wait_for_state_change(found_server, 'BUILD')
        self.assertEqual('ACTIVE', found_server['status'])

        # Cleanup
        self._delete_server(created_server_id)

    def test_stop_start_servers_negative_invalid_state(self):
        # Create server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        created_server_id = created_server['id']

        found_server = self._wait_for_state_change(created_server, 'BUILD')
        self.assertEqual('ACTIVE', found_server['status'])

        # Start server in ACTIVE
        # NOTE(mkoshiya): When os-start API runs, the server status
        # must be SHUTOFF.
        # By returning 409, I want to confirm that the ACTIVE server does not
        # cause unexpected behavior.
        post = {'os-start': {}}
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_action,
                               created_server_id, post)
        self.assertEqual(409, ex.response.status_code)
        self.assertEqual('ACTIVE', found_server['status'])

        # Stop server
        post = {'os-stop': {}}
        self.api.post_server_action(created_server_id, post)
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SHUTOFF', found_server['status'])

        # Stop server in SHUTOFF
        # NOTE(mkoshiya): When os-stop API runs, the server status
        # must be ACTIVE or ERROR.
        # By returning 409, I want to confirm that the SHUTOFF server does not
        # cause unexpected behavior.
        post = {'os-stop': {}}
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_action,
                               created_server_id, post)
        self.assertEqual(409, ex.response.status_code)
        self.assertEqual('SHUTOFF', found_server['status'])

        # Cleanup
        self._delete_server(created_server_id)

    def test_revert_resized_server_negative_invalid_state(self):
        # Create server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        created_server_id = created_server['id']
        found_server = self._wait_for_state_change(created_server, 'BUILD')
        self.assertEqual('ACTIVE', found_server['status'])

        # Revert resized server in ACTIVE
        # NOTE(yatsumi): When revert resized server API runs,
        # the server status must be VERIFY_RESIZE.
        # By returning 409, I want to confirm that the ACTIVE server does not
        # cause unexpected behavior.
        post = {'revertResize': {}}
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_action,
                               created_server_id, post)
        self.assertEqual(409, ex.response.status_code)
        self.assertEqual('ACTIVE', found_server['status'])

        # Cleanup
        self._delete_server(created_server_id)

    def test_resize_server_negative_invalid_state(self):
        # Avoid migration
        self.flags(allow_resize_to_same_host=True)

        # Create server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        created_server_id = created_server['id']
        found_server = self._wait_for_state_change(created_server, 'BUILD')
        self.assertEqual('ACTIVE', found_server['status'])

        # Resize server(flavorRef: 1 -> 2)
        post = {'resize': {"flavorRef": "2", "OS-DCF:diskConfig": "AUTO"}}
        self.api.post_server_action(created_server_id, post)
        found_server = self._wait_for_state_change(found_server, 'RESIZE')
        self.assertEqual('VERIFY_RESIZE', found_server['status'])

        # Resize server in VERIFY_RESIZE(flavorRef: 2 -> 1)
        # NOTE(yatsumi): When resize API runs, the server status
        # must be ACTIVE or SHUTOFF.
        # By returning 409, I want to confirm that the VERIFY_RESIZE server
        # does not cause unexpected behavior.
        post = {'resize': {"flavorRef": "1", "OS-DCF:diskConfig": "AUTO"}}
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_action,
                               created_server_id, post)
        self.assertEqual(409, ex.response.status_code)
        self.assertEqual('VERIFY_RESIZE', found_server['status'])

        # Cleanup
        self._delete_server(created_server_id)

    def test_confirm_resized_server_negative_invalid_state(self):
        # Create server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        created_server_id = created_server['id']
        found_server = self._wait_for_state_change(created_server, 'BUILD')
        self.assertEqual('ACTIVE', found_server['status'])

        # Confirm resized server in ACTIVE
        # NOTE(yatsumi): When confirm resized server API runs,
        # the server status must be VERIFY_RESIZE.
        # By returning 409, I want to confirm that the ACTIVE server does not
        # cause unexpected behavior.
        post = {'confirmResize': {}}
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_action,
                               created_server_id, post)
        self.assertEqual(409, ex.response.status_code)
        self.assertEqual('ACTIVE', found_server['status'])

        # Cleanup
        self._delete_server(created_server_id)

    def test_resize_server_overquota(self):
        self.flags(cores=1, group='quota')
        self.flags(ram=512, group='quota')
        # Create server with default flavor, 1 core, 512 ram
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        created_server_id = created_server['id']

        found_server = self._wait_for_state_change(created_server, 'BUILD')
        self.assertEqual('ACTIVE', found_server['status'])

        # Try to resize to flavorid 2, 1 core, 2048 ram
        post = {'resize': {'flavorRef': '2'}}
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_action,
                               created_server_id, post)
        self.assertEqual(403, ex.response.status_code)


class ServersTestV21(ServersTest):
    api_major_version = 'v2.1'


class ServersTestV219(ServersTestBase):
    api_major_version = 'v2.1'

    def _create_server(self, set_desc=True, desc=None):
        server = self._build_minimal_create_server_request()
        if set_desc:
            server['description'] = desc
        post = {'server': server}
        response = self.api.api_post('/servers', post).body
        return (server, response['server'])

    def _update_server(self, server_id, set_desc=True, desc=None):
        new_name = integrated_helpers.generate_random_alphanumeric(8)
        server = {'server': {'name': new_name}}
        if set_desc:
            server['server']['description'] = desc
        self.api.api_put('/servers/%s' % server_id, server)

    def _rebuild_server(self, server_id, set_desc=True, desc=None):
        new_name = integrated_helpers.generate_random_alphanumeric(8)
        post = {}
        post['rebuild'] = {
            "name": new_name,
            self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            self._access_ipv4_parameter: "172.19.0.2",
            self._access_ipv6_parameter: "fe80::2",
            "metadata": {'some': 'thing'},
        }
        post['rebuild'].update(self._get_access_ips_params())
        if set_desc:
            post['rebuild']['description'] = desc
        self.api.api_post('/servers/%s/action' % server_id, post)

    def _create_server_and_verify(self, set_desc=True, expected_desc=None):
        # Creates a server with a description and verifies it is
        # in the GET responses.
        created_server_id = self._create_server(set_desc,
                                                expected_desc)[1]['id']
        self._verify_server_description(created_server_id, expected_desc)
        self._delete_server(created_server_id)

    def _update_server_and_verify(self, server_id, set_desc=True,
                                  expected_desc=None):
        # Updates a server with a description and verifies it is
        # in the GET responses.
        self._update_server(server_id, set_desc, expected_desc)
        self._verify_server_description(server_id, expected_desc)

    def _rebuild_server_and_verify(self, server_id, set_desc=True,
                                   expected_desc=None):
        # Rebuilds a server with a description and verifies it is
        # in the GET responses.
        self._rebuild_server(server_id, set_desc, expected_desc)
        self._verify_server_description(server_id, expected_desc)

    def _verify_server_description(self, server_id, expected_desc=None,
                                   desc_in_resp=True):
        # Calls GET on the servers and verifies that the description
        # is set as expected in the response, or not set at all.
        response = self.api.api_get('/servers/%s' % server_id)
        found_server = response.body['server']
        self.assertEqual(server_id, found_server['id'])
        if desc_in_resp:
            # Verify the description is set as expected (can be None)
            self.assertEqual(expected_desc, found_server.get('description'))
        else:
            # Verify the description is not included in the response.
            self.assertNotIn('description', found_server)

        servers = self.api.api_get('/servers/detail').body['servers']
        server_map = {server['id']: server for server in servers}
        found_server = server_map.get(server_id)
        self.assertTrue(found_server)
        if desc_in_resp:
            # Verify the description is set as expected (can be None)
            self.assertEqual(expected_desc, found_server.get('description'))
        else:
            # Verify the description is not included in the response.
            self.assertNotIn('description', found_server)

    def _create_assertRaisesRegex(self, desc):
        # Verifies that a 400 error is thrown on create server
        with self.assertRaisesRegex(client.OpenStackApiException,
                                    ".*Unexpected status code.*") as cm:
            self._create_server(True, desc)
            self.assertEqual(400, cm.exception.response.status_code)

    def _update_assertRaisesRegex(self, server_id, desc):
        # Verifies that a 400 error is thrown on update server
        with self.assertRaisesRegex(client.OpenStackApiException,
                                    ".*Unexpected status code.*") as cm:
            self._update_server(server_id, True, desc)
            self.assertEqual(400, cm.exception.response.status_code)

    def _rebuild_assertRaisesRegex(self, server_id, desc):
        # Verifies that a 400 error is thrown on rebuild server
        with self.assertRaisesRegex(client.OpenStackApiException,
                                    ".*Unexpected status code.*") as cm:
            self._rebuild_server(server_id, True, desc)
            self.assertEqual(400, cm.exception.response.status_code)

    def test_create_server_with_description(self):
        self.api.microversion = '2.19'
        # Create and get a server with a description
        self._create_server_and_verify(True, 'test description')
        # Create and get a server with an empty description
        self._create_server_and_verify(True, '')
        # Create and get a server with description set to None
        self._create_server_and_verify()
        # Create and get a server without setting the description
        self._create_server_and_verify(False)

    def test_update_server_with_description(self):
        self.api.microversion = '2.19'
        # Create a server with an initial description
        server_id = self._create_server(True, 'test desc 1')[1]['id']

        # Update and get the server with a description
        self._update_server_and_verify(server_id, True, 'updated desc')
        # Update and get the server name without changing the description
        self._update_server_and_verify(server_id, False, 'updated desc')
        # Update and get the server with an empty description
        self._update_server_and_verify(server_id, True, '')
        # Update and get the server by removing the description (set to None)
        self._update_server_and_verify(server_id)
        # Update and get the server with a 2nd new description
        self._update_server_and_verify(server_id, True, 'updated desc2')

        # Cleanup
        self._delete_server(server_id)

    def test_rebuild_server_with_description(self):
        self.api.microversion = '2.19'

        # Create a server with an initial description
        server = self._create_server(True, 'test desc 1')[1]
        server_id = server['id']
        self._wait_for_state_change(server, 'BUILD')

        # Rebuild and get the server with a description
        self._rebuild_server_and_verify(server_id, True, 'updated desc')
        # Rebuild and get the server name without changing the description
        self._rebuild_server_and_verify(server_id, False, 'updated desc')
        # Rebuild and get the server with an empty description
        self._rebuild_server_and_verify(server_id, True, '')
        # Rebuild and get the server by removing the description (set to None)
        self._rebuild_server_and_verify(server_id)
        # Rebuild and get the server with a 2nd new description
        self._rebuild_server_and_verify(server_id, True, 'updated desc2')

        # Cleanup
        self._delete_server(server_id)

    def test_version_compatibility(self):
        # Create a server with microversion v2.19 and a description.
        self.api.microversion = '2.19'
        server_id = self._create_server(True, 'test desc 1')[1]['id']
        # Verify that the description is not included on V2.18 GETs
        self.api.microversion = '2.18'
        self._verify_server_description(server_id, desc_in_resp=False)
        # Verify that updating the server with description on V2.18
        # results in a 400 error
        self._update_assertRaisesRegex(server_id, 'test update 2.18')
        # Verify that rebuilding the server with description on V2.18
        # results in a 400 error
        self._rebuild_assertRaisesRegex(server_id, 'test rebuild 2.18')
        # Cleanup
        self._delete_server(server_id)

        # Create a server on V2.18 and verify that the description
        # defaults to the name on a V2.19 GET
        server_req, response = self._create_server(False)
        server_id = response['id']
        self.api.microversion = '2.19'
        self._verify_server_description(server_id, server_req['name'])
        # Cleanup
        self._delete_server(server_id)

        # Verify that creating a server with description on V2.18
        # results in a 400 error
        self.api.microversion = '2.18'
        self._create_assertRaisesRegex('test create 2.18')

    def test_description_errors(self):
        self.api.microversion = '2.19'
        # Create servers with invalid descriptions. These throw 400.
        # Invalid unicode with non-printable control char
        self._create_assertRaisesRegex(u'invalid\0dstring')
        # Description is longer than 255 chars
        self._create_assertRaisesRegex('x' * 256)

        # Update and rebuild servers with invalid descriptions.
        # These throw 400.
        server_id = self._create_server(True, "desc")[1]['id']
        # Invalid unicode with non-printable control char
        self._update_assertRaisesRegex(server_id, u'invalid\u0604string')
        self._rebuild_assertRaisesRegex(server_id, u'invalid\u0604string')
        # Description is longer than 255 chars
        self._update_assertRaisesRegex(server_id, 'x' * 256)
        self._rebuild_assertRaisesRegex(server_id, 'x' * 256)


class ServerTestV220(ServersTestBase):
    api_major_version = 'v2.1'

    def setUp(self):
        super(ServerTestV220, self).setUp()
        self.api.microversion = '2.20'
        fake_network.set_stub_network_methods(self)
        self.ctxt = context.get_admin_context()

    def _create_server(self):
        server = self._build_minimal_create_server_request()
        post = {'server': server}
        response = self.api.api_post('/servers', post).body
        return (server, response['server'])

    def _shelve_server(self):
        server = self._create_server()[1]
        server_id = server['id']
        self._wait_for_state_change(server, 'BUILD')
        self.api.post_server_action(server_id, {'shelve': None})
        return self._wait_for_state_change(server, 'ACTIVE')

    def _get_fake_bdms(self, ctxt):
        return block_device_obj.block_device_make_list(
            self.ctxt,
            [fake_block_device.FakeDbBlockDeviceDict(
                {'device_name': '/dev/vda',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'volume_id': '5d721593-f033-4f6d-ab6f-b5b067e61bc4'})])

    def test_attach_detach_vol_to_shelved_server(self):
        self.flags(shelved_offload_time=-1)
        found_server = self._shelve_server()
        self.assertEqual('SHELVED', found_server['status'])
        server_id = found_server['id']

        # Test attach volume
        self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get)
        with test.nested(mock.patch.object(volume.cinder,
                                           'is_microversion_supported'),
                         mock.patch.object(compute_api.API,
                                           '_check_attach_and_reserve_volume'),
                         mock.patch.object(rpcapi.ComputeAPI,
                                           'attach_volume')
                         ) as (mock_cinder_mv, mock_reserve, mock_attach):
            mock_cinder_mv.side_effect = \
                exception.CinderAPIVersionNotAvailable(version='3.44')
            volume_attachment = {"volumeAttachment": {"volumeId":
                                     "5d721593-f033-4f6d-ab6f-b5b067e61bc4"}}
            self.api.api_post(
                '/servers/%s/os-volume_attachments' % (server_id),
                volume_attachment)
            self.assertTrue(mock_reserve.called)
            self.assertTrue(mock_attach.called)

        # Test detach volume
        with test.nested(mock.patch.object(volume.cinder.API,
                                           'begin_detaching'),
                         mock.patch.object(objects.BlockDeviceMappingList,
                                           'get_by_instance_uuid'),
                         mock.patch.object(rpcapi.ComputeAPI,
                                           'detach_volume')
                         ) as (mock_check, mock_get_bdms, mock_rpc):
            mock_get_bdms.return_value = self._get_fake_bdms(self.ctxt)
            attachment_id = mock_get_bdms.return_value[0]['volume_id']

            self.api.api_delete('/servers/%s/os-volume_attachments/%s' %
                                (server_id, attachment_id))
            self.assertTrue(mock_check.called)
            self.assertTrue(mock_rpc.called)

        self._delete_server(server_id)

    def test_attach_detach_vol_to_shelved_offloaded_server(self):
        self.flags(shelved_offload_time=0)
        found_server = self._shelve_server()
        self.assertEqual('SHELVED_OFFLOADED', found_server['status'])
        server_id = found_server['id']

        # Test attach volume
        self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get)
        with test.nested(mock.patch.object(volume.cinder,
                                           'is_microversion_supported'),
                         mock.patch.object(compute_api.API,
                                           '_check_attach_and_reserve_volume'),
                         mock.patch.object(volume.cinder.API,
                                           'attach')
                         ) as (mock_cinder_mv, mock_reserve, mock_vol):
            mock_cinder_mv.side_effect = \
                exception.CinderAPIVersionNotAvailable(version='3.44')
            volume_attachment = {"volumeAttachment": {"volumeId":
                                     "5d721593-f033-4f6d-ab6f-b5b067e61bc4"}}
            attach_response = self.api.api_post(
                '/servers/%s/os-volume_attachments' % (server_id),
                volume_attachment).body['volumeAttachment']
            self.assertTrue(mock_reserve.called)
            self.assertTrue(mock_vol.called)
            self.assertIsNone(attach_response['device'])

        # Test detach volume
        with test.nested(mock.patch.object(volume.cinder.API,
                                           'begin_detaching'),
                         mock.patch.object(objects.BlockDeviceMappingList,
                                           'get_by_instance_uuid'),
                         mock.patch.object(compute_api.API,
                                           '_local_cleanup_bdm_volumes')
                         ) as (mock_check, mock_get_bdms, mock_clean_vols):
            mock_get_bdms.return_value = self._get_fake_bdms(self.ctxt)
            attachment_id = mock_get_bdms.return_value[0]['volume_id']

            self.api.api_delete('/servers/%s/os-volume_attachments/%s' %
                                (server_id, attachment_id))
            self.assertTrue(mock_check.called)
            self.assertTrue(mock_clean_vols.called)

        self._delete_server(server_id)

    def test_attach_detach_vol_to_shelved_offloaded_server_new_flow(self):
        self.flags(shelved_offload_time=0)
        found_server = self._shelve_server()
        self.assertEqual('SHELVED_OFFLOADED', found_server['status'])
        server_id = found_server['id']
        fake_bdms = self._get_fake_bdms(self.ctxt)

        # Test attach volume
        self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get)
        with test.nested(mock.patch.object(
                             volume.cinder, 'is_microversion_supported'),
                         mock.patch.object(
                             compute_api.API,
                             '_check_volume_already_attached_to_instance'),
                         mock.patch.object(volume.cinder.API,
                                           'check_availability_zone'),
                         mock.patch.object(volume.cinder.API,
                                           'attachment_create'),
                         mock.patch.object(volume.cinder.API,
                                           'attachment_complete')
                         ) as (mock_cinder_mv, mock_check_vol_attached,
                               mock_check_av_zone, mock_attach_create,
                               mock_attachment_complete):
            mock_attach_create.return_value = {'id': uuids.volume}
            volume_attachment = {"volumeAttachment": {"volumeId":
                                     "5d721593-f033-4f6d-ab6f-b5b067e61bc4"}}
            attach_response = self.api.api_post(
                '/servers/%s/os-volume_attachments' % (server_id),
                volume_attachment).body['volumeAttachment']
            self.assertTrue(mock_attach_create.called)
            mock_attachment_complete.assert_called_once_with(
                mock.ANY, uuids.volume)
            self.assertIsNone(attach_response['device'])

        # Test detach volume
        with test.nested(mock.patch.object(objects.BlockDeviceMappingList,
                                           'get_by_instance_uuid'),
                         mock.patch.object(compute_api.API,
                                           '_local_cleanup_bdm_volumes')
                         ) as (mock_get_bdms, mock_clean_vols):
            mock_get_bdms.return_value = fake_bdms
            attachment_id = mock_get_bdms.return_value[0]['volume_id']

            self.api.api_delete('/servers/%s/os-volume_attachments/%s' %
                                (server_id, attachment_id))
            self.assertTrue(mock_clean_vols.called)

        self._delete_server(server_id)
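
# NOTE: the first two tests above force the legacy Cinder attach flow by
# making is_microversion_supported raise CinderAPIVersionNotAvailable, while
# the *_new_flow variant exercises the Cinder microversion 3.44+
# attachment-based flow (attachment_create/attachment_complete).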


class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
                            integrated_helpers.InstanceHelperMixin):
    api_major_version = 'v2.1'
    # We have to cap the microversion at 2.38 because that's the max we
    # can use to update image metadata via our compute images proxy API.
    microversion = '2.38'

    def _disable_compute_for(self, server):
        # Refresh to get its host
        server = self.api.get_server(server['id'])
        host = server['OS-EXT-SRV-ATTR:host']

        # Disable the service it is on
        self.api_fixture.admin_api.put_service('disable',
                                               {'host': host,
                                                'binary': 'nova-compute'})

    def test_rebuild_with_image_novalidhost(self):
        """Creates a server with an image that is valid for the single compute
        that we have. Then rebuilds the server, passing in an image with
        metadata that does not fit the single compute which should result in
        a NoValidHost error. The ImagePropertiesFilter filter is enabled by
        default so that should filter out the host based on the image meta.
        """
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        self.flags(host='host2')
        self.compute2 = self.start_service('compute', host='host2')

        # We hard-code from a fake image since we can't get images
        # via the compute /images proxy API with microversion > 2.35.
        original_image_ref = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
        server_req_body = {
            'server': {
                'imageRef': original_image_ref,
                'flavorRef': '1',  # m1.tiny from DefaultFlavorsFixture,
                'name': 'test_rebuild_with_image_novalidhost',
                # We don't care about networking for this test. This requires
                # microversion >= 2.37.
                'networks': 'none'
            }
        }
        server = self.api.post_server(server_req_body)
        self._wait_for_state_change(self.api, server, 'ACTIVE')

        # Disable the host we're on so ComputeFilter would have ruled it out
        # normally
        self._disable_compute_for(server)

        # Now update the image metadata to be something that won't work with
        # the fake compute driver we're using since the fake driver has an
        # "x86_64" architecture.
        rebuild_image_ref = (
            nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID)
        self.api.put_image_meta_key(
            rebuild_image_ref, 'hw_architecture', 'unicore32')
        # Now rebuild the server with that updated image and it should result
        # in a NoValidHost failure from the scheduler.
        rebuild_req_body = {
            'rebuild': {
                'imageRef': rebuild_image_ref
            }
        }
        # Since we're using the CastAsCall fixture, the NoValidHost error
        # should actually come back to the API and result in a 500 error.
        # Normally the user would get a 202 response because nova-api RPC
        # casts to nova-conductor which RPC calls the scheduler which raises
        # the NoValidHost. We can mimic the end user way to figure out the
        # failure by looking for the failed 'rebuild' instance action event.
        self.api.api_post('/servers/%s/action' % server['id'],
                          rebuild_req_body, check_response_status=[500])
        # Look for the failed rebuild action.
        self._wait_for_action_fail_completion(
            server, instance_actions.REBUILD, 'rebuild_server',
            # Before microversion 2.51 events are only returned for instance
            # actions if you're an admin.
            self.api_fixture.admin_api)
        # Assert the server image_ref was rolled back on failure.
        server = self.api.get_server(server['id'])
        self.assertEqual(original_image_ref, server['image']['id'])

        # The server should be in ERROR state
        self.assertEqual('ERROR', server['status'])
        self.assertIn('No valid host', server['fault']['message'])

        # Rebuild it again with the same bad image to make sure it's rejected
        # again. Since we're using CastAsCall here, there is no 202 from the
        # API, and the exception from conductor gets passed back through the
        # API.
        ex = self.assertRaises(
            client.OpenStackApiException, self.api.api_post,
            '/servers/%s/action' % server['id'], rebuild_req_body)
        self.assertIn('NoValidHost', six.text_type(ex))

    # A rebuild to the same host should never attempt a rebuild claim.
    @mock.patch('nova.compute.resource_tracker.ResourceTracker.rebuild_claim',
                new_callable=mock.NonCallableMock)
    def test_rebuild_with_new_image(self, mock_rebuild_claim):
        """Rebuilds a server with a different image which will run it through
        the scheduler to validate the image is still OK with the compute host
        that the instance is running on.

        Validates that additional resources are not allocated against the
        instance.host in Placement due to the rebuild on same host.
        """
        admin_api = self.api_fixture.admin_api
        admin_api.microversion = '2.53'

        def _get_provider_uuid_by_host(host):
            resp = admin_api.api_get(
                'os-hypervisors?hypervisor_hostname_pattern=%s' % host).body
            return resp['hypervisors'][0]['id']

        def _get_provider_usages(provider_uuid):
            return self.placement_api.get(
                '/resource_providers/%s/usages' % provider_uuid).body['usages']

        def _get_allocations_by_server_uuid(server_uuid):
            return self.placement_api.get(
                '/allocations/%s' % server_uuid).body['allocations']

        def _set_provider_inventory(rp_uuid, resource_class, inventory):
            # Get the resource provider generation for the inventory update.
            rp = self.placement_api.get(
                '/resource_providers/%s' % rp_uuid).body
            inventory['resource_provider_generation'] = rp['generation']
            return self.placement_api.put(
                '/resource_providers/%s/inventories/%s' %
                (rp_uuid, resource_class), inventory).body

        def assertFlavorMatchesAllocation(flavor, allocation):
            self.assertEqual(flavor['vcpus'], allocation['VCPU'])
            self.assertEqual(flavor['ram'], allocation['MEMORY_MB'])
            self.assertEqual(flavor['disk'], allocation['DISK_GB'])

        nodename = self.compute.manager._get_nodename(None)
        rp_uuid = _get_provider_uuid_by_host(nodename)
        # make sure we start with no usage on the compute node
        rp_usages = _get_provider_usages(rp_uuid)
        self.assertEqual({'VCPU': 0, 'MEMORY_MB': 0, 'DISK_GB': 0}, rp_usages)

        server_req_body = {
            'server': {
                # We hard-code from a fake image since we can't get images
                # via the compute /images proxy API with microversion > 2.35.
                'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                'flavorRef': '1',  # m1.tiny from DefaultFlavorsFixture,
                'name': 'test_rebuild_with_new_image',
                # We don't care about networking for this test. This requires
                # microversion >= 2.37.
                'networks': 'none'
            }
        }
        server = self.api.post_server(server_req_body)
        self._wait_for_state_change(self.api, server, 'ACTIVE')

        flavor = self.api.api_get('/flavors/1').body['flavor']

        # make the compute node full and ensure rebuild still succeed
        _set_provider_inventory(rp_uuid, "VCPU", {"total": 1})

        # There should be usage for the server on the compute node now.
        rp_usages = _get_provider_usages(rp_uuid)
        assertFlavorMatchesAllocation(flavor, rp_usages)
        allocs = _get_allocations_by_server_uuid(server['id'])
        self.assertIn(rp_uuid, allocs)
        allocs = allocs[rp_uuid]['resources']
        assertFlavorMatchesAllocation(flavor, allocs)

        rebuild_image_ref = (
            nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID)
        # Now rebuild the server with a different image.
        rebuild_req_body = {
            'rebuild': {
                'imageRef': rebuild_image_ref
            }
        }
        self.api.api_post('/servers/%s/action' % server['id'],
                          rebuild_req_body)
        self._wait_for_server_parameter(
            self.api, server, {'OS-EXT-STS:task_state': None})

        # The usage and allocations should not have changed.
        rp_usages = _get_provider_usages(rp_uuid)
        assertFlavorMatchesAllocation(flavor, rp_usages)

        allocs = _get_allocations_by_server_uuid(server['id'])
        self.assertIn(rp_uuid, allocs)
        allocs = allocs[rp_uuid]['resources']
        assertFlavorMatchesAllocation(flavor, allocs)

    def test_volume_backed_rebuild_different_image(self):
        """Tests that trying to rebuild a volume-backed instance with a
        different image than what is in the root disk of the root volume
        will result in a 400 BadRequest error.
        """
        self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
        # First create our server as normal.
        server_req_body = {
            # There is no imageRef because this is boot from volume.
            'server': {
                'flavorRef': '1',  # m1.tiny from DefaultFlavorsFixture,
                'name': 'test_volume_backed_rebuild_different_image',
                # We don't care about networking for this test. This requires
                # microversion >= 2.37.
                'networks': 'none',
                'block_device_mapping_v2': [{
                    'boot_index': 0,
                    'uuid':
                        nova_fixtures.CinderFixtureNewAttachFlow.
                        IMAGE_BACKED_VOL,
                    'source_type': 'volume',
                    'destination_type': 'volume'
                }]
            }
        }
        server = self.api.post_server(server_req_body)
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        # For a volume-backed server, the image ref will be an empty string
        # in the server response.
        self.assertEqual('', server['image'])

        # Now rebuild the server with a different image than was used to
        # create our fake volume.
        rebuild_image_ref = (
            nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID)
        rebuild_req_body = {
            'rebuild': {
                'imageRef': rebuild_image_ref
            }
        }
        resp = self.api.api_post('/servers/%s/action' % server['id'],
                                 rebuild_req_body,
                                 check_response_status=[400])
        # Assert that we failed because of the image change and not something
        # else.
        self.assertIn('Unable to rebuild with a different image for a '
                      'volume-backed server', six.text_type(resp))
  1196. class ProviderUsageBaseTestCase(test.TestCase,
  1197. integrated_helpers.InstanceHelperMixin):
  1198. """Base test class for functional tests that check provider usage
  1199. and consumer allocations in Placement during various operations.
  1200. Subclasses must define a **compute_driver** attribute for the virt driver
  1201. to use.
1202. This class sets up standard fixtures and controller services but does not
1203. start any compute services; that is left to the subclass.
  1204. """
  1205. microversion = 'latest'
  1206. def setUp(self):
  1207. self.flags(compute_driver=self.compute_driver)
  1208. super(ProviderUsageBaseTestCase, self).setUp()
  1209. self.useFixture(policy_fixture.RealPolicyFixture())
  1210. self.useFixture(nova_fixtures.NeutronFixture(self))
  1211. self.useFixture(nova_fixtures.AllServicesCurrent())
  1212. placement = self.useFixture(nova_fixtures.PlacementFixture())
  1213. self.placement_api = placement.api
  1214. api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
  1215. api_version='v2.1'))
  1216. self.admin_api = api_fixture.admin_api
  1217. self.admin_api.microversion = self.microversion
  1218. self.api = self.admin_api
1219. # The fake image backend is needed for image discovery.
  1220. nova.tests.unit.image.fake.stub_out_image_service(self)
  1221. self.start_service('conductor')
  1222. self.scheduler_service = self.start_service('scheduler')
  1223. self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
  1224. fake_network.set_stub_network_methods(self)
  1225. self.computes = {}
  1226. def _start_compute(self, host):
  1227. """Start a nova compute service on the given host
  1228. :param host: the name of the host that will be associated to the
  1229. compute service.
  1230. :return: the nova compute service object
  1231. """
  1232. fake.set_nodes([host])
  1233. self.addCleanup(fake.restore_nodes)
  1234. compute = self.start_service('compute', host=host)
  1235. self.computes[host] = compute
  1236. return compute
  1237. def _get_provider_uuid_by_host(self, host):
  1238. # NOTE(gibi): the compute node id is the same as the compute node
  1239. # provider uuid on that compute
  1240. resp = self.admin_api.api_get(
  1241. 'os-hypervisors?hypervisor_hostname_pattern=%s' % host).body
  1242. return resp['hypervisors'][0]['id']
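# e.g. GET /os-hypervisors?hypervisor_hostname_pattern=host1 returns a
# body like {'hypervisors': [{'id': <uuid>, ...}]}; with a new enough
# microversion that id is the compute node UUID, which matches the
# resource provider UUID in placement.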
  1243. def _get_provider_usages(self, provider_uuid):
  1244. return self.placement_api.get(
  1245. '/resource_providers/%s/usages' % provider_uuid).body['usages']
  1246. def _get_allocations_by_server_uuid(self, server_uuid):
  1247. return self.placement_api.get(
  1248. '/allocations/%s' % server_uuid).body['allocations']
  1249. def _get_traits(self):
  1250. return self.placement_api.get('/traits', version='1.6').body['traits']
  1251. def _get_all_providers(self):
  1252. return self.placement_api.get(
  1253. '/resource_providers').body['resource_providers']
  1254. def _get_provider_traits(self, provider_uuid):
  1255. return self.placement_api.get(
  1256. '/resource_providers/%s/traits' % provider_uuid,
  1257. version='1.6').body['traits']
  1258. def _set_provider_traits(self, rp_uuid, traits):
  1259. """This will overwrite any existing traits.
  1260. :param rp_uuid: UUID of the resource provider to update
  1261. :param traits: list of trait strings to set on the provider
  1262. :returns: APIResponse object with the results
  1263. """
  1264. provider = self.placement_api.get(
  1265. '/resource_providers/%s' % rp_uuid).body
  1266. put_traits_req = {
  1267. 'resource_provider_generation': provider['generation'],
  1268. 'traits': traits
  1269. }
  1270. return self.placement_api.put(
  1271. '/resource_providers/%s/traits' % rp_uuid,
  1272. put_traits_req, version='1.6')
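# Usage sketch (trait names borrowed from TraitsTrackingTests below):
#
#   self._set_provider_traits(rp_uuid, ['CUSTOM_FOO', 'HW_CPU_X86_VMX'])
#
# Since the PUT overwrites the trait list, passing [] would clear all
# traits from the provider.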
  1273. def _get_provider_inventory(self, rp_uuid):
  1274. return self.placement_api.get(
  1275. '/resource_providers/%s/inventories' % rp_uuid).body['inventories']
  1276. def assertFlavorMatchesAllocation(self, flavor, allocation):
  1277. self.assertEqual(flavor['vcpus'], allocation['VCPU'])
  1278. self.assertEqual(flavor['ram'], allocation['MEMORY_MB'])
  1279. self.assertEqual(flavor['disk'], allocation['DISK_GB'])
  1280. def assertFlavorsMatchAllocation(self, old_flavor, new_flavor, allocation):
  1281. self.assertEqual(old_flavor['vcpus'] + new_flavor['vcpus'],
  1282. allocation['VCPU'])
  1283. self.assertEqual(old_flavor['ram'] + new_flavor['ram'],
  1284. allocation['MEMORY_MB'])
  1285. self.assertEqual(old_flavor['disk'] + new_flavor['disk'],
  1286. allocation['DISK_GB'])
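# e.g. if old_flavor has 1 VCPU and new_flavor has 2 VCPUs (values
# assumed for illustration), the combined allocation asserted here is
# 1 + 2 = 3 VCPUs, since the instance and the migration each hold an
# allocation during a resize.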
  1287. def assertFlavorMatchesUsage(self, rp_uuid, flavor):
  1288. usages = self._get_provider_usages(rp_uuid)
  1289. self.assertFlavorMatchesAllocation(flavor, usages)
  1290. def get_migration_uuid_for_instance(self, instance_uuid):
  1291. # NOTE(danms): This is too much introspection for a test like this, but
  1292. # we can't see the migration uuid from the API, so we just encapsulate
  1293. # the peek behind the curtains here to keep it out of the tests.
  1294. # TODO(danms): Get the migration uuid from the API once it is exposed
  1295. ctxt = context.get_admin_context()
  1296. migrations = db.migration_get_all_by_filters(
  1297. ctxt, {'instance_uuid': instance_uuid})
  1298. self.assertEqual(1, len(migrations),
  1299. 'Test expected a single migration, '
  1300. 'but found %i' % len(migrations))
  1301. return migrations[0].uuid
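# Usage sketch, mirroring _move_and_check_allocations below:
#
#   migration_uuid = self.get_migration_uuid_for_instance(server['id'])
#   allocations = self._get_allocations_by_server_uuid(migration_uuid)
#
# i.e. the migration record is a placement consumer of its own, holding
# the old flavor's allocation during a move.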
  1302. def _boot_and_check_allocations(self, flavor, source_hostname):
  1303. """Boot an instance and check that the resource allocation is correct
1304. After booting an instance on the given host with the given flavor, it
1305. asserts that both the provider's usages and the server's resource
1306. allocations match the resources requested in the flavor. It also asserts
1307. that running the periodic update_available_resource call does not change
1308. the resource state.
  1309. :param flavor: the flavor the instance will be booted with
  1310. :param source_hostname: the name of the host the instance will be
  1311. booted on
  1312. :return: the API representation of the booted instance
  1313. """
  1314. server_req = self._build_minimal_create_server_request(
  1315. self.api, 'some-server', flavor_id=flavor['id'],
  1316. image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
  1317. networks=[])
  1318. server_req['availability_zone'] = 'nova:%s' % source_hostname
  1319. LOG.info('booting on %s', source_hostname)
  1320. created_server = self.api.post_server({'server': server_req})
  1321. server = self._wait_for_state_change(
  1322. self.admin_api, created_server, 'ACTIVE')
  1323. # Verify that our source host is what the server ended up on
  1324. self.assertEqual(source_hostname, server['OS-EXT-SRV-ATTR:host'])
  1325. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  1326. # Before we run periodics, make sure that we have allocations/usages
  1327. # only on the source host
  1328. source_usages = self._get_provider_usages(source_rp_uuid)
  1329. self.assertFlavorMatchesAllocation(flavor, source_usages)
1330. # Check that the other providers have no usage
  1331. for rp_uuid in [self._get_provider_uuid_by_host(hostname)
  1332. for hostname in self.computes.keys()
  1333. if hostname != source_hostname]:
  1334. usages = self._get_provider_usages(rp_uuid)
  1335. self.assertEqual({'VCPU': 0,
  1336. 'MEMORY_MB': 0,
  1337. 'DISK_GB': 0}, usages)
1338. # Check that the server only allocates resources from the host it is
1339. # booted on
  1340. allocations = self._get_allocations_by_server_uuid(server['id'])
  1341. self.assertEqual(1, len(allocations),
  1342. 'No allocation for the server on the host it '
  1343. 'is booted on')
  1344. allocation = allocations[source_rp_uuid]['resources']
  1345. self.assertFlavorMatchesAllocation(flavor, allocation)
  1346. self._run_periodics()
  1347. # After running the periodics but before we start any other operation,
  1348. # we should have exactly the same allocation/usage information as
  1349. # before running the periodics
  1350. # Check usages on the selected host after boot
  1351. source_usages = self._get_provider_usages(source_rp_uuid)
  1352. self.assertFlavorMatchesAllocation(flavor, source_usages)
1353. # Check that the server only allocates resources from the host it is
1354. # booted on
  1355. allocations = self._get_allocations_by_server_uuid(server['id'])
  1356. self.assertEqual(1, len(allocations),
  1357. 'No allocation for the server on the host it '
  1358. 'is booted on')
  1359. allocation = allocations[source_rp_uuid]['resources']
  1360. self.assertFlavorMatchesAllocation(flavor, allocation)
1361. # Check that the other providers have no usage
  1362. for rp_uuid in [self._get_provider_uuid_by_host(hostname)
  1363. for hostname in self.computes.keys()
  1364. if hostname != source_hostname]:
  1365. usages = self._get_provider_usages(rp_uuid)
  1366. self.assertEqual({'VCPU': 0,
  1367. 'MEMORY_MB': 0,
  1368. 'DISK_GB': 0}, usages)
  1369. return server
  1370. def _delete_and_check_allocations(self, server):
  1371. """Delete the instance and asserts that the allocations are cleaned
  1372. :param server: The API representation of the instance to be deleted
  1373. """
  1374. self.api.delete_server(server['id'])
  1375. self._wait_until_deleted(server)
1376. # NOTE(gibi): The resource allocation is deleted after the instance is
1377. # destroyed in the db, so wait_until_deleted might return before the
1378. # resources are deleted in placement. So we need to wait for the
1379. # instance.delete.end notification as that is emitted after the
1380. # resources are freed.
  1381. fake_notifier.wait_for_versioned_notifications('instance.delete.end')
  1382. for rp_uuid in [self._get_provider_uuid_by_host(hostname)
  1383. for hostname in self.computes.keys()]:
  1384. usages = self._get_provider_usages(rp_uuid)
  1385. self.assertEqual({'VCPU': 0,
  1386. 'MEMORY_MB': 0,
  1387. 'DISK_GB': 0}, usages)
  1388. # and no allocations for the deleted server
  1389. allocations = self._get_allocations_by_server_uuid(server['id'])
  1390. self.assertEqual(0, len(allocations))
  1391. def _run_periodics(self):
  1392. """Run the update_available_resource task on every compute manager
1393. This runs periodics on the computes in an undefined order; some child
1394. classes redefine this function to force a specific order.
  1395. """
  1396. ctx = context.get_admin_context()
  1397. for compute in self.computes.values():
  1398. LOG.info('Running periodic for compute (%s)',
  1399. compute.manager.host)
  1400. compute.manager.update_available_resource(ctx)
  1401. LOG.info('Finished with periodics')
  1402. def _move_and_check_allocations(self, server, request, old_flavor,
  1403. new_flavor, source_rp_uuid, dest_rp_uuid):
  1404. self.api.post_server_action(server['id'], request)
  1405. self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
  1406. def _check_allocation():
  1407. source_usages = self._get_provider_usages(source_rp_uuid)
  1408. self.assertFlavorMatchesAllocation(old_flavor, source_usages)
  1409. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1410. self.assertFlavorMatchesAllocation(new_flavor, dest_usages)
  1411. # The instance should own the new_flavor allocation against the
  1412. # destination host created by the scheduler
  1413. allocations = self._get_allocations_by_server_uuid(server['id'])
  1414. self.assertEqual(1, len(allocations))
  1415. dest_alloc = allocations[dest_rp_uuid]['resources']
  1416. self.assertFlavorMatchesAllocation(new_flavor, dest_alloc)
  1417. # The migration should own the old_flavor allocation against the
  1418. # source host created by conductor
  1419. migration_uuid = self.get_migration_uuid_for_instance(server['id'])
  1420. allocations = self._get_allocations_by_server_uuid(migration_uuid)
  1421. source_alloc = allocations[source_rp_uuid]['resources']
  1422. self.assertFlavorMatchesAllocation(old_flavor, source_alloc)
  1423. # OK, so the move operation has run, but we have not yet confirmed or
  1424. # reverted the move operation. Before we run periodics, make sure
  1425. # that we have allocations/usages on BOTH the source and the
  1426. # destination hosts.
  1427. _check_allocation()
  1428. self._run_periodics()
  1429. _check_allocation()
  1430. # Make sure the RequestSpec.flavor matches the new_flavor.
  1431. ctxt = context.get_admin_context()
  1432. reqspec = objects.RequestSpec.get_by_instance_uuid(ctxt, server['id'])
  1433. self.assertEqual(new_flavor['id'], reqspec.flavor.flavorid)
  1434. def _migrate_and_check_allocations(self, server, flavor, source_rp_uuid,
  1435. dest_rp_uuid):
  1436. request = {
  1437. 'migrate': None
  1438. }
  1439. self._move_and_check_allocations(
  1440. server, request=request, old_flavor=flavor, new_flavor=flavor,
  1441. source_rp_uuid=source_rp_uuid, dest_rp_uuid=dest_rp_uuid)
  1442. def _resize_and_check_allocations(self, server, old_flavor, new_flavor,
  1443. source_rp_uuid, dest_rp_uuid):
  1444. request = {
  1445. 'resize': {
  1446. 'flavorRef': new_flavor['id']
  1447. }
  1448. }
  1449. self._move_and_check_allocations(
  1450. server, request=request, old_flavor=old_flavor,
  1451. new_flavor=new_flavor, source_rp_uuid=source_rp_uuid,
  1452. dest_rp_uuid=dest_rp_uuid)
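# A sketch of how a subclass combines these helpers (hostnames and the
# flavor attribute are assumptions, following ServerMovingTests below):
#
#   server = self._boot_and_check_allocations(self.flavor1, 'host1')
#   source_rp_uuid = self._get_provider_uuid_by_host('host1')
#   dest_rp_uuid = self._get_provider_uuid_by_host('host2')
#   self._migrate_and_check_allocations(
#       server, self.flavor1, source_rp_uuid, dest_rp_uuid)
#   self._delete_and_check_allocations(server)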
  1453. class TraitsTrackingTests(ProviderUsageBaseTestCase):
  1454. compute_driver = 'fake.SmallFakeDriver'
  1455. @mock.patch.object(fake.SmallFakeDriver, 'get_traits')
  1456. def test_resource_provider_traits(self, mock_traits):
  1457. traits = ['CUSTOM_FOO', 'HW_CPU_X86_VMX']
  1458. mock_traits.return_value = traits
  1459. self.assertNotIn('CUSTOM_FOO', self._get_traits())
  1460. self.assertEqual([], self._get_all_providers())
  1461. self.compute = self._start_compute(host='host1')
  1462. rp_uuid = self._get_provider_uuid_by_host('host1')
  1463. self.assertEqual(traits, sorted(self._get_provider_traits(rp_uuid)))
  1464. self.assertIn('CUSTOM_FOO', self._get_traits())
  1465. class ServerMovingTests(ProviderUsageBaseTestCase):
  1466. """Tests moving servers while checking the resource allocations and usages
1467. These tests use two compute hosts. Boot a server on one of them, then try
1468. to move the server to the other. At every step the resource allocations of
1469. the server and the resource usages of the computes are queried from the
1470. placement API and asserted.
  1471. """
  1472. REQUIRES_LOCKING = True
  1473. # NOTE(danms): The test defaults to using SmallFakeDriver,
  1474. # which only has one vcpu, which can't take the doubled allocation
  1475. # we're now giving it. So, use the bigger MediumFakeDriver here.
  1476. compute_driver = 'fake.MediumFakeDriver'
  1477. def setUp(self):
  1478. super(ServerMovingTests, self).setUp()
  1479. fake_notifier.stub_notifier(self)
  1480. self.addCleanup(fake_notifier.reset)
  1481. self.compute1 = self._start_compute(host='host1')
  1482. self.compute2 = self._start_compute(host='host2')
  1483. flavors = self.api.get_flavors()
  1484. self.flavor1 = flavors[0]
  1485. self.flavor2 = flavors[1]
  1486. # create flavor3 which has less MEMORY_MB but more DISK_GB than flavor2
  1487. flavor_body = {'flavor':
  1488. {'name': 'test_flavor3',
  1489. 'ram': int(self.flavor2['ram'] / 2),
  1490. 'vcpus': 1,
  1491. 'disk': self.flavor2['disk'] * 2,
  1492. 'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e3'
  1493. }}
  1494. self.flavor3 = self.api.post_flavor(flavor_body)
  1495. def _other_hostname(self, host):
  1496. other_host = {'host1': 'host2',
  1497. 'host2': 'host1'}
  1498. return other_host[host]
  1499. def _run_periodics(self):
  1500. # NOTE(jaypipes): We always run periodics in the same order: first on
  1501. # compute1, then on compute2. However, we want to test scenarios when
1502. # the periodics run at different times during move operations. This is
  1503. # why we have the "reverse" tests which simply switch the source and
  1504. # dest host while keeping the order in which we run the
  1505. # periodics. This effectively allows us to test the matrix of timing
  1506. # scenarios during move operations.
  1507. ctx = context.get_admin_context()
  1508. LOG.info('Running periodic for compute1 (%s)',
  1509. self.compute1.manager.host)
  1510. self.compute1.manager.update_available_resource(ctx)
  1511. LOG.info('Running periodic for compute2 (%s)',
  1512. self.compute2.manager.host)
  1513. self.compute2.manager.update_available_resource(ctx)
  1514. LOG.info('Finished with periodics')
  1515. def test_resize_revert(self):
  1516. self._test_resize_revert(dest_hostname='host1')
  1517. def test_resize_revert_reverse(self):
  1518. self._test_resize_revert(dest_hostname='host2')
  1519. def test_resize_confirm(self):
  1520. self._test_resize_confirm(dest_hostname='host1')
  1521. def test_resize_confirm_reverse(self):
  1522. self._test_resize_confirm(dest_hostname='host2')
  1523. def _resize_and_check_allocations(self, server, old_flavor, new_flavor,
  1524. source_rp_uuid, dest_rp_uuid):
  1525. self.flags(allow_resize_to_same_host=False)
  1526. resize_req = {
  1527. 'resize': {
  1528. 'flavorRef': new_flavor['id']
  1529. }
  1530. }
  1531. self._move_and_check_allocations(
  1532. server, request=resize_req, old_flavor=old_flavor,
  1533. new_flavor=new_flavor, source_rp_uuid=source_rp_uuid,
  1534. dest_rp_uuid=dest_rp_uuid)
  1535. def test_migration_confirm_resize_error(self):
  1536. source_hostname = self.compute1.host
  1537. dest_hostname = self.compute2.host
  1538. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  1539. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  1540. server = self._boot_and_check_allocations(self.flavor1,
  1541. source_hostname)
  1542. self._move_and_check_allocations(
  1543. server, request={'migrate': None}, old_flavor=self.flavor1,
  1544. new_flavor=self.flavor1, source_rp_uuid=source_rp_uuid,
  1545. dest_rp_uuid=dest_rp_uuid)
  1546. # Mock failure
  1547. def fake_confirm_migration(context, migration, instance, network_info):
  1548. raise exception.MigrationPreCheckError(
  1549. reason='test_migration_confirm_resize_error')
  1550. with mock.patch('nova.virt.fake.FakeDriver.'
  1551. 'confirm_migration',
  1552. side_effect=fake_confirm_migration):
  1553. # Confirm the migration/resize and check the usages
  1554. post = {'confirmResize': None}
  1555. self.api.post_server_action(
  1556. server['id'], post, check_response_status=[204])
  1557. server = self._wait_for_state_change(self.api, server, 'ERROR')
1558. # After the failed confirm, we should have an allocation only on the
1559. # destination host
  1560. source_usages = self._get_provider_usages(source_rp_uuid)
  1561. self.assertEqual({'VCPU': 0,
  1562. 'MEMORY_MB': 0,
  1563. 'DISK_GB': 0}, source_usages,
  1564. 'Source host %s still has usage after the failed '
  1565. 'migration_confirm' % source_hostname)
1566. # Check that the server only allocates resources from the destination host
  1567. allocations = self._get_allocations_by_server_uuid(server['id'])
  1568. self.assertEqual(1, len(allocations))
  1569. dest_allocation = allocations[dest_rp_uuid]['resources']
  1570. self.assertFlavorMatchesAllocation(self.flavor1, dest_allocation)
  1571. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1572. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  1573. self._run_periodics()
1574. # After the failed confirm, we should have an allocation only on the
1575. # destination host
  1576. source_usages = self._get_provider_usages(source_rp_uuid)
  1577. self.assertEqual({'VCPU': 0,
  1578. 'MEMORY_MB': 0,
  1579. 'DISK_GB': 0}, source_usages,
  1580. 'Source host %s still has usage after the failed '
  1581. 'migration_confirm' % source_hostname)
1582. # Check that the server only allocates resources from the destination host
  1583. allocations = self._get_allocations_by_server_uuid(server['id'])
  1584. self.assertEqual(1, len(allocations))
  1585. dest_allocation = allocations[dest_rp_uuid]['resources']
  1586. self.assertFlavorMatchesAllocation(self.flavor1, dest_allocation)
  1587. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1588. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  1589. self._delete_and_check_allocations(server)
  1590. def _test_resize_revert(self, dest_hostname):
  1591. source_hostname = self._other_hostname(dest_hostname)
  1592. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  1593. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  1594. server = self._boot_and_check_allocations(self.flavor1,
  1595. source_hostname)
  1596. self._resize_and_check_allocations(server, self.flavor1, self.flavor2,
  1597. source_rp_uuid, dest_rp_uuid)
  1598. # Revert the resize and check the usages
  1599. post = {'revertResize': None}
  1600. self.api.post_server_action(server['id'], post)
  1601. self._wait_for_state_change(self.api, server, 'ACTIVE')
  1602. # Make sure the RequestSpec.flavor matches the original flavor.
  1603. ctxt = context.get_admin_context()
  1604. reqspec = objects.RequestSpec.get_by_instance_uuid(ctxt, server['id'])
  1605. self.assertEqual(self.flavor1['id'], reqspec.flavor.flavorid)
  1606. self._run_periodics()
1607. # the original host is expected to have the old resource allocation
  1608. source_usages = self._get_provider_usages(source_rp_uuid)
  1609. allocations = self._get_allocations_by_server_uuid(server['id'])
  1610. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  1611. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1612. self.assertEqual({'VCPU': 0,
  1613. 'MEMORY_MB': 0,
  1614. 'DISK_GB': 0}, dest_usages,
  1615. 'Target host %s still has usage after the resize '
  1616. 'has been reverted' % dest_hostname)
1617. # Check that the server only allocates resources from the original host
  1618. self.assertEqual(1, len(allocations))
  1619. source_allocation = allocations[source_rp_uuid]['resources']
  1620. self.assertFlavorMatchesAllocation(self.flavor1, source_allocation)
  1621. self._delete_and_check_allocations(server)
  1622. def _test_resize_confirm(self, dest_hostname):
  1623. source_hostname = self._other_hostname(dest_hostname)
  1624. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  1625. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  1626. server = self._boot_and_check_allocations(self.flavor1,
  1627. source_hostname)
  1628. self._resize_and_check_allocations(server, self.flavor1, self.flavor2,
  1629. source_rp_uuid, dest_rp_uuid)
  1630. # Confirm the resize and check the usages
  1631. post = {'confirmResize': None}
  1632. self.api.post_server_action(
  1633. server['id'], post, check_response_status=[204])
  1634. self._wait_for_state_change(self.api, server, 'ACTIVE')
  1635. # After confirming, we should have an allocation only on the
  1636. # destination host
  1637. allocations = self._get_allocations_by_server_uuid(server['id'])
  1638. # and the server allocates only from the target host
  1639. self.assertEqual(1, len(allocations))
  1640. source_usages = self._get_provider_usages(source_rp_uuid)
  1641. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1642. # and the target host allocation should be according to the new flavor
  1643. self.assertFlavorMatchesAllocation(self.flavor2, dest_usages)
  1644. self.assertEqual({'VCPU': 0,
  1645. 'MEMORY_MB': 0,
  1646. 'DISK_GB': 0}, source_usages,
  1647. 'The source host %s still has usages after the '
  1648. 'resize has been confirmed' % source_hostname)
  1649. # and the target host allocation should be according to the new flavor
  1650. self.assertFlavorMatchesAllocation(self.flavor2, dest_usages)
  1651. dest_allocation = allocations[dest_rp_uuid]['resources']
  1652. self.assertFlavorMatchesAllocation(self.flavor2, dest_allocation)
  1653. self._run_periodics()
  1654. # Check we're still accurate after running the periodics
  1655. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1656. source_usages = self._get_provider_usages(source_rp_uuid)
  1657. # and the target host allocation should be according to the new flavor
  1658. self.assertFlavorMatchesAllocation(self.flavor2, dest_usages)
  1659. self.assertEqual({'VCPU': 0,
  1660. 'MEMORY_MB': 0,
  1661. 'DISK_GB': 0}, source_usages,
  1662. 'The source host %s still has usages after the '
  1663. 'resize has been confirmed' % source_hostname)
  1664. allocations = self._get_allocations_by_server_uuid(server['id'])
  1665. # and the server allocates only from the target host
  1666. self.assertEqual(1, len(allocations))
  1667. dest_allocation = allocations[dest_rp_uuid]['resources']
  1668. self.assertFlavorMatchesAllocation(self.flavor2, dest_allocation)
  1669. self._delete_and_check_allocations(server)
  1670. def _resize_to_same_host_and_check_allocations(self, server, old_flavor,
  1671. new_flavor, rp_uuid):
  1672. # Resize the server to the same host and check usages in VERIFY_RESIZE
  1673. # state
  1674. self.flags(allow_resize_to_same_host=True)
  1675. resize_req = {
  1676. 'resize': {
  1677. 'flavorRef': new_flavor['id']
  1678. }
  1679. }
  1680. self.api.post_server_action(server['id'], resize_req)
  1681. self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
  1682. usages = self._get_provider_usages(rp_uuid)
  1683. self.assertFlavorsMatchAllocation(old_flavor, new_flavor, usages)
  1684. # The instance should hold a new_flavor allocation
  1685. allocations = self._get_allocations_by_server_uuid(server['id'])
  1686. self.assertEqual(1, len(allocations))
  1687. allocation = allocations[rp_uuid]['resources']
  1688. self.assertFlavorMatchesAllocation(new_flavor, allocation)
  1689. # The migration should hold an old_flavor allocation
  1690. migration_uuid = self.get_migration_uuid_for_instance(server['id'])
  1691. allocations = self._get_allocations_by_server_uuid(migration_uuid)
  1692. self.assertEqual(1, len(allocations))
  1693. allocation = allocations[rp_uuid]['resources']
  1694. self.assertFlavorMatchesAllocation(old_flavor, allocation)
  1695. # We've resized to the same host and have doubled allocations for both
  1696. # the old and new flavor on the same host. Run the periodic on the
  1697. # compute to see if it tramples on what the scheduler did.
  1698. self._run_periodics()
  1699. usages = self._get_provider_usages(rp_uuid)
  1700. # In terms of usage, it's still double on the host because the instance
  1701. # and the migration each hold an allocation for the new and old
  1702. # flavors respectively.
  1703. self.assertFlavorsMatchAllocation(old_flavor, new_flavor, usages)
  1704. # The instance should hold a new_flavor allocation
  1705. allocations = self._get_allocations_by_server_uuid(server['id'])
  1706. self.assertEqual(1, len(allocations))
  1707. allocation = allocations[rp_uuid]['resources']
  1708. self.assertFlavorMatchesAllocation(new_flavor, allocation)
  1709. # The migration should hold an old_flavor allocation
  1710. allocations = self._get_allocations_by_server_uuid(migration_uuid)
  1711. self.assertEqual(1, len(allocations))
  1712. allocation = allocations[rp_uuid]['resources']
  1713. self.assertFlavorMatchesAllocation(old_flavor, allocation)
  1714. def test_resize_revert_same_host(self):
  1715. # make sure that the test only uses a single host
  1716. compute2_service_id = self.admin_api.get_services(
  1717. host=self.compute2.host, binary='nova-compute')[0]['id']
  1718. self.admin_api.put_service(compute2_service_id, {'status': 'disabled'})
  1719. hostname = self.compute1.manager.host
  1720. rp_uuid = self._get_provider_uuid_by_host(hostname)
  1721. server = self._boot_and_check_allocations(self.flavor2, hostname)
  1722. self._resize_to_same_host_and_check_allocations(
  1723. server, self.flavor2, self.flavor3, rp_uuid)
  1724. # Revert the resize and check the usages
  1725. post = {'revertResize': None}
  1726. self.api.post_server_action(server['id'], post)
  1727. self._wait_for_state_change(self.api, server, 'ACTIVE')
  1728. self._run_periodics()
  1729. # after revert only allocations due to the old flavor should remain
  1730. usages = self._get_provider_usages(rp_uuid)
  1731. self.assertFlavorMatchesAllocation(self.flavor2, usages)
  1732. allocations = self._get_allocations_by_server_uuid(server['id'])
  1733. self.assertEqual(1, len(allocations))
  1734. allocation = allocations[rp_uuid]['resources']
  1735. self.assertFlavorMatchesAllocation(self.flavor2, allocation)
  1736. self._delete_and_check_allocations(server)
  1737. def test_resize_confirm_same_host(self):
  1738. # make sure that the test only uses a single host
  1739. compute2_service_id = self.admin_api.get_services(
  1740. host=self.compute2.host, binary='nova-compute')[0]['id']
  1741. self.admin_api.put_service(compute2_service_id, {'status': 'disabled'})
  1742. hostname = self.compute1.manager.host
  1743. rp_uuid = self._get_provider_uuid_by_host(hostname)
  1744. server = self._boot_and_check_allocations(self.flavor2, hostname)
  1745. self._resize_to_same_host_and_check_allocations(
  1746. server, self.flavor2, self.flavor3, rp_uuid)
  1747. # Confirm the resize and check the usages
  1748. post = {'confirmResize': None}
  1749. self.api.post_server_action(
  1750. server['id'], post, check_response_status=[204])
  1751. self._wait_for_state_change(self.api, server, 'ACTIVE')
  1752. self._run_periodics()
  1753. # after confirm only allocations due to the new flavor should remain
  1754. usages = self._get_provider_usages(rp_uuid)
  1755. self.assertFlavorMatchesAllocation(self.flavor3, usages)
  1756. allocations = self._get_allocations_by_server_uuid(server['id'])
  1757. self.assertEqual(1, len(allocations))
  1758. allocation = allocations[rp_uuid]['resources']
  1759. self.assertFlavorMatchesAllocation(self.flavor3, allocation)
  1760. self._delete_and_check_allocations(server)
  1761. def test_resize_not_enough_resource(self):
1762. # Try to resize to a flavor that requests more VCPU than the
1763. # compute hosts have available and expect the resize to fail
  1764. flavor_body = {'flavor':
  1765. {'name': 'test_too_big_flavor',
  1766. 'ram': 1024,
  1767. 'vcpus': fake.MediumFakeDriver.vcpus + 1,
  1768. 'disk': 20,
  1769. }}
  1770. big_flavor = self.api.post_flavor(flavor_body)
  1771. dest_hostname = self.compute2.host
  1772. source_hostname = self._other_hostname(dest_hostname)
  1773. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  1774. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  1775. server = self._boot_and_check_allocations(
  1776. self.flavor1, source_hostname)
  1777. self.flags(allow_resize_to_same_host=False)
  1778. resize_req = {
  1779. 'resize': {
  1780. 'flavorRef': big_flavor['id']
  1781. }
  1782. }
  1783. resp = self.api.post_server_action(
  1784. server['id'], resize_req, check_response_status=[400])
  1785. self.assertEqual(
  1786. resp['badRequest']['message'],
  1787. "No valid host was found. No valid host found for resize")
  1788. server = self.admin_api.get_server(server['id'])
  1789. self.assertEqual(source_hostname, server['OS-EXT-SRV-ATTR:host'])
1790. # only the source host should have usages after the failed resize
  1791. source_usages = self._get_provider_usages(source_rp_uuid)
  1792. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  1793. # Check that the other provider has no usage
  1794. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1795. self.assertEqual({'VCPU': 0,
  1796. 'MEMORY_MB': 0,
  1797. 'DISK_GB': 0}, dest_usages)
1798. # Check that the server only allocates resources from the host it is
1799. # booted on
  1800. allocations = self._get_allocations_by_server_uuid(server['id'])
  1801. self.assertEqual(1, len(allocations))
  1802. allocation = allocations[source_rp_uuid]['resources']
  1803. self.assertFlavorMatchesAllocation(self.flavor1, allocation)
  1804. self._delete_and_check_allocations(server)
  1805. def test_evacuate(self):
  1806. source_hostname = self.compute1.host
  1807. dest_hostname = self.compute2.host
  1808. server = self._boot_and_check_allocations(
  1809. self.flavor1, source_hostname)
  1810. source_compute_id = self.admin_api.get_services(
  1811. host=source_hostname, binary='nova-compute')[0]['id']
  1812. self.compute1.stop()
  1813. # force it down to avoid waiting for the service group to time out
  1814. self.admin_api.put_service(
  1815. source_compute_id, {'forced_down': 'true'})
  1816. # evacuate the server
  1817. post = {'evacuate': {}}
  1818. self.api.post_server_action(
  1819. server['id'], post)
  1820. expected_params = {'OS-EXT-SRV-ATTR:host': dest_hostname,
  1821. 'status': 'ACTIVE'}
  1822. server = self._wait_for_server_parameter(self.api, server,
  1823. expected_params)
  1824. # Expect to have allocation and usages on both computes as the
  1825. # source compute is still down
  1826. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  1827. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  1828. source_usages = self._get_provider_usages(source_rp_uuid)
  1829. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  1830. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1831. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  1832. allocations = self._get_allocations_by_server_uuid(server['id'])
  1833. self.assertEqual(2, len(allocations))
  1834. source_allocation = allocations[source_rp_uuid]['resources']
  1835. self.assertFlavorMatchesAllocation(self.flavor1, source_allocation)
  1836. dest_allocation = allocations[dest_rp_uuid]['resources']
  1837. self.assertFlavorMatchesAllocation(self.flavor1, dest_allocation)
  1838. # restart the source compute
  1839. self.restart_compute_service(self.compute1)
  1840. self.admin_api.put_service(
  1841. source_compute_id, {'forced_down': 'false'})
  1842. source_usages = self._get_provider_usages(source_rp_uuid)
  1843. self.assertEqual({'VCPU': 0,
  1844. 'MEMORY_MB': 0,
  1845. 'DISK_GB': 0},
  1846. source_usages)
  1847. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1848. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  1849. allocations = self._get_allocations_by_server_uuid(server['id'])
  1850. self.assertEqual(1, len(allocations))
  1851. dest_allocation = allocations[dest_rp_uuid]['resources']
  1852. self.assertFlavorMatchesAllocation(self.flavor1, dest_allocation)
  1853. self._delete_and_check_allocations(server)
  1854. def test_evacuate_forced_host(self):
  1855. """Evacuating a server with a forced host bypasses the scheduler
  1856. which means conductor has to create the allocations against the
1857. destination node. This test recreates the scenario and asserts
  1858. the allocations on the source and destination nodes are as expected.
  1859. """
  1860. source_hostname = self.compute1.host
  1861. dest_hostname = self.compute2.host
  1862. server = self._boot_and_check_allocations(
  1863. self.flavor1, source_hostname)
  1864. source_compute_id = self.admin_api.get_services(
  1865. host=source_hostname, binary='nova-compute')[0]['id']
  1866. self.compute1.stop()
  1867. # force it down to avoid waiting for the service group to time out
  1868. self.admin_api.put_service(
  1869. source_compute_id, {'forced_down': 'true'})
  1870. # evacuate the server and force the destination host which bypasses
  1871. # the scheduler
  1872. post = {
  1873. 'evacuate': {
  1874. 'host': dest_hostname,
  1875. 'force': True
  1876. }
  1877. }
  1878. self.api.post_server_action(server['id'], post)
  1879. expected_params = {'OS-EXT-SRV-ATTR:host': dest_hostname,
  1880. 'status': 'ACTIVE'}
  1881. server = self._wait_for_server_parameter(self.api, server,
  1882. expected_params)
  1883. # Run the periodics to show those don't modify allocations.
  1884. self._run_periodics()
  1885. # Expect to have allocation and usages on both computes as the
  1886. # source compute is still down
  1887. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  1888. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  1889. source_usages = self._get_provider_usages(source_rp_uuid)
  1890. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  1891. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1892. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  1893. allocations = self._get_allocations_by_server_uuid(server['id'])
  1894. self.assertEqual(2, len(allocations))
  1895. source_allocation = allocations[source_rp_uuid]['resources']
  1896. self.assertFlavorMatchesAllocation(self.flavor1, source_allocation)
  1897. dest_allocation = allocations[dest_rp_uuid]['resources']
  1898. self.assertFlavorMatchesAllocation(self.flavor1, dest_allocation)
  1899. # restart the source compute
  1900. self.restart_compute_service(self.compute1)
  1901. self.admin_api.put_service(
  1902. source_compute_id, {'forced_down': 'false'})
  1903. # Run the periodics again to show they don't change anything.
  1904. self._run_periodics()
1905. # When the source node starts up, the instance has moved so the
1906. # ResourceTracker should clean up allocations for the source node.
  1907. source_usages = self._get_provider_usages(source_rp_uuid)
  1908. self.assertEqual(
  1909. {'VCPU': 0, 'MEMORY_MB': 0, 'DISK_GB': 0}, source_usages)
  1910. # The usages/allocations should still exist on the destination node
  1911. # after the source node starts back up.
  1912. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1913. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  1914. allocations = self._get_allocations_by_server_uuid(server['id'])
  1915. self.assertEqual(1, len(allocations))
  1916. dest_allocation = allocations[dest_rp_uuid]['resources']
  1917. self.assertFlavorMatchesAllocation(self.flavor1, dest_allocation)
  1918. self._delete_and_check_allocations(server)
  1919. def test_evacuate_claim_on_dest_fails(self):
  1920. """Tests that the allocations on the destination node are cleaned up
  1921. when the rebuild move claim fails due to insufficient resources.
  1922. """
  1923. source_hostname = self.compute1.host
  1924. dest_hostname = self.compute2.host
  1925. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  1926. server = self._boot_and_check_allocations(
  1927. self.flavor1, source_hostname)
  1928. source_compute_id = self.admin_api.get_services(
  1929. host=source_hostname, binary='nova-compute')[0]['id']
  1930. self.compute1.stop()
  1931. # force it down to avoid waiting for the service group to time out
  1932. self.admin_api.put_service(
  1933. source_compute_id, {'forced_down': 'true'})
  1934. # NOTE(mriedem): This isn't great, and I'd like to fake out the driver
  1935. # to make the claim fail, by doing something like returning a too high
  1936. # memory_mb overhead, but the limits dict passed to the claim is empty
  1937. # so the claim test is considering it as unlimited and never actually
  1938. # performs a claim test. Configuring the scheduler to use the RamFilter
  1939. # to get the memory_mb limit at least seems like it should work but
  1940. # it doesn't appear to for some reason...
  1941. def fake_move_claim(*args, **kwargs):
  1942. # Assert the destination node allocation exists.
  1943. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1944. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  1945. raise exception.ComputeResourcesUnavailable(
  1946. reason='test_evacuate_claim_on_dest_fails')
  1947. with mock.patch('nova.compute.claims.MoveClaim', fake_move_claim):
  1948. # evacuate the server
  1949. self.api.post_server_action(server['id'], {'evacuate': {}})
  1950. # the migration will fail on the dest node and the instance will
  1951. # go into error state
  1952. server = self._wait_for_state_change(self.api, server, 'ERROR')
  1953. # Run the periodics to show those don't modify allocations.
  1954. self._run_periodics()
  1955. # The allocation should still exist on the source node since it's
  1956. # still down, and the allocation on the destination node should be
  1957. # cleaned up.
  1958. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  1959. source_usages = self._get_provider_usages(source_rp_uuid)
  1960. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  1961. dest_usages = self._get_provider_usages(dest_rp_uuid)
  1962. self.assertFlavorMatchesAllocation(
  1963. {'vcpus': 0, 'ram': 0, 'disk': 0}, dest_usages)
  1964. allocations = self._get_allocations_by_server_uuid(server['id'])
  1965. self.assertEqual(1, len(allocations))
  1966. source_allocation = allocations[source_rp_uuid]['resources']
  1967. self.assertFlavorMatchesAllocation(self.flavor1, source_allocation)
  1968. # restart the source compute
  1969. self.restart_compute_service(self.compute1)
  1970. self.admin_api.put_service(
  1971. source_compute_id, {'forced_down': 'false'})
  1972. # Run the periodics again to show they don't change anything.
  1973. self._run_periodics()
  1974. # The source compute shouldn't have cleaned up the allocation for
  1975. # itself since the instance didn't move.
  1976. source_usages = self._get_provider_usages(source_rp_uuid)
  1977. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  1978. allocations = self._get_allocations_by_server_uuid(server['id'])
  1979. self.assertEqual(1, len(allocations))
  1980. source_allocation = allocations[source_rp_uuid]['resources']
  1981. self.assertFlavorMatchesAllocation(self.flavor1, source_allocation)
  1982. def test_evacuate_rebuild_on_dest_fails(self):
  1983. """Tests that the allocations on the destination node are cleaned up
  1984. automatically when the claim is made but the actual rebuild
  1985. via the driver fails.
  1986. """
  1987. source_hostname = self.compute1.host
  1988. dest_hostname = self.compute2.host
  1989. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  1990. server = self._boot_and_check_allocations(
  1991. self.flavor1, source_hostname)
  1992. source_compute_id = self.admin_api.get_services(
  1993. host=source_hostname, binary='nova-compute')[0]['id']
  1994. self.compute1.stop()
  1995. # force it down to avoid waiting for the service group to time out
  1996. self.admin_api.put_service(
  1997. source_compute_id, {'forced_down': 'true'})
  1998. def fake_rebuild(*args, **kwargs):
  1999. # Assert the destination node allocation exists.
  2000. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2001. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  2002. raise test.TestingException('test_evacuate_rebuild_on_dest_fails')
  2003. with mock.patch.object(
  2004. self.compute2.driver, 'rebuild', fake_rebuild):
  2005. # evacuate the server
  2006. self.api.post_server_action(server['id'], {'evacuate': {}})
  2007. # the migration will fail on the dest node and the instance will
  2008. # go into error state
  2009. server = self._wait_for_state_change(self.api, server, 'ERROR')
  2010. # Run the periodics to show those don't modify allocations.
  2011. self._run_periodics()
  2012. # The allocation should still exist on the source node since it's
  2013. # still down, and the allocation on the destination node should be
  2014. # cleaned up.
  2015. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2016. source_usages = self._get_provider_usages(source_rp_uuid)
  2017. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  2018. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2019. self.assertFlavorMatchesAllocation(
  2020. {'vcpus': 0, 'ram': 0, 'disk': 0}, dest_usages)
  2021. allocations = self._get_allocations_by_server_uuid(server['id'])
  2022. self.assertEqual(1, len(allocations))
  2023. source_allocation = allocations[source_rp_uuid]['resources']
  2024. self.assertFlavorMatchesAllocation(self.flavor1, source_allocation)
  2025. # restart the source compute
  2026. self.restart_compute_service(self.compute1)
  2027. self.admin_api.put_service(
  2028. source_compute_id, {'forced_down': 'false'})
  2029. # Run the periodics again to show they don't change anything.
  2030. self._run_periodics()
  2031. # The source compute shouldn't have cleaned up the allocation for
  2032. # itself since the instance didn't move.
  2033. source_usages = self._get_provider_usages(source_rp_uuid)
  2034. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  2035. allocations = self._get_allocations_by_server_uuid(server['id'])
  2036. self.assertEqual(1, len(allocations))
  2037. source_allocation = allocations[source_rp_uuid]['resources']
  2038. self.assertFlavorMatchesAllocation(self.flavor1, source_allocation)
  2039. def _boot_then_shelve_and_check_allocations(self, hostname, rp_uuid):
  2040. # avoid automatic shelve offloading
  2041. self.flags(shelved_offload_time=-1)
  2042. server = self._boot_and_check_allocations(
  2043. self.flavor1, hostname)
  2044. req = {
  2045. 'shelve': {}
  2046. }
  2047. self.api.post_server_action(server['id'], req)
  2048. self._wait_for_state_change(self.api, server, 'SHELVED')
  2049. # the host should maintain the existing allocation for this instance
  2050. # while the instance is shelved
  2051. source_usages = self._get_provider_usages(rp_uuid)
  2052. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
2053. # Check that the server only allocates resources from the host it is
2054. # booted on
  2055. allocations = self._get_allocations_by_server_uuid(server['id'])
  2056. self.assertEqual(1, len(allocations))
  2057. allocation = allocations[rp_uuid]['resources']
  2058. self.assertFlavorMatchesAllocation(self.flavor1, allocation)
  2059. return server
  2060. def test_shelve_unshelve(self):
  2061. source_hostname = self.compute1.host
  2062. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2063. server = self._boot_then_shelve_and_check_allocations(
  2064. source_hostname, source_rp_uuid)
  2065. req = {
  2066. 'unshelve': {}
  2067. }
  2068. self.api.post_server_action(server['id'], req)
  2069. self._wait_for_state_change(self.api, server, 'ACTIVE')
  2070. # the host should have resource usage as the instance is ACTIVE
  2071. source_usages = self._get_provider_usages(source_rp_uuid)
  2072. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
2073. # Check that the server only allocates resources from the host it is
2074. # booted on
  2075. allocations = self._get_allocations_by_server_uuid(server['id'])
  2076. self.assertEqual(1, len(allocations))
  2077. allocation = allocations[source_rp_uuid]['resources']
  2078. self.assertFlavorMatchesAllocation(self.flavor1, allocation)
  2079. self._delete_and_check_allocations(server)
  2080. def _shelve_offload_and_check_allocations(self, server, source_rp_uuid):
  2081. req = {
  2082. 'shelveOffload': {}
  2083. }
  2084. self.api.post_server_action(server['id'], req)
  2085. self._wait_for_server_parameter(
  2086. self.api, server, {'status': 'SHELVED_OFFLOADED',
  2087. 'OS-EXT-SRV-ATTR:host': None,
  2088. 'OS-EXT-AZ:availability_zone': ''})
  2089. source_usages = self._get_provider_usages(source_rp_uuid)
  2090. self.assertEqual({'VCPU': 0,
  2091. 'MEMORY_MB': 0,
  2092. 'DISK_GB': 0},
  2093. source_usages)
  2094. allocations = self._get_allocations_by_server_uuid(server['id'])
  2095. self.assertEqual(0, len(allocations))
  2096. def test_shelve_offload_unshelve_diff_host(self):
  2097. source_hostname = self.compute1.host
  2098. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2099. server = self._boot_then_shelve_and_check_allocations(
  2100. source_hostname, source_rp_uuid)
  2101. self._shelve_offload_and_check_allocations(server, source_rp_uuid)
2102. # Unshelve after shelve offload will do scheduling. This test case
2103. # wants to test the scenario where the scheduler selects a different host
2104. # to unshelve the instance, so we disable the original host.
  2105. source_service_id = self.admin_api.get_services(
  2106. host=source_hostname, binary='nova-compute')[0]['id']
  2107. self.admin_api.put_service(source_service_id, {'status': 'disabled'})
  2108. req = {
  2109. 'unshelve': {}
  2110. }
  2111. self.api.post_server_action(server['id'], req)
  2112. server = self._wait_for_state_change(self.api, server, 'ACTIVE')
  2113. # unshelving an offloaded instance will call the scheduler so the
  2114. # instance might end up on a different host
  2115. current_hostname = server['OS-EXT-SRV-ATTR:host']
  2116. self.assertEqual(current_hostname, self._other_hostname(
  2117. source_hostname))
  2118. # the host running the instance should have resource usage
  2119. current_rp_uuid = self._get_provider_uuid_by_host(current_hostname)
  2120. current_usages = self._get_provider_usages(current_rp_uuid)
  2121. self.assertFlavorMatchesAllocation(self.flavor1, current_usages)
  2122. allocations = self._get_allocations_by_server_uuid(server['id'])
  2123. self.assertEqual(1, len(allocations))
  2124. allocation = allocations[current_rp_uuid]['resources']
  2125. self.assertFlavorMatchesAllocation(self.flavor1, allocation)
  2126. self._delete_and_check_allocations(server)
  2127. def test_shelve_offload_unshelve_same_host(self):
  2128. source_hostname = self.compute1.host
  2129. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2130. server = self._boot_then_shelve_and_check_allocations(
  2131. source_hostname, source_rp_uuid)
  2132. self._shelve_offload_and_check_allocations(server, source_rp_uuid)
2133. # Unshelve after shelve offload will do scheduling. This test case
2134. # wants to test the scenario where the scheduler selects the same host
2135. # to unshelve the instance, so we disable the other host.
  2136. source_service_id = self.admin_api.get_services(
  2137. host=self._other_hostname(source_hostname),
  2138. binary='nova-compute')[0]['id']
  2139. self.admin_api.put_service(source_service_id, {'status': 'disabled'})
  2140. req = {
  2141. 'unshelve': {}
  2142. }
  2143. self.api.post_server_action(server['id'], req)
  2144. server = self._wait_for_state_change(self.api, server, 'ACTIVE')
  2145. # unshelving an offloaded instance will call the scheduler so the
  2146. # instance might end up on a different host
  2147. current_hostname = server['OS-EXT-SRV-ATTR:host']
  2148. self.assertEqual(current_hostname, source_hostname)
  2149. # the host running the instance should have resource usage
  2150. current_rp_uuid = self._get_provider_uuid_by_host(current_hostname)
  2151. current_usages = self._get_provider_usages(current_rp_uuid)
  2152. self.assertFlavorMatchesAllocation(self.flavor1, current_usages)
  2153. allocations = self._get_allocations_by_server_uuid(server['id'])
  2154. self.assertEqual(1, len(allocations))
  2155. allocation = allocations[current_rp_uuid]['resources']
  2156. self.assertFlavorMatchesAllocation(self.flavor1, allocation)
  2157. self._delete_and_check_allocations(server)
  2158. def test_live_migrate_force(self):
  2159. source_hostname = self.compute1.host
  2160. dest_hostname = self.compute2.host
  2161. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2162. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  2163. server = self._boot_and_check_allocations(
  2164. self.flavor1, source_hostname)
  2165. post = {
  2166. 'os-migrateLive': {
  2167. 'host': dest_hostname,
  2168. 'block_migration': True,
  2169. 'force': True,
  2170. }
  2171. }
  2172. self.api.post_server_action(server['id'], post)
  2173. self._wait_for_server_parameter(self.api, server,
  2174. {'OS-EXT-SRV-ATTR:host': dest_hostname,
  2175. 'status': 'ACTIVE'})
  2176. self._run_periodics()
  2177. source_usages = self._get_provider_usages(source_rp_uuid)
  2178. # NOTE(danms): There should be no usage for the source
  2179. self.assertFlavorMatchesAllocation(
  2180. {'ram': 0, 'disk': 0, 'vcpus': 0}, source_usages)
  2181. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2182. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  2183. allocations = self._get_allocations_by_server_uuid(server['id'])
  2184. # the server has an allocation on only the dest node
  2185. self.assertEqual(1, len(allocations))
  2186. self.assertNotIn(source_rp_uuid, allocations)
  2187. dest_allocation = allocations[dest_rp_uuid]['resources']
  2188. self.assertFlavorMatchesAllocation(self.flavor1, dest_allocation)
  2189. self._delete_and_check_allocations(server)
  2190. def test_live_migrate(self):
  2191. source_hostname = self.compute1.host
  2192. dest_hostname = self.compute2.host
  2193. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2194. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  2195. server = self._boot_and_check_allocations(
  2196. self.flavor1, source_hostname)
  2197. post = {
  2198. 'os-migrateLive': {
  2199. 'host': dest_hostname,
  2200. 'block_migration': True,
  2201. }
  2202. }
  2203. self.api.post_server_action(server['id'], post)
  2204. self._wait_for_server_parameter(self.api, server,
  2205. {'OS-EXT-SRV-ATTR:host': dest_hostname,
  2206. 'status': 'ACTIVE'})
  2207. self._run_periodics()
  2208. source_usages = self._get_provider_usages(source_rp_uuid)
  2209. # NOTE(danms): There should be no usage for the source
  2210. self.assertFlavorMatchesAllocation(
  2211. {'ram': 0, 'disk': 0, 'vcpus': 0}, source_usages)
  2212. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2213. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  2214. allocations = self._get_allocations_by_server_uuid(server['id'])
  2215. self.assertEqual(1, len(allocations))
  2216. self.assertNotIn(source_rp_uuid, allocations)
  2217. dest_allocation = allocations[dest_rp_uuid]['resources']
  2218. self.assertFlavorMatchesAllocation(self.flavor1, dest_allocation)
  2219. self._delete_and_check_allocations(server)
  2220. def test_live_migrate_pre_check_fails(self):
  2221. """Tests the case that the LiveMigrationTask in conductor has
  2222. called the scheduler which picked a host and created allocations
  2223. against it in Placement, but then when the conductor task calls
  2224. check_can_live_migrate_destination on the destination compute it
  2225. fails. The allocations on the destination compute node should be
  2226. cleaned up before the conductor task asks the scheduler for another
  2227. host to try the live migration.
  2228. """
  2229. self.failed_hostname = None
  2230. source_hostname = self.compute1.host
  2231. dest_hostname = self.compute2.host
  2232. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2233. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  2234. server = self._boot_and_check_allocations(
  2235. self.flavor1, source_hostname)
  2236. def fake_check_can_live_migrate_destination(
  2237. context, instance, src_compute_info, dst_compute_info,
  2238. block_migration=False, disk_over_commit=False):
  2239. self.failed_hostname = dst_compute_info['host']
  2240. raise exception.MigrationPreCheckError(
  2241. reason='test_live_migrate_pre_check_fails')
  2242. with mock.patch('nova.virt.fake.FakeDriver.'
  2243. 'check_can_live_migrate_destination',
  2244. side_effect=fake_check_can_live_migrate_destination):
  2245. post = {
  2246. 'os-migrateLive': {
  2247. 'host': dest_hostname,
  2248. 'block_migration': True,
  2249. }
  2250. }
  2251. self.api.post_server_action(server['id'], post)
  2252. # As there are only two computes and we failed to live migrate to
  2253. # the only other destination host, the LiveMigrationTask raises
  2254. # MaxRetriesExceeded back to the conductor manager which handles it
  2255. # generically and sets the instance back to ACTIVE status and
  2256. # clears the task_state. The migration record status is set to
  2257. # 'error', so that's what we need to look for to know when this
  2258. # is done.
  2259. migration = self._wait_for_migration_status(server, ['error'])
  2260. # The source_compute should be set on the migration record, but the
  2261. # destination shouldn't be as we never made it to one.
  2262. self.assertEqual(source_hostname, migration['source_compute'])
  2263. self.assertIsNone(migration['dest_compute'])
  2264. # Make sure the destination host (the only other host) is the failed
  2265. # host.
  2266. self.assertEqual(dest_hostname, self.failed_hostname)
  2267. source_usages = self._get_provider_usages(source_rp_uuid)
  2268. # Since the instance didn't move, assert the allocations are still
  2269. # on the source node.
  2270. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  2271. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2272. # Assert the allocations, created by the scheduler, are cleaned up
  2273. # after the migration pre-check error happens.
  2274. self.assertFlavorMatchesAllocation(
  2275. {'vcpus': 0, 'ram': 0, 'disk': 0}, dest_usages)
  2276. allocations = self._get_allocations_by_server_uuid(server['id'])
  2277. # There should only be 1 allocation for the instance on the source node
  2278. self.assertEqual(1, len(allocations))
  2279. self.assertIn(source_rp_uuid, allocations)
  2280. self.assertFlavorMatchesAllocation(
  2281. self.flavor1, allocations[source_rp_uuid]['resources'])
  2282. self._delete_and_check_allocations(server)
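# A rough sketch of the migration record read above, with fields from
# the os-migrations API and illustrative values:
#     {'status': 'error', 'source_compute': 'host1',
#      'dest_compute': None, 'migration_type': 'live-migration'}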
  2283. @mock.patch('nova.virt.fake.FakeDriver.pre_live_migration',
  2284. # The actual type of exception here doesn't matter. The point
  2285. # is that the virt driver raised an exception from the
  2286. # pre_live_migration method on the destination host.
  2287. side_effect=test.TestingException(
  2288. 'test_live_migrate_rollback_cleans_dest_node_allocations'))
  2289. def test_live_migrate_rollback_cleans_dest_node_allocations(
  2290. self, mock_pre_live_migration):
  2291. """Tests the case that when live migration fails, either during the
  2292. call to pre_live_migration on the destination, or during the actual
  2293. live migration in the virt driver, the allocations on the destination
  2294. node are rolled back since the instance is still on the source node.
  2295. """
  2296. source_hostname = self.compute1.host
  2297. dest_hostname = self.compute2.host
  2298. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2299. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  2300. server = self._boot_and_check_allocations(
  2301. self.flavor1, source_hostname)
  2302. post = {
  2303. 'os-migrateLive': {
  2304. 'host': dest_hostname,
  2305. 'block_migration': True,
  2306. }
  2307. }
  2308. self.api.post_server_action(server['id'], post)
  2309. # The compute manager will put the migration record into error status
  2310. # when pre_live_migration fails, so wait for that to happen.
  2311. migration = self._wait_for_migration_status(server, ['error'])
  2312. # The _rollback_live_migration method in the compute manager will reset
  2313. # the task_state on the instance, so wait for that to happen.
  2314. server = self._wait_for_server_parameter(
  2315. self.api, server, {'OS-EXT-STS:task_state': None})
  2316. self.assertEqual(source_hostname, migration['source_compute'])
  2317. self.assertEqual(dest_hostname, migration['dest_compute'])
  2318. source_usages = self._get_provider_usages(source_rp_uuid)
  2319. # Since the instance didn't move, assert the allocations are still
  2320. # on the source node.
  2321. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  2322. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2323. # Assert the allocations, created by the scheduler, are cleaned up
  2324. # after the rollback happens.
  2325. self.assertFlavorMatchesAllocation(
  2326. {'vcpus': 0, 'ram': 0, 'disk': 0}, dest_usages)
  2327. allocations = self._get_allocations_by_server_uuid(server['id'])
  2328. # There should only be 1 allocation for the instance on the source node
  2329. self.assertEqual(1, len(allocations))
  2330. self.assertIn(source_rp_uuid, allocations)
  2331. self.assertFlavorMatchesAllocation(
  2332. self.flavor1, allocations[source_rp_uuid]['resources'])
  2333. self._delete_and_check_allocations(server)
  2334. def test_rescheduling_when_migrating_instance(self):
  2335. """Tests that allocations are removed from the destination node by
  2336. the compute service when a cold migrate / resize fails and a reschedule
  2337. request is sent back to conductor.
  2338. """
  2339. source_hostname = self.compute1.manager.host
  2340. server = self._boot_and_check_allocations(
  2341. self.flavor1, source_hostname)
  2342. def fake_prep_resize(*args, **kwargs):
  2343. dest_hostname = self._other_hostname(source_hostname)
  2344. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  2345. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2346. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  2347. allocations = self._get_allocations_by_server_uuid(server['id'])
  2348. self.assertIn(dest_rp_uuid, allocations)
  2349. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2350. source_usages = self._get_provider_usages(source_rp_uuid)
  2351. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  2352. migration_uuid = self.get_migration_uuid_for_instance(server['id'])
  2353. allocations = self._get_allocations_by_server_uuid(migration_uuid)
  2354. self.assertIn(source_rp_uuid, allocations)
  2355. raise test.TestingException('Simulated _prep_resize failure.')
  2356. # Yes this isn't great in a functional test, but it's simple.
  2357. self.stub_out('nova.compute.manager.ComputeManager._prep_resize',
  2358. fake_prep_resize)
  2359. # Now migrate the server which is going to fail on the destination.
  2360. self.api.post_server_action(server['id'], {'migrate': None})
  2361. self._wait_for_action_fail_completion(
  2362. server, instance_actions.MIGRATE, 'compute_prep_resize')
  2363. dest_hostname = self._other_hostname(source_hostname)
  2364. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  2365. failed_usages = self._get_provider_usages(dest_rp_uuid)
  2366. # Expects no allocation records on the failed host.
  2367. self.assertFlavorMatchesAllocation(
  2368. {'vcpus': 0, 'ram': 0, 'disk': 0}, failed_usages)
  2369. # Ensure the allocation records still exist on the source host.
  2370. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2371. source_usages = self._get_provider_usages(source_rp_uuid)
  2372. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  2373. allocations = self._get_allocations_by_server_uuid(server['id'])
  2374. self.assertIn(source_rp_uuid, allocations)
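# The fake_prep_resize assertions above rely on the move-operation
# bookkeeping where the source-host allocation is transferred to the
# migration record's UUID, so mid-migration Placement holds two
# consumers:
#     server['id']   -> allocations on the destination provider
#     migration_uuid -> allocations held on the source provider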
  2375. def _test_resize_to_same_host_instance_fails(self, failing_method,
  2376. event_name):
  2377. """Tests that when we resize to the same host and resize fails in
  2378. the given method, we cleanup the allocations before rescheduling.
  2379. """
  2380. # make sure that the test only uses a single host
  2381. compute2_service_id = self.admin_api.get_services(
  2382. host=self.compute2.host, binary='nova-compute')[0]['id']
  2383. self.admin_api.put_service(compute2_service_id, {'status': 'disabled'})
  2384. hostname = self.compute1.manager.host
  2385. rp_uuid = self._get_provider_uuid_by_host(hostname)
  2386. server = self._boot_and_check_allocations(self.flavor1, hostname)
  2387. def fake_resize_method(*args, **kwargs):
  2388. # Ensure the allocations are doubled now before we fail.
  2389. usages = self._get_provider_usages(rp_uuid)
  2390. self.assertFlavorsMatchAllocation(
  2391. self.flavor1, self.flavor2, usages)
  2392. raise test.TestingException('Simulated resize failure.')
  2393. # Yes this isn't great in a functional test, but it's simple.
  2394. self.stub_out(
  2395. 'nova.compute.manager.ComputeManager.%s' % failing_method,
  2396. fake_resize_method)
  2397. self.flags(allow_resize_to_same_host=True)
  2398. resize_req = {
  2399. 'resize': {
  2400. 'flavorRef': self.flavor2['id']
  2401. }
  2402. }
  2403. self.api.post_server_action(server['id'], resize_req)
  2404. self._wait_for_action_fail_completion(
  2405. server, instance_actions.RESIZE, event_name)
  2406. # Ensure the allocation records still exist on the host.
  2407. source_rp_uuid = self._get_provider_uuid_by_host(hostname)
  2408. source_usages = self._get_provider_usages(source_rp_uuid)
  2409. if failing_method == '_finish_resize':
  2410. # finish_resize will drop the old flavor allocations.
  2411. self.assertFlavorMatchesAllocation(self.flavor2, source_usages)
  2412. else:
  2413. # The new_flavor should have been subtracted from the doubled
  2414. # allocation which just leaves us with the original flavor.
  2415. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
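# A worked example of the "doubled" allocation asserted inside
# fake_resize_method, using illustrative flavor sizes (not necessarily
# the test's flavors): resizing on the same host holds old + new flavor
# against one provider, so flavor1 = 1 VCPU / 512 MB and
# flavor2 = 1 VCPU / 1024 MB combine to {'VCPU': 2, 'MEMORY_MB': 1536}
# until one side of the resize is dropped.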
  2416. def test_resize_to_same_host_prep_resize_fails(self):
  2417. self._test_resize_to_same_host_instance_fails(
  2418. '_prep_resize', 'compute_prep_resize')
  2419. def test_resize_instance_fails_allocation_cleanup(self):
  2420. self._test_resize_to_same_host_instance_fails(
  2421. '_resize_instance', 'compute_resize_instance')
  2422. def test_finish_resize_fails_allocation_cleanup(self):
  2423. self._test_resize_to_same_host_instance_fails(
  2424. '_finish_resize', 'compute_finish_resize')
  2425. def _test_resize_reschedule_uses_host_lists(self, fails, num_alts=None):
  2426. """Test that when a resize attempt fails, the retry comes from the
  2427. supplied host_list, and does not call the scheduler.
  2428. """
  2429. server_req = self._build_minimal_create_server_request(
  2430. self.api, "some-server", flavor_id=self.flavor1["id"],
  2431. image_uuid="155d900f-4e14-4e4c-a73d-069cbf4541e6",
  2432. networks=[])
  2433. created_server = self.api.post_server({"server": server_req})
  2434. server = self._wait_for_state_change(self.api, created_server,
  2435. "ACTIVE")
  2436. inst_host = server["OS-EXT-SRV-ATTR:host"]
  2437. uuid_orig = self._get_provider_uuid_by_host(inst_host)
  2438. # We will need four new compute nodes to test the resize, representing
  2439. # the host selected by select_destinations(), along with 3 alternates.
  2440. self._start_compute(host="selection")
  2441. self._start_compute(host="alt_host1")
  2442. self._start_compute(host="alt_host2")
  2443. self._start_compute(host="alt_host3")
  2444. uuid_sel = self._get_provider_uuid_by_host("selection")
  2445. uuid_alt1 = self._get_provider_uuid_by_host("alt_host1")
  2446. uuid_alt2 = self._get_provider_uuid_by_host("alt_host2")
  2447. uuid_alt3 = self._get_provider_uuid_by_host("alt_host3")
  2448. hosts = [{"name": "selection", "uuid": uuid_sel},
  2449. {"name": "alt_host1", "uuid": uuid_alt1},
  2450. {"name": "alt_host2", "uuid": uuid_alt2},
  2451. {"name": "alt_host3", "uuid": uuid_alt3},
  2452. ]
  2453. self.flags(weight_classes=[__name__ + '.AltHostWeigher'],
  2454. group='filter_scheduler')
  2455. self.scheduler_service.stop()
  2456. self.scheduler_service = self.start_service('scheduler')
  2457. def fake_prep_resize(*args, **kwargs):
  2458. if self.num_fails < fails:
  2459. self.num_fails += 1
  2460. raise Exception("fake_prep_resize")
  2461. actual_prep_resize(*args, **kwargs)
  2462. # Yes this isn't great in a functional test, but it's simple.
  2463. actual_prep_resize = compute_manager.ComputeManager._prep_resize
  2464. self.stub_out("nova.compute.manager.ComputeManager._prep_resize",
  2465. fake_prep_resize)
  2466. self.num_fails = 0
  2467. num_alts = 4 if num_alts is None else num_alts
  2468. # Make sure we have enough retries available for the number of
  2469. # requested fails.
  2470. attempts = min(fails + 2, num_alts)
  2471. self.flags(max_attempts=attempts, group='scheduler')
  2472. server_uuid = server["id"]
  2473. data = {"resize": {"flavorRef": self.flavor2["id"]}}
  2474. self.api.post_server_action(server_uuid, data)
  2475. if num_alts < fails:
  2476. # We will run out of alternates before populate_retry will
  2477. # raise a MaxRetriesExceeded exception, so the migration will
  2478. # fail and the server should be in status "ERROR"
  2479. server = self._wait_for_state_change(self.api, created_server,
  2480. "ERROR")
  2481. source_usages = self._get_provider_usages(uuid_orig)
  2482. # The usage should be unchanged from the original flavor
  2483. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  2484. # There should be no usages on any of the hosts
  2485. target_uuids = (uuid_sel, uuid_alt1, uuid_alt2, uuid_alt3)
  2486. empty_usage = {"VCPU": 0, "MEMORY_MB": 0, "DISK_GB": 0}
  2487. for target_uuid in target_uuids:
  2488. usage = self._get_provider_usages(target_uuid)
  2489. self.assertEqual(empty_usage, usage)
  2490. else:
  2491. server = self._wait_for_state_change(self.api, created_server,
  2492. "VERIFY_RESIZE")
  2493. # Verify that the selected host failed, and was rescheduled to
  2494. # an alternate host.
  2495. new_server_host = server.get("OS-EXT-SRV-ATTR:host")
  2496. expected_host = hosts[fails]["name"]
  2497. self.assertEqual(expected_host, new_server_host)
  2498. uuid_dest = hosts[fails]["uuid"]
  2499. source_usages = self._get_provider_usages(uuid_orig)
  2500. dest_usages = self._get_provider_usages(uuid_dest)
  2501. # The usage should match the resized flavor
  2502. self.assertFlavorMatchesAllocation(self.flavor2, dest_usages)
  2503. # Verify that the other hosts have no allocations.
  2504. target_uuids = (uuid_sel, uuid_alt1, uuid_alt2, uuid_alt3)
  2505. empty_usage = {"VCPU": 0, "MEMORY_MB": 0, "DISK_GB": 0}
  2506. for target_uuid in target_uuids:
  2507. if target_uuid == uuid_dest:
  2508. continue
  2509. usage = self._get_provider_usages(target_uuid)
  2510. self.assertEqual(empty_usage, usage)
  2511. # Verify that there is only one migration record for the instance.
  2512. ctxt = context.get_admin_context()
  2513. filters = {"instance_uuid": server["id"]}
  2514. migrations = objects.MigrationList.get_by_filters(ctxt, filters)
  2515. self.assertEqual(1, len(migrations.objects))
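# The retry budget above is attempts = min(fails + 2, num_alts). Worked
# cases: fails=1 with the default num_alts=4 gives attempts=3, enough
# headroom to reschedule past the failure; fails=3 with num_alts=1
# gives attempts=1, so the alternates run out and the server lands in
# ERROR, which is the "not_enough_alts" case below.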
  2516. def test_resize_reschedule_uses_host_lists_1_fail(self):
  2517. self._test_resize_reschedule_uses_host_lists(fails=1)
  2518. def test_resize_reschedule_uses_host_lists_3_fails(self):
  2519. self._test_resize_reschedule_uses_host_lists(fails=3)
  2520. def test_resize_reschedule_uses_host_lists_not_enough_alts(self):
  2521. self._test_resize_reschedule_uses_host_lists(fails=3, num_alts=1)
  2522. def test_migrate_confirm(self):
  2523. source_hostname = self.compute1.host
  2524. dest_hostname = self.compute2.host
  2525. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2526. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  2527. server = self._boot_and_check_allocations(
  2528. self.flavor1, source_hostname)
  2529. self._migrate_and_check_allocations(
  2530. server, self.flavor1, source_rp_uuid, dest_rp_uuid)
  2531. # Confirm the move and check the usages
  2532. post = {'confirmResize': None}
  2533. self.api.post_server_action(
  2534. server['id'], post, check_response_status=[204])
  2535. self._wait_for_state_change(self.api, server, 'ACTIVE')
  2536. def _check_allocation():
  2537. allocations = self._get_allocations_by_server_uuid(server['id'])
  2538. # and the server allocates only from the target host
  2539. self.assertEqual(1, len(allocations))
  2540. source_usages = self._get_provider_usages(source_rp_uuid)
  2541. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2542. # and the target host allocation should be according to the flavor
  2543. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  2544. self.assertEqual({'VCPU': 0,
  2545. 'MEMORY_MB': 0,
  2546. 'DISK_GB': 0}, source_usages,
  2547. 'The source host %s still has usages after the '
  2548. 'resize has been confirmed' % source_hostname)
  2551. dest_allocation = allocations[dest_rp_uuid]['resources']
  2552. self.assertFlavorMatchesAllocation(self.flavor1, dest_allocation)
  2553. # After confirming, we should have an allocation only on the
  2554. # destination host
  2555. _check_allocation()
  2556. self._run_periodics()
  2557. # Check we're still accurate after running the periodics
  2558. _check_allocation()
  2559. self._delete_and_check_allocations(server)
  2560. def test_migrate_revert(self):
  2561. source_hostname = self.compute1.host
  2562. dest_hostname = self.compute2.host
  2563. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2564. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  2565. server = self._boot_and_check_allocations(
  2566. self.flavor1, source_hostname)
  2567. self._migrate_and_check_allocations(
  2568. server, self.flavor1, source_rp_uuid, dest_rp_uuid)
  2569. # Revert the move and check the usages
  2570. post = {'revertResize': None}
  2571. self.api.post_server_action(server['id'], post)
  2572. self._wait_for_state_change(self.api, server, 'ACTIVE')
  2573. def _check_allocation():
  2574. source_usages = self._get_provider_usages(source_rp_uuid)
  2575. allocations = self._get_allocations_by_server_uuid(server['id'])
  2576. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  2577. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2578. self.assertEqual({'VCPU': 0,
  2579. 'MEMORY_MB': 0,
  2580. 'DISK_GB': 0}, dest_usages,
  2581. 'Target host %s still has usage after the '
  2582. 'resize has been reverted' % dest_hostname)
  2583. # Check that the server only allocates resources from the original
  2584. # host
  2585. self.assertEqual(1, len(allocations))
  2586. source_allocation = allocations[source_rp_uuid]['resources']
  2587. self.assertFlavorMatchesAllocation(self.flavor1, source_allocation)
  2588. # The original host is expected to have the old resource allocation.
  2589. _check_allocation()
  2590. self._run_periodics()
  2591. _check_allocation()
  2592. self._delete_and_check_allocations(server)
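# Summary of the end states the two tests above expect after a cold
# migration:
#     confirmResize -> allocations only against dest_rp_uuid
#     revertResize  -> allocations only against source_rp_uuid
# with the other provider reporting zero VCPU/MEMORY_MB/DISK_GB usage.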
  2593. class ServerLiveMigrateForceAndAbort(ProviderUsageBaseTestCase):
  2594. """Test Server live migrations, which delete the migration or
  2595. force_complete it, and check the allocations after the operations.
  2596. The test are using fakedriver to handle the force_completion and deletion
  2597. of live migration.
  2598. """
  2599. compute_driver = 'fake.FakeLiveMigrateDriver'
  2600. def setUp(self):
  2601. super(ServerLiveMigrateForceAndAbort, self).setUp()
  2602. self.compute1 = self._start_compute(host='host1')
  2603. self.compute2 = self._start_compute(host='host2')
  2604. flavors = self.api.get_flavors()
  2605. self.flavor1 = flavors[0]
  2606. def test_live_migrate_force_complete(self):
  2607. source_hostname = self.compute1.host
  2608. dest_hostname = self.compute2.host
  2609. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2610. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  2611. server = self._boot_and_check_allocations(
  2612. self.flavor1, source_hostname)
  2613. post = {
  2614. 'os-migrateLive': {
  2615. 'host': dest_hostname,
  2616. 'block_migration': True,
  2617. }
  2618. }
  2619. self.api.post_server_action(server['id'], post)
  2620. migration = self._wait_for_migration_status(server, ['running'])
  2621. self.api.force_complete_migration(server['id'],
  2622. migration['id'])
  2623. self._wait_for_server_parameter(self.api, server,
  2624. {'OS-EXT-SRV-ATTR:host': dest_hostname,
  2625. 'status': 'ACTIVE'})
  2626. self._run_periodics()
  2627. source_usages = self._get_provider_usages(source_rp_uuid)
  2628. self.assertFlavorMatchesAllocation(
  2629. {'ram': 0, 'disk': 0, 'vcpus': 0}, source_usages)
  2630. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2631. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  2632. allocations = self._get_allocations_by_server_uuid(server['id'])
  2633. self.assertEqual(1, len(allocations))
  2634. self.assertNotIn(source_rp_uuid, allocations)
  2635. dest_allocation = allocations[dest_rp_uuid]['resources']
  2636. self.assertFlavorMatchesAllocation(self.flavor1, dest_allocation)
  2637. self._delete_and_check_allocations(server)
  2638. def test_live_migrate_delete(self):
  2639. source_hostname = self.compute1.host
  2640. dest_hostname = self.compute2.host
  2641. source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
  2642. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  2643. server = self._boot_and_check_allocations(
  2644. self.flavor1, source_hostname)
  2645. post = {
  2646. 'os-migrateLive': {
  2647. 'host': dest_hostname,
  2648. 'block_migration': True,
  2649. }
  2650. }
  2651. self.api.post_server_action(server['id'], post)
  2652. migration = self._wait_for_migration_status(server, ['running'])
  2653. self.api.delete_migration(server['id'], migration['id'])
  2654. self._wait_for_server_parameter(self.api, server,
  2655. {'OS-EXT-SRV-ATTR:host': source_hostname,
  2656. 'status': 'ACTIVE'})
  2657. self._run_periodics()
  2658. allocations = self._get_allocations_by_server_uuid(server['id'])
  2659. self.assertEqual(1, len(allocations))
  2660. self.assertNotIn(dest_rp_uuid, allocations)
  2661. source_usages = self._get_provider_usages(source_rp_uuid)
  2662. self.assertFlavorMatchesAllocation(self.flavor1, source_usages)
  2663. source_allocation = allocations[source_rp_uuid]['resources']
  2664. self.assertFlavorMatchesAllocation(self.flavor1, source_allocation)
  2665. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2666. self.assertFlavorMatchesAllocation(
  2667. {'ram': 0, 'disk': 0, 'vcpus': 0}, dest_usages)
  2668. self._delete_and_check_allocations(server)
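# Both tests above drive the migration through what are assumed to be
# thin wrappers over the server-migrations API:
#     force_complete_migration -> POST
#         /servers/{id}/migrations/{migration_id}/action
#         with body {'force_complete': None} (microversion >= 2.22)
#     delete_migration -> DELETE
#         /servers/{id}/migrations/{migration_id} (microversion >= 2.24)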
  2669. class ServerRescheduleTests(ProviderUsageBaseTestCase):
  2670. """Tests server create scenarios which trigger a reschedule during
  2671. a server build and validates that allocations in Placement
  2672. are properly cleaned up.
  2673. Uses a fake virt driver that fails the build on the first attempt.
  2674. """
  2675. compute_driver = 'fake.FakeRescheduleDriver'
  2676. def setUp(self):
  2677. super(ServerRescheduleTests, self).setUp()
  2678. self.compute1 = self._start_compute(host='host1')
  2679. self.compute2 = self._start_compute(host='host2')
  2680. flavors = self.api.get_flavors()
  2681. self.flavor1 = flavors[0]
  2682. def _other_hostname(self, host):
  2683. other_host = {'host1': 'host2',
  2684. 'host2': 'host1'}
  2685. return other_host[host]
  2686. def test_rescheduling_when_booting_instance(self):
  2687. """Tests that allocations, created by the scheduler, are cleaned
  2688. from the source node when the build fails on that node and is
  2689. rescheduled to another node.
  2690. """
  2691. server_req = self._build_minimal_create_server_request(
  2692. self.api, 'some-server', flavor_id=self.flavor1['id'],
  2693. image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
  2694. networks=[])
  2695. created_server = self.api.post_server({'server': server_req})
  2696. server = self._wait_for_state_change(
  2697. self.api, created_server, 'ACTIVE')
  2698. dest_hostname = server['OS-EXT-SRV-ATTR:host']
  2699. failed_hostname = self._other_hostname(dest_hostname)
  2700. LOG.info('failed on %s', failed_hostname)
  2701. LOG.info('booting on %s', dest_hostname)
  2702. failed_rp_uuid = self._get_provider_uuid_by_host(failed_hostname)
  2703. dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
  2704. failed_usages = self._get_provider_usages(failed_rp_uuid)
  2705. # Expects no allocation records on the failed host.
  2706. self.assertFlavorMatchesAllocation(
  2707. {'vcpus': 0, 'ram': 0, 'disk': 0}, failed_usages)
  2708. # Ensure the allocation records exist on the destination host.
  2709. dest_usages = self._get_provider_usages(dest_rp_uuid)
  2710. self.assertFlavorMatchesAllocation(self.flavor1, dest_usages)
  2711. class ServerBuildAbortTests(ProviderUsageBaseTestCase):
  2712. """Tests server create scenarios which trigger a build abort during
  2713. a server build and validates that allocations in Placement
  2714. are properly cleaned up.
  2715. Uses a fake virt driver that aborts the build on the first attempt.
  2716. """
  2717. compute_driver = 'fake.FakeBuildAbortDriver'
  2718. def setUp(self):
  2719. super(ServerBuildAbortTests, self).setUp()
  2720. # We only need one compute service/host/node for these tests.
  2721. self.compute1 = self._start_compute(host='host1')
  2722. flavors = self.api.get_flavors()
  2723. self.flavor1 = flavors[0]
  2724. def test_abort_when_booting_instance(self):
  2725. """Tests that allocations, created by the scheduler, are cleaned
  2726. from the source node when the build is aborted on that node.
  2727. """
  2728. server_req = self._build_minimal_create_server_request(
  2729. self.api, 'some-server', flavor_id=self.flavor1['id'],
  2730. image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
  2731. networks=[])
  2732. created_server = self.api.post_server({'server': server_req})
  2733. self._wait_for_state_change(self.api, created_server, 'ERROR')
  2734. failed_hostname = self.compute1.manager.host
  2735. failed_rp_uuid = self._get_provider_uuid_by_host(failed_hostname)
  2736. failed_usages = self._get_provider_usages(failed_rp_uuid)
  2737. # Expects no allocation records on the failed host.
  2738. self.assertFlavorMatchesAllocation(
  2739. {'vcpus': 0, 'ram': 0, 'disk': 0}, failed_usages)
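# Unlike the reschedule scenario above, an aborted build (presumably
# the fake driver raising BuildAbortException) is terminal: no
# alternate host is tried, the server goes straight to ERROR, and the
# compute service itself is expected to strip the allocations it held.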
  2740. class ServerUnshelveSpawnFailTests(ProviderUsageBaseTestCase):
  2741. """Tests server unshelve scenarios which trigger a
  2742. VirtualInterfaceCreateException during driver.spawn() and validates that
  2743. allocations in Placement are properly cleaned up.
  2744. """
  2745. compute_driver = 'fake.FakeUnshelveSpawnFailDriver'
  2746. def setUp(self):
  2747. super(ServerUnshelveSpawnFailTests, self).setUp()
  2748. # We only need one compute service/host/node for these tests.
  2749. self.compute1 = self._start_compute('host1')
  2750. flavors = self.api.get_flavors()
  2751. self.flavor1 = flavors[0]
  2752. def test_driver_spawn_fail_when_unshelving_instance(self):
  2753. """Tests that allocations, created by the scheduler, are cleaned
  2754. from the target node when the unshelve driver.spawn fails on that node.
  2755. """
  2756. hostname = self.compute1.manager.host
  2757. rp_uuid = self._get_provider_uuid_by_host(hostname)
  2758. usages = self._get_provider_usages(rp_uuid)
  2759. # We start with no usages on the host.
  2760. self.assertFlavorMatchesAllocation(
  2761. {'vcpus': 0, 'ram': 0, 'disk': 0}, usages)
  2762. server_req = self._build_minimal_create_server_request(
  2763. self.api, 'unshelve-spawn-fail', flavor_id=self.flavor1['id'],
  2764. image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
  2765. networks='none')
  2766. server = self.api.post_server({'server': server_req})
  2767. self._wait_for_state_change(self.api, server, 'ACTIVE')
  2768. # assert allocations exist for the host
  2769. usages = self._get_provider_usages(rp_uuid)
  2770. self.assertFlavorMatchesAllocation(self.flavor1, usages)
  2771. # shelve offload the server
  2772. self.flags(shelved_offload_time=0)
  2773. self.api.post_server_action(server['id'], {'shelve': None})
  2774. self._wait_for_server_parameter(
  2775. self.api, server, {'status': 'SHELVED_OFFLOADED',
  2776. 'OS-EXT-SRV-ATTR:host': None})
  2777. # assert allocations were removed from the host
  2778. usages = self._get_provider_usages(rp_uuid)
  2779. self.assertFlavorMatchesAllocation(
  2780. {'vcpus': 0, 'ram': 0, 'disk': 0}, usages)
  2781. # unshelve the server, which should fail
  2782. self.api.post_server_action(server['id'], {'unshelve': None})
  2783. self._wait_for_action_fail_completion(
  2784. server, instance_actions.UNSHELVE, 'compute_unshelve_instance')
  2785. # assert allocations were removed from the host
  2786. usages = self._get_provider_usages(rp_uuid)
  2787. self.assertFlavorMatchesAllocation(
  2788. {'vcpus': 0, 'ram': 0, 'disk': 0}, usages)
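# The shelve/unshelve flow exercised above, in allocation terms:
#     shelve offload -> instance leaves the host, usage drops to zero
#     unshelve       -> the scheduler re-creates allocations on the
#                       chosen host
#     spawn failure  -> those fresh allocations are cleaned up again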
  2789. class ServerSoftDeleteTests(ProviderUsageBaseTestCase):
  2790. compute_driver = 'fake.SmallFakeDriver'
  2791. def setUp(self):
  2792. super(ServerSoftDeleteTests, self).setUp()
  2793. # We only need one compute service/host/node for these tests.
  2794. self.compute1 = self._start_compute('host1')
  2795. flavors = self.api.get_flavors()
  2796. self.flavor1 = flavors[0]
  2797. def _soft_delete_and_check_allocation(self, server, hostname):
  2798. self.api.delete_server(server['id'])
  2799. server = self._wait_for_state_change(self.api, server, 'SOFT_DELETED')
  2800. self._run_periodics()
  2801. # In the soft-deleted state nova should keep the resource allocation,
  2802. # as the instance can be restored.
  2803. rp_uuid = self._get_provider_uuid_by_host(hostname)
  2804. usages = self._get_provider_usages(rp_uuid)
  2805. self.assertFlavorMatchesAllocation(self.flavor1, usages)
  2806. allocations = self._get_allocations_by_server_uuid(server['id'])
  2807. self.assertEqual(1, len(allocations))
  2808. allocation = allocations[rp_uuid]['resources']
  2809. self.assertFlavorMatchesAllocation(self.flavor1, allocation)
  2810. # run the periodic reclaim but as time isn't advanced it should not
  2811. # reclaim the instance
  2812. ctxt = context.get_admin_context()
  2813. self.compute1._reclaim_queued_deletes(ctxt)
  2814. self._run_periodics()
  2815. usages = self._get_provider_usages(rp_uuid)
  2816. self.assertFlavorMatchesAllocation(self.flavor1, usages)
  2817. allocations = self._get_allocations_by_server_uuid(server['id'])
  2818. self.assertEqual(1, len(allocations))
  2819. allocation = allocations[rp_uuid]['resources']
  2820. self.assertFlavorMatchesAllocation(self.flavor1, allocation)
  2821. def test_soft_delete_then_reclaim(self):
  2822. """Asserts that the automatic reclaim of soft deleted instance cleans
  2823. up the allocations in placement.
  2824. """
  2825. # Make sure that the instance will go to SOFT_DELETED state instead
  2826. # of being deleted immediately.
  2827. self.flags(reclaim_instance_interval=30)
  2828. hostname = self.compute1.host
  2829. rp_uuid = self._get_provider_uuid_by_host(hostname)
  2830. server = self._boot_and_check_allocations(self.flavor1, hostname)
  2831. self._soft_delete_and_check_allocation(server, hostname)
  2832. # Advance the apparent time and run the periodic reclaim; the
  2833. # instance should be deleted and its resources freed.
  2834. future = timeutils.utcnow() + datetime.timedelta(hours=1)
  2835. timeutils.set_time_override(override_time=future)
  2836. self.addCleanup(timeutils.clear_time_override)
  2837. ctxt = context.get_admin_context()
  2838. self.compute1._reclaim_queued_deletes(ctxt)
  2839. # Wait for real deletion
  2840. self._wait_until_deleted(server)
  2841. usages = self._get_provider_usages(rp_uuid)
  2842. self.assertEqual({'VCPU': 0,
  2843. 'MEMORY_MB': 0,
  2844. 'DISK_GB': 0}, usages)
  2845. allocations = self._get_allocations_by_server_uuid(server['id'])
  2846. self.assertEqual(0, len(allocations))
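# The reclaim above is triggered by overriding the clock rather than
# sleeping: timeutils.set_time_override() makes timeutils.utcnow()
# return the supplied value, so the soft-deleted instance appears older
# than reclaim_instance_interval (30 seconds here) and
# _reclaim_queued_deletes purges it.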
  2847. def test_soft_delete_then_restore(self):
  2848. """Asserts that restoring a soft deleted instance keeps the proper
  2849. allocation in placement.
  2850. """
  2851. # Make sure that the instance will go to SOFT_DELETED state instead
  2852. # of being deleted immediately.
  2853. self.flags(reclaim_instance_interval=30)
  2854. hostname = self.compute1.host
  2855. rp_uuid = self._get_provider_uuid_by_host(hostname)
  2856. server = self._boot_and_check_allocations(
  2857. self.flavor1, hostname)
  2858. self._soft_delete_and_check_allocation(server, hostname)
  2859. post = {'restore': {}}
  2860. self.api.post_server_action(server['id'], post)
  2861. server = self._wait_for_state_change(self.api, server, 'ACTIVE')
  2862. # after restore the allocations should be kept
  2863. usages = self._get_provider_usages(rp_uuid)
  2864. self.assertFlavorMatchesAllocation(self.flavor1, usages)
  2865. allocations = self._get_allocations_by_server_uuid(server['id'])
  2866. self.assertEqual(1, len(allocations))
  2867. allocation = allocations[rp_uuid]['resources']
  2868. self.assertFlavorMatchesAllocation(self.flavor1, allocation)
  2869. # Now we want a real delete
  2870. self.flags(reclaim_instance_interval=0)
  2871. self._delete_and_check_allocations(server)
  2872. class TraitsBasedSchedulingTest(ProviderUsageBaseTestCase):
  2873. """Tests for requesting a server with required traits in Placement"""
  2874. compute_driver = 'fake.SmallFakeDriver'
  2875. def setUp(self):
  2876. super(TraitsBasedSchedulingTest, self).setUp()
  2877. self.compute1 = self._start_compute('host1')
  2878. self.compute2 = self._start_compute('host2')
  2879. # Using a standard trait from the os-traits library, set a required
  2880. # trait extra spec on the flavor.
  2881. flavors = self.api.get_flavors()
  2882. self.flavor_with_trait = flavors[0]
  2883. self.admin_api.post_extra_spec(
  2884. self.flavor_with_trait['id'],
  2885. {'extra_specs': {'trait:HW_CPU_X86_VMX': 'required'}})
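# A flavor extra spec of the form trait:<NAME>=required is translated
# by the scheduler into a required trait on its Placement query,
# roughly (sketch, not asserted by these tests):
#     GET /allocation_candidates?resources=...&required=HW_CPU_X86_VMX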
  2886. def _create_server(self):
  2887. # Create the server using the flavor with the required trait.
  2888. server_req = self._build_minimal_create_server_request(
  2889. self.api, 'trait-based-server',
  2890. image_uuid='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
  2891. flavor_id=self.flavor_with_trait['id'], networks='none')
  2892. return self.api.post_server({'server': server_req})
  2893. def test_traits_based_scheduling(self):
  2894. """Tests that a server create request using a required trait ends
  2895. up on the single compute node resource provider that also has that
  2896. trait in Placement.
  2897. """
  2898. # Decorate the compute1 resource provider with that same required
  2899. # trait.
  2900. rp_uuid = self._get_provider_uuid_by_host(self.compute1.host)
  2901. self._set_provider_traits(rp_uuid, ['HW_CPU_X86_VMX'])
  2902. server = self._create_server()
  2903. server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
  2904. # Assert the server ended up on the expected compute host that has
  2905. # the required trait.
  2906. self.assertEqual(self.compute1.host, server['OS-EXT-SRV-ATTR:host'])
  2907. def test_traits_based_scheduling_no_valid_host(self):
  2908. """Tests that a server create request using a required trait
  2909. fails to find a valid host since no compute node resource providers
  2910. have the trait.
  2911. """
  2912. server = self._create_server()
  2913. # The server should go to ERROR state because there is no valid host.
  2914. server = self._wait_for_state_change(self.admin_api, server, 'ERROR')
  2915. self.assertIsNone(server['OS-EXT-SRV-ATTR:host'])
  2916. # Make sure the failure was due to NoValidHost by checking the fault.
  2917. self.assertIn('fault', server)
  2918. self.assertIn('No valid host', server['fault']['message'])
  2919. class ServerTestV256Common(ServersTestBase):
  2920. api_major_version = 'v2.1'
  2921. microversion = '2.56'
  2922. ADMIN_API = True
  2923. def _setup_compute_service(self):
  2924. # Set up 3 compute services in the same cell
  2925. for host in ('host1', 'host2', 'host3'):
  2926. fake.set_nodes([host])
  2927. self.addCleanup(fake.restore_nodes)
  2928. self.start_service('compute', host=host)
  2929. def _create_server(self):
  2930. server = self._build_minimal_create_server_request(
  2931. image_uuid='a2459075-d96c-40d5-893e-577ff92e721c')
  2932. server.update({'networks': 'auto'})
  2933. post = {'server': server}
  2934. response = self.api.api_post('/servers', post).body
  2935. return response['server']
  2936. @staticmethod
  2937. def _get_target_and_other_hosts(host):
  2938. target_other_hosts = {'host1': ['host2', 'host3'],
  2939. 'host2': ['host3', 'host1'],
  2940. 'host3': ['host1', 'host2']}
  2941. return target_other_hosts[host]
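# Microversion 2.56, set on this test class, is what allows the cold
# migrate action used below to name an explicit target host:
#     {'migrate': {'host': 'host2'}}
# With earlier microversions the migrate action body had to be null.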
  2942. class ServerTestV256SingleCellMultiHostTestCase(ServerTestV256Common):
  2943. """Happy path test where we create a server on one host, migrate it to
  2944. another host of our choosing and ensure it lands there.
  2945. """
  2946. def test_migrate_server_to_host_in_same_cell(self):
  2947. server = self._create_server()
  2948. server = self._wait_for_state_change(server, 'BUILD')
  2949. source_host = server['OS-EXT-SRV-ATTR:host']
  2950. target_host = self._get_target_and_other_hosts(source_host)[0]
  2951. self.api.post_server_action(server['id'],
  2952. {'migrate': {'host': target_host}})
  2953. # Assert the server is now on the target host.
  2954. server = self.api.get_server(server['id'])
  2955. self.assertEqual(target_host, server['OS-EXT-SRV-ATTR:host'])
  2956. class ServerTestV256RescheduleTestCase(ServerTestV256Common):
  2957. @mock.patch.object(compute_manager.ComputeManager, '_prep_resize',
  2958. side_effect=exception.MigrationError(
  2959. reason='Test Exception'))
  2960. def test_migrate_server_not_reschedule(self, mock_prep_resize):
  2961. server = self._create_server()
  2962. found_server = self._wait_for_state_change(server, 'BUILD')
  2963. target_host, other_host = self._get_target_and_other_hosts(
  2964. found_server['OS-EXT-SRV-ATTR:host'])
  2965. self.assertRaises(client.OpenStackApiException,
  2966. self.api.post_server_action,
  2967. server['id'],
  2968. {'migrate': {'host': target_host}})
  2969. self.assertEqual(1, mock_prep_resize.call_count)
  2970. found_server = self.api.get_server(server['id'])
  2971. # Check that rescheduling did not occur.
  2972. self.assertNotEqual(other_host, found_server['OS-EXT-SRV-ATTR:host'])