Fuel UI
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

test_task_managers.py 59KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563
  1. # -*- coding: utf-8 -*-
  2. # Copyright 2013 Mirantis, Inc.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License"); you may
  5. # not use this file except in compliance with the License. You may obtain
  6. # a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  12. # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  13. # License for the specific language governing permissions and limitations
  14. # under the License.
  15. import time
  16. import mock
  17. from sqlalchemy import sql
  18. import nailgun
  19. import nailgun.rpc as rpc
  20. from nailgun import consts
  21. from nailgun import objects
  22. from nailgun.consts import ACTION_TYPES
  23. from nailgun.consts import NODE_STATUSES
  24. from nailgun.consts import TASK_NAMES
  25. from nailgun.consts import TASK_STATUSES
  26. from nailgun.db.sqlalchemy import models
  27. from nailgun import errors
  28. from nailgun.rpc.receiver import NailgunReceiver
  29. from nailgun.task.helpers import TaskHelper
  30. from nailgun.task import manager
  31. from nailgun.task import task
  32. from nailgun.test.base import BaseIntegrationTest
  33. from nailgun.test.base import fake_tasks
  34. from nailgun.utils import reverse
  35. class TestTaskManagers(BaseIntegrationTest):
  36. def check_node_presence(self, nodes_count):
  37. return self.db.query(models.Node).count() == nodes_count
    def check_cluster_deletion_task(self, task_):
        """Assert *task_* is a finished cluster_deletion task.

        The task must be 'ready' at 100% progress, detached from its
        (now removed) cluster and soft-deleted (deleted_at is set).
        """
        self.assertEqual(task_.name, consts.TASK_NAMES.cluster_deletion)
        self.assertEqual(task_.status, consts.TASK_STATUSES.ready)
        self.assertEqual(task_.progress, 100)
        self.assertIsNone(task_.cluster_id)
        self.assertIsNotNone(task_.deleted_at)
    def _check_timing(self, task):
        """Assert *task* has both timestamps and they are ordered sanely."""
        self.assertIsNotNone(task.time_start)
        self.assertIsNotNone(task.time_end)
        self.assertLessEqual(task.time_start, task.time_end)
    def set_history_ready(self):
        # Force every deployment-history record into the 'ready' state.
        objects.DeploymentHistoryCollection.all().update(
            {'status': consts.HISTORY_TASK_STATUSES.ready})
    def set_tasks_ready(self):
        # Force every transaction (task) record into the 'ready' state.
        objects.TransactionCollection.all().update(
            {'status': consts.TASK_STATUSES.ready})
    @fake_tasks(override_state={"progress": 100, "status": "ready"})
    def test_deployment_task_managers(self):
        """Happy-path deployment: one node added, one deleted.

        The supertask must finish 'ready', spawn the three expected
        subtasks, aggregate their messages, and leave all remaining
        cluster nodes ready at 100% progress.
        """
        cluster = self.env.create(
            nodes_kwargs=[
                {"pending_addition": True},
                {"pending_deletion": True,
                 'status': NODE_STATUSES.provisioned},
            ]
        )
        supertask = self.env.launch_deployment(cluster['id'])
        self.env.refresh_nodes()
        self.assertEqual(supertask.name, TASK_NAMES.deploy)
        self.assertEqual(supertask.status, consts.TASK_STATUSES.ready)
        self._check_timing(supertask)
        # we have three subtasks here
        # deletion
        # provision
        # deployment
        self.assertEqual(len(supertask.subtasks), 3)
        # provisioning task has less weight than deployment
        provision_task = next(
            t for t in supertask.subtasks
            if t.name == consts.TASK_NAMES.provision
        )
        self._check_timing(provision_task)
        self.assertEqual(provision_task.weight, 0.4)
        deployment_task = next(
            t for t in supertask.subtasks
            if t.name == consts.TASK_NAMES.deployment
        )
        self._check_timing(deployment_task)
        self.assertEqual(
            consts.DEFAULT_DEPLOYMENT_GRAPH_TYPE, deployment_task.graph_type
        )
        cluster_name = cluster['name']
        # the supertask message aggregates all three subtask messages
        self.assertIn(
            u"Successfully removed 1 node(s). No errors occurred",
            supertask.message
        )
        self.assertIn(
            u"Provision of environment '{0}' is done.".format(cluster_name),
            supertask.message
        )
        self.assertIn(
            u"Deployment of environment '{0}' is done.".format(cluster_name),
            supertask.message
        )
        self.env.refresh_nodes()
        for n in filter(
            lambda n: n.cluster_id == cluster['id'],
            self.env.nodes
        ):
            self.assertEqual(n.status, NODE_STATUSES.ready)
            self.assertEqual(n.progress, 100)
    @mock.patch('nailgun.task.task.rpc.cast')
    def test_settings_saved_in_transaction(self, _):
        """Cluster settings, network settings and the tasks snapshot are
        captured on the deployment transaction when deploy is launched."""
        cluster = self.env.create(
            nodes_kwargs=[
                {"pending_addition": True},
                {"pending_deletion": True,
                 'status': NODE_STATUSES.provisioned},
            ]
        )
        supertask = self.env.launch_deployment(cluster.id)
        self.assertNotEqual(TASK_STATUSES.error, supertask.status)
        deployment_task = next(
            t for t in supertask.subtasks if t.name == TASK_NAMES.deployment
        )
        # settings stored on the transaction must mirror the cluster's
        self.datadiff(
            {'editable': objects.Cluster.get_editable_attributes(cluster)},
            objects.Transaction.get_cluster_settings(deployment_task)
        )
        self.datadiff(
            objects.Cluster.get_network_attributes(cluster),
            objects.Transaction.get_network_settings(deployment_task),
        )
        self.assertEqual(
            len(objects.Transaction.get_tasks_snapshot(deployment_task)),
            len(objects.Cluster.get_deployment_tasks(cluster)))
    @mock.patch('nailgun.task.task.rpc.cast')
    def test_deployment_info_saves_in_transaction(self, _):
        # Exercises (release_ver, is_task_deploy, is_lcm) combinations;
        # only mitaka-9.0 is an LCM release here.
        self.check_deployment_info_was_saved_in_transaction(
            'mitaka-9.0', True, True
        )
        self.check_deployment_info_was_saved_in_transaction(
            'liberty-8.0', True, False
        )
        self.check_deployment_info_was_saved_in_transaction(
            '2015.1.0-7.0', False, False
        )
    def check_deployment_info_was_saved_in_transaction(
            self, release_ver, is_task_deploy, is_lcm
    ):
        """Deploy a cluster on *release_ver* and verify the transaction's
        deployment info covers every node that is not being deleted.

        :param release_ver: release version string for the environment
        :param is_task_deploy: when False, task-based deploy is disabled
        :param is_lcm: when True, the master node must also be present
            in the deployment info
        """
        cluster = self.env.create(
            nodes_kwargs=[
                {"pending_addition": True},
                {"pending_deletion": True,
                 'status': NODE_STATUSES.provisioned},
            ],
            release_kwargs={
                'operating_system': consts.RELEASE_OS.ubuntu,
                'version': release_ver
            },
        )
        if not is_task_deploy:
            self.env.disable_task_deploy(cluster)
        nodes_ids = [n.uid for n in cluster.nodes if not n.pending_deletion]
        supertask = self.env.launch_deployment(cluster.id)
        self.assertNotEqual(TASK_STATUSES.error, supertask.status)
        deployment_task = next(
            t for t in supertask.subtasks if t.name == TASK_NAMES.deployment
        )
        info = objects.Transaction.get_deployment_info(deployment_task)
        # information about master node should be in deployment info
        if is_lcm:
            nodes_ids.append(consts.MASTER_NODE_UID)
        # check that deployment info contains information about all nodes
        # that are not deleted
        self.assertItemsEqual(nodes_ids, info['nodes'])
  173. @mock.patch('nailgun.task.task.rpc.cast')
  174. @mock.patch('objects.Cluster.get_deployment_tasks')
  175. def test_deployment_tasks_assigned_for_primary_tags(
  176. self, tasks_mock, rpc_mock
  177. ):
  178. tasks_mock.return_value = [
  179. {
  180. 'id': 'primary_test', 'parameters': {}, 'type': 'puppet',
  181. 'roles': ['primary-controller'], 'version': '2.1.0',
  182. },
  183. {
  184. 'id': 'test', 'parameters': {}, 'type': 'puppet',
  185. 'roles': ['controller'], 'version': '2.1.0',
  186. }
  187. ]
  188. cluster = self.env.create(
  189. nodes_kwargs=[
  190. {"pending_addition": True, "pending_roles": ['controller']},
  191. {"pending_addition": True, "pending_roles": ['controller']},
  192. ],
  193. release_kwargs={
  194. 'operating_system': consts.RELEASE_OS.ubuntu,
  195. 'version': 'mitaka-9.0',
  196. },
  197. )
  198. supertask = self.env.launch_deployment(cluster.id)
  199. self.assertNotEqual(TASK_STATUSES.error, supertask.status)
  200. tasks_graph = rpc_mock.call_args[0][1][1]['args']['tasks_graph']
  201. self.assertEqual(
  202. ['primary_test'],
  203. [x['id'] for x in tasks_graph[cluster.nodes[0].uid]]
  204. )
  205. self.assertEqual(
  206. ['test'],
  207. [x['id'] for x in tasks_graph[cluster.nodes[1].uid]]
  208. )
  209. @fake_tasks()
  210. @mock.patch(
  211. 'nailgun.lcm.transaction_serializer.settings.LCM_CHECK_TASK_VERSION',
  212. new=True
  213. )
  214. @mock.patch('objects.Cluster.get_deployment_tasks')
  215. @mock.patch('objects.Cluster.is_propagate_task_deploy_enabled')
  216. def test_adaptation_legacy_tasks(self, propagate_mock, tasks_mock):
  217. tasks_mock.return_value = [
  218. {
  219. 'id': 'task', 'parameters': {}, 'type': 'puppet',
  220. 'roles': ['controller'], 'version': '1.0.0',
  221. },
  222. {
  223. 'id': 'controller', 'type': 'group', 'roles': ['controller']
  224. },
  225. {
  226. "id": "deploy_start",
  227. "type": consts.ORCHESTRATOR_TASK_TYPES.stage,
  228. "requires": ["pre_deployment_end"],
  229. },
  230. {
  231. "id": "deploy_end",
  232. "type": consts.ORCHESTRATOR_TASK_TYPES.stage,
  233. "requires": ["deploy_start"],
  234. },
  235. {
  236. "id": "pre_deployment_start",
  237. "type": consts.ORCHESTRATOR_TASK_TYPES.stage,
  238. },
  239. {
  240. "id": "pre_deployment_end",
  241. "type": consts.ORCHESTRATOR_TASK_TYPES.stage,
  242. "requires": ["pre_deployment_start"],
  243. },
  244. {
  245. "id": "post_deployment_start",
  246. "type": consts.ORCHESTRATOR_TASK_TYPES.stage,
  247. "requires": ["deploy_end"],
  248. },
  249. {
  250. "id": "post_deployment_end",
  251. "type": consts.ORCHESTRATOR_TASK_TYPES.stage,
  252. "requires": ["post_deployment_start"],
  253. },
  254. ]
  255. self.env.create(
  256. nodes_kwargs=[
  257. {"pending_addition": True, "pending_roles": ['controller']},
  258. {"pending_addition": True, "pending_roles": ['controller']},
  259. ],
  260. release_kwargs={
  261. 'operating_system': consts.RELEASE_OS.ubuntu,
  262. 'version': 'liberty-9.0',
  263. }
  264. )
  265. cluster = self.env.clusters[-1]
  266. propagate_mock.return_value = False
  267. supertask = self.env.launch_deployment(cluster.id)
  268. self.assertEqual(TASK_STATUSES.error, supertask.status)
  269. self.assertIn("Task 'task'", supertask.message)
  270. propagate_mock.return_value = True
  271. supertask = self.env.launch_deployment(cluster.id)
  272. self.assertEqual(TASK_STATUSES.ready, supertask.status)
  273. @fake_tasks()
  274. @mock.patch(
  275. 'nailgun.lcm.transaction_serializer.settings.LCM_CHECK_TASK_VERSION',
  276. new=True
  277. )
  278. @mock.patch('objects.Cluster.get_deployment_tasks')
  279. @mock.patch('objects.Cluster.is_propagate_task_deploy_enabled')
  280. def test_adapt_legacy_tasks_whanging_req(self, propagate_mock, tasks_mock):
  281. # Here we add non-existing dependencies to test for absence of failure
  282. # for LP#1635051
  283. tasks_mock.return_value = [
  284. {
  285. 'id': 'taskA', 'parameters': {}, 'type': 'puppet',
  286. 'roles': ['controller'], 'version': '1.0.0',
  287. 'requires': ['pre_deployment_start']
  288. },
  289. {
  290. 'id': 'taskB', 'parameters': {}, 'type': 'puppet',
  291. 'roles': ['controller'], 'version': '2.0.0',
  292. 'requires': ['pre_deployment_end', 'taskB']
  293. },
  294. {
  295. 'id': 'controller', 'type': 'group', 'roles': ['controller']
  296. },
  297. {
  298. "id": "deploy_start",
  299. "type": consts.ORCHESTRATOR_TASK_TYPES.stage,
  300. "requires": ["pre_deployment_end"],
  301. },
  302. {
  303. "id": "deploy_end",
  304. "type": consts.ORCHESTRATOR_TASK_TYPES.stage,
  305. "requires": ["deploy_start"],
  306. },
  307. ]
  308. self.env.create(
  309. nodes_kwargs=[
  310. {"pending_addition": True, "pending_roles": ['controller']},
  311. {"pending_addition": True, "pending_roles": ['controller']},
  312. ],
  313. release_kwargs={
  314. 'operating_system': consts.RELEASE_OS.ubuntu,
  315. 'version': 'mitaka-9.0',
  316. }
  317. )
  318. cluster = self.env.clusters[-1]
  319. propagate_mock.return_value = True
  320. supertask = self.env.launch_deployment(cluster.id)
  321. self.assertEqual(TASK_STATUSES.ready, supertask.status)
    @fake_tasks(fake_rpc=False, mock_rpc=True)
    def test_write_action_logs(self, _):
        """Every deployment subtask gets a nailgun_task ActionLog entry;
        synchronous validation subtasks are already closed out."""
        self.env.create(
            nodes_kwargs=[
                {"pending_addition": True},
                {"pending_addition": True},
                {"pending_deletion": True}
            ]
        )
        deployment_task = self.env.launch_deployment()
        for subtask in deployment_task.subtasks:
            action_log = objects.ActionLog.get_by_kwargs(
                task_uuid=subtask.uuid,
                action_name=subtask.name
            )
            self.assertIsNotNone(action_log)
            self.assertEqual(subtask.parent_id,
                             action_log.additional_info['parent_task_id'])
            self.assertIn(action_log.action_name, TASK_NAMES)
            self.assertEqual(action_log.action_type, ACTION_TYPES.nailgun_task)
            # pre-deployment checks finish synchronously, so their logs
            # must already carry an end timestamp, status and output
            if action_log.additional_info["operation"] in \
                    (TASK_NAMES.check_networks,
                     TASK_NAMES.check_before_deployment):
                self.assertIsNotNone(action_log.end_timestamp)
                self.assertIn("ended_with_status", action_log.additional_info)
                self.assertIn("message", action_log.additional_info)
                self.assertEqual(action_log.additional_info["message"], "")
                self.assertIn("output", action_log.additional_info)
    def test_update_action_logs_after_empty_cluster_deletion(self):
        """Deleting an empty cluster closes its action log with 'ready',
        an empty message and empty output."""
        self.env.create_cluster()
        self.env.delete_environment()
        al = objects.ActionLogCollection.filter_by(
            None, action_type=consts.ACTION_TYPES.nailgun_task).first()
        self.assertIsNotNone(al.end_timestamp)
        self.assertEqual(al.additional_info["ended_with_status"],
                         consts.TASK_STATUSES.ready)
        self.assertEqual(al.additional_info["message"], "")
        self.assertEqual(al.additional_info["output"], {})
    def test_action_log_created_for_check_before_deployment_with_error(self):
        """An offline node makes check_before_deployment fail; the action
        logs still record both validation tasks with proper statuses."""
        self.env.create(
            nodes_kwargs=[
                {"pending_addition": True, "online": False}
            ]
        )
        supertask = self.env.launch_deployment()
        action_logs = objects.ActionLogCollection.filter_by(
            None, action_type=consts.ACTION_TYPES.nailgun_task).all()
        # we have three action logs for the next tasks
        # deletion
        # provision
        # deployment
        self.assertEqual(len(action_logs), 3)
        for al in action_logs:
            self.assertEqual(al.action_type, ACTION_TYPES.nailgun_task)
            if al.additional_info["operation"] == TASK_NAMES.deploy:
                # the supertask itself has no parent and stays open
                self.assertIsNone(al.additional_info["parent_task_id"])
                self.assertEqual(al.task_uuid, supertask.uuid)
            else:
                self.assertIsNotNone(al.end_timestamp)
                self.assertIn("ended_with_status", al.additional_info)
                self.assertIn("message", al.additional_info)
                self.assertEqual(al.additional_info["message"], "")
                self.assertIn("output", al.additional_info)
                if (
                    al.additional_info["operation"] ==
                    TASK_NAMES.check_networks
                ):
                    # network check succeeds despite the offline node
                    self.assertEqual(al.additional_info["ended_with_status"],
                                     TASK_STATUSES.ready)
                    self.assertEqual(al.additional_info["parent_task_id"],
                                     supertask.id)
                elif (
                    al.additional_info["operation"] ==
                    TASK_NAMES.check_before_deployment
                ):
                    # this is the check that fails on the offline node
                    self.assertEqual(al.additional_info["ended_with_status"],
                                     TASK_STATUSES.error)
                    self.assertEqual(al.additional_info["parent_task_id"],
                                     supertask.id)
  401. @fake_tasks(fake_rpc=False, mock_rpc=False)
  402. @mock.patch('nailgun.rpc.cast')
  403. def test_do_not_send_node_to_orchestrator_which_has_status_discover(
  404. self, _):
  405. self.env.create(
  406. nodes_kwargs=[
  407. {'pending_deletion': True, 'status': 'discover'}])
  408. self.env.launch_deployment()
  409. args, kwargs = nailgun.task.manager.rpc.cast.call_args_list[0]
  410. self.assertEqual(len(args[1]['args']['nodes']), 0)
  411. self.env.refresh_nodes()
  412. for n in self.env.nodes:
  413. self.assertEqual(len(self.env.nodes), 0)
    @fake_tasks(fake_rpc=False, mock_rpc=False)
    @mock.patch('nailgun.rpc.cast')
    def test_send_to_orchestrator_offline_nodes(self, _):
        """An offline node in 'ready' state pending deletion IS sent to
        the orchestrator for removal (unlike 'discover' nodes)."""
        self.env.create(
            nodes_kwargs=[
                {'pending_deletion': True,
                 'status': 'ready',
                 'online': False}])
        self.env.launch_deployment()
        args, kwargs = nailgun.task.manager.rpc.cast.call_args_list[0]
        self.assertEqual(len(args[1]['args']['nodes']), 1)
    @fake_tasks(fake_rpc=False, mock_rpc=False)
    @mock.patch('nailgun.rpc.cast')
    def test_update_nodes_info_on_node_removal(self, _):
        """Removing a node from an operational cluster triggers an
        'execute_tasks' message that re-uploads nodes.yaml and re-applies
        the hosts.pp manifest on the remaining nodes."""
        cluster = self.env.create(
            cluster_kwargs={
                'status': consts.CLUSTER_STATUSES.operational,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.gre,
            },
            nodes_kwargs=[
                {'status': consts.NODE_STATUSES.ready,
                 'roles': ['controller']},
                {'status': consts.NODE_STATUSES.ready, 'roles': ['compute'],
                 'pending_deletion': True},
                {'status': consts.NODE_STATUSES.ready, 'roles': ['compute']},
                {'status': consts.NODE_STATUSES.ready, 'roles': ['compute']},
            ])
        objects.Cluster.prepare_for_deployment(cluster)
        self.env.launch_deployment()
        # the second rpc.cast carries the post-removal task messages
        args, _ = nailgun.task.manager.rpc.cast.call_args_list[1]
        for message in args[1]:
            if message['method'] == 'execute_tasks':
                self.assertEqual(message['respond_to'], 'deploy_resp')
                execute_tasks = message
                break
        else:
            self.fail("'execute_tasks' method not found")

        def is_upload_nodes(task):
            # upload task that writes nodes.yaml
            return 'nodes.yaml' in task['parameters'].get('path', '')

        def is_update_hosts(task):
            # puppet task that applies the hosts.pp manifest
            return 'hosts.pp' in task['parameters'].get('puppet_manifest', '')

        tasks = execute_tasks['args']['tasks']
        self.assertIsNotNone(next((
            t for t in tasks if is_upload_nodes(t)), None))
        self.assertIsNotNone(next((
            t for t in tasks if is_update_hosts(t)), None))
    @mock.patch('nailgun.task.manager.rpc.cast')
    def test_do_not_redeploy_nodes_in_ready_status(self, mcast):
        """Only the pending node gets provisioned/deployed; the node
        already in 'ready' state is left untouched."""
        self.env.create(
            nodes_kwargs=[
                {'pending_addition': False,
                 'roles': ['controller'],
                 'status': consts.NODE_STATUSES.ready},
                {'pending_addition': True,
                 'roles': ['compute'],
                 'status': consts.NODE_STATUSES.discover},
            ],
        )
        self.db.flush()
        node_db = self.env.nodes[1]
        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.name, consts.TASK_NAMES.deploy)
        self.assertEqual(supertask.status, consts.TASK_STATUSES.pending)
        # inspect the serialized payload handed to rpc.cast
        args, _ = mcast.call_args_list[0]
        provisioning_info = args[1][0]['args']['provisioning_info']
        deployment_info = args[1][1]['args']['deployment_info']
        # only one node should be provisioned (the second one)
        self.assertEqual(1, len(provisioning_info['nodes']))
        self.assertEqual(node_db.uid, provisioning_info['nodes'][0]['uid'])
        # only one node should be deployed (the second one)
        self.assertEqual(1, len(deployment_info))
        self.assertEqual(node_db.uid, deployment_info[0]['uid'])
    @fake_tasks()
    def test_deployment_fails_if_node_offline(self):
        """Deployment aborts with an error naming the offline node, and a
        never-deployed cluster keeps its 'new' status."""
        cluster = self.env.create_cluster(api=True)
        self.env.create_node(
            cluster_id=cluster['id'],
            roles=["controller"],
            pending_addition=True)
        offline_node = self.env.create_node(
            cluster_id=cluster['id'],
            roles=["compute"],
            online=False,
            name="Offline node",
            pending_addition=True)
        self.env.create_node(
            cluster_id=cluster['id'],
            roles=["compute"],
            pending_addition=True)
        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.status, consts.TASK_STATUSES.error)
        msg = ('Nodes "{0}" are offline. Remove them from environment '
               'and try again.'.format(offline_node.full_name))
        self.assertEqual(supertask.message, msg)
        # Do not move cluster to error state
        # in case if cluster new and before
        # validation failed
        self.assertEqual(cluster.status, 'new')
    @fake_tasks()
    def test_deployment_fails_if_node_to_redeploy_is_offline(self):
        """Redeploying an operational cluster with an offline 'ready' node
        fails with the offline-nodes error, but the cluster itself is not
        moved to the error state."""
        cluster = self.env.create_cluster(
            api=True,
            status=consts.CLUSTER_STATUSES.operational)
        offline_node = self.env.create_node(
            cluster_id=cluster['id'],
            roles=["controller"],
            online=False,
            name="Offline node to be redeployed",
            status=consts.NODE_STATUSES.ready)
        self.env.create_node(
            cluster_id=cluster['id'],
            roles=["controller"],
            pending_addition=True)
        self.env.create_node(
            cluster_id=cluster['id'],
            roles=["compute"],
            pending_addition=True)
        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.status, consts.TASK_STATUSES.error)
        msg = ('Nodes "{0}" are offline. Remove them from environment '
               'and try again.'.format(offline_node.full_name))
        self.assertEqual(supertask.message, msg)
        self.assertNotEqual(cluster.status, consts.CLUSTER_STATUSES.error)
    @fake_tasks(override_state={"progress": 100, "status": "ready"})
    def test_redeployment_works(self):
        """A second deployment after adding one more controller succeeds
        and leaves every node ready at 100% progress."""
        cluster = self.env.create(
            nodes_kwargs=[
                {"pending_addition": True},
                {"pending_addition": True},
                {"pending_addition": True},
                {"roles": ["compute"], "pending_addition": True}
            ]
        )
        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.status, consts.TASK_STATUSES.ready)
        self.env.refresh_nodes()
        # add one more node and redeploy
        self.env.create_node(
            cluster_id=cluster.id,
            roles=["controller"],
            pending_addition=True
        )
        supertask = self.env.launch_deployment()
        self.assertEqual(supertask.status, consts.TASK_STATUSES.ready)
        self.env.refresh_nodes()
        for n in self.env.nodes:
            self.assertEqual(n.status, 'ready')
            self.assertEqual(n.progress, 100)
    def test_deletion_empty_cluster_task_manager(self):
        """Deleting a node-less cluster completes synchronously, posting a
        'done' notification and one finished cluster_deletion task."""
        # (mihgen): we synchronously call rpc receiver for empty cluster
        # that's why there is no need to mock rpc now
        # see task/task.py#L513 (DeletionTask.execute)
        cluster = self.env.create_cluster(api=True)
        resp = self.app.delete(
            reverse(
                'ClusterHandler',
                kwargs={'obj_id': cluster['id']}),
            headers=self.default_headers
        )
        self.assertEqual(202, resp.status_code)
        notification = self.db.query(models.Notification)\
            .filter(models.Notification.topic == "done")\
            .filter(models.Notification.message == "Environment '{0}' "
                    "is deleted".format(cluster["name"])).first()
        self.assertIsNotNone(notification)
        tasks = self.db.query(models.Task).all()
        self.assertEqual(len(tasks), 1)
        self.check_cluster_deletion_task(tasks[0])
    @fake_tasks()
    def test_deletion_cluster_task_manager(self):
        """Deleting a cluster that has nodes removes the cluster row and
        posts a 'done' notification plus one cluster_deletion task."""
        cluster = self.env.create(
            nodes_kwargs=[
                {"status": "ready", "progress": 100},
                {"roles": ["compute"], "status": "ready", "progress": 100},
                {"roles": ["compute"], "pending_addition": True},
            ]
        )
        resp = self.app.delete(
            reverse(
                'ClusterHandler',
                kwargs={'obj_id': cluster.id}),
            headers=self.default_headers
        )
        self.assertEqual(202, resp.status_code)
        notification = self.db.query(models.Notification)\
            .filter(models.Notification.topic == "done")\
            .filter(models.Notification.message == "Environment '{0}' "
                    "is deleted".format(cluster.name)).first()
        self.assertIsNotNone(notification)
        self.assertIsNone(self.db.query(models.Cluster).get(cluster.id))
        tasks = self.db.query(models.Task).all()
        self.assertEqual(len(tasks), 1)
        self.check_cluster_deletion_task(tasks[0])
  607. @fake_tasks(tick_interval=10, tick_count=5)
  608. def test_deletion_clusters_one_by_one(self):
  609. cluster1 = self.env.create(
  610. nodes_kwargs=[
  611. {"roles": ["compute"], "status": "ready", "progress": 100},
  612. {"roles": ["compute"], "status": "ready", "progress": 100},
  613. {"roles": ["compute"], "status": "ready", "progress": 100},
  614. {"roles": ["controller"], "status": "ready", "progress": 100},
  615. {"roles": ["controller"], "status": "ready", "progress": 100},
  616. {"roles": ["cinder"], "status": "ready", "progress": 100},
  617. ]
  618. )
  619. cluster2 = self.env.create_cluster(api=True)
  620. cluster_names = [cluster.name for cluster in self.env.clusters]
  621. resp = self.app.delete(
  622. reverse(
  623. 'ClusterHandler',
  624. kwargs={'obj_id': cluster1.id}),
  625. headers=self.default_headers
  626. )
  627. self.assertEqual(202, resp.status_code)
  628. resp = self.app.delete(
  629. reverse(
  630. 'ClusterHandler',
  631. kwargs={'obj_id': cluster2.id}),
  632. headers=self.default_headers
  633. )
  634. self.assertEqual(202, resp.status_code)
  635. timer = time.time()
  636. timeout = 15
  637. clstr1 = self.db.query(models.Cluster).get(cluster1.id)
  638. clstr2 = self.db.query(models.Cluster).get(cluster2.id)
  639. while clstr1 or clstr2:
  640. time.sleep(1)
  641. try:
  642. self.db.refresh(clstr1 or clstr2)
  643. except Exception:
  644. break
  645. if time.time() - timer > timeout:
  646. raise Exception("Cluster deletion seems to be hanged")
  647. for name in cluster_names:
  648. notification = self.db.query(models.Notification)\
  649. .filter(models.Notification.topic == "done")\
  650. .filter(models.Notification.message == "Environment '{0}'"
  651. "is deleted".format(name))
  652. self.assertIsNotNone(notification)
  653. tasks = self.db.query(models.Task).all()
  654. self.assertEqual(len(tasks), 2)
  655. for task_ in tasks:
  656. self.check_cluster_deletion_task(task_)
    @fake_tasks(recover_nodes=False, fake_rpc=False)
    def test_deletion_during_deployment(self, mock_rpc):
        """Force-deleting a cluster mid-provision is accepted (plain delete
        is rejected with 400) and purges both the deploy and the deletion
        tasks once removal completes."""
        cluster = self.env.create(
            nodes_kwargs=[
                {"status": "ready", "pending_addition": True},
            ]
        )
        resp = self.app.put(
            reverse(
                'ClusterChangesHandler',
                kwargs={'cluster_id': cluster.id}),
            headers=self.default_headers
        )
        deploy_uuid = resp.json_body['uuid']
        # simulate a provision still in progress
        NailgunReceiver.provision_resp(
            task_uuid=deploy_uuid,
            status=consts.TASK_STATUSES.running,
            progress=50,
        )
        # plain delete is rejected while deployment is running
        resp = self.app.delete(
            reverse(
                'ClusterHandler',
                kwargs={'obj_id': cluster.id}),
            headers=self.default_headers,
            expect_errors=True
        )
        self.assertEqual(400, resp.status_code)
        # ...but a forced delete is accepted
        resp = self.app.delete(
            reverse(
                'ClusterHandler',
                kwargs={'obj_id': cluster.id}) + '?force=1',
            headers=self.default_headers
        )
        task_delete = self.db.query(models.Task).filter_by(
            uuid=resp.json_body['uuid'],
        ).first()
        NailgunReceiver.remove_cluster_resp(
            task_uuid=task_delete.uuid,
            status=consts.TASK_STATUSES.ready,
            progress=100,
        )
        # both tasks are purged together with the cluster
        task_deploy = self.db.query(models.Task).filter_by(
            uuid=deploy_uuid
        ).first()
        self.assertIsNone(task_deploy)
        task_delete = self.db.query(models.Task).filter_by(
            cluster_id=cluster.id,
            name="cluster_deletion"
        ).first()
        self.assertIsNone(task_delete)
  707. @fake_tasks(override_state={"progress": 100, "status": "ready"})
  708. def test_deletion_cluster_ha_3x3(self):
  709. cluster = self.env.create(
  710. cluster_kwargs={
  711. "api": True,
  712. },
  713. nodes_kwargs=[
  714. {"roles": ["controller"], "pending_addition": True},
  715. {"roles": ["compute"], "pending_addition": True}
  716. ] * 3
  717. )
  718. supertask = self.env.launch_deployment()
  719. self.assertEqual(supertask.status, consts.TASK_STATUSES.ready)
  720. resp = self.app.delete(
  721. reverse(
  722. 'ClusterHandler',
  723. kwargs={'obj_id': cluster.id}),
  724. headers=self.default_headers
  725. )
  726. self.assertEqual(202, resp.status_code)
  727. timer = time.time()
  728. timeout = 15
  729. clstr = self.db.query(models.Cluster).get(cluster.id)
  730. while clstr:
  731. time.sleep(1)
  732. try:
  733. self.db.refresh(clstr)
  734. except Exception:
  735. break
  736. if time.time() - timer > timeout:
  737. raise Exception("Cluster deletion seems to be hanged")
  738. notification = self.db.query(models.Notification)\
  739. .filter(models.Notification.topic == "done")\
  740. .filter(models.Notification.message == "Environment '{0}' "
  741. "is deleted".format(cluster.name)).first()
  742. self.assertIsNotNone(notification)
  743. tasks = self.db.query(models.Task).all()
  744. self.assertEqual(len(tasks), 1)
  745. self.check_cluster_deletion_task(tasks[0])
  746. @fake_tasks()
  747. def test_no_node_no_cry(self):
  748. cluster = self.env.create_cluster(
  749. api=True,
  750. release_kwargs={
  751. 'operating_system': consts.RELEASE_OS.ubuntu,
  752. 'version': 'liberty-8.0'
  753. },
  754. )
  755. cluster_id = cluster['id']
  756. manager_ = manager.ApplyChangesTaskManager(cluster_id)
  757. task = models.Task(name='provision', cluster_id=cluster_id,
  758. status=consts.TASK_STATUSES.ready)
  759. self.db.add(task)
  760. self.db.commit()
  761. rpc.receiver.NailgunReceiver.deploy_resp(nodes=[
  762. {'uid': 666, 'id': 666, 'status': 'discover'}
  763. ], task_uuid=task.uuid)
  764. self.assertRaises(errors.WrongNodeStatus, manager_.execute)
  765. @fake_tasks()
  766. @mock.patch.object(task.DeletionTask, 'execute')
  767. def test_deletion_task_called(self, mdeletion_execute):
  768. cluster = self.env.create_cluster()
  769. node_db = self.env.create_node(
  770. api=False,
  771. cluster_id=cluster.id,
  772. pending_addition=False,
  773. pending_deletion=True,
  774. status=NODE_STATUSES.ready,
  775. roles=['controller'])
  776. manager_ = manager.ApplyChangesTaskManager(cluster.id)
  777. manager_.execute()
  778. self.assertEqual(mdeletion_execute.call_count, 1)
  779. nodes = mdeletion_execute.call_args[0][1]
  780. # unfortunately assertItemsEqual does not recurse into dicts
  781. self.assertItemsEqual(
  782. nodes['nodes_to_delete'],
  783. task.DeletionTask.prepare_nodes_for_task(
  784. [node_db])['nodes_to_delete']
  785. )
  786. self.assertItemsEqual(
  787. nodes['nodes_to_restore'],
  788. task.DeletionTask.prepare_nodes_for_task(
  789. [node_db])['nodes_to_restore']
  790. )
  791. @fake_tasks()
  792. @mock.patch.object(task.DeletionTask, 'execute')
  793. def test_deletion_task_w_check_ceph(self, mdeletion_execute):
  794. cluster = self.env.create_cluster()
  795. self.env.create_node(
  796. api=False,
  797. cluster_id=cluster.id,
  798. pending_addition=False,
  799. pending_deletion=True,
  800. status=NODE_STATUSES.ready,
  801. roles=['controller'])
  802. manager_ = manager.ApplyChangesTaskManager(cluster.id)
  803. manager_.execute()
  804. self.assertEqual(mdeletion_execute.call_count, 1)
  805. kwargs = mdeletion_execute.call_args[1]
  806. self.assertEqual(kwargs['check_ceph'], True)
  807. @fake_tasks()
  808. def test_no_changes_no_cry(self):
  809. self.env.create(
  810. nodes_kwargs=[
  811. {"status": "ready"}
  812. ],
  813. release_kwargs={
  814. 'operating_system': consts.RELEASE_OS.ubuntu,
  815. 'version': 'liberty-8.0'
  816. },
  817. )
  818. cluster_db = self.env.clusters[0]
  819. objects.Cluster.clear_pending_changes(cluster_db)
  820. manager_ = manager.ApplyChangesTaskManager(cluster_db.id)
  821. self.assertRaises(errors.WrongNodeStatus, manager_.execute)
  822. @mock.patch('nailgun.task.manager.rpc.cast')
  823. def test_force_deploy_changes(self, mcast):
  824. self.env.create(
  825. nodes_kwargs=[
  826. {'status': NODE_STATUSES.ready},
  827. {'status': NODE_STATUSES.ready},
  828. ],
  829. cluster_kwargs={
  830. 'status': consts.CLUSTER_STATUSES.operational
  831. },
  832. release_kwargs={
  833. 'operating_system': consts.RELEASE_OS.ubuntu,
  834. 'version': 'liberty-8.0'
  835. },
  836. )
  837. cluster_db = self.env.clusters[0]
  838. objects.Cluster.clear_pending_changes(cluster_db)
  839. manager_ = manager.ApplyChangesTaskManager(cluster_db.id)
  840. supertask = manager_.execute(force=True)
  841. self.assertEqual(supertask.name, TASK_NAMES.deploy)
  842. self.assertIn(supertask.status, TASK_STATUSES.pending)
  843. args, _ = mcast.call_args_list[0]
  844. deployment_info = args[1][0]['args']['deployment_info']
  845. self.assertItemsEqual(
  846. [node.uid for node in self.env.nodes],
  847. [node['uid'] for node in deployment_info]
  848. )
  849. @fake_tasks()
  850. @mock.patch('nailgun.task.manager.tasks.DeletionTask.execute')
  851. def test_apply_changes_exception_caught(self, mdeletion_execute):
  852. cluster_db = self.env.create(
  853. nodes_kwargs=[
  854. {"pending_deletion": True, "status": NODE_STATUSES.ready},
  855. ]
  856. )
  857. objects.Cluster.clear_pending_changes(cluster_db)
  858. manager_ = manager.ApplyChangesTaskManager(cluster_db.id)
  859. mdeletion_execute.side_effect = Exception('exception')
  860. task = manager_.execute()
  861. self.assertEqual(task.status, TASK_STATUSES.error)
  862. @fake_tasks(recover_offline_nodes=False)
  863. def test_deletion_offline_node(self):
  864. cluster = self.env.create(
  865. nodes_kwargs=[
  866. {"online": False, "pending_deletion": True},
  867. {"status": "ready"}
  868. ]
  869. )
  870. to_delete = TaskHelper.nodes_to_delete(cluster)
  871. to_delete_ids = [node.id for node in to_delete]
  872. self.assertEqual(len(to_delete_ids), 1)
  873. supertask = self.env.launch_deployment()
  874. self.assertEqual(supertask.status, consts.TASK_STATUSES.ready)
  875. self.assertEqual(self.env.db.query(models.Node).count(), 1)
  876. remaining_node = self.env.db.query(models.Node).first()
  877. self.assertNotIn(remaining_node.id, to_delete_ids)
  878. @fake_tasks(recover_offline_nodes=False, tick_interval=1)
  879. def test_deletion_three_offline_nodes_and_one_online(self):
  880. cluster = self.env.create(
  881. nodes_kwargs=[
  882. {"online": False, "pending_deletion": True},
  883. {"online": False, "pending_deletion": True},
  884. {"online": False, "pending_deletion": True},
  885. {"online": True, "pending_deletion": True}
  886. ]
  887. )
  888. supertask = self.env.launch_deployment()
  889. self.assertEqual(supertask.status, consts.TASK_STATUSES.ready)
  890. # Offline nodes were deleted, online node came back
  891. self.assertEqual(
  892. self.db.query(models.Node).filter(
  893. models.Node.cluster_id == cluster['id']).count(),
  894. 0
  895. )
  896. self.assertEqual(
  897. self.db.query(models.Node).filter(
  898. models.Node.cluster_id.is_(None)).count(),
  899. 1
  900. )
  901. self.assertEqual(
  902. self.db.query(models.Node).filter(
  903. models.Node.status == NODE_STATUSES.discover).count(),
  904. 1
  905. )
  906. self.assertEqual(
  907. self.db.query(models.Node).filter(
  908. models.Node.online == sql.true()).count(),
  909. 1
  910. )
  911. @fake_tasks(tick_interval=1)
  912. def test_delete_offile_nodes_and_recover_them(self):
  913. self.env.create(
  914. nodes_kwargs=[
  915. {"online": False, "pending_deletion": True},
  916. {"online": False, "pending_deletion": True},
  917. {"online": True, "pending_deletion": True}
  918. ]
  919. )
  920. supertask = self.env.launch_deployment()
  921. self.assertEqual(supertask.status, consts.TASK_STATUSES.ready)
  922. q_nodes = self.env.db.query(models.Node)
  923. online_nodes_count = q_nodes.filter_by(online=True).count()
  924. self.assertEqual(online_nodes_count, 1)
  925. offilne_nodes_count = q_nodes.filter_by(online=False).count()
  926. self.assertEqual(offilne_nodes_count, 2)
  927. for node in q_nodes:
  928. self.assertEqual(node.status, 'discover')
  929. self.assertIsNone(node.cluster_id)
  930. @fake_tasks(recover_offline_nodes=False)
  931. def test_deletion_offline_node_when_cluster_has_only_one_node(self):
  932. cluster = self.env.create_cluster()
  933. objects.Cluster.clear_pending_changes(cluster)
  934. self.env.create_node(
  935. cluster_id=cluster['id'],
  936. online=False,
  937. pending_deletion=True,
  938. pending_addition=False,
  939. status='ready',
  940. roles=['controller'])
  941. supertask = self.env.launch_deployment()
  942. self.assertEqual(supertask.status, consts.TASK_STATUSES.ready)
  943. self.assertEqual(self.env.db.query(models.Node).count(), 0)
  944. @fake_tasks(recover_nodes=False)
  945. def test_node_deletion_task_manager(self):
  946. cluster_db = self.env.create(
  947. nodes_kwargs=[
  948. {"pending_deletion": True, "status": "ready"}
  949. ]
  950. )
  951. objects.Cluster.clear_pending_changes(cluster_db)
  952. manager_ = manager.NodeDeletionTaskManager(cluster_id=cluster_db.id)
  953. task = manager_.execute(cluster_db.nodes)
  954. self.assertEqual(task.status, consts.TASK_STATUSES.ready)
  955. # Synchronous run ensures that nodes are deleted.
  956. self.assertEqual(self.db.query(models.Node).count(), 0)
  957. @fake_tasks(recover_nodes=False)
  958. def test_node_deletion_task_mgr_works_for_nodes_not_in_cluster(self):
  959. cluster_db = self.env.create(
  960. nodes_kwargs=[
  961. {"pending_deletion": True, "status": "ready"}
  962. ]
  963. )
  964. objects.Cluster.clear_pending_changes(cluster_db)
  965. node = cluster_db.nodes[0]
  966. objects.Node.update(node, {'cluster_id': None})
  967. self.db.commit()
  968. manager_ = manager.NodeDeletionTaskManager()
  969. task = manager_.execute([node])
  970. self.assertEqual(task.status, TASK_STATUSES.ready)
  971. # Nodes are removed immediately
  972. self.assertEqual(self.db.query(models.Node).count(), 0)
  973. @fake_tasks(recover_nodes=False)
  974. def test_node_deletion_task_manager_invalid_cluster(self):
  975. cluster_db = self.env.create(
  976. nodes_kwargs=[
  977. {"pending_deletion": True, "status": "ready"}
  978. ]
  979. )
  980. objects.Cluster.clear_pending_changes(cluster_db)
  981. manager_ = manager.NodeDeletionTaskManager()
  982. self.assertRaises(
  983. errors.InvalidData, manager_.execute, cluster_db.nodes)
  984. @mock.patch('nailgun.task.manager.rpc.cast')
  985. def test_node_deletion_redeploy_started_for_proper_controllers(self,
  986. mcast):
  987. cluster_db = self.env.create(nodes_kwargs=[
  988. {'roles': ['controller'],
  989. 'status': consts.NODE_STATUSES.provisioned},
  990. {'roles': ['controller'],
  991. 'status': consts.NODE_STATUSES.discover},
  992. ])
  993. node_to_delete = self.env.create_node(
  994. cluster_id=cluster_db.id,
  995. roles=['controller'],
  996. status=consts.NODE_STATUSES.ready
  997. )
  998. node_to_deploy = self.env.create_node(
  999. cluster_id=cluster_db.id,
  1000. roles=['controller'],
  1001. status=consts.NODE_STATUSES.ready
  1002. )
  1003. manager_ = manager.NodeDeletionTaskManager(cluster_id=cluster_db.id)
  1004. manager_.execute([node_to_delete])
  1005. args, kwargs = mcast.call_args_list[0]
  1006. depl_info = args[1][0]['args']['deployment_info']
  1007. self.assertEqual(node_to_deploy.uid, depl_info[0]['uid'])
  1008. def test_node_deletion_task_failed_with_controller_in_error(self):
  1009. cluster_db = self.env.create(nodes_kwargs=[
  1010. {'roles': ['controller'],
  1011. 'status': consts.NODE_STATUSES.error},
  1012. ])
  1013. node_to_delete = self.env.create_node(
  1014. cluster_id=cluster_db.id,
  1015. roles=['controller'],
  1016. status=consts.NODE_STATUSES.ready
  1017. )
  1018. manager_ = manager.NodeDeletionTaskManager(cluster_id=cluster_db.id)
  1019. self.assertRaises(errors.ControllerInErrorState,
  1020. manager_.execute, [node_to_delete])
  1021. @fake_tasks()
  1022. def test_deployment_on_controller_removal_via_apply_changes(self):
  1023. cluster = self.env.create(
  1024. nodes_kwargs=[
  1025. {'roles': ['controller'],
  1026. 'pending_deletion': True},
  1027. {'roles': ['controller'],
  1028. 'status': consts.NODE_STATUSES.ready},
  1029. {'roles': ['controller'],
  1030. 'status': consts.NODE_STATUSES.ready},
  1031. {'roles': ['controller'],
  1032. 'status': consts.NODE_STATUSES.ready},
  1033. {'roles': ['compute'],
  1034. 'status': consts.NODE_STATUSES.ready},
  1035. {'roles': ['compute'],
  1036. 'status': consts.NODE_STATUSES.ready},
  1037. ]
  1038. )
  1039. expected_nodes_to_deploy = filter(lambda n: 'controller' in n.roles
  1040. and not n.pending_deletion,
  1041. cluster.nodes)
  1042. with mock.patch('nailgun.task.task.DeploymentTask.message') as \
  1043. mocked_task:
  1044. with mock.patch('nailgun.rpc.cast'):
  1045. self.env.launch_deployment()
  1046. _, actual_nodes_to_deploy = mocked_task.call_args[0]
  1047. self.assertItemsEqual(expected_nodes_to_deploy,
  1048. actual_nodes_to_deploy)
  1049. @fake_tasks()
  1050. def test_deployment_on_controller_removal_via_node_deletion(self):
  1051. cluster = self.env.create(
  1052. nodes_kwargs=[
  1053. {'roles': ['controller'],
  1054. 'status': consts.NODE_STATUSES.ready},
  1055. {'roles': ['controller'],
  1056. 'status': consts.NODE_STATUSES.ready},
  1057. {'roles': ['controller'],
  1058. 'status': consts.NODE_STATUSES.ready},
  1059. {'roles': ['compute'],
  1060. 'status': consts.NODE_STATUSES.ready},
  1061. {'roles': ['compute'],
  1062. 'status': consts.NODE_STATUSES.ready},
  1063. ]
  1064. )
  1065. controllers = filter(lambda n: 'controller' in n.roles
  1066. and not n.pending_deletion,
  1067. cluster.nodes)
  1068. controller_to_delete = controllers[0]
  1069. expected_nodes_to_deploy = controllers[1:]
  1070. with mock.patch('nailgun.task.task.DeploymentTask.message') as \
  1071. mocked_task:
  1072. with mock.patch('nailgun.rpc.cast'):
  1073. resp = self.app.delete(
  1074. reverse(
  1075. 'NodeHandler',
  1076. kwargs={'obj_id': controller_to_delete.id}),
  1077. headers=self.default_headers
  1078. )
  1079. _, actual_nodes_to_deploy = mocked_task.call_args[0]
  1080. self.assertItemsEqual(expected_nodes_to_deploy,
  1081. actual_nodes_to_deploy)
  1082. self.assertEqual(202, resp.status_code)
  1083. @mock.patch('nailgun.rpc.cast')
  1084. def test_delete_nodes_do_not_run_if_there_is_deletion_running(self, _):
  1085. cluster = self.env.create(
  1086. nodes_kwargs=[{'roles': ['controller']}] * 3)
  1087. self.task_manager = manager.NodeDeletionTaskManager(
  1088. cluster_id=cluster.id)
  1089. self.task_manager.execute(self.env.nodes)
  1090. self.assertRaisesRegexp(
  1091. errors.TaskAlreadyRunning,
  1092. 'Cannot perform the actions because there are running tasks',
  1093. self.task_manager.execute,
  1094. self.env.nodes)
  1095. @mock.patch('nailgun.rpc.cast')
  1096. def test_delete_nodes_reelection_if_primary_for_deletion(self, _):
  1097. cluster = self.env.create(
  1098. nodes_kwargs=[{'roles': ['controller'],
  1099. 'status': consts.NODE_STATUSES.ready}] * 3)
  1100. task_manager = manager.NodeDeletionTaskManager(cluster_id=cluster.id)
  1101. objects.Cluster.set_primary_tags(cluster, self.env.nodes)
  1102. primary_node = filter(
  1103. lambda n: 'controller' in n.primary_tags,
  1104. self.env.nodes)[0]
  1105. task_manager.execute([primary_node])
  1106. self.env.refresh_nodes()
  1107. new_primary = filter(
  1108. lambda n: ('primary-controller' in objects.Node.all_tags(n) and
  1109. n.pending_deletion is False),
  1110. self.env.nodes)[0]
  1111. self.assertNotEqual(primary_node.id, new_primary.id)
  1112. @mock.patch('objects.Cluster.get_deployment_tasks')
  1113. @mock.patch('nailgun.rpc.cast')
  1114. def test_controller_deletion(self, rpc_mock, tasks_mock):
  1115. """When we delete controller node deployment is successfull"""
  1116. ready = consts.NODE_STATUSES.ready
  1117. cluster = self.env.create(
  1118. nodes_kwargs=[
  1119. {'pending_roles': ['controller'], 'status': ready},
  1120. {'pending_roles': ['controller'], 'status': ready},
  1121. {'pending_roles': ['controller'], 'status': ready},
  1122. {'pending_roles': ['compute'], 'status': ready},
  1123. ],
  1124. release_kwargs={
  1125. 'version': 'mitaka-9.0',
  1126. 'operating_system': consts.RELEASE_OS.ubuntu
  1127. }
  1128. )
  1129. # task with yaql
  1130. tasks_mock.return_value = [
  1131. {
  1132. 'id': 'test', 'parameters': {}, 'type': 'puppet',
  1133. 'roles': ['controller'], 'version': '2.1.0',
  1134. 'condition': {'yaql_exp': 'changed($)'},
  1135. },
  1136. ]
  1137. task_manager = manager.NodeDeletionTaskManager(cluster_id=cluster.id)
  1138. task = task_manager.execute([cluster.nodes[0]], mclient_remove=False)
  1139. # two call one for deployment, one for node deleteion
  1140. self.assertEqual(rpc_mock.call_count, 2)
  1141. deploy_call, delete_node_call = rpc_mock.call_args_list
  1142. self.assertEqual(deploy_call[0][1][0]['method'], 'task_deploy')
  1143. self.assertEqual(delete_node_call[0][1]['method'], 'remove_nodes')
  1144. self.assertEqual(task.name, consts.TASK_NAMES.node_deletion)
  1145. @mock.patch('nailgun.task.task.rpc.cast')
  1146. def test_node_group_deletion_failed_while_previous_in_progress(
  1147. self, mocked_rpc
  1148. ):
  1149. self.env.create(
  1150. cluster_kwargs={
  1151. 'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
  1152. 'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.gre},
  1153. nodes_kwargs=[
  1154. {'api': True,
  1155. 'pending_addition': True}
  1156. ]
  1157. )
  1158. ng1 = self.env.create_node_group(name='ng_1').json_body
  1159. ng2 = self.env.create_node_group(name='ng_2').json_body
  1160. self.assertEqual(mocked_rpc.call_count, 0)
  1161. self.env.delete_node_group(ng1['id'])
  1162. self.assertEqual(mocked_rpc.call_count, 1)
  1163. # delete other node group
  1164. # request should be rejected as previous update_dnsmasq task is still
  1165. # in progress
  1166. resp = self.env.delete_node_group(ng2['id'], status_code=409)
  1167. self.assertEqual(resp.status_code, 409)
  1168. self.assertEqual(resp.json_body['message'],
  1169. errors.UpdateDnsmasqTaskIsRunning.message)
  1170. # no more calls were made
  1171. self.assertEqual(mocked_rpc.call_count, 1)
  1172. @mock.patch('nailgun.task.task.rpc.cast')
  1173. def test_deployment_starts_if_nodes_not_changed(self, rpc_mock):
  1174. self.env.create(
  1175. release_kwargs={
  1176. 'operating_system': consts.RELEASE_OS.ubuntu,
  1177. 'version': 'mitaka-9.0'
  1178. },
  1179. nodes_kwargs=[
  1180. {'status': NODE_STATUSES.ready, 'roles': ['controller']},
  1181. {'status': NODE_STATUSES.ready, 'roles': ['compute']},
  1182. ]
  1183. )
  1184. cluster = self.env.clusters[-1]
  1185. supertask = self.env.launch_deployment(cluster.id)
  1186. self.assertNotEqual(consts.TASK_STATUSES.error, supertask.status)
  1187. tasks_graph = rpc_mock.call_args[0][1][0]['args']['tasks_graph']
  1188. # check that nodes presents in tasks_graph
  1189. self.assertItemsEqual(
  1190. [n.uid for n in cluster.nodes] + [consts.MASTER_NODE_UID, None],
  1191. tasks_graph
  1192. )
    @mock.patch('nailgun.task.task.rpc.cast')
    @mock.patch('nailgun.objects.Cluster.get_deployment_tasks')
    def test_redeployment_ignore_conditions(self, tasks_mock, rpc_mock):
        """yaql-conditioned task runs, is skipped, then runs again on force.

        Three deployments of the same cluster:
        1. first deploy - the task runs (type 'puppet');
        2. repeated deploy with no changes - the task is skipped;
        3. forced redeployment - conditions are ignored, the task runs.
        """
        # Single graph task guarded by a yaql 'changed' condition.
        tasks_mock.return_value = [
            {
                "id": "test", "roles": ['master'], "version": "2.0.1",
                "type": "puppet", "parameters": {},
                "condition": {"yaql_exp": "changed($.network_metadata.nodes)"}
            }
        ]
        self.env.create(
            release_kwargs={
                'operating_system': consts.RELEASE_OS.ubuntu,
                'version': 'mitaka-9.0'
            }
        )
        cluster = self.env.clusters[-1]
        # deploy cluster at first time
        supertask = self.env.launch_deployment(cluster.id)
        self.assertNotEqual(consts.TASK_STATUSES.error, supertask.status)
        self.env.set_task_status_recursively(
            supertask, consts.TASK_STATUSES.ready
        )
        self.set_history_ready()
        self.db.flush()
        tasks_graph = rpc_mock.call_args[0][1][0]['args']['tasks_graph']
        self.assertEqual('puppet', tasks_graph['master'][0]['type'])
        # launch cluster deployment again, because there is no changes
        # the task should be skipped
        supertask = self.env.launch_deployment(cluster.id)
        self.assertNotEqual(consts.TASK_STATUSES.error, supertask.status)
        self.env.set_task_status_recursively(
            supertask, consts.TASK_STATUSES.ready
        )
        self.db.flush()
        tasks_graph = rpc_mock.call_args[0][1][0]['args']['tasks_graph']
        self.assertEqual('skipped', tasks_graph['master'][0]['type'])
        supertask.status = consts.TASK_STATUSES.ready
        self.set_history_ready()
        self.db.flush()
        # force cluster re-deployment, the task should not be skipped
        supertask = self.env.launch_redeployment(cluster.id)
        self.assertNotEqual(consts.TASK_STATUSES.error, supertask.status)
        tasks_graph = rpc_mock.call_args[0][1][0]['args']['tasks_graph']
        self.assertEqual('puppet', tasks_graph['master'][0]['type'])
  1238. @mock.patch('nailgun.rpc.cast')
  1239. def test_deploy_part_of_pending_addition_nodes(self, rpc_mock):
  1240. self.env.create(
  1241. release_kwargs={
  1242. 'operating_system': consts.RELEASE_OS.ubuntu,
  1243. 'version': 'mitaka-9.0'
  1244. },
  1245. nodes_kwargs=[
  1246. {'status': NODE_STATUSES.provisioned, 'roles': ['controller']},
  1247. {'status': NODE_STATUSES.provisioned, 'roles': ['compute']},
  1248. ]
  1249. )
  1250. cluster = self.env.clusters[-1]
  1251. nodes_uids = [n.uid for n in cluster.nodes]
  1252. node3 = self.env.create_node(
  1253. api=False, cluster_id=cluster.id,
  1254. roles=["compute"],
  1255. pending_addition=True
  1256. )
  1257. t = self.env.launch_deployment_selected(nodes_uids, cluster.id)
  1258. self.assertNotEqual(consts.TASK_STATUSES.error, t.status)
  1259. self.db.refresh(node3)
  1260. self.assertEqual(consts.NODE_STATUSES.discover, node3.status)
  1261. self.assertTrue(node3.pending_addition)
  1262. tasks_graph = rpc_mock.call_args[0][1][0]['args']['tasks_graph']
  1263. self.assertItemsEqual(
  1264. [consts.MASTER_NODE_UID, None] + nodes_uids, tasks_graph
  1265. )
  1266. @mock.patch('nailgun.task.task.rpc.cast')
  1267. @mock.patch('nailgun.objects.Cluster.get_deployment_tasks')
  1268. def test_only_certain_tasks_run_in_deploy(self, tasks_mock, rpc_mock):
  1269. task = {
  1270. 'id': 'test', 'parameters': {}, 'type': 'puppet',
  1271. 'roles': ['master'], 'version': '2.1.0',
  1272. }
  1273. tasks = []
  1274. for i in range(5):
  1275. task_copy = task.copy()
  1276. task_copy['id'] = 'test' + str(i)
  1277. tasks.append(task_copy)
  1278. tasks_mock.return_value = tasks
  1279. cluster = self.env.create(
  1280. nodes_kwargs=[
  1281. {'status': NODE_STATUSES.provisioned, 'roles': ['controller']},
  1282. {'status': NODE_STATUSES.provisioned, 'roles': ['compute']},
  1283. {'status': NODE_STATUSES.provisioned, 'roles': ['cinder']},
  1284. ],
  1285. release_kwargs={
  1286. 'operating_system': consts.RELEASE_OS.ubuntu,
  1287. 'version': 'mitaka-9.0',
  1288. },
  1289. )
  1290. task_ids = ['test0', 'test3']
  1291. task = self.env.launch_deployment_selected_tasks(
  1292. [n.uid for n in cluster.nodes],
  1293. cluster.id, task_ids)
  1294. self.assertNotEqual(consts.TASK_STATUSES.error, task.status)
  1295. tasks_graph = rpc_mock.call_args[0][1][0]['args']['tasks_graph']
  1296. for task in tasks_graph['master']:
  1297. if task['id'] in task_ids:
  1298. self.assertEqual(task['type'], 'puppet')
  1299. else:
  1300. self.assertEqual(task['type'], 'skipped')
  1301. @mock.patch('nailgun.rpc.cast')
  1302. def test_deployment_task_uses_all_nodes_by_default(self, rpc_mock):
  1303. cluster = self.env.create(
  1304. release_kwargs={
  1305. 'operating_system': consts.RELEASE_OS.ubuntu,
  1306. 'version': 'mitaka-9.0'
  1307. },
  1308. nodes_kwargs=[{'roles': ['controller'],
  1309. 'status': consts.NODE_STATUSES.ready}] * 3
  1310. )
  1311. resp = self.app.put(
  1312. reverse(
  1313. 'DeploySelectedNodes',
  1314. kwargs={'cluster_id': cluster.id}
  1315. ),
  1316. '{}',
  1317. headers=self.default_headers
  1318. )
  1319. self.assertIn(resp.status_code, [200, 202])
  1320. tasks_graph = rpc_mock.call_args[0][1][0]['args']['tasks_graph']
  1321. # check that all nodes present in message
  1322. self.assertItemsEqual(
  1323. [n.uid for n in cluster.nodes] + [consts.MASTER_NODE_UID, None],
  1324. tasks_graph
  1325. )
    @mock.patch('nailgun.task.task.rpc.cast')
    @mock.patch('nailgun.objects.Cluster.get_deployment_graph')
    def check_correct_state_calculation(self, node_status, is_skip_expected,
                                        get_graph_mock, rpc_mock):
        """Shared checker for task skipping on redeploy.

        Deploys a one-node cluster with two identical yaql-conditioned
        tasks, then redeploys after setting the node status to
        ``node_status``. ``test2``'s history is marked skipped so it must
        always run in the second deploy; ``is_skip_expected`` states
        whether ``test1`` should come out skipped in the second graph.
        """
        cluster = self.env.create(
            nodes_kwargs=[{'roles': ['controller'],
                           'status': consts.NODE_STATUSES.ready}],
            release_kwargs={
                'operating_system': consts.RELEASE_OS.ubuntu,
                'version': 'mitaka-9.0'
            }
        )
        node = cluster.nodes[0]
        # Template for both graph tasks; condition depends on node state.
        task = {
            'parameters': {}, 'type': 'puppet',
            'roles': ['/.*/'], 'version': '2.1.0',
            'condition': {'yaql_exp': 'changed($.uid)'},
        }
        get_graph_mock.return_value = {
            'type': 'custom',
            'tasks': [dict(task, id='test1'), dict(task, id='test2')]
        }
        # reset progress
        node.progress = 0
        # deploy cluster at first time and create history
        supertask = self.env.launch_deployment_selected([node.uid], cluster.id)
        self.assertNotEqual(consts.TASK_STATUSES.error, supertask.status)
        self.set_history_ready()
        self.set_tasks_ready()
        # mark test2 as skipped to ensure that it will run in next deploy
        objects.DeploymentHistoryCollection.filter_by(
            None, deployment_graph_task_name='test2'
        ).update({'status': consts.HISTORY_TASK_STATUSES.skipped})
        node.status = node_status
        node.progress = 0
        task = self.env.launch_deployment_selected([node.uid], cluster.id)
        self.assertNotEqual(consts.TASK_STATUSES.error, task.status)
        tasks_graph = rpc_mock.call_args[0][1][0]['args']['tasks_graph']
        # Inspect the second deploy's graph for the node.
        for task in tasks_graph[node.uid]:
            if task['id'] == 'test1':
                if is_skip_expected:
                    self.assertEqual(
                        task['type'], consts.ORCHESTRATOR_TASK_TYPES.skipped)
                else:
                    self.assertNotEqual(
                        task['type'], consts.ORCHESTRATOR_TASK_TYPES.skipped)
            elif task['id'] == 'test2':
                self.assertNotEqual(
                    task['type'], consts.ORCHESTRATOR_TASK_TYPES.skipped)
            else:
                self.fail('Unexpected task in graph {0}'.format(task['id']))
  1377. def test_correct_state_calculation(self):
  1378. self.check_correct_state_calculation(
  1379. consts.NODE_STATUSES.ready, True)
  1380. def test_state_calculation_after_provision(self):
  1381. self.check_correct_state_calculation(
  1382. consts.NODE_STATUSES.provisioned, False)
  1383. def test_state_calculation_after_stop(self):
  1384. self.check_correct_state_calculation(
  1385. consts.NODE_STATUSES.stopped, False)
  1386. def test_state_calculation_after_rediscover(self):
  1387. self.check_correct_state_calculation(
  1388. consts.NODE_STATUSES.discover, False)