Fuel UI
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

base.py 72KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075
  1. # -*- coding: utf-8 -*-
  2. # Copyright 2013 Mirantis, Inc.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License"); you may
  5. # not use this file except in compliance with the License. You may obtain
  6. # a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  12. # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  13. # License for the specific language governing permissions and limitations
  14. # under the License.
  15. try:
  16. from unittest.case import TestCase
  17. except ImportError:
  18. # Runing unit-tests in production environment
  19. from unittest2.case import TestCase
  20. import copy
  21. import mock
  22. import os
  23. import re
  24. import six
  25. from six.moves import range
  26. import uuid
  27. from datetime import datetime
  28. import functools
  29. from itertools import izip
  30. from netaddr import IPNetwork
  31. from random import randint
  32. from oslo_serialization import jsonutils
  33. import sqlalchemy as sa
  34. import web
  35. from webtest import app
  36. import nailgun
  37. from nailgun import consts
  38. from nailgun import errors
  39. from nailgun.settings import settings
  40. from nailgun.db import db
  41. from nailgun.db import flush
  42. from nailgun.db import syncdb
  43. from nailgun.logger import logger
  44. from nailgun.db.sqlalchemy.fixman import load_fake_deployment_tasks
  45. from nailgun.db.sqlalchemy.fixman import load_fixture
  46. from nailgun.db.sqlalchemy.fixman import upload_fixture
  47. from nailgun.db.sqlalchemy.models import ClusterPluginLink
  48. from nailgun.db.sqlalchemy.models import IPAddr
  49. from nailgun.db.sqlalchemy.models import NodeNICInterface
  50. from nailgun.db.sqlalchemy.models import Notification
  51. from nailgun.db.sqlalchemy.models import PluginLink
  52. from nailgun.db.sqlalchemy.models import Release as ReleaseModel
  53. from nailgun.db.sqlalchemy.models import Task
  54. # here come objects
  55. from nailgun.objects import Cluster
  56. from nailgun.objects import ClusterPlugin
  57. from nailgun.objects import MasterNodeSettings
  58. from nailgun.objects import NetworkGroup
  59. from nailgun.objects import Node
  60. from nailgun.objects import NodeGroup
  61. from nailgun.objects import OpenstackConfig
  62. from nailgun.objects import Plugin
  63. from nailgun.objects import Release
  64. from nailgun.app import build_app
  65. from nailgun.consts import NETWORK_INTERFACE_TYPES
  66. from nailgun.extensions.network_manager.manager import NetworkManager
  67. from nailgun.extensions.network_manager.template import NetworkTemplate
  68. from nailgun.middleware.connection_monitor import ConnectionMonitorMiddleware
  69. from nailgun.middleware.keystone import NailgunFakeKeystoneAuthMiddleware
  70. from nailgun.utils import dict_merge
  71. from nailgun.utils import is_feature_supported
  72. from nailgun.utils import reverse
class TimeoutError(Exception):
    """Raised by test helpers when a waited-for condition never occurs.

    NOTE(review): this name shadows the builtin ``TimeoutError`` on
    Python 3 — presumably acceptable for this Python 2 code base, but
    confirm before porting.
    """
    pass
def test_db_driver(handler):
    """web.py processor wrapping each test request in a DB transaction.

    Rolls the session back when the handler raises (for HTTPError only
    when the status is an actual 4xx/5xx error, since web.py also uses
    HTTPError for redirects) and always commits in ``finally``.
    """
    try:
        return handler()
    except web.HTTPError:
        # 3xx redirects are raised as HTTPError too; only roll back on
        # genuine client/server errors.
        if str(web.ctx.status).startswith(("4", "5")):
            db.rollback()
        raise
    except Exception:
        db.rollback()
        raise
    finally:
        db.commit()
        # we do not remove session in tests
  88. class EnvironmentManager(object):
  89. _regex_type = type(re.compile("regex"))
    def __init__(self, app, session=None):
        """Wrap a webtest application with fixture-building helpers.

        :param app: webtest TestApp used by the api=True code paths
        :param session: optional SQLAlchemy session; defaults to ``db()``
        """
        self.db = session or db()
        self.app = app
        self.tester = TestCase
        # Patch the TestCase class so it can be instantiated without a
        # real test method; the instance is only used for its assert*().
        self.tester.runTest = lambda a: None
        self.tester = self.tester()
        self.here = os.path.abspath(os.path.dirname(__file__))
        self.fixture_dir = os.path.join(self.here, "..", "fixtures")
        self.default_headers = {
            "Content-Type": "application/json"
        }
        # Registries of everything created through this manager, so tests
        # can inspect or clean up afterwards.
        self.releases = []
        self.clusters = []
        self.nodes = []
        self.plugins = []
        self.openstack_configs = []
        self.network_manager = NetworkManager
  107. def create(self, **kwargs):
  108. release_data = kwargs.pop('release_kwargs', {"api": False})
  109. cluster_data = kwargs.pop('cluster_kwargs', {})
  110. if 'release_id' not in cluster_data:
  111. cluster_data['release_id'] = self.create_release(**release_data).id
  112. cluster = self.create_cluster(
  113. **cluster_data
  114. )
  115. for node_kwargs in kwargs.pop('nodes_kwargs', []):
  116. if "cluster_id" not in node_kwargs:
  117. node_kwargs["cluster_id"] = cluster.id
  118. node_kwargs.setdefault("api", False)
  119. if "pending_roles" not in node_kwargs:
  120. node_kwargs.setdefault("roles", ["controller"])
  121. self.create_node(
  122. **node_kwargs
  123. )
  124. return cluster
  125. def create_release(self, api=False, expect_errors=False, **kwargs):
  126. os = kwargs.get(
  127. 'operating_system', consts.RELEASE_OS.centos)
  128. version = kwargs.get(
  129. 'version', '{0}-6.1'.format(randint(0, 100000000)))
  130. # NOTE(ikalnitsky): In order to do not read each time openstack.yaml
  131. # we're reading it once and then look for needed release.
  132. releases = self.read_fixtures(('openstack',))
  133. release_data = next((
  134. r for r in releases if r['fields']['operating_system'] == os),
  135. releases[0])
  136. release_data = release_data['fields']
  137. release_data.update({
  138. 'name': u"release_name_" + version,
  139. 'version': version,
  140. 'state': consts.RELEASE_STATES.available,
  141. 'description': u"release_desc" + version,
  142. })
  143. if kwargs.get('deployment_tasks') is None:
  144. kwargs['deployment_tasks'] = \
  145. load_fake_deployment_tasks(apply_to_db=False)
  146. _patch_tags_legacy(release_data, version)
  147. release_data.update(kwargs)
  148. if api:
  149. resp = self.app.post(
  150. reverse('ReleaseCollectionHandler'),
  151. params=jsonutils.dumps(release_data),
  152. headers=self.default_headers,
  153. expect_errors=expect_errors
  154. )
  155. release = resp.json_body
  156. if not expect_errors:
  157. self.releases.append(
  158. self.db.query(ReleaseModel).get(release['id'])
  159. )
  160. else:
  161. release = Release.create(release_data)
  162. db().commit()
  163. self.releases.append(release)
  164. return release
  165. def create_openstack_config(self, api=False, **kwargs):
  166. if api:
  167. resp = self.app.post(
  168. reverse('OpenstackConfigCollectionHandler'),
  169. params=jsonutils.dumps(kwargs),
  170. headers=self.default_headers
  171. )
  172. self.tester.assertEqual(resp.status_code, 201)
  173. config = resp.json_body
  174. self.openstack_configs.append(
  175. self.db.query(OpenstackConfig).get(config['id']))
  176. else:
  177. config = OpenstackConfig.create(kwargs)
  178. db().flush()
  179. self.openstack_configs.append(config)
  180. return config
  181. def get_all_roles(self, obj_type, obj_id, expect_errors=False):
  182. return self.app.get(
  183. reverse(
  184. 'RoleCollectionHandler',
  185. kwargs={'obj_id': obj_id, 'obj_type': obj_type}
  186. ),
  187. headers=self.default_headers,
  188. expect_errors=expect_errors
  189. )
  190. def get_role(self, obj_type, obj_id, role_name, expect_errors=False):
  191. return self.app.get(
  192. reverse(
  193. 'RoleHandler',
  194. kwargs={'obj_id': obj_id,
  195. 'obj_type': obj_type,
  196. 'role_name': role_name}
  197. ),
  198. headers=self.default_headers,
  199. expect_errors=expect_errors
  200. )
  201. def update_role(self, obj_type, obj_id, role_name, data,
  202. expect_errors=False):
  203. return self.app.put(
  204. reverse(
  205. 'RoleHandler',
  206. kwargs={'obj_id': obj_id,
  207. 'obj_type': obj_type,
  208. 'role_name': role_name}
  209. ),
  210. jsonutils.dumps(data),
  211. headers=self.default_headers,
  212. expect_errors=expect_errors
  213. )
  214. def delete_role(self, obj_type, obj_id, role_name, expect_errors=False):
  215. return self.app.delete(
  216. reverse(
  217. 'RoleHandler',
  218. kwargs={'obj_id': obj_id,
  219. 'obj_type': obj_type,
  220. 'role_name': role_name}
  221. ),
  222. headers=self.default_headers,
  223. expect_errors=expect_errors
  224. )
  225. def create_role(self, obj_type, obj_id, data, expect_errors=False):
  226. return self.app.post(
  227. reverse(
  228. 'RoleCollectionHandler',
  229. kwargs={'obj_id': obj_id, 'obj_type': obj_type}
  230. ),
  231. jsonutils.dumps(data),
  232. headers=self.default_headers,
  233. expect_errors=expect_errors
  234. )
  235. def get_all_tags(self, obj_type, obj_id, expect_errors=False):
  236. return self.app.get(
  237. reverse(
  238. 'TagCollectionHandler',
  239. kwargs={'obj_id': obj_id, 'obj_type': obj_type}
  240. ),
  241. headers=self.default_headers,
  242. expect_errors=expect_errors
  243. )
  244. def get_tag(self, obj_type, obj_id, tag_name, expect_errors=False):
  245. return self.app.get(
  246. reverse(
  247. 'TagHandler',
  248. kwargs={'obj_id': obj_id,
  249. 'obj_type': obj_type,
  250. 'tag_name': tag_name}
  251. ),
  252. headers=self.default_headers,
  253. expect_errors=expect_errors
  254. )
  255. def update_tag(self, obj_type, obj_id, tag_name, data,
  256. expect_errors=False):
  257. return self.app.put(
  258. reverse(
  259. 'TagHandler',
  260. kwargs={'obj_id': obj_id,
  261. 'obj_type': obj_type,
  262. 'tag_name': tag_name}
  263. ),
  264. jsonutils.dumps(data),
  265. headers=self.default_headers,
  266. expect_errors=expect_errors
  267. )
  268. def delete_tag(self, obj_type, obj_id, tag_name, expect_errors=False):
  269. return self.app.delete(
  270. reverse(
  271. 'TagHandler',
  272. kwargs={'obj_id': obj_id,
  273. 'obj_type': obj_type,
  274. 'tag_name': tag_name}
  275. ),
  276. headers=self.default_headers,
  277. expect_errors=expect_errors
  278. )
  279. def create_tag(self, obj_type, obj_id, data, expect_errors=False):
  280. return self.app.post(
  281. reverse(
  282. 'TagCollectionHandler',
  283. kwargs={'obj_id': obj_id, 'obj_type': obj_type}
  284. ),
  285. jsonutils.dumps(data),
  286. headers=self.default_headers,
  287. expect_errors=expect_errors
  288. )
    def create_cluster(self, api=True, exclude=None, **kwargs):
        """Create a cluster via the REST API (default) or directly via ORM.

        :param api: POST to ClusterCollectionHandler and assert 201 when
            True; otherwise call Cluster.create() directly
        :param exclude: optional list of keys to drop from the generated
            cluster data before creation
        :param kwargs: extra cluster fields; ``editable_attributes`` and
            ``vmware_attributes`` are applied after creation
        :returns: cluster ORM instance (also appended to self.clusters)
        """
        cluster_data = {
            'name': 'cluster-api-' + str(randint(0, 1000000)),
        }
        editable_attributes = kwargs.pop('editable_attributes', None)
        vmware_attributes = kwargs.pop('vmware_attributes', None)
        if kwargs:
            cluster_data.update(kwargs)
        if 'release_id' not in cluster_data:
            cluster_data['release_id'] = self.create_release(api=False).id
        if exclude and isinstance(exclude, list):
            for ex in exclude:
                try:
                    del cluster_data[ex]
                except KeyError as err:
                    logger.warning(err)
        if api:
            # expect_errors=True so a failed POST reaches the assertion
            # below (with resp.body in the message) instead of raising
            # inside webtest.
            resp = self.app.post(
                reverse('ClusterCollectionHandler'),
                jsonutils.dumps(cluster_data),
                headers=self.default_headers,
                expect_errors=True
            )
            self.tester.assertEqual(resp.status_code, 201, resp.body)
            cluster = resp.json_body
            cluster_db = Cluster.get_by_uid(cluster['id'])
        else:
            cluster = Cluster.create(cluster_data)
            cluster_db = cluster
        db().commit()
        self.clusters.append(cluster_db)
        if editable_attributes:
            Cluster.patch_attributes(cluster_db,
                                     {'editable': editable_attributes})
        if vmware_attributes:
            Cluster.update_vmware_attributes(cluster_db, vmware_attributes)
        return cluster_db
    def create_node(
            self, api=False,
            exclude=None, expect_http=201,
            expected_error=None,
            **kwargs):
        """Create a node via the REST API or directly via ORM.

        :param api: POST to NodeCollectionHandler when True
        :param exclude: keys to drop from the generated node data
        :param expect_http: expected HTTP status when api=True
        :param expected_error: expected error message when api=True
        :param kwargs: node fields; ``meta`` and ``mac`` get special
            handling so that interfaces and the node MAC stay consistent
        :returns: node JSON dict (api=True) or Node ORM object; None when
            a non-2xx status was expected and received
        """
        # TODO(alekseyk) Simplify 'interfaces' and 'mac' manipulation logic
        metadata = kwargs.get('meta', {})
        default_metadata = self.default_metadata()
        default_metadata.update(metadata)
        mac = kwargs.get('mac', self.generate_random_mac())
        if default_metadata['interfaces']:
            if not metadata or 'interfaces' not in metadata:
                # caller gave no interfaces: make the first fixture NIC the
                # PXE one with the node MAC, randomize the rest
                default_metadata['interfaces'][0]['mac'] = mac
                default_metadata['interfaces'][0]['pxe'] = True
                for iface in default_metadata['interfaces'][1:]:
                    if 'mac' in iface:
                        iface['mac'] = self.generate_random_mac()
            else:
                # caller-supplied interfaces: keep node MAC and PXE NIC in
                # sync; for/else assigns defaults when no NIC matched
                for iface in default_metadata['interfaces']:
                    if iface.get('pxe'):
                        if not iface.get('mac'):
                            iface['mac'] = mac
                        elif 'mac' not in kwargs:
                            mac = iface['mac']
                    if iface.get('mac') == mac:
                        break
                else:
                    default_metadata['interfaces'][0]['mac'] = mac
                    default_metadata['interfaces'][0]['pxe'] = True
        node_data = {
            'mac': mac,
            'status': 'discover',
            'ip': '10.20.0.130',
            'meta': default_metadata
        }
        if kwargs:
            # temporarily pop 'meta' so the raw caller meta does not
            # overwrite the merged default_metadata, then restore it
            meta = kwargs.pop('meta', None)
            node_data.update(kwargs)
            if meta:
                kwargs['meta'] = meta
        if exclude and isinstance(exclude, list):
            for ex in exclude:
                try:
                    del node_data[ex]
                except KeyError as err:
                    logger.warning(err)
        if api:
            resp = self.app.post(
                reverse('NodeCollectionHandler'),
                jsonutils.dumps(node_data),
                headers=self.default_headers,
                expect_errors=True
            )
            self.tester.assertEqual(resp.status_code, expect_http, resp.body)
            if expected_error:
                self.tester.assertEqual(
                    resp.json_body["message"],
                    expected_error
                )
            # an expected failure produces no node to return
            if str(expect_http)[0] != "2":
                return None
            self.tester.assertEqual(resp.status_code, expect_http)
            node = resp.json_body
            node_db = Node.get_by_uid(node['id'])
            if 'interfaces' not in node_data['meta'] \
                    or not node_data['meta']['interfaces']:
                self._set_interfaces_if_not_set_in_meta(
                    node_db.id,
                    kwargs.get('meta', None))
            self.nodes.append(node_db)
        else:
            node = Node.create(node_data)
            db().commit()
            self.nodes.append(node)
        return node
  401. def create_nodes(self, count, *args, **kwargs):
  402. """Helper to generate specific number of nodes."""
  403. return [self.create_node(*args, **kwargs) for _ in range(count)]
  404. def create_nodes_w_interfaces_count(self,
  405. nodes_count, if_count=2, **kwargs):
  406. """Create nodes_count nodes with if_count interfaces each
  407. Default random MAC is generated for each interface
  408. """
  409. nodes = []
  410. for i in range(nodes_count):
  411. meta = self.default_metadata()
  412. if_list = [
  413. {
  414. "name": "eth{0}".format(i),
  415. "mac": self.generate_random_mac(),
  416. }
  417. for i in range(if_count)]
  418. if_list[0]['pxe'] = True
  419. self.set_interfaces_in_meta(meta, if_list)
  420. nodes.append(self.create_node(meta=meta, mac=if_list[0]['mac'],
  421. **kwargs))
  422. return nodes
  423. def create_task(self, **kwargs):
  424. task = Task(**kwargs)
  425. self.db.add(task)
  426. self.db.commit()
  427. return task
  428. def create_notification(self, **kwargs):
  429. notif_data = {
  430. "topic": "discover",
  431. "message": "Test message",
  432. "status": "unread",
  433. "datetime": datetime.now()
  434. }
  435. if kwargs:
  436. notif_data.update(kwargs)
  437. notification = Notification()
  438. notification.cluster_id = notif_data.get("cluster_id")
  439. for f, v in six.iteritems(notif_data):
  440. setattr(notification, f, v)
  441. self.db.add(notification)
  442. self.db.commit()
  443. return notification
  444. def create_node_group(self, api=True, expect_errors=False,
  445. cluster_id=None, **kwargs):
  446. cluster_id = self._get_cluster_by_id(cluster_id).id
  447. ng_data = {
  448. 'cluster_id': cluster_id,
  449. 'name': 'test_ng'
  450. }
  451. if kwargs:
  452. ng_data.update(kwargs)
  453. if api:
  454. resp = self.app.post(
  455. reverse('NodeGroupCollectionHandler'),
  456. jsonutils.dumps(ng_data),
  457. headers=self.default_headers,
  458. expect_errors=expect_errors
  459. )
  460. ng = resp
  461. else:
  462. ng = NodeGroup.create(ng_data)
  463. db().flush()
  464. return ng
  465. def create_ip_addrs_by_rules(self, cluster, rules):
  466. """Manually create VIPs in database basing on given rules
  467. Format exmaple for rules
  468. {
  469. 'management': {
  470. 'haproxy': '192.168.0.1',
  471. 'vrouter': '192.168.0.2',
  472. }
  473. }
  474. :param cluster: cluster ORM instance
  475. :param rules: mapping of networks and VIPs information needed
  476. for proper database entry creation
  477. """
  478. created_ips = []
  479. for net_group in cluster.network_groups:
  480. if net_group.name not in rules:
  481. continue
  482. vips_by_names = rules[net_group.name]
  483. for vip_name, ip_addr in six.iteritems(vips_by_names):
  484. ip = IPAddr(
  485. network=net_group.id,
  486. ip_addr=ip_addr,
  487. vip_name=vip_name,
  488. )
  489. self.db.add(ip)
  490. created_ips.append(ip)
  491. if created_ips:
  492. self.db.flush()
  493. return created_ips
  494. def disable_task_deploy(self, cluster):
  495. cluster.attributes.editable['common']['task_deploy']['value'] = False
  496. cluster.attributes.editable.changed()
  497. self.db().flush()
  498. def delete_node_group(self, ng_id, status_code=200, api=True):
  499. if api:
  500. return self.app.delete(
  501. reverse(
  502. 'NodeGroupHandler',
  503. kwargs={'obj_id': ng_id}
  504. ),
  505. headers=self.default_headers,
  506. expect_errors=(status_code != 200)
  507. )
  508. else:
  509. ng = db().query(NodeGroup).get(ng_id)
  510. db().delete(ng)
  511. db().flush()
    def setup_networks_for_nodegroup(
            self, cluster_id, node_group, cidr_start, floating_ranges=None):
        """Setup networks of particular node group in multi-node-group mode.

        :param cluster_id: Cluster Id
        :param node_group: NodeGroup instance
        :param cidr_start: first two octets of CIDR which are common for all
               networks (e.g. "192.168")
        :param floating_ranges: Floating IP ranges
        :return: response from network setup API request
        """
        # third octet distinguishes the per-role subnets under cidr_start
        ng2_networks = {
            'fuelweb_admin': {'cidr': '{0}.9.0/24'.format(cidr_start),
                              'ip_ranges': [['{0}.9.2'.format(cidr_start),
                                             '{0}.9.254'.format(cidr_start)]],
                              'gateway': '{0}.9.1'.format(cidr_start)},
            'public': {'cidr': '{0}.0.0/24'.format(cidr_start),
                       'ip_ranges': [['{0}.0.2'.format(cidr_start),
                                      '{0}.0.127'.format(cidr_start)]],
                       'gateway': '{0}.0.1'.format(cidr_start)},
            'management': {'cidr': '{0}.1.0/24'.format(cidr_start),
                           'gateway': '{0}.1.1'.format(cidr_start)},
            'storage': {'cidr': '{0}.2.0/24'.format(cidr_start),
                        'gateway': '{0}.2.1'.format(cidr_start)},
            'private': {'cidr': '{0}.3.0/24'.format(cidr_start),
                        'gateway': '{0}.3.1'.format(cidr_start)},
        }
        netw_ids = set(net.id for net in node_group.networks)
        netconfig = self.neutron_networks_get(cluster_id).json_body
        for net in netconfig['networks']:
            # skip networks without a notation — they are not configurable
            if not net['meta']['notation']:
                continue
            # only touch networks belonging to this node group
            if net['id'] in netw_ids and net['name'] in ng2_networks:
                for pkey, pval in six.iteritems(ng2_networks[net['name']]):
                    net[pkey] = pval
                net['meta']['use_gateway'] = True
        if floating_ranges:
            netconfig['networking_parameters']['floating_ranges'] = \
                floating_ranges
        resp = self.neutron_networks_put(cluster_id, netconfig)
        return resp
    @mock.patch('nailgun.plugins.loaders.files_manager.FilesManager.load')
    @mock.patch('nailgun.plugins.loaders.loader_base.os.path.isdir')
    def create_plugin(self, is_dir_m, files_manager_m, sample=None, api=False,
                      cluster=None, enabled=True, expect_errors=False,
                      directories=None, **kwargs):
        """Create a Plugin, mocking the on-disk plugin file layout.

        The two mock arguments are injected by the decorators (applied
        bottom-up: ``is_dir_m`` patches os.path.isdir, ``files_manager_m``
        patches FilesManager.load) — callers never pass them.

        :param sample: plugin metadata dict to use instead of defaults
        :param api: POST to PluginCollectionHandler when True
        :param cluster: when set (orm path only), enable the plugin there
        :param directories: extra paths isdir should report as directories
        :returns: webtest response (api=True) or Plugin ORM object
        """
        if sample:
            plugin_data = sample
            plugin_data.update(**kwargs)
        else:
            plugin_data = self.get_default_plugin_metadata(**kwargs)
        # split the metadata into the per-file payloads FilesManager.load
        # would normally read from the plugin directory
        env_config = plugin_data.pop('attributes_metadata', None)
        node_roles = plugin_data.pop('roles_metadata', None)
        node_tags = plugin_data.pop('tags_metadata', None)
        volumes = plugin_data.pop('volumes_metadata', None)
        network_roles = plugin_data.pop('network_roles_metadata', None)
        deployment_tasks = plugin_data.pop('deployment_tasks', None)
        tasks = plugin_data.pop('tasks', None)
        components = plugin_data.pop('components', None)
        nic_config = plugin_data.pop('nic_attributes_metadata', None)
        bond_config = plugin_data.pop('bond_attributes_metadata', None)
        node_config = plugin_data.pop('node_attributes_metadata', None)
        mocked_metadata = {
            'metadata.*': plugin_data,
            'metadata.yaml': plugin_data,
            'environment_config.yaml': env_config,
            'node_roles.yaml': node_roles,
            'node_tags.yaml': node_tags,
            'volumes.yaml': volumes,
            'network_roles.yaml': network_roles,
            'deployment_tasks.yaml': deployment_tasks,
            'tasks.yaml': tasks,
            'components.yaml': components,
            'nic_config.yaml': nic_config,
            'bond_config.yaml': bond_config,
            'node_config.yaml': node_config
        }
        # good only when everything is located in root dir
        files_manager_m.side_effect = lambda key: copy.deepcopy(
            mocked_metadata.get(os.path.basename(key))
        )
        # mock is_dir: report the standard plugin layout (plus any caller
        # supplied paths) as existing directories
        directories = (set(directories) if directories else set()).union({
            'deployment_scripts/',
            'repositories/ubuntu',
            'repositories/centos'
        })

        def define_dir(path):
            return any(path.endswith(d) for d in directories)
        is_dir_m.side_effect = define_dir
        if api:
            return self.app.post(
                reverse('PluginCollectionHandler'),
                jsonutils.dumps(plugin_data),
                headers=self.default_headers,
                expect_errors=expect_errors
            )
        else:
            plugin = Plugin.create(plugin_data)
            self.plugins.append(plugin)
            # Enable plugin for specific cluster
            if cluster:
                cluster.plugins.append(plugin)
                ClusterPlugin.set_attributes(
                    cluster.id, plugin.id, enabled=enabled,
                    attrs=plugin.attributes_metadata or {}
                )
            return plugin
  619. def create_cluster_plugin_link(self, **kwargs):
  620. dash_data = {
  621. "title": "title",
  622. "url": "url",
  623. "description": "description",
  624. "cluster_id": None,
  625. "hidden": False
  626. }
  627. if kwargs:
  628. dash_data.update(kwargs)
  629. cluster_plugin_link = ClusterPluginLink()
  630. cluster_plugin_link.update(dash_data)
  631. self.db.add(cluster_plugin_link)
  632. self.db.commit()
  633. return cluster_plugin_link
  634. def create_plugin_link(self, **kwargs):
  635. dash_data = {
  636. "title": "title",
  637. "url": "url",
  638. "description": "description",
  639. "plugin_id": None,
  640. "hidden": False
  641. }
  642. if kwargs:
  643. dash_data.update(kwargs)
  644. plugin_link = PluginLink()
  645. plugin_link.update(dash_data)
  646. self.db.add(plugin_link)
  647. self.db.commit()
  648. return plugin_link
  649. def default_metadata(self):
  650. item = self.find_item_by_pk_model(
  651. self.read_fixtures(("sample_environment",)),
  652. 1, 'nailgun.node')
  653. return item.get('fields').get('meta', {})
  654. def generate_random_mac(self):
  655. mac = [randint(0x00, 0x7f) for _ in range(6)]
  656. return ':'.join(map(lambda x: "%02x" % x, mac)).lower()
  657. def generate_interfaces_in_meta(self, amount):
  658. nics = []
  659. for i in range(amount):
  660. nics.append(
  661. {
  662. 'name': 'eth{0}'.format(i),
  663. 'mac': self.generate_random_mac(),
  664. 'current_speed': 100,
  665. 'max_speed': 1000,
  666. 'offloading_modes': [
  667. {
  668. 'name': 'enabled_offloading_mode',
  669. 'state': True,
  670. "sub": [
  671. {
  672. 'name': 'disabled_offloading_sub_mode',
  673. 'state': False,
  674. "sub": []
  675. }
  676. ]
  677. },
  678. {
  679. 'name': 'disabled_offloading_mode',
  680. 'state': False,
  681. "sub": []
  682. }
  683. ]
  684. }
  685. )
  686. self.set_admin_ip_for_for_single_interface(nics)
  687. return {'interfaces': nics}
  688. def _set_interfaces_if_not_set_in_meta(self, node_id, meta):
  689. if not meta or 'interfaces' not in meta:
  690. self._add_interfaces_to_node(node_id)
  691. def _create_interfaces_from_meta(self, node):
  692. # Create interfaces from meta
  693. for interface in node.meta['interfaces']:
  694. interface = NodeNICInterface(
  695. mac=interface.get('mac'),
  696. name=interface.get('name'),
  697. ip_addr=interface.get('ip'),
  698. netmask=interface.get('netmask')
  699. )
  700. self.db.add(interface)
  701. node.nic_interfaces.append(interface)
  702. self.db.flush()
  703. # If node in a cluster then assign networks for all interfaces
  704. if node.cluster_id:
  705. self.network_manager.assign_networks_by_default(node)
  706. # At least one interface should have
  707. # same ip as mac in meta
  708. if node.nic_interfaces and not \
  709. filter(lambda i: node.mac == i.mac, node.nic_interfaces):
  710. node.nic_interfaces[0].mac = node.mac
  711. self.db.commit()
def _add_interfaces_to_node(self, node_id, count=1):
    """Create `count` NIC interfaces (eth0..) for a node in the DB.

    When the node is in a cluster, all of the cluster's network groups
    are assigned to the FIRST interface only; later interfaces get none.

    :param node_id: id of an existing Node row
    :param count: number of interfaces to create
    :returns: list of created NodeNICInterface objects
    """
    interfaces = []
    node = self.db.query(Node.model).get(node_id)
    networks_to_assign = \
        list(node.cluster.network_groups) if node.cluster else []
    for i in range(count):
        interface = NodeNICInterface(
            node_id=node_id,
            name='eth{0}'.format(i),
            mac=self.generate_random_mac(),
            current_speed=100,
            max_speed=1000,
            assigned_networks=networks_to_assign
        )
        self.db.add(interface)
        self.db.commit()
        interfaces.append(interface)
        # assign all networks to first NIC only; empty list afterwards
        networks_to_assign = []
    return interfaces
def set_admin_ip_for_for_single_interface(self, interfaces):
    """Ensure one interface dict carries an IP in the admin subnet.

    If no interface already has an admin-subnet IP, the first
    interface's 'ip' is set to the admin CIDR's base address.

    :param interfaces: list of interface dicts (mutated in place)
    """
    ips = [interface.get('ip') for interface in interfaces]
    admin_ips = [
        ip for ip in ips
        if self.network_manager.is_ip_belongs_to_admin_subnet(ip)]
    if not admin_ips:
        admin_cidr = NetworkGroup.get_admin_network_group().cidr
        interfaces[0]['ip'] = str(IPNetwork(admin_cidr).ip)
  741. def set_interfaces_in_meta(self, meta, interfaces):
  742. """Set interfaces in metadata."""
  743. meta['interfaces'] = interfaces
  744. self.set_admin_ip_for_for_single_interface(meta['interfaces'])
  745. return meta['interfaces']
  746. def get_default_roles(self):
  747. return list(self.get_default_roles_metadata.keys())
  748. def get_default_volumes_metadata(self):
  749. return self.read_fixtures(
  750. ('openstack',))[0]['fields']['volumes_metadata']
  751. def get_default_roles_metadata(self):
  752. return self.read_fixtures(
  753. ('openstack',))[0]['fields']['roles_metadata']
  754. def get_default_networks_metadata(self):
  755. return self.read_fixtures(
  756. ('openstack',))[0]['fields']['networks_metadata']
  757. def get_default_attributes_metadata(self):
  758. return self.read_fixtures(
  759. ['openstack'])[0]['fields']['attributes_metadata']
  760. def get_default_plugin_env_config(self, **kwargs):
  761. return {
  762. 'attributes': {
  763. '{0}_text'.format(kwargs.get('plugin_name', 'plugin_name')): {
  764. 'value': kwargs.get('value', 'value'),
  765. 'type': kwargs.get('type', 'text'),
  766. 'description': kwargs.get('description', 'description'),
  767. 'weight': kwargs.get('weight', 25),
  768. 'label': kwargs.get('label', 'label')}}}
  769. def get_default_plugin_nic_config(self, **kwargs):
  770. nic_attributes = {
  771. 'metadata': {
  772. 'label': 'Test base plugin'
  773. },
  774. 'plugin_name_text': {
  775. 'value': 'value',
  776. 'type': 'text',
  777. 'description': 'Some description',
  778. 'weight': 25,
  779. 'label': 'label'
  780. }
  781. }
  782. nic_attributes.update(kwargs)
  783. return nic_attributes
  784. def get_default_plugin_bond_config(self, **kwargs):
  785. bond_attributes = {
  786. 'metadata': {
  787. 'label': 'Test base plugin'
  788. },
  789. 'plugin_name_text': {
  790. 'value': 'value',
  791. 'type': 'text',
  792. 'description': 'Some description',
  793. 'weight': 25,
  794. 'label': 'label'
  795. }
  796. }
  797. bond_attributes.update(kwargs)
  798. return bond_attributes
  799. def get_default_plugin_node_config(self, **kwargs):
  800. node_attributes = {
  801. 'plugin_a_section': {
  802. 'metadata': {
  803. 'label': 'Plugin A Section'
  804. },
  805. 'plugin_attr_key': {
  806. 'value': 'plugin_attr_val',
  807. 'type': 'text',
  808. 'description': 'Some description',
  809. 'weight': 25,
  810. 'label': 'label'
  811. }
  812. }
  813. }
  814. node_attributes.update(kwargs)
  815. return node_attributes
  816. def get_default_plugin_node_roles_config(self, **kwargs):
  817. node_roles = {
  818. 'testing_plugin': {
  819. 'name': 'Some plugin role',
  820. 'description': 'Some description',
  821. 'tags': ['testing_plugin']
  822. }
  823. }
  824. node_roles.update(kwargs)
  825. return node_roles
  826. def get_default_plugin_node_tags_config(self, **kwargs):
  827. node_tags = {
  828. 'testing_plugin': {
  829. 'has_primary': False
  830. }
  831. }
  832. node_tags.update(kwargs)
  833. return node_tags
  834. def get_default_plugin_volumes_config(self, **kwargs):
  835. volumes = {
  836. 'volumes_roles_mapping': {
  837. 'testing_plugin': [
  838. {'allocate_size': 'min', 'id': 'os'},
  839. {'allocate_size': 'all', 'id': 'test_volume'}
  840. ]
  841. },
  842. 'volumes': [
  843. {'id': 'test_volume', 'type': 'vg'}
  844. ]
  845. }
  846. volumes.update(kwargs)
  847. return volumes
  848. def get_default_network_roles_config(self, **kwargs):
  849. network_roles = [
  850. {
  851. 'id': 'test_network_role',
  852. 'default_mapping': 'public',
  853. 'properties': {
  854. 'subnet': 'true',
  855. 'gateway': 'false',
  856. 'vip': [
  857. {'name': 'test_vip_1', 'shared': False},
  858. {'name': 'test_vip_2', 'shared': False}
  859. ]
  860. }
  861. }
  862. ]
  863. network_roles[0].update(kwargs)
  864. return network_roles
  865. def get_default_plugin_deployment_tasks(self, **kwargs):
  866. deployment_tasks = [
  867. {
  868. 'id': 'role-name',
  869. 'type': 'group',
  870. 'role': ['role-name'],
  871. 'requires': ['controller'],
  872. 'required_for': ['deploy_end'],
  873. 'parameters': {
  874. 'strategy': {
  875. 'type': 'parallel'
  876. }
  877. }
  878. }
  879. ]
  880. deployment_tasks[0].update(kwargs)
  881. return deployment_tasks
  882. def get_default_plugin_tasks(self, **kwargs):
  883. default_tasks = [
  884. {
  885. 'role': '[test_role]',
  886. 'stage': 'post_deployment',
  887. 'type': 'puppet',
  888. 'parameters': {
  889. 'puppet_manifest': '/etc/puppet/modules/test_manigest.pp',
  890. 'puppet_modules': '/etc/puppet/modules',
  891. 'timeout': 720
  892. }
  893. }
  894. ]
  895. default_tasks[0].update(kwargs)
  896. return default_tasks
  897. def get_default_plugin_metadata(self, **kwargs):
  898. sample_plugin = {
  899. 'version': '0.1.0',
  900. 'name': 'testing_plugin',
  901. 'title': 'Test plugin',
  902. 'package_version': '1.0.0',
  903. 'description': 'Enable to use plugin X for Neutron',
  904. 'fuel_version': ['6.0'],
  905. 'groups': [],
  906. 'licenses': ['License 1'],
  907. 'authors': ['Author1'],
  908. 'homepage': 'http://some-plugin-url.com/',
  909. 'is_hotpluggable': False,
  910. 'releases': [
  911. {'repository_path': 'repositories/ubuntu',
  912. 'version': '2014.2-6.0', 'os': 'ubuntu',
  913. 'mode': ['ha', 'multinode'],
  914. 'deployment_scripts_path': 'deployment_scripts/'},
  915. {'repository_path': 'repositories/centos',
  916. 'version': '2014.2-6.0', 'os': 'centos',
  917. 'mode': ['ha', 'multinode'],
  918. 'deployment_scripts_path': 'deployment_scripts/'},
  919. {'repository_path': 'repositories/ubuntu',
  920. 'version': '2015.1-8.0', 'os': 'ubuntu',
  921. 'mode': ['ha', 'multinode'],
  922. 'deployment_scripts_path': 'deployment_scripts/'},
  923. {'repository_path': 'repositories/ubuntu',
  924. 'version': 'mitaka-9.0', 'os': 'ubuntu',
  925. 'mode': ['ha'],
  926. 'deployment_scripts_path': 'deployment_scripts/'},
  927. {'repository_path': 'repositories/ubuntu',
  928. 'version': 'newton-10.0', 'os': 'ubuntu',
  929. 'mode': ['ha'],
  930. 'deployment_scripts_path': 'deployment_scripts/'},
  931. {'repository_path': 'repositories/ubuntu',
  932. 'version': 'newton-10.0', 'os': 'ubuntu',
  933. 'mode': ['ha'],
  934. 'deployment_scripts_path': 'deployment_scripts/'},
  935. {'repository_path': 'repositories/ubuntu',
  936. 'version': 'ocata-11.0', 'os': 'ubuntu',
  937. 'mode': ['ha'],
  938. 'deployment_scripts_path': 'deployment_scripts/'}
  939. ]
  940. }
  941. sample_plugin.update(kwargs)
  942. return sample_plugin
  943. def get_default_components(self, **kwargs):
  944. default_components = [
  945. {
  946. 'name': 'hypervisor:test_hypervisor',
  947. 'compatible': [
  948. {'name': 'hypervisor:*'},
  949. {'name': 'storage:object:block:swift'}
  950. ],
  951. 'incompatible': [
  952. {'name': 'network:*'},
  953. {'name': 'additional_service:*'}
  954. ]
  955. }
  956. ]
  957. default_components[0].update(kwargs)
  958. return default_components
  959. def get_default_vmware_attributes_metadata(self):
  960. return self.read_fixtures(
  961. ['openstack'])[0]['fields']['vmware_attributes_metadata']
def upload_fixtures(self, fxtr_names):
    """Load each named fixture file into the database.

    :param fxtr_names: iterable of fixture base names (no extension)
    """
    for fxtr_path in self.fxtr_paths_by_names(fxtr_names):
        with open(fxtr_path, "r") as fxtr_file:
            upload_fixture(fxtr_file)
def read_fixtures(self, fxtr_names):
    """Parse the named fixture files and return their combined items.

    Load errors are logged and the offending fixture skipped, rather
    than raised to the caller.

    :param fxtr_names: iterable of fixture base names (no extension)
    :returns: flat list of fixture items from all readable files
    """
    data = []
    for fxtr_path in self.fxtr_paths_by_names(fxtr_names):
        with open(fxtr_path, "r") as fxtr_file:
            try:
                data.extend(load_fixture(fxtr_file))
            except Exception as exc:
                logger.error(
                    'Error "%s" occurred while loading '
                    'fixture %s' % (exc, fxtr_path)
                )
    return data
def fxtr_paths_by_names(self, fxtr_names):
    """Yield the on-disk path for each fixture name.

    For every name, '<name>.json' is tried before '<name>.yaml' under
    self.fixture_dir; the first existing file is yielded.  The for/else
    logs a warning when neither extension exists.

    :param fxtr_names: iterable of fixture base names (no extension)
    """
    for fxtr in fxtr_names:
        for ext in ['json', 'yaml']:
            fxtr_path = os.path.join(
                self.fixture_dir,
                "%s.%s" % (fxtr, ext)
            )
            if os.path.exists(fxtr_path):
                logger.debug(
                    "Fixture file is found, yielding path: %s",
                    fxtr_path
                )
                yield fxtr_path
                break
        else:
            # executed only when no extension matched (loop not broken)
            logger.warning(
                "Fixture file was not found: %s",
                fxtr
            )
  997. def find_item_by_pk_model(self, data, pk, model):
  998. for item in data:
  999. if item.get('pk') == pk and item.get('model') == model:
  1000. return item
  1001. def _get_cluster_by_id(self, cluster_id):
  1002. """Get cluster by cluster ID.
  1003. Get cluster by cluster ID. If `cluster_id` is `None` then return
  1004. first cluster from the list.
  1005. :param cluster_id: cluster ID
  1006. :type cluster_id: int
  1007. :return: cluster
  1008. """
  1009. if cluster_id is None:
  1010. return self.clusters[0]
  1011. for cluster in self.clusters:
  1012. if cluster.id == cluster_id:
  1013. return cluster
  1014. raise Exception(
  1015. 'Cluster with ID "{0}" was not found.'.format(cluster_id))
def _launch_for_selected_nodes(self, handler, nodes_uids, cluster_id,
                               body=None):
    """PUT to a per-cluster node-selection handler; return its Task.

    :param handler: url handler name (e.g. 'ProvisionSelectedNodes')
    :param nodes_uids: node uids to act on; None/empty means all
        nodes of the cluster
    :param cluster_id: target cluster id; None means the first cluster
    :param body: JSON-serializable request body (defaults to {})
    :raises NotImplementedError: when no clusters exist yet
    """
    if body is None:
        body = {}
    if self.clusters:
        cluster = self._get_cluster_by_id(cluster_id)
        if not nodes_uids:
            nodes_uids = [n.uid for n in cluster.nodes]
        action_url = reverse(
            handler,
            kwargs={'cluster_id': cluster.id}
        ) + '?nodes={0}'.format(','.join(nodes_uids))
        resp = self.app.put(
            action_url,
            jsonutils.dumps(body),
            headers=self.default_headers,
            expect_errors=True
        )
        # If @fake_tasks runs synchronously, then API
        # returns 200 (executed). If fake_rpc=False, then
        # API returns 202 (scheduled)
        self.tester.assertIn(resp.status_code, [200, 202])
        response = resp.json_body
        return self.db.query(Task).filter_by(
            uuid=response['uuid']
        ).first()
    else:
        raise NotImplementedError(
            "Nothing to provision - try creating cluster"
        )
  1046. def launch_provisioning_selected(self, nodes_uids=None, cluster_id=None):
  1047. return self._launch_for_selected_nodes(
  1048. 'ProvisionSelectedNodes', nodes_uids, cluster_id
  1049. )
  1050. def launch_deployment_selected(self, nodes_uids=None, cluster_id=None):
  1051. return self._launch_for_selected_nodes(
  1052. 'DeploySelectedNodes', nodes_uids, cluster_id
  1053. )
  1054. def launch_deployment_selected_tasks(self,
  1055. nodes_uids, cluster_id, task_ids):
  1056. return self._launch_for_selected_nodes(
  1057. 'DeploySelectedNodesWithTasks', nodes_uids, cluster_id,
  1058. task_ids or [],
  1059. )
def _launch_for_cluster(self, handler, cluster_id, **kwargs):
    """PUT to a cluster-level handler, passing kwargs as query params.

    :param handler: url handler name
    :param cluster_id: target cluster id; None means the first cluster
    :returns: the Task the handler created
    :raises NotImplementedError: when no clusters exist yet
    """
    if self.clusters:
        cluster_id = self._get_cluster_by_id(cluster_id).id
        if kwargs:
            get_string = '?' + ('&'.join(
                '{}={}'.format(k, v) for k, v in six.iteritems(kwargs)
            ))
        else:
            get_string = ''
        resp = self.app.put(
            reverse(
                handler,
                kwargs={'cluster_id': cluster_id}
            ) + get_string,
            headers=self.default_headers)
        return self.db.query(Task).filter_by(
            uuid=resp.json_body['uuid']
        ).first()
    else:
        raise NotImplementedError(
            "Nothing to deploy - try creating cluster"
        )
  1082. def launch_deployment(self, cluster_id=None, **kwargs):
  1083. return self._launch_for_cluster(
  1084. 'ClusterChangesHandler', cluster_id, **kwargs
  1085. )
  1086. def launch_redeployment(self, cluster_id=None, **kwargs):
  1087. return self._launch_for_cluster(
  1088. 'ClusterChangesForceRedeployHandler', cluster_id, **kwargs
  1089. )
def stop_deployment(self, cluster_id=None):
    """PUT ClusterStopDeploymentHandler and return the stop Task.

    :param cluster_id: target cluster id; None means the first cluster
    :raises NotImplementedError: when no clusters exist yet
    """
    if self.clusters:
        cluster_id = self._get_cluster_by_id(cluster_id).id
        resp = self.app.put(
            reverse(
                'ClusterStopDeploymentHandler',
                kwargs={'cluster_id': cluster_id}),
            expect_errors=True,
            headers=self.default_headers)
        return self.db.query(Task).filter_by(
            uuid=resp.json_body['uuid']
        ).first()
    else:
        raise NotImplementedError(
            "Nothing to stop - try creating cluster"
        )
def reset_environment(self, expect_http=None, cluster_id=None, force=1):
    """PUT ClusterResetHandler; return the reset Task or error body.

    :param expect_http: exact status code to assert; when None, any
        of 200/202 is accepted
    :param cluster_id: target cluster id; None means the first cluster
    :param force: passed through as the ?force= query parameter
    :returns: reset Task on success, raw response body on error status
    :raises NotImplementedError: when no clusters exist yet
    """
    if self.clusters:
        cluster_id = self._get_cluster_by_id(cluster_id).id
        resp = self.app.put(
            reverse(
                'ClusterResetHandler',
                kwargs={'cluster_id': cluster_id}
            ) + '?force={0}'.format(int(force)),
            expect_errors=True,
            headers=self.default_headers)
        if expect_http is not None:
            self.tester.assertEqual(resp.status_code, expect_http)
        else:
            # if task was started status code can be either 200 or 202
            # depending on task status
            self.tester.assertIn(resp.status_code, [200, 202])
        if not (200 <= resp.status_code < 400):
            return resp.body
        return self.db.query(Task).filter_by(
            uuid=resp.json_body['uuid']
        ).first()
    else:
        raise NotImplementedError(
            "Nothing to reset - try creating cluster"
        )
def delete_environment(self, expect_http=202, cluster_id=None, force=1):
    """DELETE a cluster; return its cluster_deletion Task or error body.

    :param expect_http: status code asserted against the response
    :param cluster_id: target cluster id; None means the first cluster
    :param force: passed through as the ?force= query parameter
    :returns: cluster_deletion Task on 2xx, raw response body otherwise
    :raises NotImplementedError: when no clusters exist yet
    """
    if self.clusters:
        cluster_id = self._get_cluster_by_id(cluster_id).id
        resp = self.app.delete(
            reverse(
                'ClusterHandler',
                kwargs={'obj_id': cluster_id}
            ) + '?force={0}'.format(int(force)),
            expect_errors=True,
            headers=self.default_headers)
        self.tester.assertEqual(resp.status_code, expect_http)
        if not str(expect_http).startswith("2"):
            return resp.body
        return self.db.query(Task).filter_by(
            name=consts.TASK_NAMES.cluster_deletion
        ).first()
    else:
        raise NotImplementedError(
            "Nothing to delete - try creating cluster"
        )
def launch_verify_networks(self, data=None, expect_errors=False,
                           cluster_id=None):
    """Start network verification for a cluster and return its Task.

    If `data` is not given, the current network configuration is first
    fetched from the provider-specific config handler and re-submitted
    to the matching verify handler.

    :param data: explicit network config to verify, or None
    :param expect_errors: when True, return the raw response instead
        of resolving the Task
    :param cluster_id: target cluster id; None means the first cluster
    :raises NotImplementedError: when no clusters exist yet
    """
    if self.clusters:
        cluster = self._get_cluster_by_id(cluster_id)
        # handler names keyed by the cluster's network provider
        net_urls = {
            "nova_network": {
                "config": "NovaNetworkConfigurationHandler",
                "verify": "NovaNetworkConfigurationVerifyHandler"
            },
            "neutron": {
                "config": "NeutronNetworkConfigurationHandler",
                "verify": "NeutronNetworkConfigurationVerifyHandler"
            }
        }
        provider = cluster.net_provider
        if data:
            nets = jsonutils.dumps(data)
        else:
            resp = self.app.get(
                reverse(
                    net_urls[provider]["config"],
                    kwargs={'cluster_id': cluster.id}
                ),
                headers=self.default_headers
            )
            self.tester.assertEqual(200, resp.status_code)
            nets = resp.body
        resp = self.app.put(
            reverse(
                net_urls[provider]["verify"],
                kwargs={'cluster_id': cluster.id}),
            nets,
            headers=self.default_headers,
            expect_errors=expect_errors,
        )
        if expect_errors:
            return resp
        else:
            task_uuid = resp.json_body['uuid']
            return self.db.query(Task).filter_by(uuid=task_uuid).first()
    else:
        raise NotImplementedError(
            "Nothing to verify - try creating cluster"
        )
def make_bond_via_api(self, bond_name, bond_mode, nic_names,
                      node_id=None, attrs=None):
    """Create a bond over the named NICs through the REST API.

    All networks currently assigned to the member NICs are moved onto
    the new bond; the members become bare slaves.

    :param bond_name: name for the new bond interface
    :param bond_mode: bonding mode
    :param nic_names: names of existing NICs to enslave
    :param node_id: target node id; defaults to the first known node
    :param attrs: optional bond attributes dict
    """
    if not node_id:
        node_id = self.nodes[0]["id"]
    resp = self.app.get(
        reverse("NodeNICsHandler",
                kwargs={"node_id": node_id}),
        headers=self.default_headers)
    self.tester.assertEqual(resp.status_code, 200)
    data = resp.json_body
    # sanity check: every requested NIC exists on this node
    nics = self.db.query(NodeNICInterface).filter(
        NodeNICInterface.name.in_(nic_names)
    ).filter(
        NodeNICInterface.node_id == node_id
    )
    self.tester.assertEqual(nics.count(), len(nic_names))
    assigned_nets, slaves = [], []
    for nic in data:
        if nic['name'] in nic_names:
            assigned_nets.extend(nic['assigned_networks'])
            slaves.append({'name': nic['name']})
            nic['assigned_networks'] = []
    bond_dict = {
        "name": bond_name,
        "type": NETWORK_INTERFACE_TYPES.bond,
        "mode": bond_mode,
        "slaves": slaves,
        "assigned_networks": assigned_nets,
        "attributes": attrs or {}
    }
    data.append(bond_dict)
    resp = self.node_nics_put(node_id, data)
    self.tester.assertEqual(resp.status_code, 200)
def refresh_nodes(self):
    """Re-read tracked nodes from the DB; drop ones that cannot refresh.

    The broad except is deliberate best-effort cleanup: any node that
    fails to re-add/refresh (e.g. deleted rows) is removed from
    self.nodes.  Iterates over a copy so removal is safe.
    """
    for n in self.nodes[:]:
        try:
            self.db.add(n)
            self.db.refresh(n)
        except Exception:
            self.nodes.remove(n)
    self.db.flush()
  1236. def refresh_clusters(self):
  1237. for n in self.clusters[:]:
  1238. try:
  1239. self.db.refresh(n)
  1240. except Exception:
  1241. self.nodes.remove(n)
def _api_get(self, method, instance_id, expect_errors=False):
    """GET the `method` handler with `instance_id` as url kwargs.

    :param method: url handler name
    :param instance_id: dict of url kwargs (e.g. {'cluster_id': 1})
    :returns: the raw HTTP response
    """
    return self.app.get(
        reverse(method,
                kwargs=instance_id),
        headers=self.default_headers,
        expect_errors=expect_errors)
def _api_put(self, method, instance_id, data, expect_errors=False):
    """PUT JSON-encoded `data` to the `method` handler.

    :param method: url handler name
    :param instance_id: dict of url kwargs (e.g. {'node_id': 1})
    :param data: JSON-serializable request body
    :returns: the raw HTTP response
    """
    return self.app.put(
        reverse(method,
                kwargs=instance_id),
        jsonutils.dumps(data),
        headers=self.default_headers,
        expect_errors=expect_errors)
  1255. def nova_networks_get(self, cluster_id, expect_errors=False):
  1256. return self._api_get('NovaNetworkConfigurationHandler',
  1257. {'cluster_id': cluster_id},
  1258. expect_errors)
  1259. def nova_networks_put(self, cluster_id, networks, expect_errors=False):
  1260. return self._api_put('NovaNetworkConfigurationHandler',
  1261. {'cluster_id': cluster_id},
  1262. networks,
  1263. expect_errors)
  1264. def neutron_networks_get(self, cluster_id, expect_errors=False):
  1265. return self._api_get('NeutronNetworkConfigurationHandler',
  1266. {'cluster_id': cluster_id},
  1267. expect_errors)
  1268. def neutron_networks_put(self, cluster_id, networks, expect_errors=False):
  1269. return self._api_put('NeutronNetworkConfigurationHandler',
  1270. {'cluster_id': cluster_id},
  1271. networks,
  1272. expect_errors)
  1273. def cluster_changes_put(self, cluster_id, expect_errors=False):
  1274. return self._api_put('ClusterChangesHandler',
  1275. {'cluster_id': cluster_id},
  1276. [],
  1277. expect_errors)
  1278. def node_nics_get(self, node_id, expect_errors=False):
  1279. return self._api_get('NodeNICsHandler',
  1280. {'node_id': node_id},
  1281. expect_errors)
  1282. def node_nics_put(self, node_id, interfaces, expect_errors=False):
  1283. return self._api_put('NodeNICsHandler',
  1284. {'node_id': node_id},
  1285. interfaces,
  1286. expect_errors)
  1287. def node_collection_nics_put(self, nodes,
  1288. expect_errors=False):
  1289. return self._api_put('NodeCollectionNICsHandler',
  1290. {},
  1291. nodes,
  1292. expect_errors)
def _create_network_group(self, expect_errors=False, cluster=None,
                          group_id=None, **kwargs):
    """POST a new network group; kwargs override the default payload.

    :param cluster: target cluster; defaults to the first cluster
    :param group_id: node group id; defaults to the cluster's default
        group
    :returns: the raw HTTP response
    """
    if not cluster:
        cluster = self.clusters[0]
    ng = {
        "release": cluster.release.id,
        "name": "external",
        "vlan_start": 50,
        "cidr": "10.3.0.0/24",
        "gateway": "10.3.0.1",
        "group_id": group_id or Cluster.get_default_group(cluster).id,
        "meta": {
            "notation": consts.NETWORK_NOTATION.cidr,
            "use_gateway": True,
            "map_priority": 2}
    }
    ng.update(kwargs)
    resp = self.app.post(
        reverse('NetworkGroupCollectionHandler'),
        jsonutils.dumps(ng),
        headers=self.default_headers,
        expect_errors=expect_errors,
    )
    return resp
def _update_network_group(self, ng_data, expect_errors=False):
    """PUT updated network-group data; ng_data must carry its 'id'.

    :returns: the raw HTTP response
    """
    return self.app.put(
        reverse(
            'NetworkGroupHandler',
            kwargs={'obj_id': ng_data['id']}
        ),
        jsonutils.dumps(ng_data),
        headers=self.default_headers,
        expect_errors=expect_errors
    )
def _set_additional_component(self, cluster, component, value):
    """Toggle an 'additional_components' editable attribute via PATCH.

    :param cluster: target cluster object
    :param component: component key under 'additional_components'
    :param value: new value for the component's 'value' field
    """
    attrs = copy.deepcopy(cluster.attributes.editable)
    attrs['additional_components'][component]['value'] = value
    self.app.patch(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster.id}),
        params=jsonutils.dumps({'editable': attrs}),
        headers=self.default_headers
    )
def _delete_network_group(self, ng_id, expect_errors=False):
    """DELETE the network group with id `ng_id`.

    :returns: the raw HTTP response
    """
    return self.app.delete(
        reverse(
            'NetworkGroupHandler',
            kwargs={'obj_id': ng_id}
        ),
        headers=self.default_headers,
        expect_errors=expect_errors
    )
  1346. def set_task_status_recursively(self, supertask, status):
  1347. supertask.status = consts.TASK_STATUSES.ready
  1348. for sub_task in supertask.subtasks:
  1349. self.set_task_status_recursively(sub_task, status)
class BaseUnitTest(TestCase):
    """Base unit-test class providing rich data-comparison assertions."""

    def datadiff(self, data1, data2, path=None, ignore_keys=[],
                 compare_sorted=False):
        """Assert deep equality of two structures, failing with the path.

        :param path: breadcrumb of keys/indexes leading to current node
        :param ignore_keys: dict keys whose values are not compared
            (NOTE: the mutable default is safe — it is never mutated)
        :param compare_sorted: sort lists before element-wise comparison
        """
        if path is None:
            path = []

        def fail(msg, failed_path):
            # Fail the test, showing where in the structure we diverged.
            self.fail('Path "{0}": {1}'.format("->".join(failed_path), msg))

        if not isinstance(data1, dict) or not isinstance(data2, dict):
            if isinstance(data1, (list, tuple)):
                newpath = path[:]
                if compare_sorted:
                    data1 = sorted(data1)
                    data2 = sorted(data2)
                # izip pairs up to the shorter sequence; a pure length
                # mismatch between lists is therefore not reported here.
                for i, keys in enumerate(izip(data1, data2)):
                    newpath.append(str(i))
                    self.datadiff(keys[0], keys[1], newpath, ignore_keys,
                                  compare_sorted)
                    newpath.pop()
            elif data1 != data2:
                err = "Values differ: {0} != {1}".format(
                    str(data1),
                    str(data2)
                )
                fail(err, path)
        else:
            newpath = path[:]
            if len(data1) != len(data2):
                fail('Dicts have different keys number: {0} != {1}'.format(
                    len(data1), len(data2)), path)
            # Compare sorted key sequences pairwise, then recurse into
            # values for keys not in ignore_keys.
            for key1, key2 in zip(
                sorted(data1),
                sorted(data2)
            ):
                if key1 != key2:
                    err = "Keys differ: {0} != {1}".format(
                        str(key1),
                        str(key2)
                    )
                    fail(err, path)
                if key1 in ignore_keys:
                    continue
                newpath.append(key1)
                self.datadiff(data1[key1], data2[key2], newpath, ignore_keys,
                              compare_sorted)
                newpath.pop()

    def assertNotRaises(self, exception, method, *args, **kwargs):
        """Fail if calling method(*args, **kwargs) raises `exception`."""
        try:
            method(*args, **kwargs)
        except exception:
            self.fail('Exception "{0}" raised.'.format(exception))

    def assertRaisesWithMessage(self, exc, msg, func, *args, **kwargs):
        """Assert func raises `exc` with a message equal to `msg`.

        NOTE(review): relies on the Python 2-only exception `.message`
        attribute.
        """
        try:
            func(*args, **kwargs)
            self.fail('Exception "{0}" raised.'.format(exc))
        except Exception as inst:
            self.assertIsInstance(inst, exc)
            self.assertEqual(inst.message, msg)

    def assertRaisesWithMessageIn(self, exc, msg, func, *args, **kwargs):
        """Assert func raises `exc` whose message contains `msg`.

        NOTE(review): relies on the Python 2-only exception `.message`
        attribute.
        """
        try:
            func(*args, **kwargs)
            self.fail('Exception "{0}" raised.'.format(exc))
        except Exception as inst:
            self.assertIsInstance(inst, exc)
            self.assertIn(msg, inst.message)

    def assertValidJSON(self, data):
        """Assert that `data` parses as JSON."""
        self.assertNotRaises(ValueError, jsonutils.loads, data)
class BaseTestCase(BaseUnitTest):
    """Base test case wiring up the WSGI test app, DB, and fixtures."""

    # Fixture names uploaded into the DB before every test.
    fixtures = ['admin_network', 'master_node_settings']

    def __init__(self, *args, **kwargs):
        super(BaseTestCase, self).__init__(*args, **kwargs)
        self.default_headers = {
            "Content-Type": "application/json"
        }

    @classmethod
    def setUpClass(cls):
        # Build the test WSGI app once per class and sync the schema.
        cls.app = app.TestApp(
            build_app(db_driver=test_db_driver).wsgifunc(
                ConnectionMonitorMiddleware)
        )
        syncdb()
        # syncdb disables logger, we need to enable it again
        logger.disabled = 0

    def setUp(self):
        # Fresh DB state plus a new EnvironmentManager for every test.
        self.db = db
        flush()
        self.env = EnvironmentManager(app=self.app, session=self.db)
        self.env.upload_fixtures(self.fixtures)

    def tearDown(self):
        self.db.remove()
class BaseIntegrationTest(BaseTestCase):
    """Integration base: stubs syslog setup and adds node state helpers."""

    @classmethod
    def setUpClass(cls):
        super(BaseIntegrationTest, cls).setUpClass()
        # Avoid touching the real filesystem for syslog directories.
        nailgun.task.task.logs_utils.prepare_syslog_dir = mock.Mock()

    def emulate_nodes_provisioning(self, nodes):
        """Mark nodes as provisioned without running provisioning."""
        for node in nodes:
            node.status = consts.NODE_STATUSES.provisioned
            node.pending_addition = False
        self.db.add_all(nodes)
        self.db.flush()

    def emulate_nodes_deployment(self, nodes):
        """Mark nodes as deployed (ready) without running deployment."""
        for node in nodes:
            node.status = consts.NODE_STATUSES.ready
            node.pending_addition = False
        self.db.flush()

    @classmethod
    def tearDownClass(cls):
        super(BaseIntegrationTest, cls).tearDownClass()
class BaseAuthenticationIntegrationTest(BaseIntegrationTest):
    """Integration base running behind the fake Keystone auth middleware."""

    @classmethod
    def setUpClass(cls):
        super(BaseAuthenticationIntegrationTest, cls).setUpClass()
        # Rebuild the app with the fake Keystone middleware in the chain.
        cls.app = app.TestApp(build_app(db_driver=test_db_driver).wsgifunc(
            ConnectionMonitorMiddleware, NailgunFakeKeystoneAuthMiddleware))
        syncdb()

    def get_auth_token(self):
        """Obtain an auth token from the fake Keystone v2 endpoint.

        :returns: token id as bytes (utf-8 encoded)
        """
        resp = self.app.post(
            '/keystone/v2.0/tokens',
            jsonutils.dumps({
                'auth': {
                    'tenantName': 'admin',
                    'passwordCredentials': {
                        'username': settings.FAKE_KEYSTONE_USERNAME,
                        'password': settings.FAKE_KEYSTONE_PASSWORD,
                    },
                },
            })
        )
        return resp.json['access']['token']['id'].encode('utf-8')
def fake_tasks(fake_rpc=True,
               mock_rpc=True,
               tick_count=100,
               tick_interval=0,
               **kwargs):
    """Decorator enabling nailgun's fake-task machinery for a test.

    :param fake_rpc: patch rpc.cast with fake_cast so tasks execute
        locally (synchronously, without threads)
    :param mock_rpc: when fake_rpc is False, patch rpc.cast with a
        plain Mock instead
    :param tick_count: value for the FAKE_TASKS_TICK_COUNT setting
    :param tick_interval: value for the FAKE_TASKS_TICK_INTERVAL setting
    :param kwargs: extra arguments forwarded to fake_cast or mock.patch
    """
    def wrapper(func):
        # Stack the settings patches onto the test function.
        func = mock.patch(
            'nailgun.task.task.settings.FAKE_TASKS',
            True
        )(func)
        func = mock.patch(
            'nailgun.task.fake.settings.FAKE_TASKS_TICK_COUNT',
            tick_count
        )(func)
        func = mock.patch(
            'nailgun.task.fake.settings.FAKE_TASKS_TICK_INTERVAL',
            tick_interval
        )(func)
        if fake_rpc:
            func = mock.patch(
                'nailgun.task.task.rpc.cast',
                functools.partial(
                    nailgun.task.task.fake_cast,
                    **kwargs
                )
            )(func)
            func = mock.patch(
                'nailgun.task.fake.settings.TESTS_WITH_NO_THREADS',
                True
            )(func)
            # In fake.py, we have threading.Event().isSet(), which
            # we need to return False. We also mock threading module
            # We could just do mock.patch('..threading', **config), but
            # mock object is inserted as first argument in every test then.
            # If replacement object is passed as second arg to patch(), then
            # args in original function are not changed. That's why isSet_mock
            # created as replacement object.
            config = {'Event.return_value.isSet.return_value': False}
            isSet_mock = mock.Mock()
            isSet_mock.configure_mock(**config)
            func = mock.patch(
                'nailgun.task.fake.threading',
                isSet_mock
            )(func)
        elif mock_rpc:
            func = mock.patch(
                'nailgun.task.task.rpc.cast',
                **kwargs
            )(func)
        return func
    return wrapper
  1530. def mock_rpc(pass_mock=False, **rpc_mock_kwargs):
  1531. """Decorator that mocks rpc.cast
  1532. :param pass_mock: should decorator pass mocked object to decorated
  1533. function arguments
  1534. :param rpc_mock_kwargs: additional arguments to mock.patch function
  1535. """
  1536. def wrapper(f):
  1537. @functools.wraps(f)
  1538. def inner(*args, **kwargs):
  1539. with mock.patch('nailgun.rpc.cast', **rpc_mock_kwargs) as rpc_mock:
  1540. if pass_mock:
  1541. return f(*(args + (rpc_mock,)), **kwargs)
  1542. else:
  1543. return f(*args, **kwargs)
  1544. return inner
  1545. return wrapper
class DeploymentTasksTestMixin(object):
    """Mixin with helpers for comparing deployment task lists."""

    def _compare_tasks(self, reference, result):
        """Compare deployment tasks.

        Considering legacy format and compatible validator output with
        extra fields where legacy fields is converted to new syntax.

        :param reference: list of tasks
        :type reference: list
        :param result: list of tasks
        :type result: list
        """
        # Sort both sides by id (or legacy task_name) for stable pairing.
        reference.sort(key=lambda x: x.get('id', x.get('task_name')))
        result.sort(key=lambda x: x.get('id', x.get('task_name')))
        self.assertEqual(len(reference), len(result))
        for ref, res in six.moves.zip(reference, result):
            # Only fields present in the reference are checked; the
            # result items may carry extra fields.
            for field in ref:
                if field == '_custom':
                    # unpack custom json fields if persist
                    self.assertEqual(
                        jsonutils.loads(ref.get(field)),
                        jsonutils.loads((res or {}).get(field))
                    )
                else:
                    self.assertEqual(ref.get(field), (res or {}).get(field))
  1569. def _patch_tags_legacy(release_data, version):
  1570. if is_feature_supported(version, consts.TAGS_SUPPORT_VERSION):
  1571. return
  1572. roles = release_data.get('roles_metadata', {})
  1573. for role_name, meta in six.iteritems(roles):
  1574. meta['tags'] = [role_name]
  1575. # this method is for development and troubleshooting purposes
  1576. def datadiff(data1, data2, branch, p=True):
  1577. def iterator(data1, data2):
  1578. if isinstance(data1, (list,)) and isinstance(data2, (list,)):
  1579. return range(max(len(data1), len(data2)))
  1580. elif isinstance(data1, (dict,)) and isinstance(data2, (dict,)):
  1581. return (set(data1.keys()) | set(data2.keys()))
  1582. else:
  1583. raise TypeError
  1584. diff = []
  1585. if data1 != data2:
  1586. try:
  1587. it = iterator(data1, data2)
  1588. except Exception:
  1589. return [(branch, data1, data2)]
  1590. for k in it:
  1591. newbranch = branch[:]
  1592. newbranch.append(k)
  1593. if p:
  1594. print("Comparing branch: %s" % newbranch)
  1595. try:
  1596. try:
  1597. v1 = data1[k]
  1598. except (KeyError, IndexError):
  1599. if p:
  1600. print("data1 seems does not have key = %s" % k)
  1601. diff.append((newbranch, None, data2[k]))
  1602. continue
  1603. try:
  1604. v2 = data2[k]
  1605. except (KeyError, IndexError):
  1606. if p:
  1607. print("data2 seems does not have key = %s" % k)
  1608. diff.append((newbranch, data1[k], None))
  1609. continue
  1610. except Exception:
  1611. if p:
  1612. print("data1 and data2 cannot be compared on "
  1613. "branch: %s" % newbranch)
  1614. return diff.append((newbranch, data1, data2))
  1615. else:
  1616. if v1 != v2:
  1617. if p:
  1618. print("data1 and data2 do not match "
  1619. "each other on branch: %s" % newbranch)
  1620. # print("data1 = %s" % data1)
  1621. print("v1 = %s" % v1)
  1622. # print("data2 = %s" % data2)
  1623. print("v2 = %s" % v2)
  1624. diff.extend(datadiff(v1, v2, newbranch))
  1625. return diff
  1626. def reflect_db_metadata():
  1627. meta = sa.MetaData()
  1628. meta.reflect(bind=db.get_bind())
  1629. return meta
  1630. def get_nodegroup_network_schema_template(template, group_name):
  1631. custom_template = template['adv_net_template'][group_name]
  1632. custom_template_obj = NetworkTemplate(jsonutils.dumps(custom_template))
  1633. node_custom_template = custom_template_obj.safe_substitute(
  1634. custom_template['nic_mapping']['default'])
  1635. return jsonutils.loads(node_custom_template)['network_scheme']
class BaseAlembicMigrationTest(TestCase):
    """Base class for alembic migration tests.

    Reflects the current database schema into ``self.meta`` before each
    test and disposes of the scoped DB session afterwards.
    """

    def setUp(self):
        super(BaseAlembicMigrationTest, self).setUp()
        # Snapshot of the live database schema for assertions.
        self.meta = reflect_db_metadata()

    def tearDown(self):
        # Release the scoped session before the base teardown runs.
        db.remove()
        super(BaseAlembicMigrationTest, self).tearDown()
  1643. class BaseMasterNodeSettignsTest(BaseIntegrationTest):
  1644. def setUp(self):
  1645. super(BaseMasterNodeSettignsTest, self).setUp()
  1646. self.create_master_node_settings()
  1647. master_node_settings_template = {
  1648. "settings": {
  1649. "ui_settings": {
  1650. "view_mode": "standard",
  1651. "filter": {},
  1652. "sort": [{"status": "asc"}],
  1653. "filter_by_labels": {},
  1654. "sort_by_labels": [],
  1655. "search": ""
  1656. },
  1657. "statistics": {
  1658. "send_anonymous_statistic": {
  1659. "type": "checkbox",
  1660. "value": True,
  1661. "label": "statistics.setting_labels."
  1662. "send_anonymous_statistic",
  1663. "weight": 10
  1664. },
  1665. "user_choice_saved": {
  1666. "type": "hidden",
  1667. "value": False
  1668. }
  1669. }
  1670. }
  1671. }
  1672. def create_master_node_settings(self):
  1673. self.master_node_settings = {
  1674. 'master_node_uid': str(uuid.uuid4()),
  1675. }
  1676. self.master_node_settings.update(self.master_node_settings_template)
  1677. MasterNodeSettings.create(self.master_node_settings)
  1678. self.db.commit()
  1679. def set_sending_stats(self, value):
  1680. mn_settings = MasterNodeSettings.get_one()
  1681. mn_settings.settings = dict_merge(
  1682. mn_settings.settings,
  1683. {'statistics': {
  1684. 'user_choice_saved': {'value': True},
  1685. 'send_anonymous_statistic': {'value': value}
  1686. }})
  1687. self.db.flush()
  1688. def enable_sending_stats(self):
  1689. self.set_sending_stats(True)
  1690. def disable_sending_stats(self):
  1691. self.set_sending_stats(False)
  1692. class BaseValidatorTest(BaseTestCase):
  1693. """JSON-schema validation policy:
  1694. 1) All required properties are present;
  1695. 2) No additional properties allowed;
  1696. 3) Item has correct type.
  1697. """
  1698. validator = None
  1699. def serialize(self, data):
  1700. """Serialize object to a string.
  1701. :param data: object being serialized
  1702. :return: stringified JSON-object
  1703. """
  1704. return jsonutils.dumps(data)
  1705. def get_invalid_data_context(self, data, *args):
  1706. """Returns context object of raised InvalidData exception.
  1707. :return: context of 'errors.InvalidData'
  1708. """
  1709. serialized_data = self.serialize(data)
  1710. with self.assertRaises(errors.InvalidData) as context:
  1711. self.validator(serialized_data, *args)
  1712. return context
  1713. def assertRaisesAdditionalProperty(self, obj, key, *args):
  1714. context = self.get_invalid_data_context(obj, *args)
  1715. self.assertIn(
  1716. "Additional properties are not allowed".format(key),
  1717. context.exception.message)
  1718. self.assertIn(
  1719. "'{0}' was unexpected".format(key),
  1720. context.exception.message)
  1721. def assertRaisesRequiredProperty(self, obj, key):
  1722. context = self.get_invalid_data_context(obj)
  1723. self.assertIn(
  1724. "Failed validating 'required' in schema",
  1725. context.exception.message)
  1726. self.assertIn(
  1727. "'{0}' is a required property".format(key),
  1728. context.exception.message)
  1729. def assertRaisesInvalidType(self, obj, value, expected_value):
  1730. context = self.get_invalid_data_context(obj)
  1731. self.assertIn(
  1732. "Failed validating 'type' in schema",
  1733. context.exception.message)
  1734. self.assertIn(
  1735. "{0} is not of type {1}".format(value, expected_value),
  1736. context.exception.message)
  1737. def assertRaisesInvalidAnyOf(self, obj, passed_value, instance):
  1738. context = self.get_invalid_data_context(obj)
  1739. self.assertIn(
  1740. "Failed validating 'anyOf' in schema",
  1741. context.exception.message)
  1742. err_msg = "{0} is not valid under any of the given schemas"
  1743. self.assertIn(
  1744. err_msg.format(passed_value),
  1745. context.exception.message)
  1746. self.assertIn(
  1747. "On instance{0}".format(instance),
  1748. context.exception.message)
  1749. def assertRaisesInvalidOneOf(self, passed_data,
  1750. incorrect, data_label, *args):
  1751. """Check raised within error context exception
  1752. Test that the exception has features pertaining to
  1753. failed 'oneOf' validation case of jsonschema
  1754. :param passed_data: dict to be serialized and passed for validation
  1755. :param incorrect: data value which doesn't pass the validation
  1756. and is present in error message of original exception
  1757. :param data_label: key name from passed_data one of which value
  1758. doesn't pass the validation; is present in the error message
  1759. :param *args: list of additional arguments passed to validation code
  1760. """
  1761. context = self.get_invalid_data_context(passed_data, *args)
  1762. self.assertIn(
  1763. "Failed validating 'oneOf' in schema",
  1764. context.exception.message)
  1765. err_msg = "{0} is not valid under any of the given schemas"
  1766. self.assertIn(
  1767. err_msg.format(incorrect),
  1768. context.exception.message)
  1769. self.assertIn(
  1770. "On instance{0}".format(data_label),
  1771. context.exception.message)
  1772. def assertRaisesInvalidEnum(self, obj, value, expected_value):
  1773. context = self.get_invalid_data_context(obj)
  1774. self.assertIn(
  1775. "Failed validating 'enum' in schema",
  1776. context.exception.message)
  1777. self.assertIn(
  1778. "{0} is not one of {1}".format(value, expected_value),
  1779. context.exception.message)
  1780. def assertRaisesTooLong(self, obj, stringified_values):
  1781. context = self.get_invalid_data_context(obj)
  1782. self.assertIn(
  1783. "{0} is too long".format(stringified_values),
  1784. context.exception.message)
  1785. def assertRaisesTooShort(self, obj, stringified_values):
  1786. context = self.get_invalid_data_context(obj)
  1787. self.assertIn(
  1788. "{0} is too short".format(stringified_values),
  1789. context.exception.message)
  1790. def assertRaisesNonUnique(self, obj, stringified_values):
  1791. context = self.get_invalid_data_context(obj)
  1792. self.assertIn(
  1793. "{0} has non-unique elements".format(stringified_values),
  1794. context.exception.message)
  1795. def assertRaisesLessThanMinimum(self, obj, stringified_values):
  1796. context = self.get_invalid_data_context(obj)
  1797. self.assertIn(
  1798. "{0} is less than the minimum".format(stringified_values),
  1799. context.exception.message)
  1800. def assertRaisesGreaterThanMaximum(self, obj, stringified_values):
  1801. context = self.get_invalid_data_context(obj)
  1802. self.assertIn(
  1803. "{0} is greater than the maximum".format(stringified_values),
  1804. context.exception.message)
  1805. def assertRaisesNotMatchPattern(self, obj, stringified_values):
  1806. context = self.get_invalid_data_context(obj)
  1807. self.assertIn(
  1808. "Failed validating 'pattern' in schema",
  1809. context.exception.message)
  1810. self.assertIn(
  1811. "{0} does not match".format(stringified_values),
  1812. context.exception.message)