# test_servermanager.py
# Big Switch Networks plugins and drivers for the networking project.
# (Scraped page header and line-number residue removed.)
  1. # Copyright 2014 Big Switch Networks, Inc. All rights reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License"); you may
  4. # not use this file except in compliance with the License. You may obtain
  5. # a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  11. # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  12. # License for the specific language governing permissions and limitations
  13. # under the License.
import httplib
import socket
import ssl
import time

import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import importutils

from networking_bigswitch.plugins.bigswitch.db import consistency_db
from networking_bigswitch.plugins.bigswitch import servermanager
from networking_bigswitch.tests.unit.bigswitch \
    import test_restproxy_plugin as test_rp
from neutron_lib.plugins import directory

# Dotted module paths used to build mock.patch() targets throughout the tests.
SERVERMANAGER = 'networking_bigswitch.plugins.bigswitch.servermanager'
CONSISTENCYDB = 'networking_bigswitch.plugins.bigswitch.db.consistency_db'
# Patch targets for the plain and TLS-validating HTTP connection classes.
HTTPCON = SERVERMANAGER + '.httplib.HTTPConnection'
HTTPSCON = SERVERMANAGER + '.HTTPSConnectionWithValidation'
SERVER_GET_CAPABILITIES = SERVERMANAGER + '.ServerPool.get_capabilities'
class ServerManagerTests(test_rp.BigSwitchProxyPluginV2TestCase):
    """Unit tests for servermanager.ServerPool / ServerProxy.

    Covers server-list validation, REST call retry/failure handling, SSL
    certificate management, capability retrieval, and the consistency
    watchdog, using mocked HTTP(S) connections throughout.
    """

    def setUp(self):
        # Mock raw socket creation and TLS wrapping so the
        # HTTPSConnectionWithValidation tests never touch the network.
        self.socket_mock = mock.patch(
            SERVERMANAGER + '.socket.create_connection').start()
        self.wrap_mock = mock.patch(SERVERMANAGER + '.ssl.wrap_socket').start()
        super(ServerManagerTests, self).setUp()
        # http patch must not be running or it will mangle the servermanager
        # import where the https connection classes are defined
        self.httpPatch.stop()
        self.sm = importutils.import_module(SERVERMANAGER)

    def test_no_servers(self):
        # An empty server list is a fatal configuration error.
        cfg.CONF.set_override('servers', [], 'RESTPROXY')
        self.assertRaises(cfg.Error, servermanager.ServerPool)

    def test_malformed_servers(self):
        # A non-numeric port ('1.1.1.1:a') must be rejected at startup.
        cfg.CONF.set_override('servers', ['1.2.3.4', '1.1.1.1:a'], 'RESTPROXY')
        self.assertRaises(cfg.Error, servermanager.ServerPool)

    def test_ipv6_server_address(self):
        # Bracketed IPv6 addresses are parsed; the stored server address
        # is the bare (unbracketed) IPv6 literal.
        cfg.CONF.set_override(
            'servers', ['[ABCD:EF01:2345:6789:ABCD:EF01:2345:6789]:80'],
            'RESTPROXY')
        s = servermanager.ServerPool()
        self.assertEqual(s.servers[0].server,
                         'ABCD:EF01:2345:6789:ABCD:EF01:2345:6789')

    def test_sticky_cert_fetch_fail(self):
        # A failure fetching the server cert in sticky mode is fatal.
        pl = directory.get_plugin()
        pl.servers.ssl = True
        with mock.patch(
            'ssl.get_server_certificate',
            side_effect=Exception('There is no more entropy in the universe')
        ) as sslgetmock:
            self.assertRaises(
                cfg.Error,
                pl.servers._get_combined_cert_for_server,
                *('example.org', 443)
            )
            sslgetmock.assert_has_calls([mock.call(
                ('example.org', 443), ssl_version=ssl.PROTOCOL_SSLv23)])

    def test_consistency_watchdog_stops_with_0_polling_interval(self):
        pl = directory.get_plugin()
        pl.servers.capabilities = ['consistency']
        self.watch_p.stop()
        with mock.patch('eventlet.sleep') as smock:
            # should return immediately a polling interval of 0
            pl.servers._consistency_watchdog(0)
            self.assertFalse(smock.called)

    def test_consistency_watchdog(self):
        pl = directory.get_plugin()
        pl.servers.capabilities = ['dummy']
        self.watch_p.stop()
        # rest_call raises RemoteRestError to enter the except clause, and
        # LOG.exception raises KeyError so the otherwise-infinite watchdog
        # loop terminates after a single iteration.
        with mock.patch('eventlet.sleep') as smock, \
                mock.patch(
                    SERVERMANAGER + '.ServerPool.rest_call',
                    side_effect=servermanager.RemoteRestError(
                        reason='Failure to trigger except clause.')) \
                as rmock, \
                mock.patch(
                    SERVERMANAGER + '.LOG.exception',
                    side_effect=KeyError('Failure to break loop')) \
                as lmock:
            # should return immediately without consistency capability
            pl.servers._consistency_watchdog()
            self.assertFalse(smock.called)
            pl.servers.capabilities = ['consistency']
            self.assertRaises(KeyError,
                              pl.servers._consistency_watchdog)
            rmock.assert_called_with('GET', '/health', '', {}, [], False)
            self.assertEqual(1, len(lmock.mock_calls))

    def test_file_put_contents(self):
        pl = directory.get_plugin()
        with mock.patch(SERVERMANAGER + '.open', create=True) as omock:
            pl.servers._file_put_contents('somepath', 'contents')
            omock.assert_has_calls([mock.call('somepath', 'w')])
            omock.return_value.__enter__.return_value.assert_has_calls([
                mock.call.write('contents')
            ])

    def test_combine_certs_to_file(self):
        pl = directory.get_plugin()
        with mock.patch(SERVERMANAGER + '.open', create=True) as omock:
            omock.return_value.__enter__().read.return_value = 'certdata'
            pl.servers._combine_certs_to_file(['cert1.pem', 'cert2.pem'],
                                              'combined.pem')
            # mock shared between read and write file handles so the calls
            # are mixed together
            omock.assert_has_calls([
                mock.call('combined.pem', 'w'),
                mock.call('cert1.pem', 'r'),
                mock.call('cert2.pem', 'r'),
            ], any_order=True)
            omock.return_value.__enter__.return_value.assert_has_calls([
                mock.call.read(),
                mock.call.write('certdata'),
                mock.call.read(),
                mock.call.write('certdata')
            ])

    # basic authentication
    def test_auth_header(self):
        cfg.CONF.set_override('server_auth', 'username:pass', 'RESTPROXY')
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value
            rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
            sp.rest_create_network('tenant', 'network')
            # headers dict is the 4th positional arg of request()
            callheaders = rv.request.mock_calls[0][1][3]
            self.assertIn('Authorization', callheaders)
            self.assertNotIn('Cookie', callheaders)
            # base64('username:pass')
            self.assertEqual(callheaders['Authorization'],
                             'Basic dXNlcm5hbWU6cGFzcw==')

    # token based authentication
    def test_auth_token_header(self):
        cfg.CONF.set_override('server_auth', 'fake_token', 'RESTPROXY')
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value
            rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
            sp.rest_create_network('tenant', 'network')
            callheaders = rv.request.mock_calls[0][1][3]
            # a token (no ':') is sent as a session cookie, not basic auth
            self.assertIn('Cookie', callheaders)
            self.assertNotIn('Authorization', callheaders)
            self.assertEqual(callheaders['Cookie'],
                             'session_cookie="fake_token"')

    def test_header_add(self):
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value
            rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
            sp.servers[0].rest_call('GET', '/', headers={'EXTRA-HEADER': 'HI'})
            callheaders = rv.request.mock_calls[0][1][3]
            # verify normal headers weren't mangled
            self.assertIn('Content-type', callheaders)
            self.assertEqual(callheaders['Content-type'],
                             'application/json')
            # verify new header made it in
            self.assertIn('EXTRA-HEADER', callheaders)
            self.assertEqual(callheaders['EXTRA-HEADER'], 'HI')

    def test_capabilities_retrieval(self):
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value.getresponse.return_value
            rv.getheader.return_value = 'HASHHEADER'
            # each server will get different capabilities
            rv.read.side_effect = ['["a","b","c"]', '["b","c","d"]']
            # pool capabilities is union of both
            # normally capabilities should be the same across all servers
            # this only happens in two situations:
            # 1. a server is down
            # 2. during upgrade/downgrade
            self.assertEqual(set(['a', 'b', 'c', 'd']), sp.get_capabilities())
            self.assertEqual(2, rv.read.call_count)
            # the pool should cache after the first call during a short period
            # so no more HTTP calls should be made
            rv.read.side_effect = ['["w","x","y"]', '["x","y","z"]']
            self.assertEqual(set(['a', 'b', 'c', 'd']), sp.get_capabilities())
            self.assertEqual(2, rv.read.call_count)

    def test_capabilities_retrieval_failure(self):
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value.getresponse.return_value
            rv.getheader.return_value = 'HASHHEADER'
            # a failure to parse should result in an empty capability set
            rv.read.return_value = 'XXXXX'
            self.assertEqual([], sp.servers[0].get_capabilities())
            # as capabilities is empty, it should try to update capabilities
            rv.read.side_effect = ['{"a": "b"}', '["b","c","d"]']
            self.assertEqual(set(['a', 'b', 'c', 'd']), sp.get_capabilities())

    def test_reconnect_on_timeout_change(self):
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value
            rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
            sp.servers[0].capabilities = ['keep-alive']
            sp.servers[0].rest_call('GET', '/', timeout=10)
            # even with keep-alive enabled, a change in timeout will trigger
            # a reconnect
            sp.servers[0].rest_call('GET', '/', timeout=75)
            conmock.assert_has_calls([
                mock.call('localhost', 9000, timeout=10),
                mock.call('localhost', 9000, timeout=75),
            ], any_order=True)

    def test_connect_failures(self):
        # a None connection object yields the (0, None, None, None)
        # failure tuple instead of raising
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON, return_value=None):
            resp = sp.servers[0].rest_call('GET', '/')
            self.assertEqual(resp, (0, None, None, None))
        # verify same behavior on ssl class
        sp.servers[0].currentcon = False
        sp.servers[0].ssl = True
        with mock.patch(HTTPSCON, return_value=None):
            resp = sp.servers[0].rest_call('GET', '/')
            self.assertEqual(resp, (0, None, None, None))

    def test_reconnect_cached_connection(self):
        self.skipTest("cached connections are currently disabled because "
                      "their assignment to the servermanager object is not "
                      "thread-safe")
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value
            rv.getresponse.return_value.getheader.return_value = 'HASH'
            sp.servers[0].capabilities = ['keep-alive']
            sp.servers[0].rest_call('GET', '/first')
            # raise an error on re-use to verify reconnect
            # return okay the second time so the reconnect works
            rv.request.side_effect = [httplib.ImproperConnectionState(),
                                      mock.MagicMock()]
            sp.servers[0].rest_call('GET', '/second')
            uris = [c[1][1] for c in rv.request.mock_calls]
            expected = [
                sp.base_uri + '/first',
                sp.base_uri + '/second',
                sp.base_uri + '/second',
            ]
            self.assertEqual(uris, expected)

    def test_no_reconnect_recurse_to_infinity(self):
        self.skipTest("cached connections are currently disabled because "
                      "their assignment to the servermanager object is not "
                      "thread-safe")
        # retry uses recursion when a reconnect is necessary
        # this test makes sure it stops after 1 recursive call
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value
            # hash header must be string instead of mock object
            rv.getresponse.return_value.getheader.return_value = 'HASH'
            sp.servers[0].capabilities = ['keep-alive']
            sp.servers[0].rest_call('GET', '/first')
            # after retrying once, the rest call should raise the
            # exception up
            rv.request.side_effect = httplib.ImproperConnectionState()
            self.assertRaises(httplib.ImproperConnectionState,
                              sp.servers[0].rest_call,
                              *('GET', '/second'))
            # 1 for the first call, 2 for the second with retry
            self.assertEqual(rv.request.call_count, 3)

    def test_socket_error(self):
        # socket timeouts are converted to the failure tuple, not raised
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            conmock.return_value.request.side_effect = socket.timeout()
            resp = sp.servers[0].rest_call('GET', '/')
            self.assertEqual(resp, (0, None, None, None))

    def test_cert_get_fail(self):
        pl = directory.get_plugin()
        pl.servers.ssl = True
        with mock.patch('os.path.exists', return_value=False):
            self.assertRaises(cfg.Error,
                              pl.servers._get_combined_cert_for_server,
                              *('example.org', 443))

    def test_cert_make_dirs(self):
        pl = directory.get_plugin()
        pl.servers.ssl = True
        cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
        # pretend base dir exists, 3 children don't, and host cert does
        with mock.patch('os.path.exists',
                        side_effect=[True, False, False,
                                     False, True]) as exmock, \
                mock.patch('os.makedirs') as makemock, \
                mock.patch(
                    SERVERMANAGER + '.ServerPool._combine_certs_to_file') \
                as combmock:
            # NOTE(review): the original comment here said "will raise error
            # because no certs found", but the assertion below checks a
            # successful result — comment looked copy-pasted from
            # test_no_cert_error; confirm intent.
            self.assertIn(
                'example.org',
                pl.servers._get_combined_cert_for_server('example.org', 443)
            )
            base = cfg.CONF.RESTPROXY.ssl_cert_directory
            hpath = base + '/host_certs/example.org.pem'
            combpath = base + '/combined/example.org.pem'
            combmock.assert_has_calls([mock.call([hpath], combpath)])
            self.assertEqual(exmock.call_count, 5)
            self.assertEqual(makemock.call_count, 3)

    def test_no_cert_error(self):
        pl = directory.get_plugin()
        pl.servers.ssl = True
        cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
        # pretend base dir exists and 3 children do, but host cert doesn't
        with mock.patch(
            'os.path.exists',
            side_effect=[True, True, True, True, False]
        ) as exmock:
            # will raise error because no certs found
            self.assertRaises(
                cfg.Error,
                pl.servers._get_combined_cert_for_server,
                *('example.org', 443)
            )
            self.assertEqual(exmock.call_count, 5)

    def test_action_success(self):
        pl = directory.get_plugin()
        self.assertTrue(pl.servers.action_success((200,)))

    def test_server_failure(self):
        pl = directory.get_plugin()
        self.assertTrue(pl.servers.server_failure((404,)))
        # server failure has an ignore codes option
        self.assertFalse(pl.servers.server_failure((404,),
                                                   ignore_codes=[404]))

    def test_retry_on_unavailable(self):
        pl = directory.get_plugin()
        with mock.patch(SERVERMANAGER + '.ServerProxy.rest_call',
                        return_value=(httplib.SERVICE_UNAVAILABLE,
                                      0, 0, 0)) as srestmock, \
                mock.patch(SERVERMANAGER + '.eventlet.sleep') as tmock:
            # making a call should trigger retries with sleeps in between
            pl.servers.rest_call('GET', '/', '', None, [])
            rest_call = [mock.call('GET', '/', '', None, False,
                                   reconnect=True)]
            rest_call_count = (
                servermanager.HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT + 1)
            srestmock.assert_has_calls(rest_call * rest_call_count)
            sleep_call = [mock.call(
                servermanager.HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL)]
            # should sleep 1 less time than the number of calls
            sleep_call_count = rest_call_count - 1
            tmock.assert_has_calls(sleep_call * sleep_call_count)

    def test_delete_failure_forces_topo_sync(self):
        pl = directory.get_plugin()
        with mock.patch(SERVERMANAGER + '.ServerProxy.rest_call',
                        return_value=(httplib.INTERNAL_SERVER_ERROR,
                                      0, 0, 0)), \
                mock.patch(SERVERMANAGER + '.ServerPool.force_topo_sync',
                           return_value=(False,
                                         servermanager.TOPO_RESPONSE_OK)) \
                as topo_mock:
            # a failed DELETE call should trigger a forced topo_sync
            # with check_ts True
            self.assertRaises(servermanager.RemoteRestError,
                              pl.servers.rest_action,
                              **{'action': 'DELETE', 'resource': '/',
                                 'data': '',
                                 'errstr': "Unable to DELETE query to BCF: %s",
                                 'ignore_codes': []})
            topo_mock.assert_called_once_with(**{'check_ts': True})

    def test_post_failure_forces_topo_sync(self):
        pl = directory.get_plugin()
        with mock.patch(SERVERMANAGER + '.ServerProxy.rest_call',
                        return_value=(httplib.INTERNAL_SERVER_ERROR,
                                      0, 0, 0)), \
                mock.patch(SERVERMANAGER + '.ServerPool.force_topo_sync',
                           return_value=(False,
                                         servermanager.TOPO_RESPONSE_OK)) \
                as topo_mock:
            # a failed POST call should trigger a forced topo_sync
            # with check_ts True
            self.assertRaises(servermanager.RemoteRestError,
                              pl.servers.rest_action,
                              **{'action': 'POST', 'resource': '/', 'data': '',
                                 'errstr': "Unable to POST query to BCF: %s",
                                 'ignore_codes': []})
            topo_mock.assert_called_once_with(**{'check_ts': True})

    def test_topo_sync_failure_does_not_force_topo_sync(self):
        pl = directory.get_plugin()
        with mock.patch(SERVERMANAGER + '.ServerProxy.rest_call',
                        return_value=(httplib.INTERNAL_SERVER_ERROR,
                                      0, 0, 0)), \
                mock.patch(SERVERMANAGER + '.ServerPool.force_topo_sync',
                           return_value=(False,
                                         servermanager.TOPO_RESPONSE_OK)) \
                as topo_mock:
            # a failed POST call for topology path should raise an exception
            # and not call force_topo_sync like other failed rest_action
            self.assertRaises(servermanager.RemoteRestError,
                              pl.servers.rest_action,
                              **{'action': 'POST', 'resource': '/topology',
                                 'data': '',
                                 'errstr': "Unable to perform topo_sync: %s",
                                 'ignore_codes': []})
            topo_mock.assert_not_called()

    def test_not_found_sync_raises_error_without_topology(self):
        pl = directory.get_plugin()
        pl.servers.get_topo_function = None
        with \
            mock.patch(
                SERVERMANAGER + '.ServerProxy.rest_call',
                return_value=(httplib.NOT_FOUND, 0, 0, 0)):
            # making a call should trigger a conflict sync that will
            # error without the topology function set
            self.assertRaises(
                cfg.Error,
                pl.servers.rest_action,
                *('GET', '/', '', None, [])
            )

    def test_no_sync_without_keystone(self):
        pl = directory.get_plugin()
        with \
            mock.patch(SERVERMANAGER + '.ServerPool._update_tenant_cache',
                       return_value=(False)), \
            mock.patch(SERVERMANAGER + '.ServerProxy.rest_call',
                       return_value=(httplib.CONFLICT, 0, 0, 0)) as srestmock:
            # making a call should trigger a conflict sync
            pl.servers.rest_call('GET', '/', '', None, [])
            srestmock.assert_called_once_with(
                'GET', '/', '', None, False, reconnect=True)

    def test_no_send_all_data_without_keystone(self):
        pl = directory.get_plugin()
        with mock.patch(SERVERMANAGER + '.ServerPool._update_tenant_cache',
                        return_value=(False)), \
                mock.patch(SERVERMANAGER + '.ServerPool.force_topo_sync',
                           return_value=(False,
                                         servermanager.TOPO_RESPONSE_OK)) \
                as tmock:
            # making a call should trigger a conflict sync
            # NOTE(review): pl._send_all_data() is called immediately here,
            # so assertRaises receives its return value rather than the
            # callable; any exception would propagate before the assertion.
            # The noqa suggests this is deliberate — confirm.
            self.assertRaises(Exception, pl._send_all_data())  # noqa
            tmock.assert_called_once()

    def test_floating_calls(self):
        pl = directory.get_plugin()
        with mock.patch(SERVERMANAGER + '.ServerPool.rest_action') as ramock:
            body1 = {'id': 'somefloat'}
            body2 = {'name': 'myfl'}
            pl.servers.rest_create_floatingip('tenant', body1)
            pl.servers.rest_update_floatingip('tenant', body2, 'id')
            pl.servers.rest_delete_floatingip('tenant', 'oldid')
            ramock.assert_has_calls([
                mock.call('PUT', '/tenants/tenant/floatingips/somefloat',
                          body1,
                          errstr=u'Unable to create floating IP: %s'),
                mock.call('PUT', '/tenants/tenant/floatingips/id',
                          body2,
                          errstr=u'Unable to update floating IP: %s'),
                mock.call('DELETE', '/tenants/tenant/floatingips/oldid',
                          errstr=u'Unable to delete floating IP: %s')
            ])

    def test_HTTPSConnectionWithValidation_without_cert(self):
        # with no combined cert configured, the socket is wrapped with
        # CERT_NONE (no peer validation)
        con = self.sm.HTTPSConnectionWithValidation(
            'www.example.org', 443, timeout=90)
        con.source_address = '127.0.0.1'
        con.request("GET", "/")
        self.socket_mock.assert_has_calls([mock.call(
            ('www.example.org', 443), 90, '127.0.0.1'
        )])
        self.wrap_mock.assert_has_calls([mock.call(
            self.socket_mock(), None, None, cert_reqs=ssl.CERT_NONE,
            ssl_version=ssl.PROTOCOL_SSLv23
        )])
        self.assertEqual(con.sock, self.wrap_mock())

    def test_HTTPSConnectionWithValidation_with_cert(self):
        # with a combined cert set, validation is required against it
        con = self.sm.HTTPSConnectionWithValidation(
            'www.example.org', 443, timeout=90)
        con.combined_cert = 'SOMECERTS.pem'
        con.source_address = '127.0.0.1'
        con.request("GET", "/")
        self.socket_mock.assert_has_calls([mock.call(
            ('www.example.org', 443), 90, '127.0.0.1'
        )])
        self.wrap_mock.assert_has_calls([mock.call(
            self.socket_mock(), None, None, ca_certs='SOMECERTS.pem',
            cert_reqs=ssl.CERT_REQUIRED,
            ssl_version=ssl.PROTOCOL_SSLv23
        )])
        self.assertEqual(con.sock, self.wrap_mock())

    def test_HTTPSConnectionWithValidation_tunnel(self):
        tunnel_mock = mock.patch.object(
            self.sm.HTTPSConnectionWithValidation,
            '_tunnel').start()
        con = self.sm.HTTPSConnectionWithValidation(
            'www.example.org', 443, timeout=90)
        con.source_address = '127.0.0.1'
        con.set_tunnel('myproxy.local', 3128)
        con.request("GET", "/")
        self.socket_mock.assert_has_calls([mock.call(
            ('www.example.org', 443), 90, '127.0.0.1'
        )])
        self.wrap_mock.assert_has_calls([mock.call(
            self.socket_mock(), None, None, cert_reqs=ssl.CERT_NONE,
            ssl_version=ssl.PROTOCOL_SSLv23
        )])
        # _tunnel() doesn't take any args
        tunnel_mock.assert_has_calls([mock.call()])
        self.assertEqual(con._tunnel_host, 'myproxy.local')
        self.assertEqual(con._tunnel_port, 3128)
        self.assertEqual(con.sock, self.wrap_mock())

    def test_is_unicode_enabled(self):
        """Verify that unicode is enabled only when both conditions are True:

        1. naming_scheme_unicode is True or empty
        2. BCF capabilities include display-name
        :return:
        """
        self.is_unicode_enabled_p.stop()

        def capability_unicode_supported():
            return ['dummy', 'display-name']

        def capability_unicode_unsupported():
            return ['dummy']

        patch_supported = mock.patch(
            SERVER_GET_CAPABILITIES,
            side_effect=capability_unicode_supported)
        patch_unsupported = mock.patch(
            SERVER_GET_CAPABILITIES,
            side_effect=capability_unicode_unsupported)
        # Create a server pool with default naming_scheme_unicode
        # verify default value is true
        sp = servermanager.ServerPool()
        self.assertTrue(cfg.CONF.RESTPROXY.naming_scheme_unicode)
        # config enabled, and unicode is supported on bcf
        patch_supported.start()
        self.assertTrue(sp.is_unicode_enabled())
        patch_supported.stop()
        # config enabled, but unicode is not supported on bcf
        patch_unsupported.start()
        self.assertFalse(sp.is_unicode_enabled())
        patch_unsupported.stop()
        # Recreate the server pool, as the config is read during initialization
        cfg.CONF.set_override('naming_scheme_unicode', False, 'RESTPROXY')
        sp = servermanager.ServerPool()
        # config disabled, though unicode is supported on bcf
        patch_supported.start()
        self.assertFalse(sp.is_unicode_enabled())
        patch_supported.stop()
        # config disabled, and unicode is not supported on bcf
        patch_unsupported.start()
        self.assertFalse(sp.is_unicode_enabled())
        patch_unsupported.stop()
  537. class TestSockets(test_rp.BigSwitchProxyPluginV2TestCase):
  538. def setUp(self):
  539. super(TestSockets, self).setUp()
  540. # http patch must not be running or it will mangle the servermanager
  541. # import where the https connection classes are defined
  542. self.httpPatch.stop()
  543. self.sm = importutils.import_module(SERVERMANAGER)
  544. def test_socket_create_attempt(self):
  545. # exercise the socket creation to make sure it works on both python
  546. # versions
  547. con = self.sm.HTTPSConnectionWithValidation('127.0.0.1', 0, timeout=1)
  548. # if httpcon was created, a connect attempt should raise a socket error
  549. self.assertRaises(socket.error, con.connect)
class HashLockingTests(test_rp.BigSwitchProxyPluginV2TestCase):
    """Tests for the consistency-hash DB lock in consistency_db.HashHandler.

    The lock is represented by a marker written into the ConsistencyHash
    row; unlock() replaces the marker with the holder's lock timestamp.
    """

    def _get_hash_from_handler_db(self, handler):
        # Read the raw hash column for this handler's row directly from the
        # DB so tests can observe lock markers and timestamps.
        with handler.session.begin(subtransactions=True):
            res = (handler.session.query(consistency_db.ConsistencyHash).
                   filter_by(hash_id=handler.hash_id).first())
            return res.hash

    def test_lock_no_initial_record(self):
        handler = consistency_db.HashHandler()
        h1 = handler.lock()
        # lock() request on empty DB should succeed
        self.assertTrue(h1)
        # db should have a lock marker
        self.assertEqual(handler.lock_marker,
                         self._get_hash_from_handler_db(handler))
        # prev_lock_ts must be 0 for initial case
        self.assertEqual(handler.prev_lock_ts, '0')
        # unlock() should clear the lock
        handler.unlock()
        self.assertEqual(handler.lock_ts,
                         self._get_hash_from_handler_db(handler))

    def test_db_duplicate_on_insert(self):
        handler = consistency_db.HashHandler()
        with mock.patch.object(
            handler.session, 'add', side_effect=[db_exc.DBDuplicateEntry, '']
        ) as add_mock:
            handler.lock()
            # duplicate insert failure should result in retry
            self.assertEqual(2, add_mock.call_count)

    def test_lock_check_ts_true_prev_lock_exists(self):
        handler1 = consistency_db.HashHandler()
        h1 = handler1.lock()
        self.assertTrue(h1)
        self.assertEqual(handler1.lock_marker,
                         self._get_hash_from_handler_db(handler1))
        # 2nd thread came in just 10 millisecs after first one, and first one
        # still holds the lock, expired = False
        timestamp_2 = float(handler1.lock_ts) + 10
        handler2 = consistency_db.HashHandler(timestamp_ms=timestamp_2)
        h2 = handler2.lock()
        self.assertFalse(h2)
        self.assertEqual(handler1.lock_ts, handler2.prev_lock_ts)

    def test_lock_check_ts_false_prev_lock_exists(self):
        handler1 = consistency_db.HashHandler()
        h1 = handler1.lock()
        self.assertTrue(h1)
        self.assertEqual(handler1.lock_marker,
                         self._get_hash_from_handler_db(handler1))
        self.assertEqual('0', handler1.prev_lock_ts)
        hh2_ts_hh1_ts_plus_1780 = float(handler1.lock_ts) + 1780
        handler2 = consistency_db.HashHandler(
            hash_id='1', timestamp_ms=hh2_ts_hh1_ts_plus_1780)
        # with check_ts=False the second handler spins waiting for the lock;
        # eventlet.sleep raises to break out of that wait loop after one pass
        with mock.patch(CONSISTENCYDB + '.eventlet.sleep',
                        side_effect=[Exception]) as emock:
            try:
                handler2.lock(check_ts=False)
            except Exception:
                pass
            self.assertEqual(1, emock.call_count)
            self.assertEqual(handler1.lock_ts, handler2.prev_lock_ts)

    def test_lock_check_ts_true_prev_lock_not_expired(self):
        handler1 = consistency_db.HashHandler()
        h1 = handler1.lock()
        self.assertTrue(h1)
        self.assertEqual(handler1.lock_marker,
                         self._get_hash_from_handler_db(handler1))
        handler1.unlock()
        self.assertEqual(handler1.lock_ts,
                         self._get_hash_from_handler_db(handler1))
        # thread 1 has executed the complete lock-unlock cycle
        # thread 2 now tries to get lock with check_ts True
        # TOPO_SYNC_EXPIRED_SECS = 1800
        hh2_ts_under_limit = float(handler1.lock_ts) + 1000
        handler2 = consistency_db.HashHandler(hash_id=1,
                                              timestamp_ms=hh2_ts_under_limit)
        h2 = handler2.lock()
        self.assertFalse(h2)
        self.assertEqual(handler1.lock_ts, handler2.prev_lock_ts)

    def test_lock_check_ts_true_prev_lock_expired(self):
        handler1 = consistency_db.HashHandler()
        h1 = handler1.lock()
        self.assertTrue(h1)
        self.assertEqual(handler1.lock_marker,
                         self._get_hash_from_handler_db(handler1))
        handler1.unlock()
        self.assertEqual(handler1.lock_ts,
                         self._get_hash_from_handler_db(handler1))
        # thread 1 has executed the complete lock-unlock cycle
        # thread 2 now tries to get lock with check_ts True
        # TOPO_SYNC_EXPIRED_SECS = 1 for testing
        time.sleep(1)
        handler2 = consistency_db.HashHandler()
        # only for testing
        consistency_db.TOPO_SYNC_EXPIRED_SECS = 1
        h2 = handler2.lock()
        self.assertTrue(h2)
        self.assertEqual(handler1.lock_ts, handler2.prev_lock_ts)

    def test_lock_check_ts_false_prev_lock_not_expired(self):
        handler1 = consistency_db.HashHandler()
        h1 = handler1.lock()
        self.assertTrue(h1)
        self.assertEqual(handler1.lock_marker,
                         self._get_hash_from_handler_db(handler1))
        handler1.unlock()
        self.assertEqual(handler1.lock_ts,
                         self._get_hash_from_handler_db(handler1))
        # thread 1 has executed the complete lock-unlock cycle
        # thread 2 now tries to get lock with check_ts True
        # TOPO_SYNC_EXPIRED_SECS = 1800
        hh2_ts_under_limit = float(handler1.lock_ts) + 1000
        handler2 = consistency_db.HashHandler(hash_id=1,
                                              timestamp_ms=hh2_ts_under_limit)
        h2 = handler2.lock(check_ts=False)
        self.assertTrue(h2)
        self.assertEqual(handler1.lock_ts, handler2.prev_lock_ts)

    def test_lock_check_ts_false_lock_clash(self):
        # 2 threads try to lock the DB at the same time when check_ts is False
        # and no thread holds the lock
        handler1 = consistency_db.HashHandler()
        h1 = handler1.lock()
        self.assertTrue(h1)
        handler1.unlock()
        self.assertEqual(handler1.lock_ts,
                         self._get_hash_from_handler_db(handler1))
        handler2 = consistency_db.HashHandler()
        with mock.patch.object(handler2._FACADE, 'get_engine') as ge, \
                mock.patch(CONSISTENCYDB + '.eventlet.sleep',
                           side_effect=[None]) as emock:
            conn = ge.return_value.begin.return_value.__enter__.return_value
            firstresult = mock.Mock()
            # a rowcount of 0 simulates the effect of another db client
            # updating the same record the handler was trying to update
            firstresult.rowcount = 0
            secondresult = mock.Mock()
            secondresult.rowcount = 1
            conn.execute.side_effect = [firstresult, secondresult]
            h2 = handler2.lock(check_ts=False)
            self.assertTrue(h2)
            # update should have been called again after the failure
            self.assertEqual(2, conn.execute.call_count)
            # sleep should have been called once, during first result failure
            emock.assert_called_once()

    def test_clear_lock(self):
        handler = consistency_db.HashHandler()
        handler.lock()  # lock the table
        self.assertEqual(handler.lock_marker,
                         self._get_hash_from_handler_db(handler))
        handler.unlock()
        self.assertEqual(handler.lock_ts,
                         self._get_hash_from_handler_db(handler))

    def test_handler_already_holding_lock(self):
        handler = consistency_db.HashHandler()
        handler.lock()  # lock the table
        with mock.patch.object(handler._FACADE, 'get_engine') as ge:
            handler.lock()
            # get engine should not have been called because no update
            # should have been made
            self.assertFalse(ge.called)
            # NOTE(review): assertTrue takes the second argument as the
            # failure message, so this never compares the two values —
            # assertEqual was probably intended; confirm.
            self.assertTrue(handler.lock_ts, handler.prev_lock_ts)

    def test_unlock_set_prev_ts(self):
        handler1 = consistency_db.HashHandler()
        handler1.lock()
        self.assertEqual(handler1.lock_marker,
                         self._get_hash_from_handler_db(handler1))
        handler1.unlock()
        # first lock-unlock is done. now comes a second call with
        # check_ts = False
        handler2 = consistency_db.HashHandler()
        h2 = handler2.lock(check_ts=False)
        self.assertTrue(h2)
        self.assertEqual(handler1.lock_ts, handler2.prev_lock_ts)
        # now assuming exception occured during topo_sync, call
        # handler2.unlock(set_prev_ts=True)
        handler2.unlock(set_prev_ts=True)
        # hash in consistency_db will be previous hash_handler's lock_ts
        self.assertEqual(handler1.lock_ts,
                         self._get_hash_from_handler_db(handler2))
        # try unlock again on the same handler2 - it should have no effect
        # as unlock(set_prev_ts) removed TOPO_SYNC marker. this simulates
        # unlock() being called in the finally block of force_topo_sync()
        handler2.unlock()
        self.assertEqual(handler1.lock_ts,
                         self._get_hash_from_handler_db(handler2))