OpenStack Compute (Nova)

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Fixtures for Nova tests."""

from __future__ import absolute_import

import collections
from contextlib import contextmanager
import copy
import logging as std_logging
import os
import random
import warnings

import fixtures
import mock
from neutronclient.common import exceptions as neutron_client_exc
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import exception as db_exc
import oslo_messaging as messaging
from oslo_messaging import conffixture as messaging_conffixture
from oslo_privsep import daemon as privsep_daemon
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
from requests import adapters
from sqlalchemy import exc as sqla_exc
from wsgi_intercept import interceptor

from nova.api.openstack.compute import tenant_networks
from nova.api.openstack import wsgi_app
from nova.api import wsgi
from nova.compute import multi_cell_list
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova.db import migration
from nova.db.sqlalchemy import api as session
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova import quota as nova_quota
from nova import rc_fields
from nova import rpc
from nova import service
from nova.tests.functional.api import client

_TRUE_VALUES = ('True', 'true', '1', 'yes')

CONF = cfg.CONF
DB_SCHEMA = {'main': "", 'api': ""}
SESSION_CONFIGURED = False


class ServiceFixture(fixtures.Fixture):
    """Run a service as a test fixture."""

    def __init__(self, name, host=None, cell=None, **kwargs):
        # If not otherwise specified, the host will default to the
        # name of the service. Some things like aggregates care that
        # this is stable.
        host = host or name
        kwargs.setdefault('host', host)
        kwargs.setdefault('binary', 'nova-%s' % name)
        self.cell = cell
        self.kwargs = kwargs

    def setUp(self):
        super(ServiceFixture, self).setUp()
        self.ctxt = context.get_admin_context()
        if self.cell:
            context.set_target_cell(self.ctxt, self.cell)
        with mock.patch('nova.context.get_admin_context',
                        return_value=self.ctxt):
            self.service = service.Service.create(**self.kwargs)
        self.service.start()
        self.addCleanup(self.service.kill)
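

# A minimal usage sketch (an illustration, not part of the fixture API):
# `self` is assumed to be a test case with fixtures support, and `cell1`
# is a hypothetical CellMapping from the test's setup.
#
#     compute = self.useFixture(ServiceFixture('compute', cell=cell1))
#     # The host defaults to the service name, so it is stable:
#     self.assertEqual('compute', compute.service.host)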


class NullHandler(std_logging.Handler):
    """custom default NullHandler to attempt to format the record.

    Used in conjunction with
    log_fixture.get_logging_handle_error_fixture to detect formatting
    errors in debug level logs without saving the logs.
    """

    def handle(self, record):
        self.format(record)

    def emit(self, record):
        pass

    def createLock(self):
        self.lock = None


class StandardLogging(fixtures.Fixture):
    """Setup Logging redirection for tests.

    There are a number of things we want to handle with logging in tests:

    * Redirect the logging to somewhere that we can test or dump it later.

    * Ensure that as many DEBUG messages as possible are actually
      executed, to ensure they are actually syntactically valid (they
      often have not been).

    * Ensure that we create useful output for tests that doesn't
      overwhelm the testing system (which means we can't capture the
      100 MB of debug logging on every run).

    To do this we create a logger fixture at the root level, which
    defaults to INFO and create a Null Logger at DEBUG which lets
    us execute log messages at DEBUG but not keep the output.

    To support local debugging OS_DEBUG=True can be set in the
    environment, which will print out the full debug logging.

    There are also a set of overrides for particularly verbose
    modules to be even less than INFO.
    """

    def setUp(self):
        super(StandardLogging, self).setUp()

        # set root logger to debug
        root = std_logging.getLogger()
        root.setLevel(std_logging.DEBUG)

        # supports collecting debug level for local runs
        if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
            level = std_logging.DEBUG
        else:
            level = std_logging.INFO

        # Collect logs
        fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
        self.logger = self.useFixture(
            fixtures.FakeLogger(format=fs, level=None))
        # TODO(sdague): why can't we send level through the fake
        # logger? Tests prove that it breaks, but it's worth getting
        # to the bottom of.
        root.handlers[0].setLevel(level)

        if level > std_logging.DEBUG:
            # Just attempt to format debug level logs, but don't save them
            handler = NullHandler()
            self.useFixture(
                fixtures.LogHandler(handler, nuke_handlers=False))
            handler.setLevel(std_logging.DEBUG)

            # Don't log every single DB migration step
            std_logging.getLogger(
                'migrate.versioning.api').setLevel(std_logging.WARNING)

        # At times we end up calling back into main() functions in
        # testing. This has the possibility of calling logging.setup
        # again, which completely unwinds the logging capture we've
        # created here. Once we've setup the logging the way we want,
        # disable the ability for the test to change this.
        def fake_logging_setup(*args):
            pass

        self.useFixture(
            fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup))


class OutputStreamCapture(fixtures.Fixture):
    """Capture output streams during tests.

    This fixture captures errant printing to stderr / stdout during
    the tests and lets us see those streams at the end of the test
    runs instead. Useful to see what was happening during failed
    tests.
    """

    def setUp(self):
        super(OutputStreamCapture, self).setUp()
        if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
            self.out = self.useFixture(fixtures.StringStream('stdout'))
            self.useFixture(
                fixtures.MonkeyPatch('sys.stdout', self.out.stream))
        if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
            self.err = self.useFixture(fixtures.StringStream('stderr'))
            self.useFixture(
                fixtures.MonkeyPatch('sys.stderr', self.err.stream))

    @property
    def stderr(self):
        return self.err._details["stderr"].as_text()

    @property
    def stdout(self):
        return self.out._details["stdout"].as_text()


class Timeout(fixtures.Fixture):
    """Setup per test timeouts.

    In order to avoid test deadlocks we support setting up a test
    timeout parameter read from the environment. In almost all
    cases where the timeout is reached this means a deadlock.

    A class level TIMEOUT_SCALING_FACTOR also exists, which allows
    extremely long tests to specify they need more time.
    """

    def __init__(self, timeout, scaling=1):
        super(Timeout, self).__init__()
        try:
            self.test_timeout = int(timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            self.test_timeout = 0
        if scaling >= 1:
            self.test_timeout *= scaling
        else:
            raise ValueError('scaling value must be >= 1')

    def setUp(self):
        super(Timeout, self).setUp()
        if self.test_timeout > 0:
            self.useFixture(
                fixtures.Timeout(self.test_timeout, gentle=True))
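

# A usage sketch (an illustration of how test base classes typically wire
# this up; OS_TEST_TIMEOUT is assumed to be set by the test runner):
#
#     timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
#     self.useFixture(Timeout(timeout, scaling=self.TIMEOUT_SCALING_FACTOR))
#
# An invalid value in the environment simply disables the timeout rather
# than failing the run, per the ValueError handling above.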


class DatabasePoisonFixture(fixtures.Fixture):
    def setUp(self):
        super(DatabasePoisonFixture, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'oslo_db.sqlalchemy.enginefacade._TransactionFactory.'
            '_create_session',
            self._poison_configure))

    def _poison_configure(self, *a, **k):
        # If you encounter this error, you might be tempted to just not
        # inherit from NoDBTestCase. Bug #1568414 fixed a few hundred of
        # these errors, and not once was that the correct solution.
        # Instead, consider some of the following tips (when applicable):
        #
        # - mock at the object layer rather than the db layer, for example:
        #       nova.objects.instance.Instance.get
        #           vs.
        #       nova.db.instance_get
        #
        # - mock at the api layer rather than the object layer, for example:
        #       nova.api.openstack.common.get_instance
        #           vs.
        #       nova.objects.instance.Instance.get
        #
        # - mock code that requires the database but is otherwise tangential
        #   to the code you're testing (for example: EventReporterStub)
        #
        # - peruse some of the other database poison warning fixes here:
        #   https://review.openstack.org/#/q/topic:bug/1568414
        raise Exception('This test uses methods that set internal oslo_db '
                        'state, but it does not claim to use the database. '
                        'This will conflict with the setup of tests that '
                        'do use the database and cause failures later.')


class SingleCellSimple(fixtures.Fixture):
    """Setup the simplest cells environment possible

    This should be used when you do not care about multiple cells,
    or having a "real" environment for tests that should not care.
    This will give you a single cell, and map any and all accesses
    to that cell (even things that would go to cell0).

    If you need to distinguish between cell0 and cellN, then you
    should use the CellDatabases fixture.

    If instances should appear to still be in scheduling state, pass
    instances_created=False to init.
    """

    def __init__(self, instances_created=True):
        self.instances_created = instances_created

    def setUp(self):
        super(SingleCellSimple, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.CellMappingList._get_all_from_db',
            self._fake_cell_list))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.CellMappingList._get_by_project_id_from_db',
            self._fake_cell_list))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.CellMapping._get_by_uuid_from_db',
            self._fake_cell_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.HostMapping._get_by_host_from_db',
            self._fake_hostmapping_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.InstanceMapping._get_by_instance_uuid_from_db',
            self._fake_instancemapping_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.InstanceMappingList._get_by_instance_uuids_from_db',
            self._fake_instancemapping_get_uuids))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.InstanceMapping._save_in_db',
            self._fake_instancemapping_get_save))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.target_cell',
            self._fake_target_cell))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.set_target_cell',
            lambda c, m: None))

    def _fake_hostmapping_get(self, *args):
        return {'id': 1,
                'updated_at': None,
                'created_at': None,
                'host': 'host1',
                'cell_mapping': self._fake_cell_list()[0]}

    def _fake_instancemapping_get_common(self, instance_uuid):
        return {
            'id': 1,
            'updated_at': None,
            'created_at': None,
            'instance_uuid': instance_uuid,
            'cell_id': (self.instances_created and 1 or None),
            'project_id': 'project',
            'cell_mapping': (
                self.instances_created and self._fake_cell_get() or None),
        }

    def _fake_instancemapping_get_save(self, *args):
        return self._fake_instancemapping_get_common(args[-2])

    def _fake_instancemapping_get(self, *args):
        return self._fake_instancemapping_get_common(args[-1])

    def _fake_instancemapping_get_uuids(self, *args):
        return [self._fake_instancemapping_get(uuid)
                for uuid in args[-1]]

    def _fake_cell_get(self, *args):
        return self._fake_cell_list()[0]

    def _fake_cell_list(self, *args):
        return [{'id': 1,
                 'updated_at': None,
                 'created_at': None,
                 'uuid': uuidsentinel.cell1,
                 'name': 'onlycell',
                 'transport_url': 'fake://nowhere/',
                 'database_connection': 'sqlite:///',
                 'disabled': False}]

    @contextmanager
    def _fake_target_cell(self, context, target_cell):
        # NOTE(danms): Just pass through the context without actually
        # targeting anything.
        yield context
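

# A usage sketch: tests that don't care about multi-cell layouts just
# install the fixture and proceed as if there were one implicit cell;
# every mapping lookup and target_cell() call resolves to 'onlycell'.
#
#     self.useFixture(SingleCellSimple())
#     # ...boot/list instances; all cell accesses map to the single cell.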


class CheatingSerializer(rpc.RequestContextSerializer):
    """A messaging.RequestContextSerializer that helps with cells.

    Our normal serializer does not pass in the context like db_connection
    and mq_connection, for good reason. We don't really want/need to
    force a remote RPC server to use our values for this. However,
    during unit and functional tests, since we're all in the same
    process, we want cell-targeted RPC calls to preserve these values.
    Unless we had per-service config and database layer state for
    the fake services we start, this is a reasonable cheat.
    """

    def serialize_context(self, context):
        """Serialize context with the db_connection inside."""
        values = super(CheatingSerializer, self).serialize_context(context)
        values['db_connection'] = context.db_connection
        values['mq_connection'] = context.mq_connection
        return values

    def deserialize_context(self, values):
        """Deserialize context and honor db_connection if present."""
        ctxt = super(CheatingSerializer, self).deserialize_context(values)
        ctxt.db_connection = values.pop('db_connection', None)
        ctxt.mq_connection = values.pop('mq_connection', None)
        return ctxt


class CellDatabases(fixtures.Fixture):
    """Create per-cell databases for testing.

    How to use::

        fix = CellDatabases()
        fix.add_cell_database('connection1')
        fix.add_cell_database('connection2', default=True)
        self.useFixture(fix)

    Passing default=True tells the fixture which database should
    be given to code that doesn't target a specific cell.
    """

    def __init__(self):
        self._ctxt_mgrs = {}
        self._last_ctxt_mgr = None
        self._default_ctxt_mgr = None

        # NOTE(danms): Use a ReaderWriterLock to synchronize our
        # global database muckery here. If we change global db state
        # to point to a cell, we need to take an exclusive lock to
        # prevent any other calls to get_context_manager() until we
        # reset to the default.
        self._cell_lock = lockutils.ReaderWriterLock()

    def _cache_schema(self, connection_str):
        # NOTE(melwitt): See the regular Database fixture for why
        # we do this.
        global DB_SCHEMA
        if not DB_SCHEMA['main']:
            ctxt_mgr = self._ctxt_mgrs[connection_str]
            engine = ctxt_mgr.writer.get_engine()
            conn = engine.connect()
            migration.db_sync(database='main')
            DB_SCHEMA['main'] = "".join(
                line for line in conn.connection.iterdump())
            engine.dispose()

    @contextmanager
    def _wrap_target_cell(self, context, cell_mapping):
        # NOTE(danms): This method is responsible for switching global
        # database state in a safe way such that code that doesn't
        # know anything about cell targeting (i.e. compute node code)
        # can continue to operate when called from something that has
        # targeted a specific cell. In order to make this safe from a
        # dining-philosopher-style deadlock, we need to be able to
        # support multiple threads talking to the same cell at the
        # same time and potentially recursion within the same thread
        # from code that would otherwise be running on separate nodes
        # in real life, but where we're actually recursing in the
        # tests.
        #
        # The basic logic here is:
        #  1. Grab a reader lock to see if the state is already pointing at
        #     the cell we want. If it is, we can yield and return without
        #     altering the global state further. The read lock ensures that
        #     global state won't change underneath us, and multiple threads
        #     can be working at the same time, as long as they are looking
        #     for the same cell.
        #  2. If we do need to change the global state, grab a writer lock
        #     to make that change, which assumes that nothing else is
        #     looking at a cell right now. We do only non-schedulable
        #     things while holding that lock to avoid the deadlock
        #     mentioned above.
        #  3. We then re-lock with a reader lock just as step #1 above and
        #     yield to do the actual work. We can do schedulable things
        #     here and not exclude other threads from making progress.
        #     If an exception is raised, we capture that and save it.
        #  4. If we changed state in #2, we need to change it back. So we
        #     grab a writer lock again and do that.
        #  5. Finally, if an exception was raised in #3 while state was
        #     changed, we raise it to the caller.
        if cell_mapping:
            desired = self._ctxt_mgrs[cell_mapping.database_connection]
        else:
            desired = self._default_ctxt_mgr

        with self._cell_lock.read_lock():
            if self._last_ctxt_mgr == desired:
                with self._real_target_cell(context, cell_mapping) as c:
                    yield c
                    return

        raised_exc = None

        with self._cell_lock.write_lock():
            if cell_mapping is not None:
                # This assumes the next local DB access is the same cell
                # that was targeted last time.
                self._last_ctxt_mgr = desired

        with self._cell_lock.read_lock():
            if self._last_ctxt_mgr != desired:
                # NOTE(danms): This is unlikely to happen, but it's possible
                # another waiting writer changed the state between us letting
                # it go and re-acquiring as a reader. If lockutils supported
                # upgrading and downgrading locks, this wouldn't be a
                # problem. Regardless, assert that it is still as we left it
                # here so we don't hit the wrong cell. If this becomes a
                # problem, we just need to retry the write section above
                # until we land here with the cell we want.
                raise RuntimeError('Global DB state changed underneath us')
            try:
                with self._real_target_cell(context,
                                            cell_mapping) as ccontext:
                    yield ccontext
            except Exception as exc:
                raised_exc = exc

        with self._cell_lock.write_lock():
            # Once we have returned from the context, we need
            # to restore the default context manager for any
            # subsequent calls
            self._last_ctxt_mgr = self._default_ctxt_mgr

        if raised_exc:
            raise raised_exc

    def _wrap_create_context_manager(self, connection=None):
        ctxt_mgr = self._ctxt_mgrs[connection]
        return ctxt_mgr

    def _wrap_get_context_manager(self, context):
        try:
            # If already targeted, we can proceed without a lock
            if context.db_connection:
                return context.db_connection
        except AttributeError:
            # Unit tests with None, FakeContext, etc
            pass

        # NOTE(melwitt): This is a hack to try to deal with
        # local accesses i.e. non target_cell accesses.
        with self._cell_lock.read_lock():
            # FIXME(mriedem): This is actually misleading and means we don't
            # catch things like bug 1717000 where a context should be
            # targeted to a cell but it's not, and the fixture here just
            # returns the last targeted context that was used.
            return self._last_ctxt_mgr

    def _wrap_get_server(self, target, endpoints, serializer=None):
        """Mirror rpc.get_server() but with our special sauce."""
        serializer = CheatingSerializer(serializer)
        return messaging.get_rpc_server(rpc.TRANSPORT,
                                        target,
                                        endpoints,
                                        executor='eventlet',
                                        serializer=serializer)

    def _wrap_get_client(self, target, version_cap=None, serializer=None,
                         call_monitor_timeout=None):
        """Mirror rpc.get_client() but with our special sauce."""
        serializer = CheatingSerializer(serializer)
        return messaging.RPCClient(
            rpc.TRANSPORT,
            target,
            version_cap=version_cap,
            serializer=serializer,
            call_monitor_timeout=call_monitor_timeout)

    def add_cell_database(self, connection_str, default=False):
        """Add a cell database to the fixture.

        :param connection_str: An identifier used to represent the
            connection string for this database. It should match the
            database_connection field in the corresponding CellMapping.
        """
        # NOTE(danms): Create a new context manager for the cell, which
        # will house the sqlite:// connection for this cell's in-memory
        # database. Store/index it by the connection string, which is
        # how we identify cells in CellMapping.
        ctxt_mgr = session.create_context_manager()
        self._ctxt_mgrs[connection_str] = ctxt_mgr

        # NOTE(melwitt): The first DB access through service start is
        # local so this initializes _last_ctxt_mgr for that and needs
        # to be a compute cell.
        self._last_ctxt_mgr = ctxt_mgr

        # NOTE(danms): Record which context manager should be the default
        # so we can restore it when we return from target-cell contexts.
        # If none has been provided yet, store the current one in case
        # no default is ever specified.
        if self._default_ctxt_mgr is None or default:
            self._default_ctxt_mgr = ctxt_mgr

        def get_context_manager(context):
            return ctxt_mgr

        # NOTE(danms): This is a temporary MonkeyPatch just to get
        # a new database created with the schema we need and the
        # context manager for it stashed.
        with fixtures.MonkeyPatch(
                'nova.db.sqlalchemy.api.get_context_manager',
                get_context_manager):
            self._cache_schema(connection_str)
            engine = ctxt_mgr.writer.get_engine()
            engine.dispose()
            conn = engine.connect()
            conn.connection.executescript(DB_SCHEMA['main'])

    def setUp(self):
        super(CellDatabases, self).setUp()
        self.addCleanup(self.cleanup)
        self._real_target_cell = context.target_cell

        # NOTE(danms): These context managers are in place for the
        # duration of the test (unlike the temporary ones above) and
        # provide the actual "runtime" switching of connections for us.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.db.sqlalchemy.api.create_context_manager',
            self._wrap_create_context_manager))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.db.sqlalchemy.api.get_context_manager',
            self._wrap_get_context_manager))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.target_cell',
            self._wrap_target_cell))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.rpc.get_server',
            self._wrap_get_server))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.rpc.get_client',
            self._wrap_get_client))

    def cleanup(self):
        for ctxt_mgr in self._ctxt_mgrs.values():
            engine = ctxt_mgr.writer.get_engine()
            engine.dispose()


class Database(fixtures.Fixture):
    def __init__(self, database='main', connection=None):
        """Create a database fixture.

        :param database: The type of database, 'main', or 'api'
        :param connection: The connection string to use
        """
        super(Database, self).__init__()
        # NOTE(pkholkin): oslo_db.enginefacade is configured in tests the
        # same way as it is done for any other service that uses db
        global SESSION_CONFIGURED
        if not SESSION_CONFIGURED:
            session.configure(CONF)
            SESSION_CONFIGURED = True
        self.database = database
        if database == 'main':
            if connection is not None:
                ctxt_mgr = session.create_context_manager(
                    connection=connection)
                self.get_engine = ctxt_mgr.writer.get_engine
            else:
                self.get_engine = session.get_engine
        elif database == 'api':
            self.get_engine = session.get_api_engine

    def _cache_schema(self):
        global DB_SCHEMA
        if not DB_SCHEMA[self.database]:
            engine = self.get_engine()
            conn = engine.connect()
            migration.db_sync(database=self.database)
            DB_SCHEMA[self.database] = "".join(
                line for line in conn.connection.iterdump())
            engine.dispose()

    def cleanup(self):
        engine = self.get_engine()
        engine.dispose()

    def reset(self):
        self._cache_schema()
        engine = self.get_engine()
        engine.dispose()
        conn = engine.connect()
        conn.connection.executescript(DB_SCHEMA[self.database])

    def setUp(self):
        super(Database, self).setUp()
        self.reset()
        self.addCleanup(self.cleanup)
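

# A usage sketch: a test that needs both schemas typically installs one
# fixture per database type.
#
#     self.useFixture(Database(database='main'))
#     self.useFixture(Database(database='api'))
#
# Note the design choice above: the schema dump is cached in the global
# DB_SCHEMA after the first migration run, so subsequent fixtures rebuild
# the in-memory sqlite database with executescript() instead of re-running
# the full migration chain, which keeps per-test setup fast.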


class DatabaseAtVersion(fixtures.Fixture):
    def __init__(self, version, database='main'):
        """Create a database fixture.

        :param version: Max version to sync to (or None for current)
        :param database: The type of database, 'main', 'api'
        """
        super(DatabaseAtVersion, self).__init__()
        self.database = database
        self.version = version
        if database == 'main':
            self.get_engine = session.get_engine
        elif database == 'api':
            self.get_engine = session.get_api_engine

    def cleanup(self):
        engine = self.get_engine()
        engine.dispose()

    def reset(self):
        engine = self.get_engine()
        engine.dispose()
        engine.connect()
        migration.db_sync(version=self.version, database=self.database)

    def setUp(self):
        super(DatabaseAtVersion, self).setUp()
        self.reset()
        self.addCleanup(self.cleanup)


class DefaultFlavorsFixture(fixtures.Fixture):
    def setUp(self):
        super(DefaultFlavorsFixture, self).setUp()
        ctxt = context.get_admin_context()
        defaults = {'rxtx_factor': 1.0, 'disabled': False, 'is_public': True,
                    'ephemeral_gb': 0, 'swap': 0}
        extra_specs = {
            "hw:mem_page_size": "2048",
            "hw:cpu_policy": "dedicated"
        }
        default_flavors = [
            objects.Flavor(context=ctxt, memory_mb=512, vcpus=1,
                           root_gb=1, flavorid='1', name='m1.tiny',
                           **defaults),
            objects.Flavor(context=ctxt, memory_mb=2048, vcpus=1,
                           root_gb=20, flavorid='2', name='m1.small',
                           **defaults),
            objects.Flavor(context=ctxt, memory_mb=4096, vcpus=2,
                           root_gb=40, flavorid='3', name='m1.medium',
                           **defaults),
            objects.Flavor(context=ctxt, memory_mb=8192, vcpus=4,
                           root_gb=80, flavorid='4', name='m1.large',
                           **defaults),
            objects.Flavor(context=ctxt, memory_mb=16384, vcpus=8,
                           root_gb=160, flavorid='5', name='m1.xlarge',
                           **defaults),
            objects.Flavor(context=ctxt, memory_mb=512, vcpus=1,
                           root_gb=1, flavorid='6', name='m1.tiny.specs',
                           extra_specs=extra_specs, **defaults),
        ]
        for flavor in default_flavors:
            flavor.create()


class RPCFixture(fixtures.Fixture):
    def __init__(self, *exmods):
        super(RPCFixture, self).__init__()
        self.exmods = []
        self.exmods.extend(exmods)
        self._buses = {}

    def _fake_create_transport(self, url):
        # FIXME(danms): Right now, collapse all connections
        # to a single bus. This is how our tests expect things
        # to work. When the tests are fixed, this fixture can
        # support simulating multiple independent buses, and this
        # hack should be removed.
        url = None

        # NOTE(danms): This will be called with a non-None url by
        # cells-aware code that is requesting to contact something on
        # one of the many transports we're multiplexing here.
        if url not in self._buses:
            exmods = rpc.get_allowed_exmods()
            self._buses[url] = messaging.get_rpc_transport(
                CONF,
                url=url,
                allowed_remote_exmods=exmods)
        return self._buses[url]

    def setUp(self):
        super(RPCFixture, self).setUp()
        self.addCleanup(rpc.cleanup)
        rpc.add_extra_exmods(*self.exmods)
        self.addCleanup(rpc.clear_extra_exmods)
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_url = 'fake:/'
        self.useFixture(self.messaging_conf)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.rpc.create_transport', self._fake_create_transport))
        # NOTE(danms): Execute the init with get_transport_url() as None,
        # instead of the parsed TransportURL(None) so that we can cache
        # it as it will be called later if the default is requested by
        # one of our mq-switching methods.
        with mock.patch('nova.rpc.get_transport_url') as mock_gtu:
            mock_gtu.return_value = None
            rpc.init(CONF)


class WarningsFixture(fixtures.Fixture):
    """Filters out warnings during test runs."""

    def setUp(self):
        super(WarningsFixture, self).setUp()
        # NOTE(sdague): Make deprecation warnings only happen once.
        # Otherwise this gets kind of crazy given the way that upstream
        # python libs use this.
        warnings.simplefilter("once", DeprecationWarning)
        warnings.filterwarnings(
            'ignore',
            message='With-statements now directly support'
                    ' multiple context managers')

        # NOTE(sdague): nova does not use pkg_resources directly, this
        # is all very long standing deprecations about other tools
        # using it. None of this is useful to Nova development.
        warnings.filterwarnings('ignore',
                                module='pkg_resources')

        # NOTE(sdague): this remains an unresolved item around the way
        # forward on is_admin, the deprecation is definitely really
        # premature.
        warnings.filterwarnings(
            'ignore',
            message='Policy enforcement is depending on the value of '
                    'is_admin. This key is deprecated. Please update your '
                    'policy file to use the standard policy values.')

        # NOTE(sdague): mox3 is on life support, don't really care
        # about any deprecations coming from it
        warnings.filterwarnings('ignore',
                                module='mox3.mox')

        # NOTE(gibi): we can remove this once we get rid of Mox in nova
        warnings.filterwarnings('ignore',
                                message="Using class 'MoxStubout'")

        # NOTE(mriedem): Ignore scope check UserWarnings from oslo.policy.
        warnings.filterwarnings('ignore',
                                message="Policy .* failed scope check",
                                category=UserWarning)

        # NOTE(gibi): The UUIDFields emits a warning if the value is not a
        # valid UUID. Let's escalate that to an exception in the test to
        # prevent adding violations.
        warnings.filterwarnings('error', message=".*invalid UUID.*")

        # NOTE(mriedem): Avoid adding anything which tries to convert an
        # object to a primitive which jsonutils.to_primitive() does not
        # know how to handle (or isn't given a fallback callback).
        warnings.filterwarnings(
            'error',
            message="Cannot convert <oslo_db.sqlalchemy.enginefacade"
                    "._Default object at ",
            category=UserWarning)

        # TODO(mriedem): Change (or remove) this SAWarning to an error once
        # https://bugs.launchpad.net/oslo.db/+bug/1814199 is fixed.
        warnings.filterwarnings(
            'ignore', message='Evaluating non-mapped column expression',
            category=sqla_exc.SAWarning)

        # TODO(jangutter): Change (or remove) this to an error during the
        # Train cycle when the os-vif port profile is no longer used.
        warnings.filterwarnings(
            'ignore',
            message=".* 'VIFPortProfileOVSRepresentor' .* is deprecated",
            category=PendingDeprecationWarning)

        # TODO(mriedem): Change (or remove) this DeprecationWarning once
        # https://bugs.launchpad.net/sqlalchemy-migrate/+bug/1814288 is
        # fixed.
        warnings.filterwarnings(
            'ignore', message=r'inspect\.getargspec\(\) is deprecated',
            category=DeprecationWarning,
            module='migrate.versioning.script.py')

        self.addCleanup(warnings.resetwarnings)


class ConfPatcher(fixtures.Fixture):
    """Fixture to patch and restore global CONF.

    This also resets overrides for everything that is patched during
    its teardown.
    """

    def __init__(self, **kwargs):
        """Constructor

        :params group: if specified all config options apply to that group.

        :params **kwargs: the rest of the kwargs are processed as a
            set of key/value pairs to be set as configuration override.
        """
        super(ConfPatcher, self).__init__()
        self.group = kwargs.pop('group', None)
        self.args = kwargs

    def setUp(self):
        super(ConfPatcher, self).setUp()
        for k, v in self.args.items():
            self.addCleanup(CONF.clear_override, k, self.group)
            CONF.set_override(k, v, self.group)
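

# A usage sketch (the option names here are only illustrative):
#
#     self.useFixture(ConfPatcher(host='fake-host'))
#     self.useFixture(ConfPatcher(group='api', some_option='value'))
#
# Each override is registered with a matching clear_override cleanup, so
# the global CONF is restored no matter how the test exits.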


class OSAPIFixture(fixtures.Fixture):
    """Create an OS API server as a fixture.

    This spawns an OS API server as a fixture in a new greenthread in
    the current test. The fixture has a .api parameter which is a
    simple rest client that can communicate with it.

    This fixture is extremely useful for testing REST responses
    through the WSGI stack easily in functional tests.

    Usage:

        api = self.useFixture(fixtures.OSAPIFixture()).api
        resp = api.api_request('/someurl')
        self.assertEqual(200, resp.status_code)
        resp = api.api_request('/otherurl', method='POST', body='{foo}')

    The resp is a requests library response. Common attributes that
    you'll want to use are:

    - resp.status_code - integer HTTP status code returned by the request
    - resp.content - the body of the response
    - resp.headers - dictionary of HTTP headers returned
    """

    def __init__(self, api_version='v2',
                 project_id='6f70656e737461636b20342065766572'):
        """Constructor

        :param api_version: the API version that we're interested in
            using. Currently this expects 'v2' or 'v2.1' as possible
            options.
        :param project_id: the project id to use on the API.
        """
        super(OSAPIFixture, self).__init__()
        self.api_version = api_version
        self.project_id = project_id

    def setUp(self):
        super(OSAPIFixture, self).setUp()
        # A unique hostname for the wsgi-intercept.
        hostname = uuidsentinel.osapi_host
        port = 80
        service_name = 'osapi_compute'
        endpoint = 'http://%s:%s/' % (hostname, port)
        conf_overrides = {
            'osapi_compute_listen': hostname,
            'osapi_compute_listen_port': port,
            'debug': True,
        }
        self.useFixture(ConfPatcher(**conf_overrides))

        # Turn off manipulation of socket_options in TCPKeepAliveAdapter
        # to keep wsgi-intercept happy. Replace it with the method
        # from its superclass.
        self.useFixture(fixtures.MonkeyPatch(
            'keystoneauth1.session.TCPKeepAliveAdapter.init_poolmanager',
            adapters.HTTPAdapter.init_poolmanager))

        loader = wsgi.Loader().load_app(service_name)
        app = lambda: loader

        # re-use service setup code from wsgi_app to register
        # service, which is looked for in some tests
        wsgi_app._setup_service(CONF.host, service_name)
        intercept = interceptor.RequestsInterceptor(app, url=endpoint)
        intercept.install_intercept()
        self.addCleanup(intercept.uninstall_intercept)

        self.auth_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({
            'host': hostname, 'port': port,
            'api_version': self.api_version})
        self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url,
                                              self.project_id)
        self.admin_api = client.TestOpenStackClient(
            'admin', 'admin', self.auth_url, self.project_id)
        # Provide a way to access the wsgi application to tests using
        # the fixture.
        self.app = app


class OSMetadataServer(fixtures.Fixture):
    """Create an OS Metadata API server as a fixture.

    This spawns an OS Metadata API server as a fixture in a new
    greenthread in the current test.

    TODO(sdague): ideally for testing we'd have something like the
    test client which acts like requests, but connects any of the
    interactions needed.
    """

    def setUp(self):
        super(OSMetadataServer, self).setUp()
        # in order to run these in tests we need to bind only to local
        # host, and dynamically allocate ports
        conf_overrides = {
            'metadata_listen': '127.0.0.1',
            'metadata_listen_port': 0,
            'debug': True
        }
        self.useFixture(ConfPatcher(**conf_overrides))

        # NOTE(mikal): we don't have root to manipulate iptables, so just
        # zero that bit out.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.network.linux_net.IptablesManager._apply',
            lambda _: None))

        self.metadata = service.WSGIService("metadata")
        self.metadata.start()
        self.addCleanup(self.metadata.stop)
        self.md_url = "http://%s:%s/" % (
            conf_overrides['metadata_listen'],
            self.metadata.port)


class PoisonFunctions(fixtures.Fixture):
    """Poison functions so they explode if we touch them.

    When running under a non full stack test harness there are parts
    of the code that you don't want to go anywhere near. These include
    things like code that spins up extra threads, which just
    introduces races.
    """

    def setUp(self):
        super(PoisonFunctions, self).setUp()

        # The nova libvirt driver starts an event thread which only
        # causes trouble in tests. Make sure that if tests don't
        # properly patch it the test explodes.
        def evloop(*args, **kwargs):
            import sys
            warnings.warn("Forgot to disable libvirt event thread")
            sys.exit(1)

        # Don't poison the function if it's already mocked
        import nova.virt.libvirt.host
        if not isinstance(nova.virt.libvirt.host.Host._init_events,
                          mock.Mock):
            self.useFixture(fixtures.MockPatch(
                'nova.virt.libvirt.host.Host._init_events',
                side_effect=evloop))


class IndirectionAPIFixture(fixtures.Fixture):
    """Patch and restore the global NovaObject indirection api."""

    def __init__(self, indirection_api):
        """Constructor

        :param indirection_api: the indirection API to be used for tests.
        """
        super(IndirectionAPIFixture, self).__init__()
        self.indirection_api = indirection_api

    def cleanup(self):
        obj_base.NovaObject.indirection_api = self.orig_indirection_api

    def setUp(self):
        super(IndirectionAPIFixture, self).setUp()
        self.orig_indirection_api = obj_base.NovaObject.indirection_api
        obj_base.NovaObject.indirection_api = self.indirection_api
        self.addCleanup(self.cleanup)


class _FakeGreenThread(object):
    def __init__(self, func, *args, **kwargs):
        self._result = func(*args, **kwargs)

    def cancel(self, *args, **kwargs):
        # This method doesn't make sense for a synchronous call, it's just
        # defined to satisfy the interface.
        pass

    def kill(self, *args, **kwargs):
        # This method doesn't make sense for a synchronous call, it's just
        # defined to satisfy the interface.
        pass

    def link(self, func, *args, **kwargs):
        func(self, *args, **kwargs)

    def unlink(self, func, *args, **kwargs):
        # This method doesn't make sense for a synchronous call, it's just
        # defined to satisfy the interface.
        pass

    def wait(self):
        return self._result


class SpawnIsSynchronousFixture(fixtures.Fixture):
    """Patch and restore the spawn_n utility method to be synchronous"""

    def setUp(self):
        super(SpawnIsSynchronousFixture, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'nova.utils.spawn_n', _FakeGreenThread))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.utils.spawn', _FakeGreenThread))
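

# A usage sketch (`some_callable` and `expected` are hypothetical stand-ins
# for whatever the test spawns and asserts): with the fixture installed,
# code that would normally run in a greenthread runs inline, so the result
# can be asserted immediately via the fake green thread's wait().
#
#     self.useFixture(SpawnIsSynchronousFixture())
#     gt = nova.utils.spawn(some_callable)  # executes synchronously
#     self.assertEqual(expected, gt.wait())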


class SynchronousThreadPoolExecutorFixture(fixtures.Fixture):
    """Make GreenThreadPoolExecutor.submit() synchronous.

    The function passed to submit() will be executed and a mock.Mock
    object will be returned as the Future where Future.result() will
    return the result of the call to the submitted function.
    """

    def setUp(self):
        super(SynchronousThreadPoolExecutorFixture, self).setUp()

        def fake_submit(_self, fn, *args, **kwargs):
            result = fn(*args, **kwargs)
            future = mock.Mock(spec='futurist.Future')
            future.return_value.result.return_value = result
            return future

        self.useFixture(fixtures.MonkeyPatch(
            'futurist.GreenThreadPoolExecutor.submit',
            fake_submit))


class BannedDBSchemaOperations(fixtures.Fixture):
    """Ban some operations for migrations"""

    def __init__(self, banned_resources=None):
        super(BannedDBSchemaOperations, self).__init__()
        self._banned_resources = banned_resources or []

    @staticmethod
    def _explode(resource, op):
        raise exception.DBNotAllowed(
            'Operation %s.%s() is not allowed in a database migration' % (
                resource, op))

    def setUp(self):
        super(BannedDBSchemaOperations, self).setUp()
        for thing in self._banned_resources:
            self.useFixture(fixtures.MonkeyPatch(
                'sqlalchemy.%s.drop' % thing,
                lambda *a, **k: self._explode(thing, 'drop')))
            self.useFixture(fixtures.MonkeyPatch(
                'sqlalchemy.%s.alter' % thing,
                lambda *a, **k: self._explode(thing, 'alter')))
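

# A usage sketch: because fixtures.Fixture supports the context manager
# protocol, a migration test can ban destructive DDL for a block. The
# resource names are SQLAlchemy class names to patch.
#
#     with BannedDBSchemaOperations(['Table', 'Column']):
#         pass  # run the migration under test; .drop()/.alter() raise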


class ForbidNewLegacyNotificationFixture(fixtures.Fixture):
    """Make sure the test fails if new legacy notification is added"""

    def __init__(self):
        super(ForbidNewLegacyNotificationFixture, self).__init__()
        self.notifier = rpc.LegacyValidatingNotifier

    def setUp(self):
        super(ForbidNewLegacyNotificationFixture, self).setUp()
        self.notifier.fatal = True

        # allow the special test value used in
        # nova.tests.unit.test_notifications.NotificationsTestCase
        self.notifier.allowed_legacy_notification_event_types.append(
            '_decorated_function')

        self.addCleanup(self.cleanup)

    def cleanup(self):
        self.notifier.fatal = False
        self.notifier.allowed_legacy_notification_event_types.remove(
            '_decorated_function')


class AllServicesCurrent(fixtures.Fixture):
    def setUp(self):
        super(AllServicesCurrent, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.Service.get_minimum_version_multi',
            self._fake_minimum))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.service.get_minimum_version_all_cells',
            lambda *a, **k: service_obj.SERVICE_VERSION))
        compute_rpcapi.LAST_VERSION = None

    def _fake_minimum(self, *args, **kwargs):
        return service_obj.SERVICE_VERSION


class RegisterNetworkQuota(fixtures.Fixture):
    def setUp(self):
        super(RegisterNetworkQuota, self).setUp()
        # Quota resource registration modifies the global QUOTAS engine, so
        # this fixture registers and unregisters network quota for a test.
        tenant_networks._register_network_quota()
        self.addCleanup(self.cleanup)

    def cleanup(self):
        nova_quota.QUOTAS._resources.pop('networks', None)


class NeutronFixture(fixtures.Fixture):
    """A fixture to boot instances with neutron ports"""

    # the default project_id in OSAPIFixture
    tenant_id = '6f70656e737461636b20342065766572'

    network_1 = {
        'status': 'ACTIVE',
        'subnets': [],
        'name': 'private-network',
        'admin_state_up': True,
        'tenant_id': tenant_id,
        'id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
        'shared': False,
    }

    subnet_1 = {
        'name': 'private-subnet',
        'enable_dhcp': True,
        'network_id': network_1['id'],
        'tenant_id': tenant_id,
        'dns_nameservers': [],
        'allocation_pools': [
            {
                'start': '192.168.1.1',
                'end': '192.168.1.254'
            }
        ],
        'host_routes': [],
        'ip_version': 4,
        'gateway_ip': '192.168.1.1',
        'cidr': '192.168.1.1/24',
        'id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef'
    }
    network_1['subnets'] = [subnet_1['id']]

    port_1 = {
        'id': 'ce531f90-199f-48c0-816c-13e38010b442',
        'network_id': network_1['id'],
        'admin_state_up': True,
        'status': 'ACTIVE',
        'mac_address': 'fa:16:3e:4c:2c:30',
        'fixed_ips': [
            {
                # The IP on this port must be a prefix of the IP on port_2
                # to test listing servers with an ip filter regex.
                'ip_address': '192.168.1.3',
                'subnet_id': subnet_1['id']
            }
        ],
        'tenant_id': tenant_id,
        'binding:vif_type': 'ovs'
    }

    port_2 = {
        'id': '88dae9fa-0dc6-49e3-8c29-3abc41e99ac9',
        'network_id': network_1['id'],
        'admin_state_up': True,
        'status': 'ACTIVE',
        'mac_address': '00:0c:29:0d:11:74',
        'fixed_ips': [
            {
                'ip_address': '192.168.1.30',
                'subnet_id': subnet_1['id']
            }
        ],
        'tenant_id': tenant_id,
        'binding:vif_type': 'ovs'
    }

    port_with_resource_request = {
        'id': '2f2613ce-95a9-490a-b3c4-5f1c28c1f886',
        'network_id': network_1['id'],
        'admin_state_up': True,
        'status': 'ACTIVE',
        'mac_address': '52:54:00:1e:59:c3',
        'fixed_ips': [
            {
                'ip_address': '192.168.1.42',
                'subnet_id': subnet_1['id']
            }
        ],
        'tenant_id': tenant_id,
        'resource_request': {
            "resources": {
                rc_fields.ResourceClass.NET_BW_IGR_KILOBIT_PER_SEC: 1000,
                rc_fields.ResourceClass.NET_BW_EGR_KILOBIT_PER_SEC: 1000},
            "required": ["CUSTOM_PHYSNET_2", "CUSTOM_VNIC_TYPE_NORMAL"]
        }
    }

    nw_info = [{
        "profile": {},
        "ovs_interfaceid": "b71f1699-42be-4515-930a-f3ef01f94aa7",
        "preserve_on_delete": False,
        "network": {
            "bridge": "br-int",
            "subnets": [{
                "ips": [{
                    "meta": {},
                    "version": 4,
                    "type": "fixed",
                    "floating_ips": [],
                    "address": "10.0.0.4"
                }],
                "version": 4,
                "meta": {},
                "dns": [],
                "routes": [],
                "cidr": "10.0.0.0/26",
                "gateway": {
                    "meta": {},
                    "version": 4,
                    "type": "gateway",
                    "address": "10.0.0.1"
                }
            }],
            "meta": {
                "injected": False,
                "tenant_id": tenant_id,
                "mtu": 1500
            },
            "id": "e1882e38-38c2-4239-ade7-35d644cb963a",
            "label": "public"
        },
        "devname": "tapb71f1699-42",
        "vnic_type": "normal",
        "qbh_params": None,
        "meta": {},
        "details": {
            "port_filter": True,
            "ovs_hybrid_plug": True
        },
        "address": "fa:16:3e:47:94:4a",
        "active": True,
        "type": "ovs",
        "id": "b71f1699-42be-4515-930a-f3ef01f94aa7",
        "qbg_params": None
    }]

    def __init__(self, test):
        super(NeutronFixture, self).__init__()
        self.test = test

        # The fixture allows port update so we need to deepcopy the class
        # variables to avoid test case interference.
        self._ports = {
            NeutronFixture.port_1['id']:
                copy.deepcopy(NeutronFixture.port_1),
            NeutronFixture.port_with_resource_request['id']:
                copy.deepcopy(NeutronFixture.port_with_resource_request),
        }

        # The fixture does not allow network update so we don't have to
        # deepcopy here
        self._networks = {
            NeutronFixture.network_1['id']: NeutronFixture.network_1
        }

        # The fixture does not allow subnet update so we don't have to
        # deepcopy here
        self._subnets = {
            NeutronFixture.subnet_1['id']: NeutronFixture.subnet_1
        }

    def setUp(self):
        super(NeutronFixture, self).setUp()

        self.test.stub_out(
            'nova.network.neutronv2.api.API.setup_networks_on_host',
            lambda *args, **kwargs: None)
        self.test.stub_out(
            'nova.network.neutronv2.api.API.migrate_instance_start',
            lambda *args, **kwargs: None)
        self.test.stub_out(
            'nova.network.neutronv2.api.API.add_fixed_ip_to_instance',
            lambda *args, **kwargs: network_model.NetworkInfo.hydrate(
                NeutronFixture.nw_info))
        self.test.stub_out(
            'nova.network.neutronv2.api.API.remove_fixed_ip_from_instance',
            lambda *args, **kwargs: network_model.NetworkInfo.hydrate(
                NeutronFixture.nw_info))
        self.test.stub_out(
            'nova.network.neutronv2.api.API.migrate_instance_finish',
            lambda *args, **kwargs: None)
        self.test.stub_out(
            'nova.network.security_group.neutron_driver.SecurityGroupAPI.'
            'get_instances_security_groups_bindings',
            lambda *args, **kwargs: {})

        self.test.stub_out('nova.network.neutronv2.api.get_client',
                           lambda *args, **kwargs: self)

    def _get_first_id_match(self, id, list):
        filtered_list = [p for p in list if p['id'] == id]
        if len(filtered_list) > 0:
            return filtered_list[0]
        else:
            return None

    def list_extensions(self, *args, **kwargs):
        return {'extensions': []}

    def show_port(self, port_id, **_params):
        if port_id not in self._ports:
            raise exception.PortNotFound(port_id=port_id)
        return {'port': copy.deepcopy(self._ports[port_id])}

    def delete_port(self, port_id, **_params):
        if port_id in self._ports:
            del self._ports[port_id]

    def show_network(self, network_id, **_params):
        if network_id not in self._networks:
            raise neutron_client_exc.NetworkNotFoundClient()
        return {'network': copy.deepcopy(self._networks[network_id])}

    def list_networks(self, retrieve_all=True, **_params):
        networks = self._networks.values()
        if 'id' in _params:
            networks = [x for x in networks if x['id'] in _params['id']]
            _params.pop('id')
        networks = [n for n in networks
                    if all(n.get(opt) == _params[opt] for opt in _params)]
        return {'networks': copy.deepcopy(networks)}

    def list_ports(self, retrieve_all=True, **_params):
        ports = [p for p in self._ports.values()
                 if all(p.get(opt) == _params[opt] for opt in _params)]
        return {'ports': copy.deepcopy(ports)}

    def list_subnets(self, retrieve_all=True, **_params):
        # NOTE(gibi): The fixture does not support filtering for subnets
        return {'subnets': copy.deepcopy(list(self._subnets.values()))}

    def list_floatingips(self, retrieve_all=True, **_params):
        return {'floatingips': []}

    def create_port(self, body=None):
        # NOTE(gibi): Some of the tests expect that a pre-defined port is
        # created. This is port_2. So if that port is not created yet then
        # that is the one created here.
        if NeutronFixture.port_2['id'] not in self._ports:
            new_port = copy.deepcopy(NeutronFixture.port_2)
        else:
            # If port_2 is already created then create a new port based on
            # the request body, the port_2 as a template, and assign new
            # port_id and mac_address for the new port
            new_port = copy.deepcopy(body)
            new_port.update(copy.deepcopy(NeutronFixture.port_2))
            # we need truly random uuids instead of named sentinels as some
            # tests need more than 3 ports
            new_port.update({
                'id': str(uuidutils.generate_uuid()),
                'mac_address': '00:' + ':'.join(
                    ['%02x' % random.randint(0, 255) for _ in range(5)]),
            })
        self._ports[new_port['id']] = new_port
        # we need to copy again what we return as nova might modify the
        # returned port locally and we don't want that to affect the port
        # in the self._ports dict.
        return {'port': copy.deepcopy(new_port)}

    def update_port(self, port_id, body=None):
        port = self._ports[port_id]
        port.update(body['port'])
        return {'port': copy.deepcopy(port)}

    def show_quota(self, project_id):
        # unlimited quota
        return {'quota': {'port': -1}}
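

# A usage sketch: the fixture takes the test case itself so it can stub
# the neutron client with stub_out(); get_client then returns the fixture,
# which serves the fake networks/ports defined above.
#
#     neutron = self.useFixture(NeutronFixture(self))
#     port = neutron.show_port(NeutronFixture.port_1['id'])['port']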
  1227. class _NoopConductor(object):
  1228. def __getattr__(self, key):
  1229. def _noop_rpc(*args, **kwargs):
  1230. return None
  1231. return _noop_rpc
  1232. class NoopConductorFixture(fixtures.Fixture):
  1233. """Stub out the conductor API to do nothing"""
  1234. def setUp(self):
  1235. super(NoopConductorFixture, self).setUp()
  1236. self.useFixture(fixtures.MonkeyPatch(
  1237. 'nova.conductor.ComputeTaskAPI', _NoopConductor))
  1238. self.useFixture(fixtures.MonkeyPatch(
  1239. 'nova.conductor.API', _NoopConductor))
  1240. class EventReporterStub(fixtures.Fixture):
  1241. def setUp(self):
  1242. super(EventReporterStub, self).setUp()
  1243. self.useFixture(fixtures.MonkeyPatch(
  1244. 'nova.compute.utils.EventReporter',
  1245. lambda *args, **kwargs: mock.MagicMock()))
  1246. class CinderFixture(fixtures.Fixture):
  1247. """A fixture to volume operations"""
  1248. # the default project_id in OSAPIFixtures
  1249. tenant_id = '6f70656e737461636b20342065766572'
  1250. SWAP_OLD_VOL = 'a07f71dc-8151-4e7d-a0cc-cd24a3f11113'
  1251. SWAP_NEW_VOL = '227cc671-f30b-4488-96fd-7d0bf13648d8'
  1252. SWAP_ERR_OLD_VOL = '828419fa-3efb-4533-b458-4267ca5fe9b1'
  1253. SWAP_ERR_NEW_VOL = '9c6d9c2d-7a8f-4c80-938d-3bf062b8d489'
  1254. # This represents a bootable image-backed volume to test
  1255. # boot-from-volume scenarios.
  1256. IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'
  1257. def __init__(self, test):
  1258. super(CinderFixture, self).__init__()
  1259. self.test = test
  1260. self.swap_error = False
  1261. self.swap_volume_instance_uuid = None
  1262. self.swap_volume_instance_error_uuid = None
  1263. self.reserved_volumes = list()
  1264. # This is a map of instance UUIDs mapped to a list of volume IDs.
  1265. # This map gets updated on attach/detach operations.
  1266. self.attachments = collections.defaultdict(list)
  1267. def volume_ids_for_instance(self, instance_uuid):
  1268. return self.attachments.get(instance_uuid)

    def setUp(self):
        super(CinderFixture, self).setUp()

        def fake_get(self_api, context, volume_id, microversion=None):
            # Check for the special swap volumes.
            if volume_id in (CinderFixture.SWAP_OLD_VOL,
                             CinderFixture.SWAP_ERR_OLD_VOL):
                volume = {
                    'status': 'available',
                    'display_name': 'TEST1',
                    'attach_status': 'detached',
                    'id': volume_id,
                    'multiattach': False,
                    'size': 1
                }
                if ((self.swap_volume_instance_uuid and
                     volume_id == CinderFixture.SWAP_OLD_VOL) or
                    (self.swap_volume_instance_error_uuid and
                     volume_id == CinderFixture.SWAP_ERR_OLD_VOL)):
                    instance_uuid = (
                        self.swap_volume_instance_uuid
                        if volume_id == CinderFixture.SWAP_OLD_VOL
                        else self.swap_volume_instance_error_uuid)
                    volume.update({
                        'status': 'in-use',
                        'attachments': {
                            instance_uuid: {
                                'mountpoint': '/dev/vdb',
                                'attachment_id': volume_id
                            }
                        },
                        'attach_status': 'attached'
                    })
                return volume

            # Check to see if the volume is attached.
            for instance_uuid, volumes in self.attachments.items():
                if volume_id in volumes:
                    # The volume is attached.
                    volume = {
                        'status': 'in-use',
                        'display_name': volume_id,
                        'attach_status': 'attached',
                        'id': volume_id,
                        'multiattach': False,
                        'size': 1,
                        'attachments': {
                            instance_uuid: {
                                'attachment_id': volume_id,
                                'mountpoint': '/dev/vdb'
                            }
                        }
                    }
                    break
            else:
                # This is a test that does not care about the actual details.
                reserved_volume = (volume_id in self.reserved_volumes)
                volume = {
                    'status': 'attaching' if reserved_volume else 'available',
                    'display_name': 'TEST2',
                    'attach_status': 'detached',
                    'id': volume_id,
                    'multiattach': False,
                    'size': 1
                }

            # Check for our special image-backed volume.
            if volume_id == self.IMAGE_BACKED_VOL:
                # Make it a bootable volume.
                volume['bootable'] = True
                # Add the image_id metadata.
                volume['volume_image_metadata'] = {
                    # There would normally be more image metadata in here...
                    'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
                }

            return volume

        def fake_initialize_connection(self, context, volume_id, connector):
            if volume_id == CinderFixture.SWAP_ERR_NEW_VOL:
                # Return a tuple in order to raise an exception.
                return ()
            return {}

        def fake_migrate_volume_completion(self, context, old_volume_id,
                                           new_volume_id, error):
            return {'save_volume_id': new_volume_id}

        def fake_reserve_volume(self_api, context, volume_id):
            self.reserved_volumes.append(volume_id)

        def fake_unreserve_volume(self_api, context, volume_id):
            # NOTE(mnaser): It's possible that we unreserve a volume that was
            #               never reserved (ex: instance.volume_attach.error
            #               notification tests)
            if volume_id in self.reserved_volumes:
                self.reserved_volumes.remove(volume_id)

            # Signaling that swap_volume has encountered the error
            # from initialize_connection and is working on rolling back
            # the reservation on SWAP_ERR_NEW_VOL.
            self.swap_error = True

        def fake_attach(_self, context, volume_id, instance_uuid,
                        mountpoint, mode='rw'):
            # Check to see if the volume is already attached to any server.
            for instance, volumes in self.attachments.items():
                if volume_id in volumes:
                    raise exception.InvalidInput(
                        reason='Volume %s is already attached to '
                               'instance %s' % (volume_id, instance))
            # It's not attached so let's "attach" it.
            self.attachments[instance_uuid].append(volume_id)

        self.test.stub_out('nova.volume.cinder.API.attach',
                           fake_attach)

        def fake_detach(_self, context, volume_id, instance_uuid=None,
                        attachment_id=None):
            # NOTE(mnaser): It's possible that we unreserve a volume that was
            #               never reserved (ex: instance.volume_attach.error
            #               notification tests)
            if volume_id in self.reserved_volumes:
                self.reserved_volumes.remove(volume_id)

            if instance_uuid is not None:
                # If the volume isn't attached to this instance it will
                # result in a ValueError which indicates a broken test or
                # code, so we just let that raise up.
                self.attachments[instance_uuid].remove(volume_id)
            else:
                for instance, volumes in self.attachments.items():
                    if volume_id in volumes:
                        volumes.remove(volume_id)
                        break

        self.test.stub_out('nova.volume.cinder.API.detach', fake_detach)
        self.test.stub_out('nova.volume.cinder.API.begin_detaching',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.get',
                           fake_get)
        self.test.stub_out('nova.volume.cinder.API.initialize_connection',
                           fake_initialize_connection)
        self.test.stub_out(
            'nova.volume.cinder.API.migrate_volume_completion',
            fake_migrate_volume_completion)
        self.test.stub_out('nova.volume.cinder.API.reserve_volume',
                           fake_reserve_volume)
        self.test.stub_out('nova.volume.cinder.API.roll_detaching',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.terminate_connection',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.unreserve_volume',
                           fake_unreserve_volume)
        self.test.stub_out('nova.volume.cinder.API.check_attached',
                           lambda *args, **kwargs: None)
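

# An illustrative usage sketch for CinderFixture; the server-boot details
# are assumed, only the fixture attributes shown above are real:
#
#     self.cinder = self.useFixture(CinderFixture(self))
#     # Tell the fixture which instance is swapping volumes so fake_get
#     # reports SWAP_OLD_VOL as attached to it.
#     self.cinder.swap_volume_instance_uuid = server_uuid
#     # After an attach, the fixture's bookkeeping can be asserted directly:
#     #     self.assertIn(volume_id,
#     #                   self.cinder.volume_ids_for_instance(server_uuid))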


# TODO(mriedem): We can probably pull some of the common parts from the
# CinderFixture into a common mixin class for things like the variables
# and fake_get.
class CinderFixtureNewAttachFlow(fixtures.Fixture):
    """A fixture for stubbing out volume operations using the new Cinder
    attach/detach (volume attachment) APIs.
    """

    # the default project_id in OSAPIFixtures
    tenant_id = '6f70656e737461636b20342065766572'

    SWAP_OLD_VOL = 'a07f71dc-8151-4e7d-a0cc-cd24a3f11113'
    SWAP_NEW_VOL = '227cc671-f30b-4488-96fd-7d0bf13648d8'
    SWAP_ERR_OLD_VOL = '828419fa-3efb-4533-b458-4267ca5fe9b1'
    SWAP_ERR_NEW_VOL = '9c6d9c2d-7a8f-4c80-938d-3bf062b8d489'
    SWAP_ERR_ATTACH_ID = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
    MULTIATTACH_VOL = '4757d51f-54eb-4442-8684-3399a6431f67'

    # This represents a bootable image-backed volume to test
    # boot-from-volume scenarios.
    IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'
    # This represents a bootable image-backed volume with required traits
    # as part of volume image metadata
    IMAGE_WITH_TRAITS_BACKED_VOL = '6194fc02-c60e-4a01-a8e5-600798208b5f'

    def __init__(self, test):
        super(CinderFixtureNewAttachFlow, self).__init__()
        self.test = test
        self.swap_error = False
        self.swap_volume_instance_uuid = None
        self.swap_volume_instance_error_uuid = None
        self.attachment_error_id = None
        # A map of volumes to a list of (attachment_id, instance_uuid).
        # Note that a volume can have multiple attachments even without
        # multi-attach, as some flows create a blank 'reservation'
        # attachment before deleting another attachment.
        self.volume_to_attachment = collections.defaultdict(list)

    def volume_ids_for_instance(self, instance_uuid):
        for volume_id, attachments in self.volume_to_attachment.items():
            for _, _instance_uuid in attachments:
                if _instance_uuid == instance_uuid:
                    # we might have multiple volumes attached to this
                    # instance so yield rather than return
                    yield volume_id
                    break
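
    # NOTE: unlike CinderFixture.volume_ids_for_instance, which returns a
    # list, this version is a generator; callers typically materialize it,
    # e.g. (illustrative): list(fixture.volume_ids_for_instance(uuid)).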

    def setUp(self):
        super(CinderFixtureNewAttachFlow, self).setUp()

        def fake_get(self_api, context, volume_id, microversion=None):
            attachments = self.volume_to_attachment[volume_id]
            # Check for the special swap volumes.
            if volume_id in (CinderFixture.SWAP_OLD_VOL,
                             CinderFixture.SWAP_ERR_OLD_VOL):
                volume = {
                    'status': 'available',
                    'display_name': 'TEST1',
                    'attach_status': 'detached',
                    'id': volume_id,
                    'multiattach': False,
                    'size': 1
                }
                if ((self.swap_volume_instance_uuid and
                     volume_id == CinderFixture.SWAP_OLD_VOL) or
                    (self.swap_volume_instance_error_uuid and
                     volume_id == CinderFixture.SWAP_ERR_OLD_VOL)):
                    instance_uuid = (
                        self.swap_volume_instance_uuid
                        if volume_id == CinderFixture.SWAP_OLD_VOL
                        else self.swap_volume_instance_error_uuid)

                    if attachments:
                        attachment_id, instance_uuid = attachments[0]
                        volume.update({
                            'status': 'in-use',
                            'attachments': {
                                instance_uuid: {
                                    'mountpoint': '/dev/vdb',
                                    'attachment_id': attachment_id
                                }
                            },
                            'attach_status': 'attached'
                        })
                return volume

            # Check to see if the volume is attached.
            if attachments:
                # The volume is attached.
                attachment_id, instance_uuid = attachments[0]
                volume = {
                    'status': 'in-use',
                    'display_name': volume_id,
                    'attach_status': 'attached',
                    'id': volume_id,
                    'multiattach': volume_id == self.MULTIATTACH_VOL,
                    'size': 1,
                    'attachments': {
                        instance_uuid: {
                            'attachment_id': attachment_id,
                            'mountpoint': '/dev/vdb'
                        }
                    }
                }
            else:
                # This is a test that does not care about the actual details.
                volume = {
                    'status': 'available',
                    'display_name': 'TEST2',
                    'attach_status': 'detached',
                    'id': volume_id,
                    'multiattach': volume_id == self.MULTIATTACH_VOL,
                    'size': 1
                }

            # Check for our special image-backed volume.
            if volume_id in (self.IMAGE_BACKED_VOL,
                             self.IMAGE_WITH_TRAITS_BACKED_VOL):
                # Make it a bootable volume.
                volume['bootable'] = True
                if volume_id == self.IMAGE_BACKED_VOL:
                    # Add the image_id metadata.
                    volume['volume_image_metadata'] = {
                        # There would normally be more image metadata in here.
                        'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
                    }
                elif volume_id == self.IMAGE_WITH_TRAITS_BACKED_VOL:
                    # Add the image_id metadata with traits.
                    volume['volume_image_metadata'] = {
                        'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                        "trait:HW_CPU_X86_SGX": "required",
                    }

            return volume

        def fake_migrate_volume_completion(self, context, old_volume_id,
                                           new_volume_id, error):
            return {'save_volume_id': new_volume_id}

        def _find_attachment(attachment_id):
            """Find the attachment corresponding to ``attachment_id``.

            Returns:
                A tuple of the volume ID, an attachment-instance mapping
                tuple for the given attachment ID, and a list of
                attachment-instance mapping tuples for the volume.
            """
            for volume_id, attachments in self.volume_to_attachment.items():
                for attachment in attachments:
                    _attachment_id, instance_uuid = attachment
                    if attachment_id == _attachment_id:
                        return volume_id, attachment, attachments
            raise exception.VolumeAttachmentNotFound(
                attachment_id=attachment_id)

        def fake_attachment_create(_self, context, volume_id, instance_uuid,
                                   connector=None, mountpoint=None):
            attachment_id = uuidutils.generate_uuid()
            if self.attachment_error_id is not None:
                attachment_id = self.attachment_error_id
            attachment = {'id': attachment_id,
                          'connection_info': {'data': {}}}
            self.volume_to_attachment[volume_id].append(
                (attachment_id, instance_uuid))
            return attachment

        def fake_attachment_delete(_self, context, attachment_id):
            # 'attachment' is a tuple defining an attachment-instance mapping
            _, attachment, attachments = _find_attachment(attachment_id)
            attachments.remove(attachment)
            if attachment_id == CinderFixtureNewAttachFlow.SWAP_ERR_ATTACH_ID:
                self.swap_error = True

        def fake_attachment_update(_self, context, attachment_id, connector,
                                   mountpoint=None):
            # Ensure the attachment exists
            _find_attachment(attachment_id)
            attachment_ref = {'driver_volume_type': 'fake_type',
                              'id': attachment_id,
                              'connection_info': {'data':
                                                  {'foo': 'bar',
                                                   'target_lun': '1'}}}
            if attachment_id == CinderFixtureNewAttachFlow.SWAP_ERR_ATTACH_ID:
                attachment_ref = {'connection_info': ()}
            return attachment_ref

        def fake_attachment_get(_self, context, attachment_id):
            # Ensure the attachment exists
            _find_attachment(attachment_id)
            attachment_ref = {'driver_volume_type': 'fake_type',
                              'id': attachment_id,
                              'connection_info': {'data':
                                                  {'foo': 'bar',
                                                   'target_lun': '1'}}}
            return attachment_ref

        def fake_get_all_volume_types(*args, **kwargs):
            return [{
                # This is used in the 2.67 API sample test.
                'id': '5f9204ec-3e94-4f27-9beb-fe7bb73b6eb9',
                'name': 'lvm-1'
            }]

        self.test.stub_out('nova.volume.cinder.API.attachment_create',
                           fake_attachment_create)
        self.test.stub_out('nova.volume.cinder.API.attachment_delete',
                           fake_attachment_delete)
        self.test.stub_out('nova.volume.cinder.API.attachment_update',
                           fake_attachment_update)
        self.test.stub_out('nova.volume.cinder.API.attachment_complete',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.attachment_get',
                           fake_attachment_get)
        self.test.stub_out('nova.volume.cinder.API.begin_detaching',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.get',
                           fake_get)
        self.test.stub_out(
            'nova.volume.cinder.API.migrate_volume_completion',
            fake_migrate_volume_completion)
        self.test.stub_out('nova.volume.cinder.API.roll_detaching',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.is_microversion_supported',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.check_attached',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.get_all_volume_types',
                           fake_get_all_volume_types)
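

# An illustrative sketch of driving the new-style attachment flow (the
# volume and instance UUIDs are placeholders):
#
#     self.cinder = self.useFixture(CinderFixtureNewAttachFlow(self))
#     # Force attachment_create to hand out the well-known error attachment
#     # id so a later attachment_update/delete trips swap_error:
#     self.cinder.attachment_error_id = self.cinder.SWAP_ERR_ATTACH_ID
#     vol_ids = list(self.cinder.volume_ids_for_instance(instance_uuid))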


class UnHelperfulClientChannel(privsep_daemon._ClientChannel):
    def __init__(self, context):
        raise Exception('You have attempted to start a privsep helper. '
                        'This is not allowed in the gate, and indicates '
                        'a failure to mock something in your tests.')


class PrivsepNoHelperFixture(fixtures.Fixture):
    """A fixture to catch failures to mock privsep's rootwrap helper.

    If you fail to mock away a privsep'd method in a unit test, then
    you may well end up accidentally running the privsep rootwrap
    helper. This will fail in the gate, but it fails in a way which
    doesn't identify which test is missing a mock. Instead, we
    raise an exception so that you at least know where you've missed
    something.
    """

    def setUp(self):
        super(PrivsepNoHelperFixture, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'oslo_privsep.daemon.RootwrapClientChannel',
            UnHelperfulClientChannel))
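

# Illustrative failure-mode sketch: once PrivsepNoHelperFixture is in
# place, a test that forgets to mock a privsep'd call fails loudly at the
# call site instead of spawning a real rootwrap helper (the privsep'd
# function below is a placeholder, not a reference to a real symbol):
#
#     self.useFixture(PrivsepNoHelperFixture())
#     some_privsep_entrypoint()  # -> Exception('You have attempted ...')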


class NoopQuotaDriverFixture(fixtures.Fixture):
    """A fixture to run tests using the NoopQuotaDriver.

    We can't simply switch to the NoopQuotaDriver with self.flags because
    the QuotaEngine object is global. Concurrently running tests would fail
    intermittently because they might get the NoopQuotaDriver globally when
    they expected the default DbQuotaDriver behavior. So instead, we patch
    the _driver property of the QuotaEngine class on a per-test basis.
    """

    def setUp(self):
        super(NoopQuotaDriverFixture, self).setUp()
        self.useFixture(fixtures.MonkeyPatch('nova.quota.QuotaEngine._driver',
                                             nova_quota.NoopQuotaDriver()))
        # Set the config option just so that code checking for the presence
        # of the NoopQuotaDriver setting will see it as expected.
        # For some reason, this does *not* work when TestCase.flags is used.
        # When using self.flags, the concurrent test failures came back.
        CONF.set_override('driver', 'nova.quota.NoopQuotaDriver', 'quota')
        self.addCleanup(CONF.clear_override, 'driver', 'quota')
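

# Illustrative sketch: a test that wants quota checks to always pass can
# simply install the fixture; the global QuotaEngine driver and the
# '[quota] driver' config option then both report the NoopQuotaDriver:
#
#     self.useFixture(NoopQuotaDriverFixture())
#     self.assertEqual('nova.quota.NoopQuotaDriver', CONF.quota.driver)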


class DownCellFixture(fixtures.Fixture):
    """A fixture to simulate when a cell is down, either due to error or
    timeout.

    This fixture stubs out the scatter_gather_cells routine used in various
    cells-related API operations, like listing/showing server details, to
    return an ``oslo_db.exception.DBError`` per down cell in the results.
    It is therefore best used with a test scenario like this:

    1. Create a server successfully.
    2. Using the fixture, list/show servers. Depending on the microversion
       used, the API should either return minimal results or skip the
       results from down cells by default.

    Example usage::

        with nova_fixtures.DownCellFixture():
            # List servers with down cells.
            self.api.get_servers()
            # Show a server in a down cell.
            self.api.get_server(server['id'])
            # List services with down cells.
            self.admin_api.api_get('/os-services')
    """

    def __init__(self, down_cell_mappings=None):
        self.down_cell_mappings = down_cell_mappings

    def setUp(self):
        super(DownCellFixture, self).setUp()

        def stub_scatter_gather_cells(ctxt, cell_mappings, timeout, fn,
                                      *args, **kwargs):
            # Return a dict with an entry per cell mapping where the results
            # are some kind of exception.
            up_cell_mappings = objects.CellMappingList()
            if not self.down_cell_mappings:
                # User has not passed any down cells explicitly, so all cells
                # are considered as down cells.
                self.down_cell_mappings = cell_mappings
            else:
                # User has passed down cell mappings, so the rest of the
                # cells should be up, meaning we should return the right
                # results. We assume that the down cells will be a subset
                # of the cell_mappings.
                down_cell_uuids = [cell.uuid
                                   for cell in self.down_cell_mappings]
                up_cell_mappings.objects = [cell
                                            for cell in cell_mappings
                                            if cell.uuid not in
                                            down_cell_uuids]

            def wrap(cell_uuid, thing):
                # We should embed the cell_uuid into the context before
                # wrapping since it's used to calculate the cells_timed_out
                # and cells_failed properties in the object.
                ctxt.cell_uuid = cell_uuid
                return multi_cell_list.RecordWrapper(ctxt, sort_ctx, thing)

            if fn is multi_cell_list.query_wrapper:
                # If the function called through the scatter-gather utility
                # is multi_cell_list.query_wrapper, we should wrap the
                # exception object into a multi_cell_list.RecordWrapper. This
                # is because, unlike the other functions where the exception
                # object is returned directly, the query_wrapper wraps this
                # into the RecordWrapper object format. So if we do not wrap
                # it, it will blow up at the point of generating results from
                # heapq further down the stack.
                sort_ctx = multi_cell_list.RecordSortContext([], [])
                ret1 = {
                    cell_mapping.uuid: [wrap(cell_mapping.uuid,
                                             db_exc.DBError())]
                    for cell_mapping in self.down_cell_mappings
                }
            else:
                ret1 = {
                    cell_mapping.uuid: db_exc.DBError()
                    for cell_mapping in self.down_cell_mappings
                }
            ret2 = {}
            for cell in up_cell_mappings:
                with context.target_cell(ctxt, cell) as cctxt:
                    ctxt.cell_uuid = cell.uuid
                    result = fn(cctxt, *args, **kwargs)
                ret2[cell.uuid] = result
            return dict(list(ret1.items()) + list(ret2.items()))

        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.scatter_gather_cells', stub_scatter_gather_cells))
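

# A hedged sketch of simulating only a subset of down cells; any iterable
# of CellMapping objects works, and the lookup below is illustrative:
#
#     cell1 = self.cell_mappings['cell1']  # hypothetical test attribute
#     with nova_fixtures.DownCellFixture([cell1]):
#         # cell1 behaves as down (a DBError per result), while the other
#         # cells are targeted normally and return real results.
#         self.api.get_servers()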