OpenStack Compute (Nova)

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Fixtures for Nova tests."""

from __future__ import absolute_import

import collections
from contextlib import contextmanager
import copy
import logging as std_logging
import os
import random
import warnings

import fixtures
import mock
from neutronclient.common import exceptions as neutron_client_exc
import os_resource_classes as orc
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import exception as db_exc
import oslo_messaging as messaging
from oslo_messaging import conffixture as messaging_conffixture
from oslo_privsep import daemon as privsep_daemon
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
from requests import adapters
from sqlalchemy import exc as sqla_exc
from wsgi_intercept import interceptor

from nova.api.openstack.compute import tenant_networks
from nova.api.openstack import wsgi_app
from nova.api import wsgi
from nova.compute import multi_cell_list
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova.db import migration
from nova.db.sqlalchemy import api as session
from nova import exception
from nova.network import model as network_model
from nova.network.neutronv2 import constants as neutron_constants
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova import quota as nova_quota
from nova import rpc
from nova import service
from nova.tests.functional.api import client
from nova.tests.unit import fake_requests

_TRUE_VALUES = ('True', 'true', '1', 'yes')

CONF = cfg.CONF

DB_SCHEMA = {'main': "", 'api': ""}
SESSION_CONFIGURED = False


class ServiceFixture(fixtures.Fixture):
    """Run a service as a test fixture."""

    def __init__(self, name, host=None, cell=None, **kwargs):
        # If not otherwise specified, the host will default to the
        # name of the service. Some things like aggregates care that
        # this is stable.
        host = host or name
        kwargs.setdefault('host', host)
        kwargs.setdefault('binary', 'nova-%s' % name)
        self.cell = cell
        self.kwargs = kwargs

    def setUp(self):
        super(ServiceFixture, self).setUp()
        self.ctxt = context.get_admin_context()
        if self.cell:
            context.set_target_cell(self.ctxt, self.cell)
        with mock.patch('nova.context.get_admin_context',
                        return_value=self.ctxt):
            self.service = service.Service.create(**self.kwargs)
        self.service.start()
        self.addCleanup(self.service.kill)
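
# Example (illustrative sketch, not executed here): starting a compute
# service from a test case that supports fixtures; the 'compute' service
# name and the assertions are just examples.
#
#     svc = self.useFixture(ServiceFixture('compute'))
#     self.assertEqual('nova-compute', svc.service.binary)
#     self.assertEqual('compute', svc.service.host)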


class NullHandler(std_logging.Handler):
    """Custom default NullHandler that attempts to format the record.

    Used in conjunction with
    log_fixture.get_logging_handle_error_fixture to detect formatting
    errors in debug level logs without saving the logs.
    """
    def handle(self, record):
        self.format(record)

    def emit(self, record):
        pass

    def createLock(self):
        self.lock = None


class StandardLogging(fixtures.Fixture):
    """Setup Logging redirection for tests.

    There are a number of things we want to handle with logging in tests:

    * Redirect the logging to somewhere that we can test or dump it later.

    * Ensure that as many DEBUG messages as possible are actually
      executed, to ensure they are actually syntactically valid (they
      often have not been).

    * Ensure that we create useful output for tests that doesn't
      overwhelm the testing system (which means we can't capture the
      100 MB of debug logging on every run).

    To do this we create a logger fixture at the root level, which
    defaults to INFO and create a Null Logger at DEBUG which lets
    us execute log messages at DEBUG but not keep the output.

    To support local debugging OS_DEBUG=True can be set in the
    environment, which will print out the full debug logging.

    There are also a set of overrides for particularly verbose
    modules to be even less than INFO.
    """

    def setUp(self):
        super(StandardLogging, self).setUp()

        # set root logger to debug
        root = std_logging.getLogger()
        root.setLevel(std_logging.DEBUG)

        # supports collecting debug level for local runs
        if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
            level = std_logging.DEBUG
        else:
            level = std_logging.INFO

        # Collect logs
        fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
        self.logger = self.useFixture(
            fixtures.FakeLogger(format=fs, level=None))
        # TODO(sdague): why can't we send level through the fake
        # logger? Tests prove that it breaks, but it's worth getting
        # to the bottom of.
        root.handlers[0].setLevel(level)

        if level > std_logging.DEBUG:
            # Just attempt to format debug level logs, but don't save them
            handler = NullHandler()
            self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
            handler.setLevel(std_logging.DEBUG)

            # Don't log every single DB migration step
            std_logging.getLogger(
                'migrate.versioning.api').setLevel(std_logging.WARNING)

        # At times we end up calling back into main() functions in
        # testing. This has the possibility of calling logging.setup
        # again, which completely unwinds the logging capture we've
        # created here. Once we've setup the logging the way we want,
        # disable the ability for the test to change this.
        def fake_logging_setup(*args):
            pass

        self.useFixture(
            fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup))

    def delete_stored_logs(self):
        # NOTE(gibi): this depends on the internals of fixtures.FakeLogger.
        # This could be enhanced once the PR
        # https://github.com/testing-cabal/fixtures/pull/42 merges
        self.logger._output.truncate(0)
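
# Example (illustrative sketch): asserting on captured log output from a
# test that installed the fixture; fixtures.FakeLogger exposes the
# captured text via its 'output' attribute.
#
#     self.stdlog = self.useFixture(StandardLogging())
#     # ... run the code under test ...
#     self.assertIn('expected message', self.stdlog.logger.output)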


class OutputStreamCapture(fixtures.Fixture):
    """Capture output streams during tests.

    This fixture captures errant printing to stderr / stdout during
    the tests and lets us see those streams at the end of the test
    runs instead. Useful to see what was happening during failed
    tests.
    """
    def setUp(self):
        super(OutputStreamCapture, self).setUp()
        if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
            self.out = self.useFixture(fixtures.StringStream('stdout'))
            self.useFixture(
                fixtures.MonkeyPatch('sys.stdout', self.out.stream))
        if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
            self.err = self.useFixture(fixtures.StringStream('stderr'))
            self.useFixture(
                fixtures.MonkeyPatch('sys.stderr', self.err.stream))

    @property
    def stderr(self):
        return self.err._details["stderr"].as_text()

    @property
    def stdout(self):
        return self.out._details["stdout"].as_text()


class Timeout(fixtures.Fixture):
    """Setup per test timeouts.

    In order to avoid test deadlocks we support setting up a test
    timeout parameter read from the environment. In almost all
    cases where the timeout is reached this means a deadlock.

    A class level TIMEOUT_SCALING_FACTOR also exists, which allows
    extremely long tests to specify they need more time.
    """

    def __init__(self, timeout, scaling=1):
        super(Timeout, self).__init__()
        try:
            self.test_timeout = int(timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            self.test_timeout = 0
        if scaling >= 1:
            self.test_timeout *= scaling
        else:
            raise ValueError('scaling value must be >= 1')

    def setUp(self):
        super(Timeout, self).setUp()
        if self.test_timeout > 0:
            self.useFixture(fixtures.Timeout(self.test_timeout, gentle=True))
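
# Example (illustrative sketch): wiring the fixture to the conventional
# OS_TEST_TIMEOUT environment variable with a class-level scaling factor,
# as the docstring above describes (variable name is the usual convention).
#
#     test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
#     self.useFixture(
#         Timeout(test_timeout, scaling=self.TIMEOUT_SCALING_FACTOR))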


class DatabasePoisonFixture(fixtures.Fixture):
    def setUp(self):
        super(DatabasePoisonFixture, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'oslo_db.sqlalchemy.enginefacade._TransactionFactory.'
            '_create_session',
            self._poison_configure))

    def _poison_configure(self, *a, **k):
        # If you encounter this error, you might be tempted to just not
        # inherit from NoDBTestCase. Bug #1568414 fixed a few hundred of
        # these errors, and not once was that the correct solution. Instead,
        # consider some of the following tips (when applicable):
        #
        # - mock at the object layer rather than the db layer, for example:
        #       nova.objects.instance.Instance.get
        #           vs.
        #       nova.db.instance_get
        #
        # - mock at the api layer rather than the object layer, for example:
        #       nova.api.openstack.common.get_instance
        #           vs.
        #       nova.objects.instance.Instance.get
        #
        # - mock code that requires the database but is otherwise tangential
        #   to the code you're testing (for example: EventReporterStub)
        #
        # - peruse some of the other database poison warning fixes here:
        #   https://review.openstack.org/#/q/topic:bug/1568414
        raise Exception('This test uses methods that set internal oslo_db '
                        'state, but it does not claim to use the database. '
                        'This will conflict with the setup of tests that '
                        'do use the database and cause failures later.')


class SingleCellSimple(fixtures.Fixture):
    """Setup the simplest cells environment possible.

    This should be used when you do not care about multiple cells,
    or having a "real" environment for tests that should not care.
    This will give you a single cell, and map any and all accesses
    to that cell (even things that would go to cell0).

    If you need to distinguish between cell0 and cellN, then you
    should use the CellDatabases fixture.

    If instances should appear to still be in scheduling state, pass
    instances_created=False to init.
    """

    def __init__(self, instances_created=True):
        self.instances_created = instances_created

    def setUp(self):
        super(SingleCellSimple, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.CellMappingList._get_all_from_db',
            self._fake_cell_list))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.CellMappingList._get_by_project_id_from_db',
            self._fake_cell_list))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.CellMapping._get_by_uuid_from_db',
            self._fake_cell_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.HostMapping._get_by_host_from_db',
            self._fake_hostmapping_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.InstanceMapping._get_by_instance_uuid_from_db',
            self._fake_instancemapping_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.InstanceMappingList._get_by_instance_uuids_from_db',
            self._fake_instancemapping_get_uuids))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.InstanceMapping._save_in_db',
            self._fake_instancemapping_get_save))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.target_cell',
            self._fake_target_cell))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.set_target_cell',
            lambda c, m: None))

    def _fake_hostmapping_get(self, *args):
        return {'id': 1,
                'updated_at': None,
                'created_at': None,
                'host': 'host1',
                'cell_mapping': self._fake_cell_list()[0]}

    def _fake_instancemapping_get_common(self, instance_uuid):
        return {
            'id': 1,
            'updated_at': None,
            'created_at': None,
            'instance_uuid': instance_uuid,
            'cell_id': (self.instances_created and 1 or None),
            'project_id': 'project',
            'cell_mapping': (
                self.instances_created and self._fake_cell_get() or None),
        }

    def _fake_instancemapping_get_save(self, *args):
        return self._fake_instancemapping_get_common(args[-2])

    def _fake_instancemapping_get(self, *args):
        return self._fake_instancemapping_get_common(args[-1])

    def _fake_instancemapping_get_uuids(self, *args):
        return [self._fake_instancemapping_get(uuid)
                for uuid in args[-1]]

    def _fake_cell_get(self, *args):
        return self._fake_cell_list()[0]

    def _fake_cell_list(self, *args):
        return [{'id': 1,
                 'updated_at': None,
                 'created_at': None,
                 'uuid': uuidsentinel.cell1,
                 'name': 'onlycell',
                 'transport_url': 'fake://nowhere/',
                 'database_connection': 'sqlite:///',
                 'disabled': False}]

    @contextmanager
    def _fake_target_cell(self, context, target_cell):
        # NOTE(danms): Just pass through the context without actually
        # targeting anything.
        yield context
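
# Example (illustrative sketch): once installed, cell targeting becomes a
# pass-through, so this runs without any real cell topology (ctxt is an
# assumed RequestContext from the test).
#
#     self.useFixture(SingleCellSimple())
#     with context.target_cell(ctxt, None) as cctxt:
#         # cctxt is simply ctxt; no actual cell switch happens
#         pass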


class CheatingSerializer(rpc.RequestContextSerializer):
    """A messaging.RequestContextSerializer that helps with cells.

    Our normal serializer does not pass along context attributes like
    db_connection and mq_connection, for good reason. We don't really
    want/need to force a remote RPC server to use our values for this.
    However, during unit and functional tests, since we're all in the same
    process, we want cell-targeted RPC calls to preserve these values.
    Unless we had per-service config and database layer state for
    the fake services we start, this is a reasonable cheat.
    """
    def serialize_context(self, context):
        """Serialize context with the db_connection inside."""
        values = super(CheatingSerializer, self).serialize_context(context)
        values['db_connection'] = context.db_connection
        values['mq_connection'] = context.mq_connection
        return values

    def deserialize_context(self, values):
        """Deserialize context and honor db_connection if present."""
        ctxt = super(CheatingSerializer, self).deserialize_context(values)
        ctxt.db_connection = values.pop('db_connection', None)
        ctxt.mq_connection = values.pop('mq_connection', None)
        return ctxt


class CellDatabases(fixtures.Fixture):
    """Create per-cell databases for testing.

    How to use::

      fix = CellDatabases()
      fix.add_cell_database('connection1')
      fix.add_cell_database('connection2', default=True)
      self.useFixture(fix)

    Passing default=True tells the fixture which database should
    be given to code that doesn't target a specific cell.
    """
    def __init__(self):
        self._ctxt_mgrs = {}
        self._last_ctxt_mgr = None
        self._default_ctxt_mgr = None

        # NOTE(danms): Use a ReaderWriterLock to synchronize our
        # global database muckery here. If we change global db state
        # to point to a cell, we need to take an exclusive lock to
        # prevent any other calls to get_context_manager() until we
        # reset to the default.
        self._cell_lock = lockutils.ReaderWriterLock()

    def _cache_schema(self, connection_str):
        # NOTE(melwitt): See the regular Database fixture for why
        # we do this.
        global DB_SCHEMA
        if not DB_SCHEMA['main']:
            ctxt_mgr = self._ctxt_mgrs[connection_str]
            engine = ctxt_mgr.writer.get_engine()
            conn = engine.connect()
            migration.db_sync(database='main')
            DB_SCHEMA['main'] = "".join(line for line
                                        in conn.connection.iterdump())
            engine.dispose()

    @contextmanager
    def _wrap_target_cell(self, context, cell_mapping):
        # NOTE(danms): This method is responsible for switching global
        # database state in a safe way such that code that doesn't
        # know anything about cell targeting (i.e. compute node code)
        # can continue to operate when called from something that has
        # targeted a specific cell. In order to make this safe from a
        # dining-philosopher-style deadlock, we need to be able to
        # support multiple threads talking to the same cell at the
        # same time and potentially recursion within the same thread
        # from code that would otherwise be running on separate nodes
        # in real life, but where we're actually recursing in the
        # tests.
        #
        # The basic logic here is:
        #  1. Grab a reader lock to see if the state is already pointing at
        #     the cell we want. If it is, we can yield and return without
        #     altering the global state further. The read lock ensures that
        #     global state won't change underneath us, and multiple threads
        #     can be working at the same time, as long as they are looking
        #     for the same cell.
        #  2. If we do need to change the global state, grab a writer lock
        #     to make that change, which assumes that nothing else is looking
        #     at a cell right now. We do only non-schedulable things while
        #     holding that lock to avoid the deadlock mentioned above.
        #  3. We then re-lock with a reader lock just as step #1 above and
        #     yield to do the actual work. We can do schedulable things
        #     here and not exclude other threads from making progress.
        #     If an exception is raised, we capture that and save it.
        #  4. If we changed state in #2, we need to change it back. So we
        #     grab a writer lock again and do that.
        #  5. Finally, if an exception was raised in #3 while state was
        #     changed, we raise it to the caller.

        if cell_mapping:
            desired = self._ctxt_mgrs[cell_mapping.database_connection]
        else:
            desired = self._default_ctxt_mgr

        with self._cell_lock.read_lock():
            if self._last_ctxt_mgr == desired:
                with self._real_target_cell(context, cell_mapping) as c:
                    yield c
                    return

        raised_exc = None

        with self._cell_lock.write_lock():
            if cell_mapping is not None:
                # This assumes the next local DB access is the same cell
                # that was targeted last time.
                self._last_ctxt_mgr = desired

        with self._cell_lock.read_lock():
            if self._last_ctxt_mgr != desired:
                # NOTE(danms): This is unlikely to happen, but it's possible
                # another waiting writer changed the state between us letting
                # it go and re-acquiring as a reader. If lockutils supported
                # upgrading and downgrading locks, this wouldn't be a problem.
                # Regardless, assert that it is still as we left it here
                # so we don't hit the wrong cell. If this becomes a problem,
                # we just need to retry the write section above until we land
                # here with the cell we want.
                raise RuntimeError('Global DB state changed underneath us')

            try:
                with self._real_target_cell(context,
                                            cell_mapping) as ccontext:
                    yield ccontext
            except Exception as exc:
                raised_exc = exc

        with self._cell_lock.write_lock():
            # Once we have returned from the context, we need
            # to restore the default context manager for any
            # subsequent calls
            self._last_ctxt_mgr = self._default_ctxt_mgr

        if raised_exc:
            raise raised_exc

    def _wrap_create_context_manager(self, connection=None):
        ctxt_mgr = self._ctxt_mgrs[connection]
        return ctxt_mgr

    def _wrap_get_context_manager(self, context):
        try:
            # If already targeted, we can proceed without a lock
            if context.db_connection:
                return context.db_connection
        except AttributeError:
            # Unit tests with None, FakeContext, etc
            pass

        # NOTE(melwitt): This is a hack to try to deal with
        # local accesses i.e. non target_cell accesses.
        with self._cell_lock.read_lock():
            # FIXME(mriedem): This is actually misleading and means we don't
            # catch things like bug 1717000 where a context should be
            # targeted to a cell but it's not, and the fixture here just
            # returns the last targeted context that was used.
            return self._last_ctxt_mgr

    def _wrap_get_server(self, target, endpoints, serializer=None):
        """Mirror rpc.get_server() but with our special sauce."""
        serializer = CheatingSerializer(serializer)
        return messaging.get_rpc_server(rpc.TRANSPORT,
                                        target,
                                        endpoints,
                                        executor='eventlet',
                                        serializer=serializer)

    def _wrap_get_client(self, target, version_cap=None, serializer=None,
                         call_monitor_timeout=None):
        """Mirror rpc.get_client() but with our special sauce."""
        serializer = CheatingSerializer(serializer)
        return messaging.RPCClient(rpc.TRANSPORT,
                                   target,
                                   version_cap=version_cap,
                                   serializer=serializer,
                                   call_monitor_timeout=call_monitor_timeout)

    def add_cell_database(self, connection_str, default=False):
        """Add a cell database to the fixture.

        :param connection_str: An identifier used to represent the connection
            string for this database. It should match the database_connection
            field in the corresponding CellMapping.
        :param default: If True, this database is used for code that does
            not target a specific cell.
        """
        # NOTE(danms): Create a new context manager for the cell, which
        # will house the sqlite:// connection for this cell's in-memory
        # database. Store/index it by the connection string, which is
        # how we identify cells in CellMapping.
        ctxt_mgr = session.create_context_manager()
        self._ctxt_mgrs[connection_str] = ctxt_mgr

        # NOTE(melwitt): The first DB access through service start is
        # local so this initializes _last_ctxt_mgr for that and needs
        # to be a compute cell.
        self._last_ctxt_mgr = ctxt_mgr

        # NOTE(danms): Record which context manager should be the default
        # so we can restore it when we return from target-cell contexts.
        # If none has been provided yet, store the current one in case
        # no default is ever specified.
        if self._default_ctxt_mgr is None or default:
            self._default_ctxt_mgr = ctxt_mgr

        def get_context_manager(context):
            return ctxt_mgr

        # NOTE(danms): This is a temporary MonkeyPatch just to get
        # a new database created with the schema we need and the
        # context manager for it stashed.
        with fixtures.MonkeyPatch(
                'nova.db.sqlalchemy.api.get_context_manager',
                get_context_manager):
            self._cache_schema(connection_str)
            engine = ctxt_mgr.writer.get_engine()
            engine.dispose()
            conn = engine.connect()
            conn.connection.executescript(DB_SCHEMA['main'])

    def setUp(self):
        super(CellDatabases, self).setUp()
        self.addCleanup(self.cleanup)
        self._real_target_cell = context.target_cell

        # NOTE(danms): These context managers are in place for the
        # duration of the test (unlike the temporary ones above) and
        # provide the actual "runtime" switching of connections for us.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.db.sqlalchemy.api.create_context_manager',
            self._wrap_create_context_manager))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.db.sqlalchemy.api.get_context_manager',
            self._wrap_get_context_manager))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.target_cell',
            self._wrap_target_cell))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.rpc.get_server',
            self._wrap_get_server))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.rpc.get_client',
            self._wrap_get_client))

    def cleanup(self):
        for ctxt_mgr in self._ctxt_mgrs.values():
            engine = ctxt_mgr.writer.get_engine()
            engine.dispose()
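
# Example (illustrative sketch): the connection strings registered with
# add_cell_database() are matched against CellMapping.database_connection
# at target_cell() time; the string values here are examples.
#
#     fix = CellDatabases()
#     fix.add_cell_database('cell0')
#     fix.add_cell_database('cell1', default=True)
#     self.useFixture(fix)
#     # A CellMapping with database_connection='cell0' now routes targeted
#     # DB access to the first in-memory database.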


class Database(fixtures.Fixture):
    def __init__(self, database='main', connection=None):
        """Create a database fixture.

        :param database: The type of database, 'main' or 'api'
        :param connection: The connection string to use
        """
        super(Database, self).__init__()
        # NOTE(pkholkin): oslo_db.enginefacade is configured in tests the
        # same way as it is done for any other service that uses the db
        global SESSION_CONFIGURED
        if not SESSION_CONFIGURED:
            session.configure(CONF)
            SESSION_CONFIGURED = True
        self.database = database
        if database == 'main':
            if connection is not None:
                ctxt_mgr = session.create_context_manager(
                    connection=connection)
                self.get_engine = ctxt_mgr.writer.get_engine
            else:
                self.get_engine = session.get_engine
        elif database == 'api':
            self.get_engine = session.get_api_engine

    def _cache_schema(self):
        global DB_SCHEMA
        if not DB_SCHEMA[self.database]:
            engine = self.get_engine()
            conn = engine.connect()
            migration.db_sync(database=self.database)
            DB_SCHEMA[self.database] = "".join(line for line
                                               in conn.connection.iterdump())
            engine.dispose()

    def cleanup(self):
        engine = self.get_engine()
        engine.dispose()

    def reset(self):
        self._cache_schema()
        engine = self.get_engine()
        engine.dispose()
        conn = engine.connect()
        conn.connection.executescript(DB_SCHEMA[self.database])

    def setUp(self):
        super(Database, self).setUp()
        self.reset()
        self.addCleanup(self.cleanup)
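
# Example (illustrative sketch): tests needing both schemas stack two
# instances of the fixture; each gets a fresh in-memory sqlite database
# restored from the cached schema dump.
#
#     self.useFixture(Database(database='main'))
#     self.useFixture(Database(database='api'))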


class DatabaseAtVersion(fixtures.Fixture):
    def __init__(self, version, database='main'):
        """Create a database fixture.

        :param version: Max version to sync to (or None for current)
        :param database: The type of database, 'main' or 'api'
        """
        super(DatabaseAtVersion, self).__init__()
        self.database = database
        self.version = version
        if database == 'main':
            self.get_engine = session.get_engine
        elif database == 'api':
            self.get_engine = session.get_api_engine

    def cleanup(self):
        engine = self.get_engine()
        engine.dispose()

    def reset(self):
        engine = self.get_engine()
        engine.dispose()
        engine.connect()
        migration.db_sync(version=self.version, database=self.database)

    def setUp(self):
        super(DatabaseAtVersion, self).setUp()
        self.reset()
        self.addCleanup(self.cleanup)


class DefaultFlavorsFixture(fixtures.Fixture):
    def setUp(self):
        super(DefaultFlavorsFixture, self).setUp()
        ctxt = context.get_admin_context()
        defaults = {'rxtx_factor': 1.0, 'disabled': False, 'is_public': True,
                    'ephemeral_gb': 0, 'swap': 0}
        extra_specs = {
            "hw:mem_page_size": "2048",
            "hw:cpu_policy": "dedicated"
        }
        default_flavors = [
            objects.Flavor(context=ctxt, memory_mb=512, vcpus=1,
                           root_gb=1, flavorid='1', name='m1.tiny',
                           **defaults),
            objects.Flavor(context=ctxt, memory_mb=2048, vcpus=1,
                           root_gb=20, flavorid='2', name='m1.small',
                           **defaults),
            objects.Flavor(context=ctxt, memory_mb=4096, vcpus=2,
                           root_gb=40, flavorid='3', name='m1.medium',
                           **defaults),
            objects.Flavor(context=ctxt, memory_mb=8192, vcpus=4,
                           root_gb=80, flavorid='4', name='m1.large',
                           **defaults),
            objects.Flavor(context=ctxt, memory_mb=16384, vcpus=8,
                           root_gb=160, flavorid='5', name='m1.xlarge',
                           **defaults),
            objects.Flavor(context=ctxt, memory_mb=512, vcpus=1,
                           root_gb=1, flavorid='6', name='m1.tiny.specs',
                           extra_specs=extra_specs, **defaults),
        ]
        for flavor in default_flavors:
            flavor.create()
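
# Example (illustrative sketch): after the fixture runs, the flavors are
# readable through the object layer (ctxt is an assumed admin context).
#
#     self.useFixture(DefaultFlavorsFixture())
#     flavor = objects.Flavor.get_by_flavor_id(ctxt, '1')
#     self.assertEqual('m1.tiny', flavor.name)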


class RPCFixture(fixtures.Fixture):
    def __init__(self, *exmods):
        super(RPCFixture, self).__init__()
        self.exmods = []
        self.exmods.extend(exmods)
        self._buses = {}

    def _fake_create_transport(self, url):
        # FIXME(danms): Right now, collapse all connections
        # to a single bus. This is how our tests expect things
        # to work. When the tests are fixed, this fixture can
        # support simulating multiple independent buses, and this
        # hack should be removed.
        url = None

        # NOTE(danms): This will be called with a non-None url by
        # cells-aware code that is requesting to contact something on
        # one of the many transports we're multiplexing here.
        if url not in self._buses:
            exmods = rpc.get_allowed_exmods()
            self._buses[url] = messaging.get_rpc_transport(
                CONF,
                url=url,
                allowed_remote_exmods=exmods)
        return self._buses[url]

    def setUp(self):
        super(RPCFixture, self).setUp()
        self.addCleanup(rpc.cleanup)
        rpc.add_extra_exmods(*self.exmods)
        self.addCleanup(rpc.clear_extra_exmods)
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_url = 'fake:/'
        self.useFixture(self.messaging_conf)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.rpc.create_transport', self._fake_create_transport))
        # NOTE(danms): Execute the init with get_transport_url() as None,
        # instead of the parsed TransportURL(None) so that we can cache
        # it as it will be called later if the default is requested by
        # one of our mq-switching methods.
        with mock.patch('nova.rpc.get_transport_url') as mock_gtu:
            mock_gtu.return_value = None
            rpc.init(CONF)

        def cleanup_in_flight_rpc_messages():
            messaging._drivers.impl_fake.FakeExchangeManager._exchanges = {}

        self.addCleanup(cleanup_in_flight_rpc_messages)
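
# Example (illustrative sketch): nova's base test case typically installs
# this fixture with nova's exception module allowed over the fake transport.
#
#     self.useFixture(RPCFixture('nova.exception'))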


class WarningsFixture(fixtures.Fixture):
    """Filters out warnings during test runs."""

    def setUp(self):
        super(WarningsFixture, self).setUp()
        # NOTE(sdague): Make deprecation warnings only happen once. Otherwise
        # this gets kind of crazy given the way that upstream python libs use
        # this.
        warnings.simplefilter("once", DeprecationWarning)
        warnings.filterwarnings('ignore',
                                message='With-statements now directly support'
                                        ' multiple context managers')

        # NOTE(sdague): nova does not use pkg_resources directly; these
        # are all very long-standing deprecations about other tools
        # using it. None of this is useful to Nova development.
        warnings.filterwarnings('ignore',
                                module='pkg_resources')

        # NOTE(sdague): this remains an unresolved item around the way
        # forward on is_admin; the deprecation is definitely premature.
        warnings.filterwarnings(
            'ignore',
            message='Policy enforcement is depending on the value of '
                    'is_admin. This key is deprecated. Please update your '
                    'policy file to use the standard policy values.')

        # NOTE(sdague): mox3 is on life support, don't really care
        # about any deprecations coming from it
        warnings.filterwarnings('ignore',
                                module='mox3.mox')

        # NOTE(gibi): we can remove this once we get rid of Mox in nova
        warnings.filterwarnings('ignore', message="Using class 'MoxStubout'")

        # NOTE(mriedem): Ignore scope check UserWarnings from oslo.policy.
        warnings.filterwarnings('ignore',
                                message="Policy .* failed scope check",
                                category=UserWarning)

        # NOTE(gibi): The UUIDFields emits a warning if the value is not a
        # valid UUID. Let's escalate that to an exception in the test to
        # prevent adding violations.
        warnings.filterwarnings('error', message=".*invalid UUID.*")

        # NOTE(mriedem): Avoid adding anything which tries to convert an
        # object to a primitive which jsonutils.to_primitive() does not know
        # how to handle (or isn't given a fallback callback).
        warnings.filterwarnings(
            'error',
            message="Cannot convert <oslo_db.sqlalchemy.enginefacade"
                    "._Default object at ",
            category=UserWarning)

        warnings.filterwarnings(
            'error', message='Evaluating non-mapped column expression',
            category=sqla_exc.SAWarning)

        # TODO(jangutter): Change (or remove) this to an error during the
        # Train cycle when the os-vif port profile is no longer used.
        warnings.filterwarnings(
            'ignore', message=".* 'VIFPortProfileOVSRepresentor' .* "
                              "is deprecated",
            category=PendingDeprecationWarning)

        # TODO(mriedem): Change (or remove) this DeprecationWarning once
        # https://bugs.launchpad.net/sqlalchemy-migrate/+bug/1814288 is
        # fixed.
        warnings.filterwarnings(
            'ignore', message=r'inspect\.getargspec\(\) is deprecated',
            category=DeprecationWarning,
            module='migrate.versioning.script.py')

        self.addCleanup(warnings.resetwarnings)


class ConfPatcher(fixtures.Fixture):
    """Fixture to patch and restore global CONF.

    This also resets overrides for everything that was patched during
    its teardown.
    """

    def __init__(self, **kwargs):
        """Constructor

        :params group: if specified all config options apply to that group.
        :params **kwargs: the rest of the kwargs are processed as a
            set of key/value pairs to be set as configuration overrides.
        """
        super(ConfPatcher, self).__init__()
        self.group = kwargs.pop('group', None)
        self.args = kwargs

    def setUp(self):
        super(ConfPatcher, self).setUp()
        for k, v in self.args.items():
            self.addCleanup(CONF.clear_override, k, self.group)
            CONF.set_override(k, v, self.group)
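
# Example (illustrative sketch): overriding one ungrouped and one grouped
# option for the duration of a test; the option names are examples only.
#
#     self.useFixture(ConfPatcher(host='fake-host'))
#     self.useFixture(ConfPatcher(group='api', auth_strategy='noauth2'))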


class OSAPIFixture(fixtures.Fixture):
    """Create an OS API server as a fixture.

    This spawns an OS API server as a fixture in a new greenthread in
    the current test. The fixture has a .api attribute which is a
    simple rest client that can communicate with it.

    This fixture is extremely useful for testing REST responses
    through the WSGI stack easily in functional tests.

    Usage:

        api = self.useFixture(fixtures.OSAPIFixture()).api
        resp = api.api_request('/someurl')
        self.assertEqual(200, resp.status_code)
        resp = api.api_request('/otherurl', method='POST', body='{foo}')

    The resp is a requests library response. Common attributes that
    you'll want to use are:

    - resp.status_code - integer HTTP status code returned by the request
    - resp.content - the body of the response
    - resp.headers - dictionary of HTTP headers returned
    """

    def __init__(self, api_version='v2',
                 project_id='6f70656e737461636b20342065766572'):
        """Constructor

        :param api_version: the API version that we're interested in
            using. Currently this expects 'v2' or 'v2.1' as possible
            options.
        :param project_id: the project id to use on the API.
        """
        super(OSAPIFixture, self).__init__()
        self.api_version = api_version
        self.project_id = project_id

    def setUp(self):
        super(OSAPIFixture, self).setUp()
        # A unique hostname for the wsgi-intercept.
        hostname = uuidsentinel.osapi_host
        port = 80
        service_name = 'osapi_compute'
        endpoint = 'http://%s:%s/' % (hostname, port)
        conf_overrides = {
            'osapi_compute_listen': hostname,
            'osapi_compute_listen_port': port,
            'debug': True,
        }
        self.useFixture(ConfPatcher(**conf_overrides))

        # Turn off manipulation of socket_options in TCPKeepAliveAdapter
        # to keep wsgi-intercept happy. Replace it with the method
        # from its superclass.
        self.useFixture(fixtures.MonkeyPatch(
            'keystoneauth1.session.TCPKeepAliveAdapter.init_poolmanager',
            adapters.HTTPAdapter.init_poolmanager))

        loader = wsgi.Loader().load_app(service_name)
        app = lambda: loader

        # re-use service setup code from wsgi_app to register
        # service, which is looked for in some tests
        wsgi_app._setup_service(CONF.host, service_name)
        intercept = interceptor.RequestsInterceptor(app, url=endpoint)
        intercept.install_intercept()
        self.addCleanup(intercept.uninstall_intercept)

        self.auth_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({
            'host': hostname, 'port': port, 'api_version': self.api_version})
        self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url,
                                              self.project_id)
        self.admin_api = client.TestOpenStackClient(
            'admin', 'admin', self.auth_url, self.project_id)
        # Provide a way to access the wsgi application to tests using
        # the fixture.
        self.app = app


class OSMetadataServer(fixtures.Fixture):
    """Create an OS Metadata API server as a fixture.

    This spawns an OS Metadata API server as a fixture in a new
    greenthread in the current test.

    TODO(sdague): ideally for testing we'd have something like the
    test client which acts like requests, but connects any of the
    interactions needed.
    """
    def setUp(self):
        super(OSMetadataServer, self).setUp()
        # in order to run these in tests we need to bind only to local
        # host, and dynamically allocate ports
        conf_overrides = {
            'metadata_listen': '127.0.0.1',
            'metadata_listen_port': 0,
            'debug': True
        }
        self.useFixture(ConfPatcher(**conf_overrides))

        # NOTE(mikal): we don't have root to manipulate iptables, so just
        # zero that bit out.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.network.linux_net.IptablesManager._apply',
            lambda _: None))

        self.metadata = service.WSGIService("metadata")
        self.metadata.start()
        self.addCleanup(self.metadata.stop)
        self.md_url = "http://%s:%s/" % (
            conf_overrides['metadata_listen'],
            self.metadata.port)


class PoisonFunctions(fixtures.Fixture):
    """Poison functions so they explode if we touch them.

    When running under a non full stack test harness there are parts
    of the code that you don't want to go anywhere near. These include
    things like code that spins up extra threads, which just
    introduces races.
    """

    def setUp(self):
        super(PoisonFunctions, self).setUp()

        # The nova libvirt driver starts an event thread which only
        # causes trouble in tests. Make sure that if tests don't
        # properly patch it the test explodes.
        def evloop(*args, **kwargs):
            import sys
            warnings.warn("Forgot to disable libvirt event thread")
            sys.exit(1)

        # Don't poison the function if it's already mocked
        import nova.virt.libvirt.host
        if not isinstance(nova.virt.libvirt.host.Host._init_events,
                          mock.Mock):
            self.useFixture(fixtures.MockPatch(
                'nova.virt.libvirt.host.Host._init_events',
                side_effect=evloop))


class IndirectionAPIFixture(fixtures.Fixture):
    """Patch and restore the global NovaObject indirection api."""

    def __init__(self, indirection_api):
        """Constructor

        :param indirection_api: the indirection API to be used for tests.
        """
        super(IndirectionAPIFixture, self).__init__()
        self.indirection_api = indirection_api

    def cleanup(self):
        obj_base.NovaObject.indirection_api = self.orig_indirection_api

    def setUp(self):
        super(IndirectionAPIFixture, self).setUp()
        self.orig_indirection_api = obj_base.NovaObject.indirection_api
        obj_base.NovaObject.indirection_api = self.indirection_api
        self.addCleanup(self.cleanup)


class _FakeGreenThread(object):
    def __init__(self, func, *args, **kwargs):
        self._result = func(*args, **kwargs)

    def cancel(self, *args, **kwargs):
        # This method doesn't make sense for a synchronous call, it's just
        # defined to satisfy the interface.
        pass

    def kill(self, *args, **kwargs):
        # This method doesn't make sense for a synchronous call, it's just
        # defined to satisfy the interface.
        pass

    def link(self, func, *args, **kwargs):
        func(self, *args, **kwargs)

    def unlink(self, func, *args, **kwargs):
        # This method doesn't make sense for a synchronous call, it's just
        # defined to satisfy the interface.
        pass

    def wait(self):
        return self._result


class SpawnIsSynchronousFixture(fixtures.Fixture):
    """Patch and restore the spawn_n utility method to be synchronous"""

    def setUp(self):
        super(SpawnIsSynchronousFixture, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'nova.utils.spawn_n', _FakeGreenThread))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.utils.spawn', _FakeGreenThread))
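
# Example (illustrative sketch): with the fixture in place, spawned
# functions run inline and the fake green thread exposes the result.
#
#     self.useFixture(SpawnIsSynchronousFixture())
#     gt = nova.utils.spawn(lambda: 42)  # runs synchronously
#     self.assertEqual(42, gt.wait())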


class SynchronousThreadPoolExecutorFixture(fixtures.Fixture):
    """Make GreenThreadPoolExecutor.submit() synchronous.

    The function passed to submit() will be executed and a mock.Mock
    object will be returned as the Future where Future.result() will
    return the result of the call to the submitted function.
    """
    def setUp(self):
        super(SynchronousThreadPoolExecutorFixture, self).setUp()

        def fake_submit(_self, fn, *args, **kwargs):
            result = fn(*args, **kwargs)
            # Return a restricted mock whose result() really returns the
            # value computed above, as promised by the class docstring.
            future = mock.Mock(spec=['result'])
            future.result.return_value = result
            return future

        self.useFixture(fixtures.MonkeyPatch(
            'futurist.GreenThreadPoolExecutor.submit',
            fake_submit))


class BannedDBSchemaOperations(fixtures.Fixture):
    """Ban some operations for migrations"""
    def __init__(self, banned_resources=None):
        super(BannedDBSchemaOperations, self).__init__()
        self._banned_resources = banned_resources or []

    @staticmethod
    def _explode(resource, op):
        raise exception.DBNotAllowed(
            'Operation %s.%s() is not allowed in a database migration' % (
                resource, op))

    def setUp(self):
        super(BannedDBSchemaOperations, self).setUp()

        def _banned(resource, op):
            # Bind resource/op per iteration so each patched target
            # reports its own name rather than the loop's last value.
            return lambda *a, **k: self._explode(resource, op)

        for thing in self._banned_resources:
            self.useFixture(fixtures.MonkeyPatch(
                'sqlalchemy.%s.drop' % thing, _banned(thing, 'drop')))
            self.useFixture(fixtures.MonkeyPatch(
                'sqlalchemy.%s.alter' % thing, _banned(thing, 'alter')))
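
# Example (illustrative sketch): migration tests can ban destructive DDL
# for a scope; fixtures support use as a context manager.
#
#     with BannedDBSchemaOperations(['Table', 'Column']):
#         # Any sqlalchemy Table/Column .drop() or .alter() now raises
#         # exception.DBNotAllowed.
#         run_migration()  # assumed test helper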


class ForbidNewLegacyNotificationFixture(fixtures.Fixture):
    """Make sure the test fails if a new legacy notification is added"""
    def __init__(self):
        super(ForbidNewLegacyNotificationFixture, self).__init__()
        self.notifier = rpc.LegacyValidatingNotifier

    def setUp(self):
        super(ForbidNewLegacyNotificationFixture, self).setUp()
        self.notifier.fatal = True

        # allow the special test value used in
        # nova.tests.unit.test_notifications.NotificationsTestCase
        self.notifier.allowed_legacy_notification_event_types.append(
            '_decorated_function')

        self.addCleanup(self.cleanup)

    def cleanup(self):
        self.notifier.fatal = False
        self.notifier.allowed_legacy_notification_event_types.remove(
            '_decorated_function')


class AllServicesCurrent(fixtures.Fixture):
    def setUp(self):
        super(AllServicesCurrent, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.Service.get_minimum_version_multi',
            self._fake_minimum))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.service.get_minimum_version_all_cells',
            lambda *a, **k: service_obj.SERVICE_VERSION))
        compute_rpcapi.LAST_VERSION = None

    def _fake_minimum(self, *args, **kwargs):
        return service_obj.SERVICE_VERSION


class RegisterNetworkQuota(fixtures.Fixture):
    def setUp(self):
        super(RegisterNetworkQuota, self).setUp()
        # Quota resource registration modifies the global QUOTAS engine, so
        # this fixture registers and unregisters network quota for a test.
        tenant_networks._register_network_quota()
        self.addCleanup(self.cleanup)

    def cleanup(self):
        nova_quota.QUOTAS._resources.pop('networks', None)


class NeutronFixture(fixtures.Fixture):
    """A fixture to boot instances with neutron ports"""

    # the default project_id in OSAPIFixture
    tenant_id = '6f70656e737461636b20342065766572'

    network_1 = {
        'status': 'ACTIVE',
        'subnets': [],
        'name': 'private-network',
        'admin_state_up': True,
        'tenant_id': tenant_id,
        'id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
        'shared': False,
    }

    subnet_1 = {
        'name': 'private-subnet',
        'enable_dhcp': True,
        'network_id': network_1['id'],
        'tenant_id': tenant_id,
        'dns_nameservers': [],
        'allocation_pools': [
            {
                'start': '192.168.1.1',
                'end': '192.168.1.254'
            }
        ],
        'host_routes': [],
        'ip_version': 4,
        'gateway_ip': '192.168.1.1',
        'cidr': '192.168.1.0/24',
        'id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef'
    }
    network_1['subnets'] = [subnet_1['id']]

    port_1 = {
        'id': 'ce531f90-199f-48c0-816c-13e38010b442',
        'network_id': network_1['id'],
        'admin_state_up': True,
        'status': 'ACTIVE',
        'mac_address': 'fa:16:3e:4c:2c:30',
        'fixed_ips': [
            {
                # The IP on this port must be a prefix of the IP on port_2
                # to test listing servers with an ip filter regex.
                'ip_address': '192.168.1.3',
                'subnet_id': subnet_1['id']
            }
        ],
        'tenant_id': tenant_id,
        'binding:vif_type': 'ovs'
    }

    port_2 = {
        'id': '88dae9fa-0dc6-49e3-8c29-3abc41e99ac9',
        'network_id': network_1['id'],
        'admin_state_up': True,
        'status': 'ACTIVE',
        'mac_address': '00:0c:29:0d:11:74',
        'fixed_ips': [
            {
                'ip_address': '192.168.1.30',
                'subnet_id': subnet_1['id']
            }
        ],
        'tenant_id': tenant_id,
        'binding:vif_type': 'ovs'
    }

    port_with_resource_request = {
        'id': '2f2613ce-95a9-490a-b3c4-5f1c28c1f886',
        'network_id': network_1['id'],
        'admin_state_up': True,
        'status': 'ACTIVE',
        'mac_address': '52:54:00:1e:59:c3',
        'fixed_ips': [
            {
                'ip_address': '192.168.1.42',
                'subnet_id': subnet_1['id']
            }
        ],
        'tenant_id': tenant_id,
        'resource_request': {
            "resources": {
                orc.NET_BW_IGR_KILOBIT_PER_SEC: 1000,
                orc.NET_BW_EGR_KILOBIT_PER_SEC: 1000},
            "required": ["CUSTOM_PHYSNET2", "CUSTOM_VNIC_TYPE_NORMAL"]
        }
    }

    network_2 = {
        'status': 'ACTIVE',
        'subnets': [],
        'name': 'private-network',
        'admin_state_up': True,
        'tenant_id': tenant_id,
        'id': '1b70879f-fd00-411e-8ea9-143e7820e61d',
        'shared': False,
        'provider:physical_network': 'physnet2',
        "provider:network_type": "vlan",
    }

    subnet_2 = {
        'name': 'private-subnet',
        'enable_dhcp': True,
        'network_id': network_2['id'],
        'tenant_id': tenant_id,
        'dns_nameservers': [],
        'allocation_pools': [
            {
                'start': '192.168.13.1',
                'end': '192.168.13.254'
            }
        ],
        'host_routes': [],
        'ip_version': 4,
        'gateway_ip': '192.168.13.1',
        'cidr': '192.168.13.0/24',
        'id': 'c7ca1baf-f536-4849-89fe-9671318375ff'
    }
    network_2['subnets'] = [subnet_2['id']]

    sriov_port = {
        'id': '5460ee0c-ffbb-4e45-8d58-37bfceabd084',
        'network_id': network_2['id'],
        'admin_state_up': True,
        'status': 'ACTIVE',
        'mac_address': '52:54:00:1e:59:c4',
        'fixed_ips': [
            {
                'ip_address': '192.168.13.2',
                'subnet_id': subnet_2['id']
            }
        ],
        'tenant_id': tenant_id,
        'resource_request': {},
        'binding:vnic_type': 'direct',
    }

    port_with_sriov_resource_request = {
        'id': '7059503b-a648-40fd-a561-5ca769304bee',
        'network_id': network_2['id'],
        'admin_state_up': True,
        'status': 'ACTIVE',
        'mac_address': '52:54:00:1e:59:c5',
        # Does neutron really add fixed_ips to a direct vnic_type port?
        'fixed_ips': [
            {
                'ip_address': '192.168.13.3',
                'subnet_id': subnet_2['id']
            }
        ],
        'tenant_id': tenant_id,
        'resource_request': {
            "resources": {
                orc.NET_BW_IGR_KILOBIT_PER_SEC: 10000,
                orc.NET_BW_EGR_KILOBIT_PER_SEC: 10000},
            "required": ["CUSTOM_PHYSNET2", "CUSTOM_VNIC_TYPE_DIRECT"]
        },
        'binding:vnic_type': 'direct',
    }

    nw_info = [{
        "profile": {},
        "ovs_interfaceid": "b71f1699-42be-4515-930a-f3ef01f94aa7",
        "preserve_on_delete": False,
        "network": {
            "bridge": "br-int",
            "subnets": [{
                "ips": [{
                    "meta": {},
                    "version": 4,
                    "type": "fixed",
                    "floating_ips": [],
                    "address": "10.0.0.4"
                }],
                "version": 4,
                "meta": {},
                "dns": [],
                "routes": [],
                "cidr": "10.0.0.0/26",
                "gateway": {
                    "meta": {},
                    "version": 4,
                    "type": "gateway",
                    "address": "10.0.0.1"
                }
            }],
            "meta": {
                "injected": False,
                "tenant_id": tenant_id,
                "mtu": 1500
            },
            "id": "e1882e38-38c2-4239-ade7-35d644cb963a",
            "label": "public"
        },
        "devname": "tapb71f1699-42",
        "vnic_type": "normal",
        "qbh_params": None,
        "meta": {},
        "details": {
            "port_filter": True,
            "ovs_hybrid_plug": True
        },
        "address": "fa:16:3e:47:94:4a",
        "active": True,
        "type": "ovs",
        "id": "b71f1699-42be-4515-930a-f3ef01f94aa7",
        "qbg_params": None
    }]

    def __init__(self, test):
        super(NeutronFixture, self).__init__()
        self.test = test

        # The fixture allows port updates, so we need to deepcopy the class
        # variables to avoid test case interference.
        self._ports = {
            # NOTE(gibi): The port_with_sriov_resource_request cannot be
            # added globally in this fixture as it adds a second network
            # that makes auto-allocation-based tests fail due to ambiguous
            # networks.
            NeutronFixture.port_1['id']: copy.deepcopy(NeutronFixture.port_1),
            NeutronFixture.port_with_resource_request['id']:
                copy.deepcopy(NeutronFixture.port_with_resource_request)
        }

        # The fixture does not allow network updates, so we don't have to
        # deepcopy here
        self._networks = {
            NeutronFixture.network_1['id']: NeutronFixture.network_1
        }
        # The fixture does not allow subnet updates, so we don't have to
        # deepcopy here
        self._subnets = {
            NeutronFixture.subnet_1['id']: NeutronFixture.subnet_1
        }

    def setUp(self):
        super(NeutronFixture, self).setUp()

        self.test.stub_out(
            'nova.network.neutronv2.api.API.setup_networks_on_host',
            lambda *args, **kwargs: None)
        self.test.stub_out(
            'nova.network.neutronv2.api.API.migrate_instance_start',
            lambda *args, **kwargs: None)
        self.test.stub_out(
            'nova.network.neutronv2.api.API.add_fixed_ip_to_instance',
            lambda *args, **kwargs: network_model.NetworkInfo.hydrate(
                NeutronFixture.nw_info))
        self.test.stub_out(
            'nova.network.neutronv2.api.API.remove_fixed_ip_from_instance',
            lambda *args, **kwargs: network_model.NetworkInfo.hydrate(
                NeutronFixture.nw_info))
        self.test.stub_out(
            'nova.network.neutronv2.api.API.migrate_instance_finish',
            lambda *args, **kwargs: None)
        self.test.stub_out(
            'nova.network.security_group.neutron_driver.SecurityGroupAPI.'
            'get_instances_security_groups_bindings',
            lambda *args, **kwargs: {})

        # Stub out port binding APIs which go through a KSA client Adapter
        # rather than python-neutronclient.
        self.test.stub_out(
            'nova.network.neutronv2.api._get_ksa_client',
            lambda *args, **kwargs: mock.Mock(
                spec='keystoneauth1.adapter.Adapter'))
        self.test.stub_out(
            'nova.network.neutronv2.api.API._create_port_binding',
            self.fake_create_port_binding)
        self.test.stub_out(
            'nova.network.neutronv2.api.API._delete_port_binding',
            self.fake_delete_port_binding)

        self.test.stub_out('nova.network.neutronv2.api.get_client',
                           lambda *args, **kwargs: self)

    @staticmethod
    def fake_create_port_binding(client, port_id, data):
        # TODO(mriedem): Make this smarter by keeping track of our bindings
        # per port so we can reflect the status accurately.
        return fake_requests.FakeResponse(200, content=jsonutils.dumps(data))

    @staticmethod
    def fake_delete_port_binding(client, port_id, host):
        # TODO(mriedem): Make this smarter by keeping track of our bindings
        # per port so we can reflect the status accurately.
        return fake_requests.FakeResponse(204)

    def _get_first_id_match(self, obj_id, obj_list):
        filtered_list = [o for o in obj_list if o['id'] == obj_id]
        if len(filtered_list) > 0:
            return filtered_list[0]
        else:
            return None

    def list_extensions(self, *args, **kwargs):
        return {
            'extensions': [
                {
                    # Copied from neutron-lib portbindings_extended.py
                    "updated": "2017-07-17T10:00:00-00:00",
                    "name": neutron_constants.PORT_BINDING_EXTENDED,
                    "links": [],
                    "alias": "binding-extended",
                    "description": "Expose port bindings of a virtual port "
                                   "to external application"
                }
            ]
        }

    def show_port(self, port_id, **_params):
        if port_id not in self._ports:
            raise exception.PortNotFound(port_id=port_id)
        return {'port': copy.deepcopy(self._ports[port_id])}

    def delete_port(self, port_id, **_params):
        if port_id in self._ports:
            del self._ports[port_id]

    def show_network(self, network_id, **_params):
        if network_id not in self._networks:
            raise neutron_client_exc.NetworkNotFoundClient()
        return {'network': copy.deepcopy(self._networks[network_id])}

    def list_networks(self, retrieve_all=True, **_params):
        networks = self._networks.values()
        if 'id' in _params:
            networks = [x for x in networks if x['id'] in _params['id']]
            _params.pop('id')
        networks = [n for n in networks
                    if all(n.get(opt) == _params[opt] for opt in _params)]
        return {'networks': copy.deepcopy(networks)}

    def list_ports(self, retrieve_all=True, **_params):
        # If 'fields' is passed we need to strip that out since it will mess
        # up the filtering as 'fields' is not a filter parameter.
        _params.pop('fields', None)
        ports = [p for p in self._ports.values()
                 if all(p.get(opt) == _params[opt] for opt in _params)]
        return {'ports': copy.deepcopy(ports)}

    def list_subnets(self, retrieve_all=True, **_params):
        # NOTE(gibi): The fixture does not support filtering for subnets
        return {'subnets': copy.deepcopy(list(self._subnets.values()))}

    def list_floatingips(self, retrieve_all=True, **_params):
        return {'floatingips': []}

    def create_port(self, body=None):
        # NOTE(gibi): Some of the tests expect that a pre-defined port,
        # port_2, is created. So if that port does not exist yet, it is the
        # one created here.
        if NeutronFixture.port_2['id'] not in self._ports:
            new_port = copy.deepcopy(NeutronFixture.port_2)
        else:
            # If port_2 already exists then create a new port based on the
            # request body with port_2 as a template, and assign a new
            # port_id and mac_address to the new port.
            new_port = copy.deepcopy(body)
            new_port.update(copy.deepcopy(NeutronFixture.port_2))
            # We need truly random uuids instead of named sentinels as some
            # tests need more than 3 ports.
            new_port.update({
                'id': str(uuidutils.generate_uuid()),
                'mac_address': '00:' + ':'.join(
                    ['%02x' % random.randint(0, 255) for _ in range(5)]),
            })
        self._ports[new_port['id']] = new_port
        # We need to copy what we return as nova might modify the returned
        # port locally and we don't want that to affect the port stored in
        # the self._ports dict.
        return {'port': copy.deepcopy(new_port)}
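
    # For example (a sketch, assuming ``neutron`` is an instance of this
    # fixture): as long as port_2 is not yet in self._ports, create_port()
    # returns a copy of it; afterwards each call returns a templated port
    # with a freshly generated id and mac_address:
    #
    #   first = neutron.create_port({'port': {}})['port']
    #   second = neutron.create_port({'port': {}})['port']
    #   assert first['id'] != second['id']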

    def update_port(self, port_id, body=None):
        port = self._ports[port_id]
        port.update(body['port'])
        return {'port': copy.deepcopy(port)}

    def show_quota(self, project_id):
        # Unlimited quota.
        return {'quota': {'port': -1}}


class _NoopConductor(object):
    def __getattr__(self, key):
        def _noop_rpc(*args, **kwargs):
            return None
        return _noop_rpc


class NoopConductorFixture(fixtures.Fixture):
    """Stub out the conductor API to do nothing."""

    def setUp(self):
        super(NoopConductorFixture, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'nova.conductor.ComputeTaskAPI', _NoopConductor))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.conductor.API', _NoopConductor))


class EventReporterStub(fixtures.Fixture):
    """Stub out EventReporter so instance action events are not recorded."""

    def setUp(self):
        super(EventReporterStub, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'nova.compute.utils.EventReporter',
            lambda *args, **kwargs: mock.MagicMock()))


class CinderFixture(fixtures.Fixture):
    """A fixture for stubbing out Cinder volume operations."""

    # the default project_id in OSAPIFixtures
    tenant_id = '6f70656e737461636b20342065766572'

    SWAP_OLD_VOL = 'a07f71dc-8151-4e7d-a0cc-cd24a3f11113'
    SWAP_NEW_VOL = '227cc671-f30b-4488-96fd-7d0bf13648d8'
    SWAP_ERR_OLD_VOL = '828419fa-3efb-4533-b458-4267ca5fe9b1'
    SWAP_ERR_NEW_VOL = '9c6d9c2d-7a8f-4c80-938d-3bf062b8d489'

    # This represents a bootable image-backed volume to test
    # boot-from-volume scenarios.
    IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'

    def __init__(self, test):
        super(CinderFixture, self).__init__()
        self.test = test
        self.swap_volume_instance_uuid = None
        self.swap_volume_instance_error_uuid = None
        self.reserved_volumes = list()
        # This is a map of instance UUIDs to lists of volume IDs which gets
        # updated on attach/detach operations.
        self.attachments = collections.defaultdict(list)

    def volume_ids_for_instance(self, instance_uuid):
        return self.attachments.get(instance_uuid)
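
    # For example (a sketch): after a test attaches a volume,
    # ``fixture.volume_ids_for_instance(server['id'])`` returns a list of
    # attached volume IDs, or None if nothing was ever attached.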

    def setUp(self):
        super(CinderFixture, self).setUp()

        def fake_get(self_api, context, volume_id, microversion=None):
            # Check for the special swap volumes.
            if volume_id in (CinderFixture.SWAP_OLD_VOL,
                             CinderFixture.SWAP_ERR_OLD_VOL):
                volume = {
                    'status': 'available',
                    'display_name': 'TEST1',
                    'attach_status': 'detached',
                    'id': volume_id,
                    'multiattach': False,
                    'size': 1
                }
                if ((self.swap_volume_instance_uuid and
                     volume_id == CinderFixture.SWAP_OLD_VOL) or
                    (self.swap_volume_instance_error_uuid and
                     volume_id == CinderFixture.SWAP_ERR_OLD_VOL)):
                    instance_uuid = (self.swap_volume_instance_uuid
                                     if volume_id == CinderFixture.SWAP_OLD_VOL
                                     else self.swap_volume_instance_error_uuid)
                    volume.update({
                        'status': 'in-use',
                        'attachments': {
                            instance_uuid: {
                                'mountpoint': '/dev/vdb',
                                'attachment_id': volume_id
                            }
                        },
                        'attach_status': 'attached'
                    })
                return volume

            # Check to see if the volume is attached.
            for instance_uuid, volumes in self.attachments.items():
                if volume_id in volumes:
                    # The volume is attached.
                    volume = {
                        'status': 'in-use',
                        'display_name': volume_id,
                        'attach_status': 'attached',
                        'id': volume_id,
                        'multiattach': False,
                        'size': 1,
                        'attachments': {
                            instance_uuid: {
                                'attachment_id': volume_id,
                                'mountpoint': '/dev/vdb'
                            }
                        }
                    }
                    break
            else:
                # This is a test that does not care about the actual details.
                reserved_volume = (volume_id in self.reserved_volumes)
                volume = {
                    'status': 'attaching' if reserved_volume else 'available',
                    'display_name': 'TEST2',
                    'attach_status': 'detached',
                    'id': volume_id,
                    'multiattach': False,
                    'size': 1
                }

            # Check for our special image-backed volume.
            if volume_id == self.IMAGE_BACKED_VOL:
                # Make it a bootable volume.
                volume['bootable'] = True
                # Add the image_id metadata.
                volume['volume_image_metadata'] = {
                    # There would normally be more image metadata in here...
                    'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
                }

            return volume

        def fake_initialize_connection(self, context, volume_id, connector):
            if volume_id == CinderFixture.SWAP_ERR_NEW_VOL:
                # Return a tuple in order to raise an exception.
                return ()
            return {}

        def fake_migrate_volume_completion(self, context, old_volume_id,
                                           new_volume_id, error):
            return {'save_volume_id': new_volume_id}

        def fake_reserve_volume(self_api, context, volume_id):
            self.reserved_volumes.append(volume_id)

        def fake_unreserve_volume(self_api, context, volume_id):
            # NOTE(mnaser): It's possible that we unreserve a volume that was
            #               never reserved (ex: instance.volume_attach.error
            #               notification tests)
            if volume_id in self.reserved_volumes:
                self.reserved_volumes.remove(volume_id)

        def fake_attach(_self, context, volume_id, instance_uuid,
                        mountpoint, mode='rw'):
            # Check to see if the volume is already attached to any server.
            for instance, volumes in self.attachments.items():
                if volume_id in volumes:
                    raise exception.InvalidInput(
                        reason='Volume %s is already attached to '
                               'instance %s' % (volume_id, instance))
            # It's not attached so let's "attach" it.
            self.attachments[instance_uuid].append(volume_id)

        self.test.stub_out('nova.volume.cinder.API.attach',
                           fake_attach)

        def fake_detach(_self, context, volume_id, instance_uuid=None,
                        attachment_id=None):
            # NOTE(mnaser): It's possible that we unreserve a volume that was
            #               never reserved (ex: instance.volume_attach.error
            #               notification tests)
            if volume_id in self.reserved_volumes:
                self.reserved_volumes.remove(volume_id)

            if instance_uuid is not None:
                # If the volume isn't attached to this instance it will
                # result in a ValueError which indicates a broken test or
                # code, so we just let that raise up.
                self.attachments[instance_uuid].remove(volume_id)
            else:
                for instance, volumes in self.attachments.items():
                    if volume_id in volumes:
                        volumes.remove(volume_id)
                        break

        self.test.stub_out('nova.volume.cinder.API.detach', fake_detach)

        self.test.stub_out('nova.volume.cinder.API.begin_detaching',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.get',
                           fake_get)
        self.test.stub_out('nova.volume.cinder.API.initialize_connection',
                           fake_initialize_connection)
        self.test.stub_out(
            'nova.volume.cinder.API.migrate_volume_completion',
            fake_migrate_volume_completion)
        self.test.stub_out('nova.volume.cinder.API.reserve_volume',
                           fake_reserve_volume)
        self.test.stub_out('nova.volume.cinder.API.roll_detaching',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.terminate_connection',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.unreserve_volume',
                           fake_unreserve_volume)
        self.test.stub_out('nova.volume.cinder.API.check_attached',
                           lambda *args, **kwargs: None)


# TODO(mriedem): We can probably pull some of the common parts from the
# CinderFixture into a common mixin class for things like the variables
# and fake_get.
class CinderFixtureNewAttachFlow(fixtures.Fixture):
    """A fixture for stubbing out volume operations using the new Cinder
    attach/detach APIs.
    """

    # the default project_id in OSAPIFixtures
    tenant_id = '6f70656e737461636b20342065766572'

    SWAP_OLD_VOL = 'a07f71dc-8151-4e7d-a0cc-cd24a3f11113'
    SWAP_NEW_VOL = '227cc671-f30b-4488-96fd-7d0bf13648d8'
    SWAP_ERR_OLD_VOL = '828419fa-3efb-4533-b458-4267ca5fe9b1'
    SWAP_ERR_NEW_VOL = '9c6d9c2d-7a8f-4c80-938d-3bf062b8d489'
    SWAP_ERR_ATTACH_ID = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
    MULTIATTACH_VOL = '4757d51f-54eb-4442-8684-3399a6431f67'

    # This represents a bootable image-backed volume to test
    # boot-from-volume scenarios.
    IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'
    # This represents a bootable image-backed volume with required traits
    # as part of volume image metadata.
    IMAGE_WITH_TRAITS_BACKED_VOL = '6194fc02-c60e-4a01-a8e5-600798208b5f'

    def __init__(self, test):
        super(CinderFixtureNewAttachFlow, self).__init__()
        self.test = test
        self.swap_volume_instance_uuid = None
        self.swap_volume_instance_error_uuid = None
        self.attachment_error_id = None
        # A map of volumes to a list of (attachment_id, instance_uuid).
        # Note that a volume can have multiple attachments even without
        # multi-attach, as some flows create a blank 'reservation' attachment
        # before deleting another attachment.
        self.volume_to_attachment = collections.defaultdict(list)

    def volume_ids_for_instance(self, instance_uuid):
        for volume_id, attachments in self.volume_to_attachment.items():
            for _, _instance_uuid in attachments:
                if _instance_uuid == instance_uuid:
                    # we might have multiple volumes attached to this instance
                    # so yield rather than return
                    yield volume_id
                    break
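
    # Note that, unlike CinderFixture.volume_ids_for_instance which returns
    # a list, this is a generator; callers wanting a list would do e.g.:
    #
    #   volume_ids = list(fixture.volume_ids_for_instance(instance_uuid))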

    def setUp(self):
        super(CinderFixtureNewAttachFlow, self).setUp()

        def fake_get(self_api, context, volume_id, microversion=None):
            # Check for the special swap volumes.
            attachments = self.volume_to_attachment[volume_id]

            if volume_id in (CinderFixture.SWAP_OLD_VOL,
                             CinderFixture.SWAP_ERR_OLD_VOL):
                volume = {
                    'status': 'available',
                    'display_name': 'TEST1',
                    'attach_status': 'detached',
                    'id': volume_id,
                    'multiattach': False,
                    'size': 1
                }
                if ((self.swap_volume_instance_uuid and
                     volume_id == CinderFixture.SWAP_OLD_VOL) or
                    (self.swap_volume_instance_error_uuid and
                     volume_id == CinderFixture.SWAP_ERR_OLD_VOL)):
                    instance_uuid = (self.swap_volume_instance_uuid
                                     if volume_id == CinderFixture.SWAP_OLD_VOL
                                     else self.swap_volume_instance_error_uuid)

                    if attachments:
                        attachment_id, instance_uuid = attachments[0]

                        volume.update({
                            'status': 'in-use',
                            'attachments': {
                                instance_uuid: {
                                    'mountpoint': '/dev/vdb',
                                    'attachment_id': attachment_id
                                }
                            },
                            'attach_status': 'attached'
                        })
                return volume

            # Check to see if the volume is attached.
            if attachments:
                # The volume is attached.
                attachment_id, instance_uuid = attachments[0]
                volume = {
                    'status': 'in-use',
                    'display_name': volume_id,
                    'attach_status': 'attached',
                    'id': volume_id,
                    'multiattach': volume_id == self.MULTIATTACH_VOL,
                    'size': 1,
                    'attachments': {
                        instance_uuid: {
                            'attachment_id': attachment_id,
                            'mountpoint': '/dev/vdb'
                        }
                    }
                }
            else:
                # This is a test that does not care about the actual details.
                volume = {
                    'status': 'available',
                    'display_name': 'TEST2',
                    'attach_status': 'detached',
                    'id': volume_id,
                    'multiattach': volume_id == self.MULTIATTACH_VOL,
                    'size': 1
                }

            # Check for our special image-backed volume.
            if volume_id in (self.IMAGE_BACKED_VOL,
                             self.IMAGE_WITH_TRAITS_BACKED_VOL):
                # Make it a bootable volume.
                volume['bootable'] = True
                if volume_id == self.IMAGE_BACKED_VOL:
                    # Add the image_id metadata.
                    volume['volume_image_metadata'] = {
                        # There would normally be more image metadata in here.
                        'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
                    }
                elif volume_id == self.IMAGE_WITH_TRAITS_BACKED_VOL:
                    # Add the image_id metadata with traits.
                    volume['volume_image_metadata'] = {
                        'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                        "trait:HW_CPU_X86_SGX": "required",
                    }

            return volume

        def fake_migrate_volume_completion(self, context, old_volume_id,
                                           new_volume_id, error):
            return {'save_volume_id': new_volume_id}

        def _find_attachment(attachment_id):
            """Find attachment corresponding to ``attachment_id``.

            Returns:
                A tuple of the volume ID, an attachment-instance mapping
                tuple for the given attachment ID, and a list of
                attachment-instance mapping tuples for the volume.
            """
            for volume_id, attachments in self.volume_to_attachment.items():
                for attachment in attachments:
                    _attachment_id, instance_uuid = attachment
                    if attachment_id == _attachment_id:
                        return volume_id, attachment, attachments
            raise exception.VolumeAttachmentNotFound(
                attachment_id=attachment_id)

        def fake_attachment_create(_self, context, volume_id, instance_uuid,
                                   connector=None, mountpoint=None):
            attachment_id = uuidutils.generate_uuid()
            if self.attachment_error_id is not None:
                attachment_id = self.attachment_error_id
            attachment = {'id': attachment_id,
                          'connection_info': {'data': {}}}
            self.volume_to_attachment[volume_id].append(
                (attachment_id, instance_uuid))
            return attachment

        def fake_attachment_delete(_self, context, attachment_id):
            # 'attachment' is a tuple defining an attachment-instance mapping.
            _, attachment, attachments = _find_attachment(attachment_id)
            attachments.remove(attachment)

        def fake_attachment_update(_self, context, attachment_id, connector,
                                   mountpoint=None):
            # Ensure the attachment exists.
            _find_attachment(attachment_id)
            attachment_ref = {'driver_volume_type': 'fake_type',
                              'id': attachment_id,
                              'connection_info': {'data':
                                                  {'foo': 'bar',
                                                   'target_lun': '1'}}}
            if attachment_id == CinderFixtureNewAttachFlow.SWAP_ERR_ATTACH_ID:
                # This intentionally triggers a TypeError for the
                # instance.volume_swap.error versioned notification tests.
                attachment_ref = {'connection_info': ()}
            return attachment_ref

        def fake_attachment_get(_self, context, attachment_id):
            # Ensure the attachment exists.
            _find_attachment(attachment_id)
            attachment_ref = {'driver_volume_type': 'fake_type',
                              'id': attachment_id,
                              'connection_info': {'data':
                                                  {'foo': 'bar',
                                                   'target_lun': '1'}}}
            return attachment_ref

        def fake_get_all_volume_types(*args, **kwargs):
            return [{
                # This is used in the 2.67 API sample test.
                'id': '5f9204ec-3e94-4f27-9beb-fe7bb73b6eb9',
                'name': 'lvm-1'
            }]

        self.test.stub_out('nova.volume.cinder.API.attachment_create',
                           fake_attachment_create)
        self.test.stub_out('nova.volume.cinder.API.attachment_delete',
                           fake_attachment_delete)
        self.test.stub_out('nova.volume.cinder.API.attachment_update',
                           fake_attachment_update)
        self.test.stub_out('nova.volume.cinder.API.attachment_complete',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.attachment_get',
                           fake_attachment_get)
        self.test.stub_out('nova.volume.cinder.API.begin_detaching',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.get',
                           fake_get)
        self.test.stub_out(
            'nova.volume.cinder.API.migrate_volume_completion',
            fake_migrate_volume_completion)
        self.test.stub_out('nova.volume.cinder.API.roll_detaching',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.is_microversion_supported',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.check_attached',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.get_all_volume_types',
                           fake_get_all_volume_types)


class UnHelperfulClientChannel(privsep_daemon._ClientChannel):
    def __init__(self, context):
        raise Exception('You have attempted to start a privsep helper. '
                        'This is not allowed in the gate, and '
                        'indicates a failure to have mocked your tests.')


class PrivsepNoHelperFixture(fixtures.Fixture):
    """A fixture to catch failures to mock privsep's rootwrap helper.

    If you fail to mock away a privsep'd method in a unit test, then
    you may well end up accidentally running the privsep rootwrap
    helper. This will fail in the gate, but it fails in a way which
    doesn't identify which test is missing a mock. Instead, we
    raise an exception so that you at least know where you've missed
    something.
    """

    def setUp(self):
        super(PrivsepNoHelperFixture, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'oslo_privsep.daemon.RootwrapClientChannel',
            UnHelperfulClientChannel))


class NoopQuotaDriverFixture(fixtures.Fixture):
    """A fixture to run tests using the NoopQuotaDriver.

    We can't simply set self.flags to the NoopQuotaDriver in tests to use
    the NoopQuotaDriver because the QuotaEngine object is global. Concurrently
    running tests will fail intermittently because they might get the
    NoopQuotaDriver globally when they expected the default DbQuotaDriver
    behavior. So instead, we can patch the _driver property of the QuotaEngine
    class on a per-test basis.
    """

    def setUp(self):
        super(NoopQuotaDriverFixture, self).setUp()
        self.useFixture(fixtures.MonkeyPatch('nova.quota.QuotaEngine._driver',
                                             nova_quota.NoopQuotaDriver()))
        # Set the config option just so that code checking for the presence of
        # the NoopQuotaDriver setting will see it as expected.
        # For some reason, this does *not* work when TestCase.flags is used.
        # When using self.flags, the concurrent test failures returned.
        CONF.set_override('driver', 'nova.quota.NoopQuotaDriver', 'quota')
        self.addCleanup(CONF.clear_override, 'driver', 'quota')


class DownCellFixture(fixtures.Fixture):
    """A fixture to simulate when a cell is down, either due to error or
    timeout.

    This fixture will stub out the scatter_gather_cells routine and
    target_cell used in various cells-related API operations like
    listing/showing server details to return an
    ``oslo_db.exception.DBError`` per cell in the results. Therefore it is
    best used with a test scenario like this:

    1. Create a server successfully.
    2. Using the fixture, list/show servers. Depending on the microversion
       used, the API should either return minimal results or by default skip
       the results from down cells.

    Example usage::

        with nova_fixtures.DownCellFixture():
            # List servers with down cells.
            self.api.get_servers()
            # Show a server in a down cell.
            self.api.get_server(server['id'])
            # List services with down cells.
            self.admin_api.api_get('/os-services')
    """

    def __init__(self, down_cell_mappings=None):
        self.down_cell_mappings = down_cell_mappings

    def setUp(self):
        super(DownCellFixture, self).setUp()

        def stub_scatter_gather_cells(ctxt, cell_mappings, timeout, fn, *args,
                                      **kwargs):
            # Return a dict with an entry per cell mapping where the results
            # are some kind of exception.
            up_cell_mappings = objects.CellMappingList()
            if not self.down_cell_mappings:
                # User has not passed any down cells explicitly, so all cells
                # are considered as down cells.
                self.down_cell_mappings = cell_mappings
            else:
                # User has passed down cell mappings, so the rest of the cells
                # should be up, meaning we should return the right results.
                # We assume that down cells will be a subset of the
                # cell_mappings.
                down_cell_uuids = [cell.uuid
                                   for cell in self.down_cell_mappings]
                up_cell_mappings.objects = [
                    cell for cell in cell_mappings
                    if cell.uuid not in down_cell_uuids]

            def wrap(cell_uuid, thing):
                # We should embed the cell_uuid into the context before
                # wrapping since it's used to calculate the cells_timed_out
                # and cells_failed properties in the object.
                ctxt.cell_uuid = cell_uuid
                return multi_cell_list.RecordWrapper(ctxt, sort_ctx, thing)

            if fn is multi_cell_list.query_wrapper:
                # If the function called through the scatter-gather utility is
                # multi_cell_list.query_wrapper, we should wrap the exception
                # object into a multi_cell_list.RecordWrapper. This is
                # because, unlike the other functions where the exception
                # object is returned directly, query_wrapper wraps this into
                # the RecordWrapper object format. So if we do not wrap, it
                # will blow up at the point of generating results from heapq
                # further down the stack.
                sort_ctx = multi_cell_list.RecordSortContext([], [])
                ret1 = {
                    cell_mapping.uuid: [wrap(cell_mapping.uuid,
                                             db_exc.DBError())]
                    for cell_mapping in self.down_cell_mappings
                }
            else:
                ret1 = {
                    cell_mapping.uuid: db_exc.DBError()
                    for cell_mapping in self.down_cell_mappings
                }

            ret2 = {}
            for cell in up_cell_mappings:
                ctxt.cell_uuid = cell.uuid
                cctxt = context.RequestContext.from_dict(ctxt.to_dict())
                context.set_target_cell(cctxt, cell)
                result = fn(cctxt, *args, **kwargs)
                ret2[cell.uuid] = result

            return dict(list(ret1.items()) + list(ret2.items()))

        @contextmanager
        def stub_target_cell(ctxt, cell_mapping):
            # This is to give the freedom to simulate down cells for each
            # individual cell-targeted function call.
            if not self.down_cell_mappings:
                # User has not passed any down cells explicitly, so all cells
                # are considered as down cells.
                self.down_cell_mappings = [cell_mapping]
                raise db_exc.DBError()
            else:
                # If down_cell_mappings are passed, then check if this cell
                # is down or up.
                down_cell_uuids = [cell.uuid
                                   for cell in self.down_cell_mappings]
                if cell_mapping.uuid in down_cell_uuids:
                    # It's a down cell, so raise the exception straight away.
                    raise db_exc.DBError()
                else:
                    # It's an up cell, so yield its context.
                    cctxt = context.RequestContext.from_dict(ctxt.to_dict())
                    context.set_target_cell(cctxt, cell_mapping)
                    yield cctxt

        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.scatter_gather_cells', stub_scatter_gather_cells))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.target_cell', stub_target_cell))


class AvailabilityZoneFixture(fixtures.Fixture):
    """Fixture to stub out the nova.availability_zones module.

    The list of ``zones`` provided to the fixture is what gets returned from
    ``get_availability_zones``.

    ``get_instance_availability_zone`` will return the availability zone
    requested when creating a server; otherwise the instance.availability_zone
    or default_availability_zone is returned.
    """

    def __init__(self, zones):
        self.zones = zones

    def setUp(self):
        super(AvailabilityZoneFixture, self).setUp()

        def fake_get_availability_zones(
                ctxt, get_only_available=False, with_hosts=False):
            # A 2-item tuple is returned if get_only_available=False.
            if not get_only_available:
                return self.zones, []
            return self.zones

        self.useFixture(fixtures.MonkeyPatch(
            'nova.availability_zones.get_availability_zones',
            fake_get_availability_zones))

        def fake_get_instance_availability_zone(ctxt, instance):
            # If the server was created with a specific AZ, return it.
            reqspec = objects.RequestSpec.get_by_instance_uuid(
                ctxt, instance.uuid)
            requested_az = reqspec.availability_zone
            if requested_az:
                return requested_az
            # Otherwise return the instance.availability_zone if set, else
            # the default AZ.
            return (instance.availability_zone or
                    CONF.default_availability_zone)

        self.useFixture(fixtures.MonkeyPatch(
            'nova.availability_zones.get_instance_availability_zone',
            fake_get_instance_availability_zone))