OpenStack Compute (Nova)

# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""

import collections
import copy
import datetime
import functools
import inspect
import sys

from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import update_match
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.orm import undefer
from sqlalchemy.schema import Table
from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import cast
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from sqlalchemy.sql import true

from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _
from nova import safe_utils

profiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')

CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)

main_context_manager = enginefacade.transaction_context()
api_context_manager = enginefacade.transaction_context()


def _get_db_conf(conf_group, connection=None):
    kw = dict(conf_group.items())
    if connection is not None:
        kw['connection'] = connection
    return kw


def _context_manager_from_context(context):
    if context:
        try:
            return context.db_connection
        except AttributeError:
            pass


def configure(conf):
    main_context_manager.configure(**_get_db_conf(conf.database))
    api_context_manager.configure(**_get_db_conf(conf.api_database))

    if profiler_sqlalchemy and CONF.profiler.enabled \
            and CONF.profiler.trace_sqlalchemy:
        main_context_manager.append_on_engine_create(
            lambda eng: profiler_sqlalchemy.add_tracing(sa, eng, "db"))
        api_context_manager.append_on_engine_create(
            lambda eng: profiler_sqlalchemy.add_tracing(sa, eng, "db"))


def create_context_manager(connection=None):
    """Create a database context manager object.

    :param connection: The database connection string
    """
    ctxt_mgr = enginefacade.transaction_context()
    ctxt_mgr.configure(**_get_db_conf(CONF.database, connection=connection))
    return ctxt_mgr


def get_context_manager(context):
    """Get a database context manager object.

    :param context: The request context that can contain a context manager
    """
    return _context_manager_from_context(context) or main_context_manager


def get_engine(use_slave=False, context=None):
    """Get a database engine object.

    :param use_slave: Whether to use the slave connection
    :param context: The request context that can contain a context manager
    """
    ctxt_mgr = get_context_manager(context)
    return ctxt_mgr.get_legacy_facade().get_engine(use_slave=use_slave)


def get_api_engine():
    return api_context_manager.get_legacy_facade().get_engine()


_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']


def get_backend():
    """The backend is this module itself."""
    return sys.modules[__name__]


def require_context(f):
    """Decorator to require *any* user or admin context.

    This does no authorization for user or project access matching, see
    :py:func:`nova.context.authorize_project_context` and
    :py:func:`nova.context.authorize_user_context`.

    The first argument to the wrapped function must be the context.
    """

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        nova.context.require_context(args[0])
        return f(*args, **kwargs)
    return wrapper


def require_instance_exists_using_uuid(f):
    """Decorator to require the specified instance to exist.

    Requires the wrapped function to use context and instance_uuid as
    their first two arguments.
    """
    @functools.wraps(f)
    def wrapper(context, instance_uuid, *args, **kwargs):
        instance_get_by_uuid(context, instance_uuid)
        return f(context, instance_uuid, *args, **kwargs)

    return wrapper


def require_aggregate_exists(f):
    """Decorator to require the specified aggregate to exist.

    Requires the wrapped function to use context and aggregate_id as
    their first two arguments.
    """
    @functools.wraps(f)
    def wrapper(context, aggregate_id, *args, **kwargs):
        aggregate_get(context, aggregate_id)
        return f(context, aggregate_id, *args, **kwargs)
    return wrapper


def select_db_reader_mode(f):
    """Decorator to select synchronous or asynchronous reader mode.

    The kwarg argument 'use_slave' defines reader mode. Asynchronous reader
    will be used if 'use_slave' is True and synchronous reader otherwise.
    If 'use_slave' is not specified, the default value 'False' will be used.

    Wrapped function must have a context in the arguments.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        wrapped_func = safe_utils.get_wrapped_function(f)
        keyed_args = inspect.getcallargs(wrapped_func, *args, **kwargs)

        context = keyed_args['context']
        use_slave = keyed_args.get('use_slave', False)

        if use_slave:
            reader_mode = get_context_manager(context).async
        else:
            reader_mode = get_context_manager(context).reader

        with reader_mode.using(context):
            return f(*args, **kwargs)
    return wrapper
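

# Illustrative usage of select_db_reader_mode (this example is not part of
# the original module): a DB API function that supports slave reads only
# needs to accept a 'use_slave' keyword argument and apply the decorator.
#
#     @select_db_reader_mode
#     def service_get_all_example(context, use_slave=False):
#         # With use_slave=True the query runs in the async reader mode;
#         # otherwise it uses the synchronous reader.
#         return model_query(context, models.Service).all()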


def pick_context_manager_writer(f):
    """Decorator to use a writer db context manager.

    The db context manager will be picked from the RequestContext.

    Wrapped function must have a RequestContext in the arguments.
    """
    @functools.wraps(f)
    def wrapped(context, *args, **kwargs):
        ctxt_mgr = get_context_manager(context)
        with ctxt_mgr.writer.using(context):
            return f(context, *args, **kwargs)
    return wrapped


def pick_context_manager_reader(f):
    """Decorator to use a reader db context manager.

    The db context manager will be picked from the RequestContext.

    Wrapped function must have a RequestContext in the arguments.
    """
    @functools.wraps(f)
    def wrapped(context, *args, **kwargs):
        ctxt_mgr = get_context_manager(context)
        with ctxt_mgr.reader.using(context):
            return f(context, *args, **kwargs)
    return wrapped


def pick_context_manager_reader_allow_async(f):
    """Decorator to use a reader.allow_async db context manager.

    The db context manager will be picked from the RequestContext.

    Wrapped function must have a RequestContext in the arguments.
    """
    @functools.wraps(f)
    def wrapped(context, *args, **kwargs):
        ctxt_mgr = get_context_manager(context)
        with ctxt_mgr.reader.allow_async.using(context):
            return f(context, *args, **kwargs)
    return wrapped
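

# Illustrative usage of the pick_context_manager decorators (this example is
# not part of the original module): readers wrap pure queries, writers wrap
# anything that mutates rows; the session is available on the context for
# the duration of the call.
#
#     @pick_context_manager_reader
#     def instance_count_example(context):
#         return model_query(context, models.Instance).count()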


def model_query(context, model,
                args=None,
                read_deleted=None,
                project_only=False):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: NovaContext of the query.
    :param model: Model to query. Must be a subclass of ModelBase.
    :param args: Arguments to query. If None - model is used.
    :param read_deleted: If not None, overrides context's read_deleted field.
                         Permitted values are 'no', which does not return
                         deleted values; 'only', which only returns deleted
                         values; and 'yes', which does not filter deleted
                         values.
    :param project_only: If set and context is user-type, then restrict
                         query to match the context's project_id. If set to
                         'allow_none', restriction includes project_id = None.
    """
    if read_deleted is None:
        read_deleted = context.read_deleted

    query_kwargs = {}
    if 'no' == read_deleted:
        query_kwargs['deleted'] = False
    elif 'only' == read_deleted:
        query_kwargs['deleted'] = True
    elif 'yes' == read_deleted:
        pass
    else:
        raise ValueError(_("Unrecognized read_deleted value '%s'")
                         % read_deleted)

    query = sqlalchemyutils.model_query(
        model, context.session, args, **query_kwargs)

    # We can't use oslo.db model_query's project_id here, as it doesn't allow
    # us to return both our projects and unowned projects.
    if nova.context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            query = query.\
                filter(or_(model.project_id == context.project_id,
                           model.project_id == null()))
        else:
            query = query.filter_by(project_id=context.project_id)

    return query
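

# Illustrative examples of model_query (not part of the original module):
#
#     # All non-deleted instances visible to the context's project:
#     model_query(context, models.Instance, project_only=True)
#     # Only soft-deleted services, regardless of the context's default:
#     model_query(context, models.Service, read_deleted='only')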


def convert_objects_related_datetimes(values, *datetime_keys):
    if not datetime_keys:
        datetime_keys = ('created_at', 'deleted_at', 'updated_at')

    for key in datetime_keys:
        if key in values and values[key]:
            if isinstance(values[key], six.string_types):
                try:
                    values[key] = timeutils.parse_strtime(values[key])
                except ValueError:
                    # Try alternate parsing since parse_strtime will fail
                    # converting, say, '2015-05-28T19:59:38+00:00'
                    values[key] = timeutils.parse_isotime(values[key])
            # NOTE(danms): Strip UTC timezones from datetimes, since they're
            # stored that way in the database
            values[key] = values[key].replace(tzinfo=None)
    return values
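

# Illustrative example (not part of the original module): string timestamps
# are parsed and made naive before hitting the database.
#
#     values = {'created_at': '2015-05-28T19:59:38+00:00'}
#     convert_objects_related_datetimes(values, 'created_at')
#     # values['created_at'] is now datetime(2015, 5, 28, 19, 59, 38),
#     # with tzinfo stripped.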


###################


def constraint(**conditions):
    return Constraint(conditions)


def equal_any(*values):
    return EqualityCondition(values)


def not_equal(*values):
    return InequalityCondition(values)


class Constraint(object):
    def __init__(self, conditions):
        self.conditions = conditions

    def apply(self, model, query):
        for key, condition in self.conditions.items():
            for clause in condition.clauses(getattr(model, key)):
                query = query.filter(clause)
        return query


class EqualityCondition(object):
    def __init__(self, values):
        self.values = values

    def clauses(self, field):
        # method signature requires us to return an iterable even if for OR
        # operator this will actually be a single clause
        return [or_(*[field == value for value in self.values])]


class InequalityCondition(object):
    def __init__(self, values):
        self.values = values

    def clauses(self, field):
        return [field != value for value in self.values]
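

# Illustrative example of the constraint helpers (not part of the original
# module): a Constraint built from these conditions can be applied to a
# query to enforce expected column values, e.g. for compare-and-swap style
# updates or deletes.
#
#     cons = constraint(host=equal_any('node1', 'node2'),
#                       task_state=not_equal('deleting'))
#     query = cons.apply(models.Instance,
#                        model_query(context, models.Instance))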


class DeleteFromSelect(UpdateBase):
    def __init__(self, table, select, column):
        self.table = table
        self.select = select
        self.column = column


# NOTE(guochbo): some versions of MySQL don't yet support subqueries with
# 'LIMIT & IN/ALL/ANY/SOME', so we work around this with a nested select.
@compiles(DeleteFromSelect)
def visit_delete_from_select(element, compiler, **kw):
    return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.column),
        element.column.name,
        compiler.process(element.select))
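

# Illustrative example (not part of the original module): for a delete of
# the 'instances' table keyed on instances.id with an inner LIMITed select,
# the compiler above emits SQL shaped like
#
#     DELETE FROM instances WHERE instances.id in
#         (SELECT T1.id FROM
#             (SELECT instances.id FROM instances LIMIT 10) as T1)
#
# which sidesteps MySQL's restriction on LIMIT inside IN subqueries.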


###################


@pick_context_manager_writer
def service_destroy(context, service_id):
    service = service_get(context, service_id)

    model_query(context, models.Service).\
        filter_by(id=service_id).\
        soft_delete(synchronize_session=False)

    if service.binary == 'nova-compute':
        # TODO(sbauza): Remove the service_id filter in a later release
        # once we are sure that all compute nodes report the host field
        model_query(context, models.ComputeNode).\
            filter(or_(models.ComputeNode.service_id == service_id,
                       models.ComputeNode.host == service['host'])).\
            soft_delete(synchronize_session=False)


@pick_context_manager_reader
def service_get(context, service_id):
    query = model_query(context, models.Service).filter_by(id=service_id)

    result = query.first()
    if not result:
        raise exception.ServiceNotFound(service_id=service_id)

    return result


@pick_context_manager_reader
def service_get_by_uuid(context, service_uuid):
    query = model_query(context, models.Service).filter_by(uuid=service_uuid)

    result = query.first()
    if not result:
        raise exception.ServiceNotFound(service_id=service_uuid)

    return result


@pick_context_manager_reader_allow_async
def service_get_minimum_version(context, binaries):
    min_versions = context.session.query(
        models.Service.binary,
        func.min(models.Service.version)).\
        filter(models.Service.binary.in_(binaries)).\
        filter(models.Service.deleted == 0).\
        filter(models.Service.forced_down == false()).\
        group_by(models.Service.binary)
    return dict(min_versions)


@pick_context_manager_reader
def service_get_all(context, disabled=None):
    query = model_query(context, models.Service)

    if disabled is not None:
        query = query.filter_by(disabled=disabled)

    return query.all()


@pick_context_manager_reader
def service_get_all_by_topic(context, topic):
    return model_query(context, models.Service, read_deleted="no").\
        filter_by(disabled=False).\
        filter_by(topic=topic).\
        all()


@pick_context_manager_reader
def service_get_by_host_and_topic(context, host, topic):
    return model_query(context, models.Service, read_deleted="no").\
        filter_by(disabled=False).\
        filter_by(host=host).\
        filter_by(topic=topic).\
        first()


@pick_context_manager_reader
def service_get_all_by_binary(context, binary, include_disabled=False):
    query = model_query(context, models.Service).filter_by(binary=binary)
    if not include_disabled:
        query = query.filter_by(disabled=False)
    return query.all()


@pick_context_manager_reader
def service_get_all_computes_by_hv_type(context, hv_type,
                                        include_disabled=False):
    query = model_query(context, models.Service, read_deleted="no").\
        filter_by(binary='nova-compute')
    if not include_disabled:
        query = query.filter_by(disabled=False)
    query = query.join(models.ComputeNode,
                       models.Service.host == models.ComputeNode.host).\
        filter(models.ComputeNode.hypervisor_type == hv_type).\
        distinct('host')
    return query.all()


@pick_context_manager_reader
def service_get_by_host_and_binary(context, host, binary):
    result = model_query(context, models.Service, read_deleted="no").\
        filter_by(host=host).\
        filter_by(binary=binary).\
        first()

    if not result:
        raise exception.HostBinaryNotFound(host=host, binary=binary)

    return result


@pick_context_manager_reader
def service_get_all_by_host(context, host):
    return model_query(context, models.Service, read_deleted="no").\
        filter_by(host=host).\
        all()


@pick_context_manager_reader_allow_async
def service_get_by_compute_host(context, host):
    result = model_query(context, models.Service, read_deleted="no").\
        filter_by(host=host).\
        filter_by(binary='nova-compute').\
        first()

    if not result:
        raise exception.ComputeHostNotFound(host=host)

    return result


@pick_context_manager_writer
def service_create(context, values):
    service_ref = models.Service()
    service_ref.update(values)
    # We only auto-disable nova-compute services since those are the only
    # ones that can be enabled using the os-services REST API and they are
    # the only ones where being disabled means anything. It does
    # not make sense to be able to disable non-compute services like
    # nova-scheduler or nova-osapi_compute since that does nothing.
    if not CONF.enable_new_services and values.get('binary') == 'nova-compute':
        msg = _("New compute service disabled due to config option.")
        service_ref.disabled = True
        service_ref.disabled_reason = msg
    try:
        service_ref.save(context.session)
    except db_exc.DBDuplicateEntry as e:
        if 'binary' in e.columns:
            raise exception.ServiceBinaryExists(host=values.get('host'),
                                                binary=values.get('binary'))
        raise exception.ServiceTopicExists(host=values.get('host'),
                                           topic=values.get('topic'))
    return service_ref


@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def service_update(context, service_id, values):
    service_ref = service_get(context, service_id)
    # Only servicegroup.drivers.db.DbDriver._report_state() updates
    # 'report_count', so if that value changes then store the timestamp
    # as the last time we got a state report.
    if 'report_count' in values:
        if values['report_count'] > service_ref.report_count:
            service_ref.last_seen_up = timeutils.utcnow()

    service_ref.update(values)

    return service_ref
  436. ###################
  437. def _compute_node_select(context, filters=None, limit=None, marker=None):
  438. if filters is None:
  439. filters = {}
  440. cn_tbl = sa.alias(models.ComputeNode.__table__, name='cn')
  441. select = sa.select([cn_tbl])
  442. if context.read_deleted == "no":
  443. select = select.where(cn_tbl.c.deleted == 0)
  444. if "compute_id" in filters:
  445. select = select.where(cn_tbl.c.id == filters["compute_id"])
  446. if "service_id" in filters:
  447. select = select.where(cn_tbl.c.service_id == filters["service_id"])
  448. if "host" in filters:
  449. select = select.where(cn_tbl.c.host == filters["host"])
  450. if "hypervisor_hostname" in filters:
  451. hyp_hostname = filters["hypervisor_hostname"]
  452. select = select.where(cn_tbl.c.hypervisor_hostname == hyp_hostname)
  453. if "mapped" in filters:
  454. select = select.where(cn_tbl.c.mapped < filters['mapped'])
  455. if marker is not None:
  456. try:
  457. compute_node_get(context, marker)
  458. except exception.ComputeHostNotFound:
  459. raise exception.MarkerNotFound(marker=marker)
  460. select = select.where(cn_tbl.c.id > marker)
  461. if limit is not None:
  462. select = select.limit(limit)
  463. # Explicitly order by id, so we're not dependent on the native sort
  464. # order of the underlying DB.
  465. select = select.order_by(asc("id"))
  466. return select
  467. def _compute_node_fetchall(context, filters=None, limit=None, marker=None):
  468. select = _compute_node_select(context, filters, limit=limit, marker=marker)
  469. engine = get_engine(context=context)
  470. conn = engine.connect()
  471. results = conn.execute(select).fetchall()
  472. # Callers expect dict-like objects, not SQLAlchemy RowProxy objects...
  473. results = [dict(r) for r in results]
  474. conn.close()
  475. return results


@pick_context_manager_reader
def compute_node_get(context, compute_id):
    results = _compute_node_fetchall(context, {"compute_id": compute_id})
    if not results:
        raise exception.ComputeHostNotFound(host=compute_id)
    return results[0]


@pick_context_manager_reader
def compute_node_get_model(context, compute_id):
    # TODO(edleafe): remove once the compute node resource provider migration
    # is complete, and this distinction is no longer necessary.
    result = model_query(context, models.ComputeNode).\
        filter_by(id=compute_id).\
        first()
    if not result:
        raise exception.ComputeHostNotFound(host=compute_id)
    return result


@pick_context_manager_reader
def compute_nodes_get_by_service_id(context, service_id):
    results = _compute_node_fetchall(context, {"service_id": service_id})
    if not results:
        raise exception.ServiceNotFound(service_id=service_id)
    return results


@pick_context_manager_reader
def compute_node_get_by_host_and_nodename(context, host, nodename):
    results = _compute_node_fetchall(
        context, {"host": host, "hypervisor_hostname": nodename})
    if not results:
        raise exception.ComputeHostNotFound(host=host)
    return results[0]


@pick_context_manager_reader_allow_async
def compute_node_get_all_by_host(context, host):
    results = _compute_node_fetchall(context, {"host": host})
    if not results:
        raise exception.ComputeHostNotFound(host=host)
    return results


@pick_context_manager_reader
def compute_node_get_all(context):
    return _compute_node_fetchall(context)


@pick_context_manager_reader
def compute_node_get_all_mapped_less_than(context, mapped_less_than):
    return _compute_node_fetchall(context,
                                  {'mapped': mapped_less_than})


@pick_context_manager_reader
def compute_node_get_all_by_pagination(context, limit=None, marker=None):
    return _compute_node_fetchall(context, limit=limit, marker=marker)


@pick_context_manager_reader
def compute_node_search_by_hypervisor(context, hypervisor_match):
    field = models.ComputeNode.hypervisor_hostname
    return model_query(context, models.ComputeNode).\
        filter(field.like('%%%s%%' % hypervisor_match)).\
        all()


@pick_context_manager_writer
def compute_node_create(context, values):
    """Creates a new ComputeNode and populates the capacity fields
    with the most recent data.
    """
    convert_objects_related_datetimes(values)

    compute_node_ref = models.ComputeNode()
    compute_node_ref.update(values)
    compute_node_ref.save(context.session)

    return compute_node_ref


@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def compute_node_update(context, compute_id, values):
    """Updates the ComputeNode record with the most recent data."""
    compute_ref = compute_node_get_model(context, compute_id)
    # Always update this, even if there's going to be no other
    # changes in data. This ensures that we invalidate the
    # scheduler cache of compute node data in case of races.
    values['updated_at'] = timeutils.utcnow()
    convert_objects_related_datetimes(values)
    compute_ref.update(values)

    return compute_ref


@pick_context_manager_writer
def compute_node_delete(context, compute_id):
    """Delete a ComputeNode record."""
    result = model_query(context, models.ComputeNode).\
        filter_by(id=compute_id).\
        soft_delete(synchronize_session=False)

    if not result:
        raise exception.ComputeHostNotFound(host=compute_id)


@pick_context_manager_reader
def compute_node_statistics(context):
    """Compute statistics over all compute nodes."""
    engine = get_engine(context=context)
    services_tbl = models.Service.__table__

    inner_sel = sa.alias(_compute_node_select(context), name='inner_sel')

    # TODO(sbauza): Remove the service_id filter in a later release
    # once we are sure that all compute nodes report the host field
    j = sa.join(
        inner_sel, services_tbl,
        sql.and_(
            sql.or_(
                inner_sel.c.host == services_tbl.c.host,
                inner_sel.c.service_id == services_tbl.c.id
            ),
            services_tbl.c.disabled == false(),
            services_tbl.c.binary == 'nova-compute',
            services_tbl.c.deleted == 0
        )
    )

    # NOTE(jaypipes): This COALESCE() stuff is temporary while the data
    # migration to the new resource providers inventories and allocations
    # tables is completed.
    agg_cols = [
        func.count().label('count'),
        sql.func.sum(
            inner_sel.c.vcpus
        ).label('vcpus'),
        sql.func.sum(
            inner_sel.c.memory_mb
        ).label('memory_mb'),
        sql.func.sum(
            inner_sel.c.local_gb
        ).label('local_gb'),
        sql.func.sum(
            inner_sel.c.vcpus_used
        ).label('vcpus_used'),
        sql.func.sum(
            inner_sel.c.memory_mb_used
        ).label('memory_mb_used'),
        sql.func.sum(
            inner_sel.c.local_gb_used
        ).label('local_gb_used'),
        sql.func.sum(
            inner_sel.c.free_ram_mb
        ).label('free_ram_mb'),
        sql.func.sum(
            inner_sel.c.free_disk_gb
        ).label('free_disk_gb'),
        sql.func.sum(
            inner_sel.c.current_workload
        ).label('current_workload'),
        sql.func.sum(
            inner_sel.c.running_vms
        ).label('running_vms'),
        sql.func.sum(
            inner_sel.c.disk_available_least
        ).label('disk_available_least'),
    ]
    select = sql.select(agg_cols).select_from(j)
    conn = engine.connect()

    results = conn.execute(select).fetchone()

    # Build a dict of the info--making no assumptions about result
    fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
              'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
              'current_workload', 'running_vms', 'disk_available_least')
    results = {field: int(results[idx] or 0)
               for idx, field in enumerate(fields)}
    conn.close()
    return results


###################


@pick_context_manager_writer
def certificate_create(context, values):
    certificate_ref = models.Certificate()
    for (key, value) in values.items():
        certificate_ref[key] = value
    certificate_ref.save(context.session)
    return certificate_ref


@pick_context_manager_reader
def certificate_get_all_by_project(context, project_id):
    return model_query(context, models.Certificate, read_deleted="no").\
        filter_by(project_id=project_id).\
        all()


@pick_context_manager_reader
def certificate_get_all_by_user(context, user_id):
    return model_query(context, models.Certificate, read_deleted="no").\
        filter_by(user_id=user_id).\
        all()


@pick_context_manager_reader
def certificate_get_all_by_user_and_project(context, user_id, project_id):
    return model_query(context, models.Certificate, read_deleted="no").\
        filter_by(user_id=user_id).\
        filter_by(project_id=project_id).\
        all()


###################


@require_context
@pick_context_manager_reader
def floating_ip_get(context, id):
    try:
        result = model_query(context, models.FloatingIp, project_only=True).\
            filter_by(id=id).\
            options(joinedload_all('fixed_ip.instance')).\
            first()

        if not result:
            raise exception.FloatingIpNotFound(id=id)
    except db_exc.DBError:
        LOG.warning("Invalid floating IP ID %s in request", id)
        raise exception.InvalidID(id=id)
    return result


@require_context
@pick_context_manager_reader
def floating_ip_get_pools(context):
    pools = []
    for result in model_query(context, models.FloatingIp,
                              (models.FloatingIp.pool,)).distinct():
        pools.append({'name': result[0]})
    return pools


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def floating_ip_allocate_address(context, project_id, pool,
                                 auto_assigned=False):
    nova.context.authorize_project_context(context, project_id)
    floating_ip_ref = model_query(context, models.FloatingIp,
                                  read_deleted="no").\
        filter_by(fixed_ip_id=None).\
        filter_by(project_id=None).\
        filter_by(pool=pool).\
        first()

    if not floating_ip_ref:
        raise exception.NoMoreFloatingIps()

    params = {'project_id': project_id, 'auto_assigned': auto_assigned}

    rows_update = model_query(context, models.FloatingIp, read_deleted="no").\
        filter_by(id=floating_ip_ref['id']).\
        filter_by(fixed_ip_id=None).\
        filter_by(project_id=None).\
        filter_by(pool=pool).\
        update(params, synchronize_session='evaluate')

    if not rows_update:
        LOG.debug('The row was updated in a concurrent transaction, '
                  'we will fetch another one')
        raise db_exc.RetryRequest(exception.FloatingIpAllocateFailed())

    return floating_ip_ref['address']


@require_context
@pick_context_manager_writer
def floating_ip_bulk_create(context, ips, want_result=True):
    try:
        tab = models.FloatingIp().__table__
        context.session.execute(tab.insert(), ips)
    except db_exc.DBDuplicateEntry as e:
        raise exception.FloatingIpExists(address=e.value)

    if want_result:
        return model_query(context, models.FloatingIp).filter(
            models.FloatingIp.address.in_(
                [ip['address'] for ip in ips])).all()


def _ip_range_splitter(ips, block_size=256):
    """Yields blocks of IPs no more than block_size elements long."""
    out = []
    count = 0
    for ip in ips:
        out.append(ip['address'])
        count += 1

        if count > block_size - 1:
            yield out
            out = []
            count = 0

    if out:
        yield out
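

# Illustrative example (not part of the original module):
#
#     ips = [{'address': '10.0.0.%d' % i} for i in range(600)]
#     [len(block) for block in _ip_range_splitter(ips)]
#     # -> [256, 256, 88]; each block is a list of address strings.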


@require_context
@pick_context_manager_writer
def floating_ip_bulk_destroy(context, ips):
    project_id_to_quota_count = collections.defaultdict(int)
    for ip_block in _ip_range_splitter(ips):
        # Find any floating IPs that were not auto_assigned and
        # thus need quota released.
        query = model_query(context, models.FloatingIp).\
            filter(models.FloatingIp.address.in_(ip_block)).\
            filter_by(auto_assigned=False)
        for row in query.all():
            # The count is negative since we release quota by
            # reserving negative quota.
            project_id_to_quota_count[row['project_id']] -= 1
        # Delete the floating IPs.
        model_query(context, models.FloatingIp).\
            filter(models.FloatingIp.address.in_(ip_block)).\
            soft_delete(synchronize_session='fetch')


@require_context
@pick_context_manager_writer
def floating_ip_create(context, values):
    floating_ip_ref = models.FloatingIp()
    floating_ip_ref.update(values)
    try:
        floating_ip_ref.save(context.session)
    except db_exc.DBDuplicateEntry:
        raise exception.FloatingIpExists(address=values['address'])
    return floating_ip_ref


def _floating_ip_count_by_project(context, project_id):
    nova.context.authorize_project_context(context, project_id)
    # TODO(tr3buchet): why leave auto_assigned floating IPs out?
    return model_query(context, models.FloatingIp, read_deleted="no").\
        filter_by(project_id=project_id).\
        filter_by(auto_assigned=False).\
        count()


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def floating_ip_fixed_ip_associate(context, floating_address,
                                   fixed_address, host):
    fixed_ip_ref = model_query(context, models.FixedIp).\
        filter_by(address=fixed_address).\
        options(joinedload('network')).\
        first()
    if not fixed_ip_ref:
        raise exception.FixedIpNotFoundForAddress(address=fixed_address)
    rows = model_query(context, models.FloatingIp).\
        filter_by(address=floating_address).\
        filter(models.FloatingIp.project_id ==
               context.project_id).\
        filter(or_(models.FloatingIp.fixed_ip_id ==
                   fixed_ip_ref['id'],
                   models.FloatingIp.fixed_ip_id.is_(None))).\
        update({'fixed_ip_id': fixed_ip_ref['id'], 'host': host})
    if not rows:
        raise exception.FloatingIpAssociateFailed(address=floating_address)
    return fixed_ip_ref


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def floating_ip_deallocate(context, address):
    return model_query(context, models.FloatingIp).\
        filter_by(address=address).\
        filter(and_(models.FloatingIp.project_id != null(),
                    models.FloatingIp.fixed_ip_id == null())).\
        update({'project_id': None,
                'host': None,
                'auto_assigned': False},
               synchronize_session=False)


@require_context
@pick_context_manager_writer
def floating_ip_destroy(context, address):
    model_query(context, models.FloatingIp).\
        filter_by(address=address).\
        delete()


@require_context
@pick_context_manager_writer
def floating_ip_disassociate(context, address):
    floating_ip_ref = model_query(context,
                                  models.FloatingIp).\
        filter_by(address=address).\
        first()
    if not floating_ip_ref:
        raise exception.FloatingIpNotFoundForAddress(address=address)

    fixed_ip_ref = model_query(context, models.FixedIp).\
        filter_by(id=floating_ip_ref['fixed_ip_id']).\
        options(joinedload('network')).\
        first()
    floating_ip_ref.fixed_ip_id = None
    floating_ip_ref.host = None

    return fixed_ip_ref


def _floating_ip_get_all(context):
    return model_query(context, models.FloatingIp, read_deleted="no")


@pick_context_manager_reader
def floating_ip_get_all(context):
    floating_ip_refs = _floating_ip_get_all(context).\
        options(joinedload('fixed_ip')).\
        all()
    if not floating_ip_refs:
        raise exception.NoFloatingIpsDefined()
    return floating_ip_refs


@pick_context_manager_reader
def floating_ip_get_all_by_host(context, host):
    floating_ip_refs = _floating_ip_get_all(context).\
        filter_by(host=host).\
        options(joinedload('fixed_ip')).\
        all()
    if not floating_ip_refs:
        raise exception.FloatingIpNotFoundForHost(host=host)
    return floating_ip_refs


@require_context
@pick_context_manager_reader
def floating_ip_get_all_by_project(context, project_id):
    nova.context.authorize_project_context(context, project_id)
    # TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
    return _floating_ip_get_all(context).\
        filter_by(project_id=project_id).\
        filter_by(auto_assigned=False).\
        options(joinedload_all('fixed_ip.instance')).\
        all()


@require_context
@pick_context_manager_reader
def floating_ip_get_by_address(context, address):
    return _floating_ip_get_by_address(context, address)


def _floating_ip_get_by_address(context, address):
    # if address string is empty explicitly set it to None
    if not address:
        address = None
    try:
        result = model_query(context, models.FloatingIp).\
            filter_by(address=address).\
            options(joinedload_all('fixed_ip.instance')).\
            first()

        if not result:
            raise exception.FloatingIpNotFoundForAddress(address=address)
    except db_exc.DBError:
        msg = _("Invalid floating IP %s in request") % address
        LOG.warning(msg)
        raise exception.InvalidIpAddressError(msg)

    # If the floating IP has a project ID set, check to make sure
    # the non-admin user has access.
    if result.project_id and nova.context.is_user_context(context):
        nova.context.authorize_project_context(context, result.project_id)

    return result


@require_context
@pick_context_manager_reader
def floating_ip_get_by_fixed_address(context, fixed_address):
    return model_query(context, models.FloatingIp).\
        outerjoin(models.FixedIp,
                  models.FixedIp.id ==
                  models.FloatingIp.fixed_ip_id).\
        filter(models.FixedIp.address == fixed_address).\
        all()


@require_context
@pick_context_manager_reader
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
    return model_query(context, models.FloatingIp).\
        filter_by(fixed_ip_id=fixed_ip_id).\
        all()


@require_context
@pick_context_manager_writer
def floating_ip_update(context, address, values):
    float_ip_ref = _floating_ip_get_by_address(context, address)
    float_ip_ref.update(values)
    try:
        float_ip_ref.save(context.session)
    except db_exc.DBDuplicateEntry:
        raise exception.FloatingIpExists(address=values['address'])
    return float_ip_ref


###################


@require_context
@pick_context_manager_reader
def dnsdomain_get(context, fqdomain):
    return model_query(context, models.DNSDomain, read_deleted="no").\
        filter_by(domain=fqdomain).\
        with_lockmode('update').\
        first()


def _dnsdomain_get_or_create(context, fqdomain):
    domain_ref = dnsdomain_get(context, fqdomain)
    if not domain_ref:
        dns_ref = models.DNSDomain()
        dns_ref.update({'domain': fqdomain,
                        'availability_zone': None,
                        'project_id': None})
        return dns_ref

    return domain_ref


@pick_context_manager_writer
def dnsdomain_register_for_zone(context, fqdomain, zone):
    domain_ref = _dnsdomain_get_or_create(context, fqdomain)
    domain_ref.scope = 'private'
    domain_ref.availability_zone = zone
    context.session.add(domain_ref)


@pick_context_manager_writer
def dnsdomain_register_for_project(context, fqdomain, project):
    domain_ref = _dnsdomain_get_or_create(context, fqdomain)
    domain_ref.scope = 'public'
    domain_ref.project_id = project
    context.session.add(domain_ref)


@pick_context_manager_writer
def dnsdomain_unregister(context, fqdomain):
    model_query(context, models.DNSDomain).\
        filter_by(domain=fqdomain).\
        delete()


@pick_context_manager_reader
def dnsdomain_get_all(context):
    return model_query(context, models.DNSDomain, read_deleted="no").all()


###################


@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
                       reserved=False, virtual_interface_id=None):
    """Keyword arguments:
    reserved -- should be a boolean value (True or False); the exact value
                will be used to filter on the fixed IP address
    """
    if not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    network_or_none = or_(models.FixedIp.network_id == network_id,
                          models.FixedIp.network_id == null())
    fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\
        filter(network_or_none).\
        filter_by(reserved=reserved).\
        filter_by(address=address).\
        first()

    if fixed_ip_ref is None:
        raise exception.FixedIpNotFoundForNetwork(address=address,
                                                  network_uuid=network_id)
    if fixed_ip_ref.instance_uuid:
        raise exception.FixedIpAlreadyInUse(address=address,
                                            instance_uuid=instance_uuid)

    params = {'instance_uuid': instance_uuid,
              'allocated': virtual_interface_id is not None}
    if not fixed_ip_ref.network_id:
        params['network_id'] = network_id
    if virtual_interface_id:
        params['virtual_interface_id'] = virtual_interface_id

    rows_updated = model_query(context, models.FixedIp, read_deleted="no").\
        filter_by(id=fixed_ip_ref.id).\
        filter(network_or_none).\
        filter_by(reserved=reserved).\
        filter_by(address=address).\
        update(params, synchronize_session='evaluate')

    if not rows_updated:
        LOG.debug('The row was updated in a concurrent transaction, '
                  'we will fetch another row')
        raise db_exc.RetryRequest(
            exception.FixedIpAssociateFailed(net=network_id))

    return fixed_ip_ref


@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
                            host=None, virtual_interface_id=None):
    """Allocate a fixed IP out of a fixed IP network pool.

    This allocates an unallocated fixed IP out of a specified
    network. We sort by updated_at to hand out the oldest address in
    the list.
    """
    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    network_or_none = or_(models.FixedIp.network_id == network_id,
                          models.FixedIp.network_id == null())
    fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\
        filter(network_or_none).\
        filter_by(reserved=False).\
        filter_by(instance_uuid=None).\
        filter_by(host=None).\
        filter_by(leased=False).\
        order_by(asc(models.FixedIp.updated_at)).\
        first()

    if not fixed_ip_ref:
        raise exception.NoMoreFixedIps(net=network_id)

    params = {'allocated': virtual_interface_id is not None}
    if fixed_ip_ref['network_id'] is None:
        params['network_id'] = network_id
    if instance_uuid:
        params['instance_uuid'] = instance_uuid
    if host:
        params['host'] = host
    if virtual_interface_id:
        params['virtual_interface_id'] = virtual_interface_id

    rows_updated = model_query(context, models.FixedIp, read_deleted="no").\
        filter_by(id=fixed_ip_ref['id']).\
        filter_by(network_id=fixed_ip_ref['network_id']).\
        filter_by(reserved=False).\
        filter_by(instance_uuid=None).\
        filter_by(host=None).\
        filter_by(leased=False).\
        filter_by(address=fixed_ip_ref['address']).\
        update(params, synchronize_session='evaluate')

    if not rows_updated:
        LOG.debug('The row was updated in a concurrent transaction, '
                  'we will fetch another row')
        raise db_exc.RetryRequest(
            exception.FixedIpAssociateFailed(net=network_id))

    return fixed_ip_ref


@require_context
@pick_context_manager_writer
def fixed_ip_create(context, values):
    fixed_ip_ref = models.FixedIp()
    fixed_ip_ref.update(values)
    try:
        fixed_ip_ref.save(context.session)
    except db_exc.DBDuplicateEntry:
        raise exception.FixedIpExists(address=values['address'])
    return fixed_ip_ref


@require_context
@pick_context_manager_writer
def fixed_ip_bulk_create(context, ips):
    try:
        tab = models.FixedIp.__table__
        context.session.execute(tab.insert(), ips)
    except db_exc.DBDuplicateEntry as e:
        raise exception.FixedIpExists(address=e.value)


@require_context
@pick_context_manager_writer
def fixed_ip_disassociate(context, address):
    _fixed_ip_get_by_address(context, address).update(
        {'instance_uuid': None,
         'virtual_interface_id': None})


@pick_context_manager_writer
def fixed_ip_disassociate_all_by_timeout(context, host, time):
    # NOTE(vish): only update fixed ips that "belong" to this
    #             host; i.e. the network host or the instance
    #             host matches. Two queries necessary because
    #             join with update doesn't work.
    host_filter = or_(and_(models.Instance.host == host,
                           models.Network.multi_host == true()),
                      models.Network.host == host)
    result = model_query(context, models.FixedIp, (models.FixedIp.id,),
                         read_deleted="no").\
        filter(models.FixedIp.allocated == false()).\
        filter(models.FixedIp.updated_at < time).\
        join((models.Network,
              models.Network.id == models.FixedIp.network_id)).\
        join((models.Instance,
              models.Instance.uuid == models.FixedIp.instance_uuid)).\
        filter(host_filter).\
        all()
    fixed_ip_ids = [fip[0] for fip in result]
    if not fixed_ip_ids:
        return 0
    result = model_query(context, models.FixedIp).\
        filter(models.FixedIp.id.in_(fixed_ip_ids)).\
        update({'instance_uuid': None,
                'leased': False,
                'updated_at': timeutils.utcnow()},
               synchronize_session='fetch')
    return result


@require_context
@pick_context_manager_reader
def fixed_ip_get(context, id, get_network=False):
    query = model_query(context, models.FixedIp).filter_by(id=id)
    if get_network:
        query = query.options(joinedload('network'))

    result = query.first()
    if not result:
        raise exception.FixedIpNotFound(id=id)

    # FIXME(sirp): shouldn't we just use project_only here to restrict the
    # results?
    if (nova.context.is_user_context(context) and
            result['instance_uuid'] is not None):
        instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
                                        result['instance_uuid'])
        nova.context.authorize_project_context(context, instance.project_id)

    return result


@pick_context_manager_reader
def fixed_ip_get_all(context):
    result = model_query(context, models.FixedIp, read_deleted="yes").all()
    if not result:
        raise exception.NoFixedIpsDefined()

    return result


@require_context
@pick_context_manager_reader
def fixed_ip_get_by_address(context, address, columns_to_join=None):
    return _fixed_ip_get_by_address(context, address,
                                    columns_to_join=columns_to_join)


def _fixed_ip_get_by_address(context, address, columns_to_join=None):
    if columns_to_join is None:
        columns_to_join = []

    try:
        result = model_query(context, models.FixedIp)
        for column in columns_to_join:
            result = result.options(joinedload_all(column))
        result = result.filter_by(address=address).first()
        if not result:
            raise exception.FixedIpNotFoundForAddress(address=address)
    except db_exc.DBError:
        msg = _("Invalid fixed IP Address %s in request") % address
        LOG.warning(msg)
        raise exception.FixedIpInvalid(msg)

    # NOTE(sirp): shouldn't we just use project_only here to restrict the
    # results?
    if (nova.context.is_user_context(context) and
            result['instance_uuid'] is not None):
        instance = _instance_get_by_uuid(
            context.elevated(read_deleted='yes'),
            result['instance_uuid'])
        nova.context.authorize_project_context(context,
                                               instance.project_id)

    return result


@require_context
@pick_context_manager_reader
def fixed_ip_get_by_floating_address(context, floating_address):
    return model_query(context, models.FixedIp).\
        join(models.FloatingIp,
             models.FloatingIp.fixed_ip_id ==
             models.FixedIp.id).\
        filter(models.FloatingIp.address == floating_address).\
        first()
    # NOTE(tr3buchet) please don't invent an exception here, None is fine


@require_context
@pick_context_manager_reader
def fixed_ip_get_by_instance(context, instance_uuid):
    if not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    vif_and = and_(models.VirtualInterface.id ==
                   models.FixedIp.virtual_interface_id,
                   models.VirtualInterface.deleted == 0)
    result = model_query(context, models.FixedIp, read_deleted="no").\
        filter_by(instance_uuid=instance_uuid).\
        outerjoin(models.VirtualInterface, vif_and).\
        options(contains_eager("virtual_interface")).\
        options(joinedload('network')).\
        options(joinedload('floating_ips')).\
        order_by(asc(models.VirtualInterface.created_at),
                 asc(models.VirtualInterface.id)).\
        all()

    if not result:
        raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)

    return result


@pick_context_manager_reader
def fixed_ip_get_by_host(context, host):
    instance_uuids = _instance_get_all_uuids_by_host(context, host)
    if not instance_uuids:
        return []

    return model_query(context, models.FixedIp).\
        filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
        all()


@require_context
@pick_context_manager_reader
def fixed_ip_get_by_network_host(context, network_id, host):
    result = model_query(context, models.FixedIp, read_deleted="no").\
        filter_by(network_id=network_id).\
        filter_by(host=host).\
        first()

    if not result:
        raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
                                                      host=host)
    return result


@require_context
@pick_context_manager_reader
def fixed_ips_by_virtual_interface(context, vif_id):
    result = model_query(context, models.FixedIp, read_deleted="no").\
        filter_by(virtual_interface_id=vif_id).\
        options(joinedload('network')).\
        options(joinedload('floating_ips')).\
        all()

    return result


@require_context
@pick_context_manager_writer
def fixed_ip_update(context, address, values):
    _fixed_ip_get_by_address(context, address).update(values)


def _fixed_ip_count_by_project(context, project_id):
    nova.context.authorize_project_context(context, project_id)
    return model_query(context, models.FixedIp, (models.FixedIp.id,),
                       read_deleted="no").\
        join((models.Instance,
              models.Instance.uuid == models.FixedIp.instance_uuid)).\
        filter(models.Instance.project_id == project_id).\
        count()


###################


@require_context
@pick_context_manager_writer
def virtual_interface_create(context, values):
    """Create a new virtual interface record in the database.

    :param values: = dict containing column values
    """
    try:
        vif_ref = models.VirtualInterface()
        vif_ref.update(values)
        vif_ref.save(context.session)
    except db_exc.DBError:
        LOG.exception("VIF creation failed with a database error.")
        raise exception.VirtualInterfaceCreateException()

    return vif_ref


def _virtual_interface_query(context):
    return model_query(context, models.VirtualInterface, read_deleted="no")


@require_context
@pick_context_manager_writer
def virtual_interface_update(context, address, values):
    vif_ref = virtual_interface_get_by_address(context, address)
    vif_ref.update(values)
    vif_ref.save(context.session)
    return vif_ref


@require_context
@pick_context_manager_reader
def virtual_interface_get(context, vif_id):
    """Gets a virtual interface from the table.

    :param vif_id: = id of the virtual interface
    """
    vif_ref = _virtual_interface_query(context).\
        filter_by(id=vif_id).\
        first()
    return vif_ref


@require_context
@pick_context_manager_reader
def virtual_interface_get_by_address(context, address):
    """Gets a virtual interface from the table.

    :param address: = the address of the interface you're looking to get
    """
    try:
        vif_ref = _virtual_interface_query(context).\
            filter_by(address=address).\
            first()
    except db_exc.DBError:
        msg = _("Invalid virtual interface address %s in request") % address
        LOG.warning(msg)
        raise exception.InvalidIpAddressError(msg)
    return vif_ref


@require_context
@pick_context_manager_reader
def virtual_interface_get_by_uuid(context, vif_uuid):
    """Gets a virtual interface from the table.

    :param vif_uuid: the uuid of the interface you're looking to get
    """
    vif_ref = _virtual_interface_query(context).\
        filter_by(uuid=vif_uuid).\
        first()
    return vif_ref


@require_context
@require_instance_exists_using_uuid
@pick_context_manager_reader_allow_async
def virtual_interface_get_by_instance(context, instance_uuid):
    """Gets all virtual interfaces for instance.

    :param instance_uuid: = uuid of the instance to retrieve vifs for
    """
    vif_refs = _virtual_interface_query(context).\
        filter_by(instance_uuid=instance_uuid).\
        order_by(asc("created_at"), asc("id")).\
        all()
    return vif_refs


@require_context
@pick_context_manager_reader
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
                                                  network_id):
    """Gets virtual interface for instance that's associated with network."""
    vif_ref = _virtual_interface_query(context).\
        filter_by(instance_uuid=instance_uuid).\
        filter_by(network_id=network_id).\
        first()
    return vif_ref


@require_context
@pick_context_manager_writer
def virtual_interface_delete_by_instance(context, instance_uuid):
    """Delete virtual interface records that are associated
    with the instance given by instance_id.

    :param instance_uuid: = uuid of instance
    """
    _virtual_interface_query(context).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()


@require_context
@pick_context_manager_writer
def virtual_interface_delete(context, id):
    """Delete virtual interface records.

    :param id: id of the interface
    """
    _virtual_interface_query(context).\
        filter_by(id=id).\
        soft_delete()


@require_context
@pick_context_manager_reader
def virtual_interface_get_all(context):
    """Get all vifs."""
    vif_refs = _virtual_interface_query(context).all()
    return vif_refs


###################


def _metadata_refs(metadata_dict, meta_class):
    metadata_refs = []
    if metadata_dict:
        for k, v in metadata_dict.items():
            metadata_ref = meta_class()
            metadata_ref['key'] = k
            metadata_ref['value'] = v
            metadata_refs.append(metadata_ref)
    return metadata_refs
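

# Illustrative example (not part of the original module): a plain metadata
# dict is expanded into a list of model rows, one per key/value pair.
#
#     refs = _metadata_refs({'role': 'webserver'}, models.InstanceMetadata)
#     # refs[0]['key'] == 'role', refs[0]['value'] == 'webserver'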


def _validate_unique_server_name(context, name):
    if not CONF.osapi_compute_unique_server_name_scope:
        return

    lowername = name.lower()
    base_query = model_query(context, models.Instance, read_deleted='no').\
        filter(func.lower(models.Instance.hostname) == lowername)

    if CONF.osapi_compute_unique_server_name_scope == 'project':
        instance_with_same_name = base_query.\
            filter_by(project_id=context.project_id).\
            count()
    elif CONF.osapi_compute_unique_server_name_scope == 'global':
        instance_with_same_name = base_query.count()
    else:
        return

    if instance_with_same_name > 0:
        raise exception.InstanceExists(name=lowername)
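

# Illustrative only: the check above is driven by configuration. Assuming
# the option lives in nova.conf (the option group may differ by release):
#
#   [DEFAULT]
#   osapi_compute_unique_server_name_scope = project
#
# With that set, creating a second instance whose hostname is already used
# within the project raises InstanceExists.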


def _handle_objects_related_type_conversions(values):
    """Make sure that certain things in values (which may have come from
    an objects.instance.Instance object) are in suitable form for the
    database.
    """
    # NOTE(danms): Make sure IP addresses are passed as strings to
    # the database engine
    for key in ('access_ip_v4', 'access_ip_v6'):
        if key in values and values[key] is not None:
            values[key] = str(values[key])

    datetime_keys = ('created_at', 'deleted_at', 'updated_at',
                     'launched_at', 'terminated_at')
    convert_objects_related_datetimes(values, *datetime_keys)


def _check_instance_exists_in_project(context, instance_uuid):
    if not model_query(context, models.Instance, read_deleted="no",
                       project_only=True).filter_by(
                           uuid=instance_uuid).first():
        raise exception.InstanceNotFound(instance_id=instance_uuid)


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_create(context, values):
    """Create a new Instance record in the database.

    context - request context object
    values - dict containing column values.
    """

    security_group_ensure_default(context)

    values = values.copy()
    values['metadata'] = _metadata_refs(
        values.get('metadata'), models.InstanceMetadata)

    values['system_metadata'] = _metadata_refs(
        values.get('system_metadata'), models.InstanceSystemMetadata)
    _handle_objects_related_type_conversions(values)

    instance_ref = models.Instance()
    if not values.get('uuid'):
        values['uuid'] = uuidutils.generate_uuid()
    instance_ref['info_cache'] = models.InstanceInfoCache()
    info_cache = values.pop('info_cache', None)
    if info_cache is not None:
        instance_ref['info_cache'].update(info_cache)
    security_groups = values.pop('security_groups', [])
    instance_ref['extra'] = models.InstanceExtra()
    instance_ref['extra'].update(
        {'numa_topology': None,
         'pci_requests': None,
         'vcpu_model': None,
         })
    instance_ref['extra'].update(values.pop('extra', {}))
    instance_ref.update(values)

    def _get_sec_group_models(security_groups):
        models = []
        default_group = _security_group_ensure_default(context)
        if 'default' in security_groups:
            models.append(default_group)
            # Generate a new list, so we don't modify the original
            security_groups = [x for x in security_groups
                               if x != 'default']
        if security_groups:
            models.extend(_security_group_get_by_names(
                context, security_groups))
        return models

    if 'hostname' in values:
        _validate_unique_server_name(context, values['hostname'])
    instance_ref.security_groups = _get_sec_group_models(security_groups)
    context.session.add(instance_ref)

    # create the instance uuid to ec2_id mapping entry for instance
    ec2_instance_create(context, instance_ref['uuid'])

    # Parity with the return value of instance_get_all_by_filters_sort()
    # Obviously a newly-created instance record can't already have a fault
    # record because of the FK constraint, so this is fine.
    instance_ref.fault = None

    return instance_ref
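

# Illustrative usage only; `ctxt` is an assumed RequestContext and the
# values shown are a minimal subset of Instance columns:
#
#   values = {'project_id': ctxt.project_id, 'user_id': ctxt.user_id,
#             'hostname': 'web-1', 'metadata': {'role': 'web'}}
#   inst = instance_create(ctxt, values)
#   # inst['uuid'] was generated above if the caller didn't supply one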


def _instance_data_get_for_user(context, project_id, user_id):
    not_soft_deleted = or_(
        models.Instance.vm_state != vm_states.SOFT_DELETED,
        models.Instance.vm_state == null()
        )
    result = model_query(context, models.Instance, (
        func.count(models.Instance.id),
        func.sum(models.Instance.vcpus),
        func.sum(models.Instance.memory_mb))).\
        filter_by(project_id=project_id).filter(not_soft_deleted)
    if user_id:
        result = result.filter_by(user_id=user_id).first()
    else:
        result = result.first()
    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0, result[2] or 0)


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_destroy(context, instance_uuid, constraint=None):
    if uuidutils.is_uuid_like(instance_uuid):
        instance_ref = _instance_get_by_uuid(context, instance_uuid)
    else:
        raise exception.InvalidUUID(uuid=instance_uuid)

    query = model_query(context, models.Instance).\
        filter_by(uuid=instance_uuid)
    if constraint is not None:
        query = constraint.apply(models.Instance, query)
    count = query.soft_delete()
    if count == 0:
        raise exception.ConstraintNotMet()
    model_query(context, models.SecurityGroupInstanceAssociation).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()
    model_query(context, models.InstanceInfoCache).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()
    model_query(context, models.InstanceMetadata).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()
    model_query(context, models.InstanceFault).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()
    model_query(context, models.InstanceExtra).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()
    model_query(context, models.InstanceSystemMetadata).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()
    model_query(context, models.InstanceGroupMember).\
        filter_by(instance_id=instance_uuid).\
        soft_delete()
    model_query(context, models.BlockDeviceMapping).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()
    model_query(context, models.Migration).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()
    model_query(context, models.InstanceIdMapping).filter_by(
        uuid=instance_uuid).soft_delete()
    # NOTE(snikitin): We can't use model_query here, because there is no
    # column 'deleted' in 'tags' or 'console_auth_tokens' tables.
    context.session.query(models.Tag).filter_by(
        resource_id=instance_uuid).delete()
    context.session.query(models.ConsoleAuthToken).filter_by(
        instance_uuid=instance_uuid).delete()
    # NOTE(cfriesen): We intentionally do not soft-delete entries in the
    # instance_actions or instance_actions_events tables because they
    # can be used by operators to find out what actions were performed on a
    # deleted instance. Both of these tables are special-cased in
    # _archive_deleted_rows_for_table().
    return instance_ref


@require_context
@pick_context_manager_reader_allow_async
def instance_get_by_uuid(context, uuid, columns_to_join=None):
    return _instance_get_by_uuid(context, uuid,
                                 columns_to_join=columns_to_join)


def _instance_get_by_uuid(context, uuid, columns_to_join=None):
    result = _build_instance_get(context, columns_to_join=columns_to_join).\
        filter_by(uuid=uuid).\
        first()

    if not result:
        raise exception.InstanceNotFound(instance_id=uuid)

    return result


@require_context
@pick_context_manager_reader
def instance_get(context, instance_id, columns_to_join=None):
    try:
        result = _build_instance_get(context, columns_to_join=columns_to_join
                                     ).filter_by(id=instance_id).first()

        if not result:
            raise exception.InstanceNotFound(instance_id=instance_id)

        return result
    except db_exc.DBError:
        # NOTE(sdague): catch all in case the db engine chokes on the
        # id because it's too long of an int to store.
        LOG.warning("Invalid instance id %s in request", instance_id)
        raise exception.InvalidID(id=instance_id)


def _build_instance_get(context, columns_to_join=None):
    query = model_query(context, models.Instance, project_only=True).\
        options(joinedload_all('security_groups.rules')).\
        options(joinedload('info_cache'))
    if columns_to_join is None:
        columns_to_join = ['metadata', 'system_metadata']
    for column in columns_to_join:
        if column in ['info_cache', 'security_groups']:
            # Already always joined above
            continue
        if 'extra.' in column:
            query = query.options(undefer(column))
        else:
            query = query.options(joinedload(column))
    # NOTE(alaski) Stop lazy loading of columns not needed.
    for col in ['metadata', 'system_metadata']:
        if col not in columns_to_join:
            query = query.options(noload(col))
    return query


def _instances_fill_metadata(context, instances, manual_joins=None):
    """Selectively fill instances with manually-joined metadata. Note that
    each instance is converted to a dict.

    :param context: security context
    :param instances: list of instances to fill
    :param manual_joins: list of tables to manually join (can be any
                         combination of 'metadata' and 'system_metadata' or
                         None to take the default of both)
    """
    uuids = [inst['uuid'] for inst in instances]

    if manual_joins is None:
        manual_joins = ['metadata', 'system_metadata']

    meta = collections.defaultdict(list)
    if 'metadata' in manual_joins:
        for row in _instance_metadata_get_multi(context, uuids):
            meta[row['instance_uuid']].append(row)

    sys_meta = collections.defaultdict(list)
    if 'system_metadata' in manual_joins:
        for row in _instance_system_metadata_get_multi(context, uuids):
            sys_meta[row['instance_uuid']].append(row)

    pcidevs = collections.defaultdict(list)
    if 'pci_devices' in manual_joins:
        for row in _instance_pcidevs_get_multi(context, uuids):
            pcidevs[row['instance_uuid']].append(row)

    if 'fault' in manual_joins:
        faults = instance_fault_get_by_instance_uuids(context, uuids,
                                                      latest=True)
    else:
        faults = {}

    filled_instances = []
    for inst in instances:
        inst = dict(inst)
        inst['system_metadata'] = sys_meta[inst['uuid']]
        inst['metadata'] = meta[inst['uuid']]
        if 'pci_devices' in manual_joins:
            inst['pci_devices'] = pcidevs[inst['uuid']]
        inst_faults = faults.get(inst['uuid'])
        inst['fault'] = inst_faults and inst_faults[0] or None
        filled_instances.append(inst)

    return filled_instances


def _manual_join_columns(columns_to_join):
    """Separate manually joined columns from columns_to_join.

    If columns_to_join contains 'metadata', 'system_metadata', 'fault', or
    'pci_devices', those columns are removed from columns_to_join and added
    to a manual_joins list to be used with the _instances_fill_metadata
    method. The columns_to_join formal parameter is copied, not modified;
    the returned tuple has the modified columns_to_join list to be used
    with joinedload in a model query.

    :param columns_to_join: List of columns to join in a model query.
    :return: tuple of (manual_joins, columns_to_join)
    """
    manual_joins = []
    columns_to_join_new = copy.copy(columns_to_join)
    for column in ('metadata', 'system_metadata', 'pci_devices', 'fault'):
        if column in columns_to_join_new:
            columns_to_join_new.remove(column)
            manual_joins.append(column)
    return manual_joins, columns_to_join_new
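

# Illustrative only, using the helper above:
#
#   manual, sql_joins = _manual_join_columns(['metadata', 'info_cache',
#                                             'fault'])
#   # manual == ['metadata', 'fault']  (filled in Python afterwards)
#   # sql_joins == ['info_cache']      (joinedload'd in the SQL query)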


@require_context
@pick_context_manager_reader
def instance_get_all(context, columns_to_join=None):
    if columns_to_join is None:
        columns_to_join_new = ['info_cache', 'security_groups']
        manual_joins = ['metadata', 'system_metadata']
    else:
        manual_joins, columns_to_join_new = (
            _manual_join_columns(columns_to_join))
    query = model_query(context, models.Instance)
    for column in columns_to_join_new:
        query = query.options(joinedload(column))
    if not context.is_admin:
        # If we're not admin context, add the appropriate filter.
        if context.project_id:
            query = query.filter_by(project_id=context.project_id)
        else:
            query = query.filter_by(user_id=context.user_id)
    instances = query.all()
    return _instances_fill_metadata(context, instances, manual_joins)


@require_context
@pick_context_manager_reader_allow_async
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
                                limit=None, marker=None,
                                columns_to_join=None):
    """Return instances matching all filters sorted by the primary key.

    See instance_get_all_by_filters_sort for more information.
    """
    # Invoke the API with the multiple sort keys and directions using the
    # single sort key/direction
    return instance_get_all_by_filters_sort(context, filters, limit=limit,
                                            marker=marker,
                                            columns_to_join=columns_to_join,
                                            sort_keys=[sort_key],
                                            sort_dirs=[sort_dir])


@require_context
@pick_context_manager_reader_allow_async
def instance_get_all_by_filters_sort(context, filters, limit=None,
                                     marker=None, columns_to_join=None,
                                     sort_keys=None, sort_dirs=None):
    """Return instances that match all filters sorted by the given keys.

    Deleted instances will be returned by default, unless there's a filter
    that says otherwise.

    Depending on the name of a filter, matching for that filter is
    performed using either exact matching or regular expression
    matching. Exact matching is applied for the following filters::

    |   ['project_id', 'user_id', 'image_ref',
    |    'vm_state', 'instance_type_id', 'uuid',
    |    'metadata', 'host', 'system_metadata']

    A third type of filter (also using exact matching) filters
    based on instance metadata tags when supplied under a special
    key named 'filter'::

    |   filters = {
    |       'filter': [
    |           {'name': 'tag-key', 'value': '<metakey>'},
    |           {'name': 'tag-value', 'value': '<metaval>'},
    |           {'name': 'tag:<metakey>', 'value': '<metaval>'}
    |       ]
    |   }

    Special keys are used to tweak the query further::

    |   'changes-since' - only return instances updated after this time
    |   'deleted' - only return (or exclude) deleted instances
    |   'soft_deleted' - modify behavior of 'deleted' to either
    |                    include or exclude instances whose
    |                    vm_state is SOFT_DELETED.

    A fourth type of filter (also using exact matching) filters
    based on instance tags (not metadata tags). There are two types
    of these tags:

    `tags` -- One or more strings that will be used to filter results
            in an AND expression: T1 AND T2

    `tags-any` -- One or more strings that will be used to filter results in
            an OR expression: T1 OR T2

    `not-tags` -- One or more strings that will be used to filter results in
            a NOT AND expression: NOT (T1 AND T2)

    `not-tags-any` -- One or more strings that will be used to filter
            results in a NOT OR expression: NOT (T1 OR T2)

    Tags should be represented as a list::

    |   filters = {
    |       'tags': [some-tag, some-another-tag],
    |       'tags-any': [some-any-tag, some-another-any-tag],
    |       'not-tags': [some-not-tag, some-another-not-tag],
    |       'not-tags-any': [some-not-any-tag, some-another-not-any-tag]
    |   }

    """
    # NOTE(mriedem): If the limit is 0 there is no point in even going
    # to the database since nothing is going to be returned anyway.
    if limit == 0:
        return []

    sort_keys, sort_dirs = process_sort_params(sort_keys,
                                               sort_dirs,
                                               default_dir='desc')

    if columns_to_join is None:
        columns_to_join_new = ['info_cache', 'security_groups']
        manual_joins = ['metadata', 'system_metadata']
    else:
        manual_joins, columns_to_join_new = (
            _manual_join_columns(columns_to_join))

    query_prefix = context.session.query(models.Instance)
    for column in columns_to_join_new:
        if 'extra.' in column:
            query_prefix = query_prefix.options(undefer(column))
        else:
            query_prefix = query_prefix.options(joinedload(column))

    # Note: order_by is done in the sqlalchemy.utils.py paginate_query(),
    # no need to do it here as well

    # Make a copy of the filters dictionary to use going forward, as we'll
    # be modifying it and we shouldn't affect the caller's use of it.
    filters = copy.deepcopy(filters)

    if 'changes-since' in filters:
        changes_since = timeutils.normalize_time(filters['changes-since'])
        query_prefix = query_prefix.\
            filter(models.Instance.updated_at >= changes_since)

    if 'deleted' in filters:
        # Instances can be soft or hard deleted and the query needs to
        # include or exclude both
        deleted = filters.pop('deleted')
        if deleted:
            if filters.pop('soft_deleted', True):
                delete = or_(
                    models.Instance.deleted == models.Instance.id,
                    models.Instance.vm_state == vm_states.SOFT_DELETED
                    )
                query_prefix = query_prefix.\
                    filter(delete)
            else:
                query_prefix = query_prefix.\
                    filter(models.Instance.deleted == models.Instance.id)
        else:
            query_prefix = query_prefix.\
                filter_by(deleted=0)
            if not filters.pop('soft_deleted', False):
                # It would be better to have vm_state not be nullable
                # but until then we test it explicitly as a workaround.
                not_soft_deleted = or_(
                    models.Instance.vm_state != vm_states.SOFT_DELETED,
                    models.Instance.vm_state == null()
                    )
                query_prefix = query_prefix.filter(not_soft_deleted)

    if 'cleaned' in filters:
        cleaned = 1 if filters.pop('cleaned') else 0
        query_prefix = query_prefix.filter(
            models.Instance.cleaned == cleaned)

    if 'tags' in filters:
        tags = filters.pop('tags')
        # We build a JOIN ladder expression for each tag, JOIN'ing
        # the first tag to the instances table, and each subsequent
        # tag to the last JOIN'd tags table
        first_tag = tags.pop(0)
        query_prefix = query_prefix.join(models.Instance.tags)
        query_prefix = query_prefix.filter(models.Tag.tag == first_tag)

        for tag in tags:
            tag_alias = aliased(models.Tag)
            query_prefix = query_prefix.join(tag_alias,
                                             models.Instance.tags)
            query_prefix = query_prefix.filter(tag_alias.tag == tag)

    if 'tags-any' in filters:
        tags = filters.pop('tags-any')
        tag_alias = aliased(models.Tag)
        query_prefix = query_prefix.join(tag_alias, models.Instance.tags)
        query_prefix = query_prefix.filter(tag_alias.tag.in_(tags))

    if 'not-tags' in filters:
        tags = filters.pop('not-tags')
        first_tag = tags.pop(0)
        subq = query_prefix.session.query(models.Tag.resource_id)
        subq = subq.join(models.Instance.tags)
        subq = subq.filter(models.Tag.tag == first_tag)

        for tag in tags:
            tag_alias = aliased(models.Tag)
            subq = subq.join(tag_alias, models.Instance.tags)
            subq = subq.filter(tag_alias.tag == tag)

        query_prefix = query_prefix.filter(~models.Instance.uuid.in_(subq))

    if 'not-tags-any' in filters:
        tags = filters.pop('not-tags-any')
        query_prefix = query_prefix.filter(~models.Instance.tags.any(
            models.Tag.tag.in_(tags)))

    if not context.is_admin:
        # If we're not admin context, add the appropriate filter.
        if context.project_id:
            filters['project_id'] = context.project_id
        else:
            filters['user_id'] = context.user_id

    # Filters for exact matches that we can do along with the SQL query...
    # For other filters that don't match this, we will do regexp matching
    exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
                                'vm_state', 'instance_type_id', 'uuid',
                                'metadata', 'host', 'task_state',
                                'system_metadata']

    # Filter the query
    query_prefix = _exact_instance_filter(query_prefix,
                                          filters, exact_match_filter_names)
    if query_prefix is None:
        return []
    query_prefix = _regex_instance_filter(query_prefix, filters)

    # paginate query
    if marker is not None:
        try:
            marker = _instance_get_by_uuid(
                context.elevated(read_deleted='yes'), marker)
        except exception.InstanceNotFound:
            raise exception.MarkerNotFound(marker=marker)
    try:
        query_prefix = sqlalchemyutils.paginate_query(query_prefix,
                                                      models.Instance,
                                                      limit,
                                                      sort_keys,
                                                      marker=marker,
                                                      sort_dirs=sort_dirs)
    except db_exc.InvalidSortKey:
        raise exception.InvalidSortKey()

    return _instances_fill_metadata(context, query_prefix.all(),
                                    manual_joins)
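

# Illustrative usage only; `ctxt` is an assumed admin RequestContext:
#
#   filters = {'deleted': False,
#              'vm_state': 'active',      # exact-match column
#              'tags': ['prod'],          # AND semantics
#              'display_name': 'web'}     # regexp-matched column
#   page = instance_get_all_by_filters_sort(
#       ctxt, filters, limit=50, sort_keys=['created_at'],
#       sort_dirs=['desc'])
#   next_page = instance_get_all_by_filters_sort(
#       ctxt, filters, limit=50, marker=page[-1]['uuid'],
#       sort_keys=['created_at'], sort_dirs=['desc'])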


@require_context
@pick_context_manager_reader_allow_async
def instance_get_by_sort_filters(context, sort_keys, sort_dirs, values):
    """Attempt to get a single instance based on a combination of sort
    keys, directions and filter values. This is used to try to find a
    marker instance when we don't have a marker uuid.

    This returns just a uuid of the instance that matched.
    """
    model = models.Instance
    return _model_get_uuid_by_sort_filters(context, model, sort_keys,
                                           sort_dirs, values)


def _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs,
                                    values):
    query = context.session.query(model.uuid)

    # NOTE(danms): Below is a re-implementation of our
    # oslo_db.sqlalchemy.utils.paginate_query() utility. We can't use that
    # directly because it does not return the marker and we need it to.
    # The below is basically the same algorithm, stripped down to just what
    # we need, and augmented with the filter criteria required for us to
    # get back the instance that would correspond to our query.

    # This is our position in sort_keys,sort_dirs,values for the loop below
    key_index = 0

    # We build a list of criteria to apply to the query, which looks
    # approximately like this (assuming all ascending):
    #
    #  OR(row.key1 > val1,
    #     AND(row.key1 == val1, row.key2 > val2),
    #     AND(row.key1 == val1, row.key2 == val2, row.key3 >= val3),
    #  )
    #
    # The final key is compared with the "or equal" variant so that
    # a complete match instance is still returned.
    criteria = []

    for skey, sdir, val in zip(sort_keys, sort_dirs, values):
        # Apply ordering to our query for the key, direction we're
        # processing
        if sdir == 'desc':
            query = query.order_by(desc(getattr(model, skey)))
        else:
            query = query.order_by(asc(getattr(model, skey)))

        # Build a list of equivalence requirements on keys we've already
        # processed through the loop. In other words, if we're adding
        # key2 > val2, make sure that key1 == val1
        crit_attrs = []
        for equal_attr in range(0, key_index):
            crit_attrs.append(
                (getattr(model, sort_keys[equal_attr]) ==
                 values[equal_attr]))

        model_attr = getattr(model, skey)
        if isinstance(model_attr.type, Boolean):
            model_attr = cast(model_attr, Integer)
            val = int(val)

        if skey == sort_keys[-1]:
            # If we are the last key, then we should use or-equal to
            # allow a complete match to be returned
            if sdir == 'asc':
                crit = (model_attr >= val)
            else:
                crit = (model_attr <= val)
        else:
            # If we're not the last key, then strict greater or less than
            # so we order strictly.
            if sdir == 'asc':
                crit = (model_attr > val)
            else:
                crit = (model_attr < val)

        # AND together all the above
        crit_attrs.append(crit)
        criteria.append(and_(*crit_attrs))
        key_index += 1

    # OR together all the ANDs
    query = query.filter(or_(*criteria))

    # We can't raise InstanceNotFound because we don't have a uuid to
    # be looking for, so just return nothing if no match.
    result = query.limit(1).first()
    if result:
        # We're querying for a single column, which means we get back a
        # tuple of one thing. Strip that out and just return the uuid
        # for our caller.
        return result[0]
    else:
        return result
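

# Illustrative only: for sort_keys=['created_at', 'id'], both 'asc', and
# values=[T, I], the loop above emits the predicate
#
#   OR(created_at > T,
#      AND(created_at == T, id >= I))
#
# i.e. the first row at or after the (T, I) position in sort order.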


def _db_connection_type(db_connection):
    """Returns a lowercase symbol for the db type.

    This is useful when we need to change what we are doing per DB
    (like handling regexes). In a CellsV2 world it probably needs to
    do something better than use the database configuration string.
    """

    db_string = db_connection.split(':')[0].split('+')[0]
    return db_string.lower()


def _safe_regex_mysql(raw_string):
    """Make a regex safe for MySQL.

    Certain items like '|' are interpreted raw by MySQL REGEXP. If you
    search for a single | then you trigger an error because it's
    expecting content on either side.

    For consistency's sake we escape all '|'. This does mean we wouldn't
    support something like foo|bar to match completely different
    things, however, one can argue that putting such a complicated regex
    into a name search probably means you are doing this wrong.
    """
    return raw_string.replace('|', '\\|')


def _get_regexp_ops(connection):
    """Return safety filter and db opts for regex."""
    regexp_op_map = {
        'postgresql': '~',
        'mysql': 'REGEXP',
        'sqlite': 'REGEXP'
    }
    regex_safe_filters = {
        'mysql': _safe_regex_mysql
    }
    db_type = _db_connection_type(connection)

    return (regex_safe_filters.get(db_type, lambda x: x),
            regexp_op_map.get(db_type, 'LIKE'))
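

# Illustrative only:
#
#   _get_regexp_ops('mysql+pymysql://nova:pw@dbhost/nova')
#   # -> (_safe_regex_mysql, 'REGEXP')
#   _get_regexp_ops('anything-else://...')
#   # -> (identity lambda, 'LIKE'); LIKE values are wrapped in %...% by
#   #    _regex_instance_filter() below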


def _regex_instance_filter(query, filters):
    """Applies regular expression filtering to an Instance query.

    Returns the updated query.

    :param query: query to apply filters to
    :param filters: dictionary of filters with regex values
    """
    model = models.Instance
    safe_regex_filter, db_regexp_op = _get_regexp_ops(
        CONF.database.connection)
    for filter_name in filters:
        try:
            column_attr = getattr(model, filter_name)
        except AttributeError:
            continue
        if 'property' == type(column_attr).__name__:
            continue
        filter_val = filters[filter_name]
        # Sometimes the REGEX filter value is not a string
        if not isinstance(filter_val, six.string_types):
            filter_val = str(filter_val)
        if db_regexp_op == 'LIKE':
            query = query.filter(column_attr.op(db_regexp_op)(
                u'%' + filter_val + u'%'))
        else:
            filter_val = safe_regex_filter(filter_val)
            query = query.filter(column_attr.op(db_regexp_op)(
                filter_val))
    return query


def _exact_instance_filter(query, filters, legal_keys):
    """Applies exact match filtering to an Instance query.

    Returns the updated query. Modifies filters argument to remove
    filters consumed.

    :param query: query to apply filters to
    :param filters: dictionary of filters; values that are lists,
                    tuples, sets, or frozensets cause an 'IN' test to
                    be performed, while exact matching ('==' operator)
                    is used for other values
    :param legal_keys: list of keys to apply exact filtering to
    """

    filter_dict = {}
    model = models.Instance

    # Walk through all the keys
    for key in legal_keys:
        # Skip ones we're not filtering on
        if key not in filters:
            continue

        # OK, filtering on this key; what value do we search for?
        value = filters.pop(key)

        if key in ('metadata', 'system_metadata'):
            column_attr = getattr(model, key)
            if isinstance(value, list):
                for item in value:
                    for k, v in item.items():
                        query = query.filter(column_attr.any(key=k))
                        query = query.filter(column_attr.any(value=v))
            else:
                for k, v in value.items():
                    query = query.filter(column_attr.any(key=k))
                    query = query.filter(column_attr.any(value=v))
        elif isinstance(value, (list, tuple, set, frozenset)):
            if not value:
                return None  # empty IN-predicate; short circuit
            # Looking for values in a list; apply to query directly
            column_attr = getattr(model, key)
            query = query.filter(column_attr.in_(value))
        else:
            # OK, simple exact match; save for later
            filter_dict[key] = value

    # Apply simple exact matches
    if filter_dict:
        query = query.filter(*[getattr(models.Instance, k) == v
                               for k, v in filter_dict.items()])
    return query
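

# Illustrative only: with legal_keys including 'vm_state' and 'host',
#
#   filters = {'vm_state': ['active', 'stopped'],  # list -> IN (...)
#              'host': 'compute-1'}                # scalar -> == match
#   query = _exact_instance_filter(query, filters, legal_keys)
#
# Consumed keys are popped from filters; anything left over is handed to
# _regex_instance_filter() by the callers.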


def process_sort_params(sort_keys, sort_dirs,
                        default_keys=['created_at', 'id'],
                        default_dir='asc'):
    """Process the sort parameters to include default keys.

    Creates a list of sort keys and a list of sort directions. Adds the
    default keys to the end of the list if they are not already included.

    When adding the default keys to the sort keys list, the associated
    direction is:

    1) The first element in the 'sort_dirs' list (if specified), else
    2) 'default_dir' value (Note that 'asc' is the default value since this
       is the default in sqlalchemy.utils.paginate_query)

    :param sort_keys: List of sort keys to include in the processed list
    :param sort_dirs: List of sort directions to include in the processed
                      list
    :param default_keys: List of sort keys that need to be included in the
                         processed list, they are added at the end of the
                         list if not already specified.
    :param default_dir: Sort direction associated with each of the default
                        keys that are not supplied, used when they are
                        added to the processed list
    :returns: list of sort keys, list of sort directions
    :raise exception.InvalidInput: If more sort directions than sort keys
                                   are specified or if an invalid sort
                                   direction is specified
    """
    # Determine direction to use for when adding default keys
    if sort_dirs and len(sort_dirs) != 0:
        default_dir_value = sort_dirs[0]
    else:
        default_dir_value = default_dir

    # Create list of keys (do not modify the input list)
    if sort_keys:
        result_keys = list(sort_keys)
    else:
        result_keys = []

    # If a list of directions is not provided, use the default sort
    # direction for all provided keys
    if sort_dirs:
        result_dirs = []
        # Verify sort direction
        for sort_dir in sort_dirs:
            if sort_dir not in ('asc', 'desc'):
                msg = _("Unknown sort direction, must be 'desc' or 'asc'")
                raise exception.InvalidInput(reason=msg)
            result_dirs.append(sort_dir)
    else:
        result_dirs = [default_dir_value for _sort_key in result_keys]

    # Ensure that the key and direction length match
    while len(result_dirs) < len(result_keys):
        result_dirs.append(default_dir_value)
    # Unless more directions are specified, which is an error
    if len(result_dirs) > len(result_keys):
        msg = _("Sort direction size exceeds sort key size")
        raise exception.InvalidInput(reason=msg)

    # Ensure defaults are included
    for key in default_keys:
        if key not in result_keys:
            result_keys.append(key)
            result_dirs.append(default_dir_value)

    return result_keys, result_dirs
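

# Illustrative only:
#
#   process_sort_params(['display_name'], ['desc'])
#   # -> (['display_name', 'created_at', 'id'], ['desc', 'desc', 'desc'])
#   process_sort_params(None, None)
#   # -> (['created_at', 'id'], ['asc', 'asc'])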


@require_context
@pick_context_manager_reader_allow_async
def instance_get_active_by_window_joined(context, begin, end=None,
                                         project_id=None, host=None,
                                         columns_to_join=None, limit=None,
                                         marker=None):
    """Return instances and joins that were active during window."""
    query = context.session.query(models.Instance)

    if columns_to_join is None:
        columns_to_join_new = ['info_cache', 'security_groups']
        manual_joins = ['metadata', 'system_metadata']
    else:
        manual_joins, columns_to_join_new = (
            _manual_join_columns(columns_to_join))

    for column in columns_to_join_new:
        if 'extra.' in column:
            query = query.options(undefer(column))
        else:
            query = query.options(joinedload(column))

    query = query.filter(or_(models.Instance.terminated_at == null(),
                             models.Instance.terminated_at > begin))
    if end:
        query = query.filter(models.Instance.launched_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)
    if host:
        query = query.filter_by(host=host)

    if marker is not None:
        try:
            marker = _instance_get_by_uuid(
                context.elevated(read_deleted='yes'), marker)
        except exception.InstanceNotFound:
            raise exception.MarkerNotFound(marker=marker)

    query = sqlalchemyutils.paginate_query(
        query, models.Instance, limit, ['project_id', 'uuid'],
        marker=marker)

    return _instances_fill_metadata(context, query.all(), manual_joins)
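

# Illustrative only: "active during the window" means not terminated before
# the window opened and (if an end is given) launched before it closed:
#
#   begin = timeutils.utcnow() - datetime.timedelta(days=1)
#   insts = instance_get_active_by_window_joined(
#       ctxt, begin, project_id=ctxt.project_id)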


def _instance_get_all_query(context, project_only=False, joins=None):
    if joins is None:
        joins = ['info_cache', 'security_groups']

    query = model_query(context,
                        models.Instance,
                        project_only=project_only)
    for column in joins:
        if 'extra.' in column:
            query = query.options(undefer(column))
        else:
            query = query.options(joinedload(column))
    return query


@pick_context_manager_reader_allow_async
def instance_get_all_by_host(context, host, columns_to_join=None):
    query = _instance_get_all_query(context, joins=columns_to_join)
    return _instances_fill_metadata(context,
                                    query.filter_by(host=host).all(),
                                    manual_joins=columns_to_join)


def _instance_get_all_uuids_by_host(context, host):
    """Return a list of the instance uuids on a given host.

    Returns a list of UUIDs, not Instance model objects.
    """
    uuids = []
    for row in model_query(context, models.Instance,
                           (models.Instance.uuid,),
                           read_deleted="no").\
            filter_by(host=host).\
            all():
        uuids.append(row[0])
    return uuids


@pick_context_manager_reader
def instance_get_all_by_host_and_node(context, host, node,
                                      columns_to_join=None):
    if columns_to_join is None:
        manual_joins = []
    else:
        candidates = ['system_metadata', 'metadata']
        manual_joins = [x for x in columns_to_join if x in candidates]
        columns_to_join = list(set(columns_to_join) - set(candidates))
    return _instances_fill_metadata(context,
            _instance_get_all_query(
                context,
                joins=columns_to_join).filter_by(host=host).
                filter_by(node=node).all(), manual_joins=manual_joins)


@pick_context_manager_reader
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
    return _instances_fill_metadata(context,
        _instance_get_all_query(context).filter_by(host=host).
        filter(models.Instance.instance_type_id != type_id).all())


@pick_context_manager_reader
def instance_get_all_by_grantee_security_groups(context, group_ids):
    if not group_ids:
        return []
    return _instances_fill_metadata(context,
        _instance_get_all_query(context).
        join(models.Instance.security_groups).
        filter(models.SecurityGroup.rules.any(
            models.SecurityGroupIngressRule.group_id.in_(group_ids))).
        all())


@require_context
@pick_context_manager_reader
def instance_floating_address_get_all(context, instance_uuid):
    if not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    floating_ips = model_query(context,
                               models.FloatingIp,
                               (models.FloatingIp.address,)).\
        join(models.FloatingIp.fixed_ip).\
        filter_by(instance_uuid=instance_uuid)

    return [floating_ip.address for floating_ip in floating_ips]


# NOTE(hanlind): This method can be removed as conductor RPC API moves to
# v2.0.
@pick_context_manager_reader
def instance_get_all_hung_in_rebooting(context, reboot_window):
    reboot_window = (timeutils.utcnow() -
                     datetime.timedelta(seconds=reboot_window))

    # NOTE(danms): this is only used in the _poll_rebooting_instances()
    # call in compute/manager, so we can avoid the metadata lookups
    # explicitly
    return _instances_fill_metadata(context,
        model_query(context, models.Instance).
            filter(models.Instance.updated_at <= reboot_window).
            filter_by(task_state=task_states.REBOOTING).all(),
        manual_joins=[])


def _retry_instance_update():
    """Wrap with oslo_db_api.wrap_db_retry, and also retry on
    UnknownInstanceUpdateConflict.
    """
    exception_checker = \
        lambda exc: isinstance(exc,
                               (exception.UnknownInstanceUpdateConflict,))
    return oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
                                     exception_checker=exception_checker)


@require_context
@_retry_instance_update()
@pick_context_manager_writer
def instance_update(context, instance_uuid, values, expected=None):
    return _instance_update(context, instance_uuid, values, expected)


@require_context
@_retry_instance_update()
@pick_context_manager_writer
def instance_update_and_get_original(context, instance_uuid, values,
                                     columns_to_join=None, expected=None):
    """Set the given properties on an instance and update it. Return
    a shallow copy of the original instance reference, as well as the
    updated one.

    :param context: request context object
    :param instance_uuid: instance uuid
    :param values: dict containing column values

    If "expected_task_state" exists in values, the update can only happen
    when the task state before the update matches expected_task_state.
    Otherwise an UnexpectedTaskStateError is raised.

    :returns: a tuple of the form (old_instance_ref, new_instance_ref)

    Raises NotFound if the instance does not exist.
    """
    instance_ref = _instance_get_by_uuid(context, instance_uuid,
                                         columns_to_join=columns_to_join)
    return (copy.copy(instance_ref), _instance_update(
        context, instance_uuid, values, expected, original=instance_ref))


# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type,
                                       model, metadata):
    metadata = dict(metadata)
    to_delete = []
    for keyvalue in instance[metadata_type]:
        key = keyvalue['key']
        if key in metadata:
            keyvalue['value'] = metadata.pop(key)
        else:
            to_delete.append(keyvalue)

    # NOTE: we have to hard_delete here otherwise we will get more than one
    # system_metadata record when we read deleted for an instance;
    # regular metadata doesn't have the same problem because we don't
    # allow reading deleted regular metadata anywhere.
    if metadata_type == 'system_metadata':
        for condemned in to_delete:
            context.session.delete(condemned)
            instance[metadata_type].remove(condemned)
    else:
        for condemned in to_delete:
            condemned.soft_delete(context.session)

    for key, value in metadata.items():
        newitem = model()
        newitem.update({'key': key, 'value': value,
                        'instance_uuid': instance['uuid']})
        context.session.add(newitem)
        instance[metadata_type].append(newitem)


def _instance_update(context, instance_uuid, values, expected,
                     original=None):
    if not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    # NOTE(mdbooth): We pop values from this dict below, so we copy it here
    # to ensure there are no side effects for the caller or if we retry the
    # function due to a db conflict.
    updates = copy.copy(values)

    if expected is None:
        expected = {}
    else:
        # Coerce all single values to singleton lists
        expected = {k: [None] if v is None else sqlalchemyutils.to_list(v)
                    for (k, v) in expected.items()}

    # Extract 'expected_' values from values dict, as these aren't actually
    # updates
    for field in ('task_state', 'vm_state'):
        expected_field = 'expected_%s' % field
        if expected_field in updates:
            value = updates.pop(expected_field, None)
            # Coerce all single values to singleton lists
            if value is None:
                expected[field] = [None]
            else:
                expected[field] = sqlalchemyutils.to_list(value)

    # Values which need to be updated separately
    metadata = updates.pop('metadata', None)
    system_metadata = updates.pop('system_metadata', None)

    _handle_objects_related_type_conversions(updates)

    # Hostname is potentially unique, but this is enforced in code rather
    # than the DB. The query below races, but the number of users of
    # osapi_compute_unique_server_name_scope is small, and a robust fix
    # will be complex. This is intentionally left as is for the moment.
    if 'hostname' in updates:
        _validate_unique_server_name(context, updates['hostname'])

    compare = models.Instance(uuid=instance_uuid, **expected)
    try:
        instance_ref = model_query(context, models.Instance,
                                   project_only=True).\
            update_on_match(compare, 'uuid', updates)
    except update_match.NoRowsMatched:
        # Update failed. Try to find why and raise a specific error.

        # We should get here only because our expected values were not
        # current when update_on_match executed. Having failed, we now have
        # a hint that the values are out of date and should check them.

        # This code is made more complex because we are using repeatable
        # reads. If we have previously read the original instance in the
        # current transaction, reading it again will return the same data,
        # even though the above update failed because it has changed: it is
        # not possible to determine what has changed in this transaction.
        # In this case we raise UnknownInstanceUpdateConflict, which will
        # cause the operation to be retried in a new transaction.

        # Because of the above, if we have previously read the instance in
        # the current transaction it will have been passed as 'original',
        # and there is no point refreshing it. If we have not previously
        # read the instance, we can fetch it here and we will get fresh
        # data.
        if original is None:
            original = _instance_get_by_uuid(context, instance_uuid)

        conflicts_expected = {}
        conflicts_actual = {}
        for (field, expected_values) in expected.items():
            actual = original[field]
            if actual not in expected_values:
                conflicts_expected[field] = expected_values
                conflicts_actual[field] = actual

        # Exception properties
        exc_props = {
            'instance_uuid': instance_uuid,
            'expected': conflicts_expected,
            'actual': conflicts_actual
        }

        # There was a conflict, but something (probably the MySQL read
        # view, but possibly an exceptionally unlikely second race) is
        # preventing us from seeing what it is. When we go round again
        # we'll get a fresh transaction and a fresh read view.
        if len(conflicts_actual) == 0:
            raise exception.UnknownInstanceUpdateConflict(**exc_props)

        # Task state gets special handling for convenience. We raise the
        # specific error UnexpectedDeletingTaskStateError or
        # UnexpectedTaskStateError as appropriate
        if 'task_state' in conflicts_actual:
            conflict_task_state = conflicts_actual['task_state']
            if conflict_task_state == task_states.DELETING:
                exc = exception.UnexpectedDeletingTaskStateError
            else:
                exc = exception.UnexpectedTaskStateError
        # Everything else is an InstanceUpdateConflict
        else:
            exc = exception.InstanceUpdateConflict

        raise exc(**exc_props)

    if metadata is not None:
        _instance_metadata_update_in_place(context, instance_ref,
                                           'metadata',
                                           models.InstanceMetadata,
                                           metadata)

    if system_metadata is not None:
        _instance_metadata_update_in_place(context, instance_ref,
                                           'system_metadata',
                                           models.InstanceSystemMetadata,
                                           system_metadata)

    return instance_ref
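

# Illustrative only: 'expected_*' keys in values make the update a
# compare-and-swap on the current task/vm state, e.g.:
#
#   instance_update(ctxt, uuid,
#                   {'task_state': task_states.SPAWNING,
#                    'expected_task_state': task_states.SCHEDULING})
#
# If another writer changed task_state since the caller last read it, the
# conflict handling above raises UnexpectedTaskStateError instead of
# silently clobbering the row.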


@pick_context_manager_writer
def instance_add_security_group(context, instance_uuid, security_group_id):
    """Associate the given security group with the given instance."""
    sec_group_ref = models.SecurityGroupInstanceAssociation()
    sec_group_ref.update({'instance_uuid': instance_uuid,
                          'security_group_id': security_group_id})
    sec_group_ref.save(context.session)


@require_context
@pick_context_manager_writer
def instance_remove_security_group(context, instance_uuid,
                                   security_group_id):
    """Disassociate the given security group from the given instance."""
    model_query(context, models.SecurityGroupInstanceAssociation).\
        filter_by(instance_uuid=instance_uuid).\
        filter_by(security_group_id=security_group_id).\
        soft_delete()


###################


@require_context
@pick_context_manager_reader
def instance_info_cache_get(context, instance_uuid):
    """Gets an instance info cache from the table.

    :param instance_uuid: uuid of the info cache's instance
    """
    return model_query(context, models.InstanceInfoCache).\
        filter_by(instance_uuid=instance_uuid).\
        first()


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_info_cache_update(context, instance_uuid, values):
    """Update an instance info cache record in the table.

    :param instance_uuid: uuid of the info cache's instance
    :param values: dict containing column values to update
    """
    convert_objects_related_datetimes(values)

    info_cache = model_query(context, models.InstanceInfoCache).\
        filter_by(instance_uuid=instance_uuid).\
        first()
    needs_create = False
    if info_cache and info_cache['deleted']:
        raise exception.InstanceInfoCacheNotFound(
            instance_uuid=instance_uuid)
    elif not info_cache:
        # NOTE(tr3buchet): just in case someone blows away an instance's
        #                  cache entry, re-create it.
        values['instance_uuid'] = instance_uuid
        info_cache = models.InstanceInfoCache(**values)
        needs_create = True

    try:
        with get_context_manager(context).writer.savepoint.using(context):
            if needs_create:
                info_cache.save(context.session)
            else:
                info_cache.update(values)
    except db_exc.DBDuplicateEntry:
        # NOTE(sirp): Possible race if two greenthreads attempt to
        # recreate the instance cache entry at the same time. First one
        # wins.
        pass

    return info_cache


@require_context
@pick_context_manager_writer
def instance_info_cache_delete(context, instance_uuid):
    """Deletes an existing instance_info_cache record.

    :param instance_uuid: uuid of the instance tied to the cache record
    """
    model_query(context, models.InstanceInfoCache).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()


###################


def _instance_extra_create(context, values):
    inst_extra_ref = models.InstanceExtra()
    inst_extra_ref.update(values)
    inst_extra_ref.save(context.session)
    return inst_extra_ref


@pick_context_manager_writer
def instance_extra_update_by_uuid(context, instance_uuid, values):
    rows_updated = model_query(context, models.InstanceExtra).\
        filter_by(instance_uuid=instance_uuid).\
        update(values)
    if not rows_updated:
        LOG.debug("Created instance_extra for %s", instance_uuid)
        create_values = copy.copy(values)
        create_values["instance_uuid"] = instance_uuid
        _instance_extra_create(context, create_values)
        rows_updated = 1
    return rows_updated


@pick_context_manager_reader
def instance_extra_get_by_instance_uuid(context, instance_uuid,
                                        columns=None):
    query = model_query(context, models.InstanceExtra).\
        filter_by(instance_uuid=instance_uuid)
    if columns is None:
        columns = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model',
                   'migration_context']
    for column in columns:
        query = query.options(undefer(column))
    instance_extra = query.first()
    return instance_extra


###################


@require_context
@pick_context_manager_writer
def key_pair_create(context, values):
    try:
        key_pair_ref = models.KeyPair()
        key_pair_ref.update(values)
        key_pair_ref.save(context.session)
        return key_pair_ref
    except db_exc.DBDuplicateEntry:
        raise exception.KeyPairExists(key_name=values['name'])


@require_context
@pick_context_manager_writer
def key_pair_destroy(context, user_id, name):
    result = model_query(context, models.KeyPair).\
        filter_by(user_id=user_id).\
        filter_by(name=name).\
        soft_delete()
    if not result:
        raise exception.KeypairNotFound(user_id=user_id, name=name)


@require_context
@pick_context_manager_reader
def key_pair_get(context, user_id, name):
    result = model_query(context, models.KeyPair).\
        filter_by(user_id=user_id).\
        filter_by(name=name).\
        first()

    if not result:
        raise exception.KeypairNotFound(user_id=user_id, name=name)

    return result


@require_context
@pick_context_manager_reader
def key_pair_get_all_by_user(context, user_id, limit=None, marker=None):
    marker_row = None
    if marker is not None:
        marker_row = model_query(context, models.KeyPair,
                                 read_deleted="no").\
            filter_by(name=marker).filter_by(user_id=user_id).first()
        if not marker_row:
            raise exception.MarkerNotFound(marker=marker)

    query = model_query(context, models.KeyPair, read_deleted="no").\
        filter_by(user_id=user_id)

    query = sqlalchemyutils.paginate_query(
        query, models.KeyPair, limit, ['name'], marker=marker_row)

    return query.all()
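

# Illustrative only: keypairs are paginated by name, the marker being the
# name of the last keypair on the previous page:
#
#   page = key_pair_get_all_by_user(ctxt, user_id, limit=10)
#   more = key_pair_get_all_by_user(ctxt, user_id, limit=10,
#                                   marker=page[-1]['name'])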


@require_context
@pick_context_manager_reader
def key_pair_count_by_user(context, user_id):
    return model_query(context, models.KeyPair, read_deleted="no").\
        filter_by(user_id=user_id).\
        count()


###################


@pick_context_manager_writer
def network_associate(context, project_id, network_id=None, force=False):
    """Associate a project with a network.

    Called by project_get_networks under certain conditions
    and by the network manager's add_network_to_project().

    Only associate if the project doesn't already have a network
    or if force is True.

    force solves a race condition where a fresh project has multiple
    instance builds simultaneously picked up by multiple network hosts,
    which attempt to associate the project with multiple networks.

    force should only be used as a direct consequence of a user request;
    automated requests should never use force.
    """
    def network_query(project_filter, id=None):
        filter_kwargs = {'project_id': project_filter}
        if id is not None:
            filter_kwargs['id'] = id
        return model_query(context, models.Network, read_deleted="no").\
            filter_by(**filter_kwargs).\
            with_lockmode('update').\
            first()

    if not force:
        # find out if project has a network
        network_ref = network_query(project_id)

    if force or not network_ref:
        # in force mode or project doesn't have a network so associate
        # with a new network

        # get new network
        network_ref = network_query(None, network_id)
        if not network_ref:
            raise exception.NoMoreNetworks()

        # associate with network
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        network_ref['project_id'] = project_id
        context.session.add(network_ref)
    return network_ref
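

# Illustrative only (nova-network era): force=True grabs an unassociated
# network even if the project already has one, so it is reserved for
# explicit user/operator requests:
#
#   net = network_associate(ctxt, project_id)               # first network
#   net2 = network_associate(ctxt, project_id, force=True)  # add another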


def _network_ips_query(context, network_id):
    return model_query(context, models.FixedIp, read_deleted="no").\
        filter_by(network_id=network_id)


@pick_context_manager_reader
def network_count_reserved_ips(context, network_id):
    return _network_ips_query(context, network_id).\
        filter_by(reserved=True).\
        count()


@pick_context_manager_writer
def network_create_safe(context, values):
    network_ref = models.Network()
    network_ref['uuid'] = uuidutils.generate_uuid()
    network_ref.update(values)

    try:
        network_ref.save(context.session)
        return network_ref
    except db_exc.DBDuplicateEntry:
        raise exception.DuplicateVlan(vlan=values['vlan'])


@pick_context_manager_writer
def network_delete_safe(context, network_id):
    result = model_query(context, models.FixedIp, read_deleted="no").\
        filter_by(network_id=network_id).\
        filter_by(allocated=True).\
        count()
    if result != 0:
        raise exception.NetworkInUse(network_id=network_id)
    network_ref = _network_get(context, network_id=network_id)

    model_query(context, models.FixedIp, read_deleted="no").\
        filter_by(network_id=network_id).\
        soft_delete()

    context.session.delete(network_ref)


@pick_context_manager_writer
def network_disassociate(context, network_id, disassociate_host,
                         disassociate_project):
    net_update = {}
    if disassociate_project:
        net_update['project_id'] = None
    if disassociate_host:
        net_update['host'] = None
    network_update(context, network_id, net_update)


def _network_get(context, network_id, project_only='allow_none'):
    result = model_query(context, models.Network,
                         project_only=project_only).\
        filter_by(id=network_id).\
        first()

    if not result:
        raise exception.NetworkNotFound(network_id=network_id)

    return result
  2538. @require_context
  2539. @pick_context_manager_reader
  2540. def network_get(context, network_id, project_only='allow_none'):
  2541. return _network_get(context, network_id, project_only=project_only)
  2542. @require_context
  2543. @pick_context_manager_reader
  2544. def network_get_all(context, project_only):
  2545. result = model_query(context, models.Network, read_deleted="no",
  2546. project_only=project_only).all()
  2547. if not result:
  2548. raise exception.NoNetworksFound()
  2549. return result
  2550. @require_context
  2551. @pick_context_manager_reader
  2552. def network_get_all_by_uuids(context, network_uuids, project_only):
  2553. result = model_query(context, models.Network, read_deleted="no",
  2554. project_only=project_only).\
  2555. filter(models.Network.uuid.in_(network_uuids)).\
  2556. all()
  2557. if not result:
  2558. raise exception.NoNetworksFound()
  2559. # check if the result contains all the networks
  2560. # we are looking for
  2561. for network_uuid in network_uuids:
  2562. for network in result:
  2563. if network['uuid'] == network_uuid:
  2564. break
  2565. else:
  2566. if project_only:
  2567. raise exception.NetworkNotFoundForProject(
  2568. network_uuid=network_uuid, project_id=context.project_id)
  2569. raise exception.NetworkNotFound(network_id=network_uuid)
  2570. return result
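# The completeness check above uses Python's for/else: the ``else`` suite
# runs only when the inner loop finishes without hitting ``break``, i.e.
# when a requested uuid matched none of the returned networks. A minimal
# standalone sketch of the idiom (hypothetical data):
#
#     for wanted in ['a', 'b']:
#         for net in [{'uuid': 'a'}]:
#             if net['uuid'] == wanted:
#                 break
#         else:
#             raise LookupError(wanted)  # reached for 'b' only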
  2571. def _get_associated_fixed_ips_query(context, network_id, host=None):
  2572. # NOTE(vish): The ugly joins here are to solve a performance issue and
  2573. # should be removed once we can add and remove leases
  2574. # without regenerating the whole list
  2575. vif_and = and_(models.VirtualInterface.id ==
  2576. models.FixedIp.virtual_interface_id,
  2577. models.VirtualInterface.deleted == 0)
  2578. inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
  2579. models.Instance.deleted == 0)
  2580. # NOTE(vish): This subquery left joins the minimum interface id for each
  2581. # instance. If the join succeeds (i.e. the 11th column is not
  2582. # null), then the fixed ip is on the first interface.
  2583. subq = context.session.query(
  2584. func.min(models.VirtualInterface.id).label("id"),
  2585. models.VirtualInterface.instance_uuid).\
  2586. group_by(models.VirtualInterface.instance_uuid).subquery()
  2587. subq_and = and_(subq.c.id == models.FixedIp.virtual_interface_id,
  2588. subq.c.instance_uuid == models.VirtualInterface.instance_uuid)
  2589. query = context.session.query(
  2590. models.FixedIp.address,
  2591. models.FixedIp.instance_uuid,
  2592. models.FixedIp.network_id,
  2593. models.FixedIp.virtual_interface_id,
  2594. models.VirtualInterface.address,
  2595. models.Instance.hostname,
  2596. models.Instance.updated_at,
  2597. models.Instance.created_at,
  2598. models.FixedIp.allocated,
  2599. models.FixedIp.leased,
  2600. subq.c.id).\
  2601. filter(models.FixedIp.deleted == 0).\
  2602. filter(models.FixedIp.network_id == network_id).\
  2603. join((models.VirtualInterface, vif_and)).\
  2604. join((models.Instance, inst_and)).\
  2605. outerjoin((subq, subq_and)).\
  2606. filter(models.FixedIp.instance_uuid != null()).\
  2607. filter(models.FixedIp.virtual_interface_id != null())
  2608. if host:
  2609. query = query.filter(models.Instance.host == host)
  2610. return query
  2611. @pick_context_manager_reader
  2612. def network_get_associated_fixed_ips(context, network_id, host=None):
  2613. # FIXME(sirp): since this returns fixed_ips, this would be better named
  2614. # fixed_ip_get_all_by_network.
  2615. query = _get_associated_fixed_ips_query(context, network_id, host)
  2616. result = query.all()
  2617. data = []
  2618. for datum in result:
  2619. cleaned = {}
  2620. cleaned['address'] = datum[0]
  2621. cleaned['instance_uuid'] = datum[1]
  2622. cleaned['network_id'] = datum[2]
  2623. cleaned['vif_id'] = datum[3]
  2624. cleaned['vif_address'] = datum[4]
  2625. cleaned['instance_hostname'] = datum[5]
  2626. cleaned['instance_updated'] = datum[6]
  2627. cleaned['instance_created'] = datum[7]
  2628. cleaned['allocated'] = datum[8]
  2629. cleaned['leased'] = datum[9]
  2630. # NOTE(vish): default_route is True if this fixed ip is on the first
2631. # interface of its instance.
  2632. cleaned['default_route'] = datum[10] is not None
  2633. data.append(cleaned)
  2634. return data
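# Each element of ``data`` above is a plain dict built from the positional
# columns selected in _get_associated_fixed_ips_query. An illustrative
# (hypothetical) entry:
#
#     {'address': '10.0.0.4', 'instance_uuid': '<uuid>', 'network_id': 1,
#      'vif_id': 7, 'vif_address': 'fa:16:3e:00:00:01',
#      'instance_hostname': 'vm-1', 'instance_updated': <datetime>,
#      'instance_created': <datetime>, 'allocated': True, 'leased': False,
#      'default_route': True}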
  2635. @pick_context_manager_reader
  2636. def network_in_use_on_host(context, network_id, host):
  2637. query = _get_associated_fixed_ips_query(context, network_id, host)
  2638. return query.count() > 0
  2639. def _network_get_query(context):
  2640. return model_query(context, models.Network, read_deleted="no")
  2641. @pick_context_manager_reader
  2642. def network_get_by_uuid(context, uuid):
  2643. result = _network_get_query(context).filter_by(uuid=uuid).first()
  2644. if not result:
  2645. raise exception.NetworkNotFoundForUUID(uuid=uuid)
  2646. return result
  2647. @pick_context_manager_reader
  2648. def network_get_by_cidr(context, cidr):
  2649. result = _network_get_query(context).\
  2650. filter(or_(models.Network.cidr == cidr,
  2651. models.Network.cidr_v6 == cidr)).\
  2652. first()
  2653. if not result:
  2654. raise exception.NetworkNotFoundForCidr(cidr=cidr)
  2655. return result
  2656. @pick_context_manager_reader
  2657. def network_get_all_by_host(context, host):
  2658. fixed_host_filter = or_(models.FixedIp.host == host,
  2659. and_(models.FixedIp.instance_uuid != null(),
  2660. models.Instance.host == host))
  2661. fixed_ip_query = model_query(context, models.FixedIp,
  2662. (models.FixedIp.network_id,)).\
  2663. outerjoin((models.Instance,
  2664. models.Instance.uuid ==
  2665. models.FixedIp.instance_uuid)).\
  2666. filter(fixed_host_filter)
  2667. # NOTE(vish): return networks that have host set
  2668. # or that have a fixed ip with host set
  2669. # or that have an instance with host set
  2670. host_filter = or_(models.Network.host == host,
  2671. models.Network.id.in_(fixed_ip_query.subquery()))
  2672. return _network_get_query(context).filter(host_filter).all()
  2673. @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
  2674. @pick_context_manager_writer
  2675. def network_set_host(context, network_id, host_id):
  2676. network_ref = _network_get_query(context).\
  2677. filter_by(id=network_id).\
  2678. first()
  2679. if not network_ref:
  2680. raise exception.NetworkNotFound(network_id=network_id)
  2681. if network_ref.host:
  2682. return None
  2683. rows_updated = _network_get_query(context).\
  2684. filter_by(id=network_id).\
  2685. filter_by(host=None).\
  2686. update({'host': host_id})
  2687. if not rows_updated:
2688. LOG.debug('The network was updated in a concurrent transaction, '
2689. 'the update will be retried')
  2690. raise db_exc.RetryRequest(
  2691. exception.NetworkSetHostFailed(network_id=network_id))
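# Raising db_exc.RetryRequest above cooperates with the
# @oslo_db_api.wrap_db_retry decorator on network_set_host: the failed
# transaction is rolled back and the whole function re-run, up to the
# configured number of retries, after which the wrapped exception reaches
# the caller. A minimal sketch of the pattern (hypothetical helper):
#
#     @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
#     @pick_context_manager_writer
#     def _claim(context, resource_id, owner):
#         if not _compare_and_swap(context, resource_id, owner):
#             raise db_exc.RetryRequest(exception.NovaException())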
  2692. @require_context
  2693. @pick_context_manager_writer
  2694. def network_update(context, network_id, values):
  2695. network_ref = _network_get(context, network_id)
  2696. network_ref.update(values)
  2697. try:
  2698. network_ref.save(context.session)
  2699. except db_exc.DBDuplicateEntry:
  2700. raise exception.DuplicateVlan(vlan=values['vlan'])
  2701. return network_ref
  2702. ###################
  2703. @require_context
  2704. @pick_context_manager_reader
  2705. def quota_get(context, project_id, resource, user_id=None):
  2706. model = models.ProjectUserQuota if user_id else models.Quota
  2707. query = model_query(context, model).\
  2708. filter_by(project_id=project_id).\
  2709. filter_by(resource=resource)
  2710. if user_id:
  2711. query = query.filter_by(user_id=user_id)
  2712. result = query.first()
  2713. if not result:
  2714. if user_id:
  2715. raise exception.ProjectUserQuotaNotFound(project_id=project_id,
  2716. user_id=user_id)
  2717. else:
  2718. raise exception.ProjectQuotaNotFound(project_id=project_id)
  2719. return result
  2720. @require_context
  2721. @pick_context_manager_reader
  2722. def quota_get_all_by_project_and_user(context, project_id, user_id):
  2723. user_quotas = model_query(context, models.ProjectUserQuota,
  2724. (models.ProjectUserQuota.resource,
  2725. models.ProjectUserQuota.hard_limit)).\
  2726. filter_by(project_id=project_id).\
  2727. filter_by(user_id=user_id).\
  2728. all()
  2729. result = {'project_id': project_id, 'user_id': user_id}
  2730. for user_quota in user_quotas:
  2731. result[user_quota.resource] = user_quota.hard_limit
  2732. return result
  2733. @require_context
  2734. @pick_context_manager_reader
  2735. def quota_get_all_by_project(context, project_id):
  2736. rows = model_query(context, models.Quota, read_deleted="no").\
  2737. filter_by(project_id=project_id).\
  2738. all()
  2739. result = {'project_id': project_id}
  2740. for row in rows:
  2741. result[row.resource] = row.hard_limit
  2742. return result
  2743. @require_context
  2744. @pick_context_manager_reader
  2745. def quota_get_all(context, project_id):
  2746. result = model_query(context, models.ProjectUserQuota).\
  2747. filter_by(project_id=project_id).\
  2748. all()
  2749. return result
  2750. def quota_get_per_project_resources():
  2751. return PER_PROJECT_QUOTAS
  2752. @pick_context_manager_writer
  2753. def quota_create(context, project_id, resource, limit, user_id=None):
  2754. per_user = user_id and resource not in PER_PROJECT_QUOTAS
  2755. quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
  2756. if per_user:
  2757. quota_ref.user_id = user_id
  2758. quota_ref.project_id = project_id
  2759. quota_ref.resource = resource
  2760. quota_ref.hard_limit = limit
  2761. try:
  2762. quota_ref.save(context.session)
  2763. except db_exc.DBDuplicateEntry:
  2764. raise exception.QuotaExists(project_id=project_id, resource=resource)
  2765. return quota_ref
  2766. @pick_context_manager_writer
  2767. def quota_update(context, project_id, resource, limit, user_id=None):
  2768. per_user = user_id and resource not in PER_PROJECT_QUOTAS
  2769. model = models.ProjectUserQuota if per_user else models.Quota
  2770. query = model_query(context, model).\
  2771. filter_by(project_id=project_id).\
  2772. filter_by(resource=resource)
  2773. if per_user:
  2774. query = query.filter_by(user_id=user_id)
  2775. result = query.update({'hard_limit': limit})
  2776. if not result:
  2777. if per_user:
  2778. raise exception.ProjectUserQuotaNotFound(project_id=project_id,
  2779. user_id=user_id)
  2780. else:
  2781. raise exception.ProjectQuotaNotFound(project_id=project_id)
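# quota_create and quota_update make the same per-user vs. per-project
# decision: user_id is honoured only for resources that are not in
# PER_PROJECT_QUOTAS. Illustrative calls with hypothetical ids, assuming
# 'instances' is not a per-project resource:
#
#     quota_create(ctxt, 'proj-1', 'instances', 10, user_id='user-1')
#     # -> stored in project_user_quotas
#     quota_create(ctxt, 'proj-1', 'instances', 20)
#     # -> stored in quotas (no user_id given)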
  2782. ###################
  2783. @require_context
  2784. @pick_context_manager_reader
  2785. def quota_class_get(context, class_name, resource):
  2786. result = model_query(context, models.QuotaClass, read_deleted="no").\
  2787. filter_by(class_name=class_name).\
  2788. filter_by(resource=resource).\
  2789. first()
  2790. if not result:
  2791. raise exception.QuotaClassNotFound(class_name=class_name)
  2792. return result
  2793. @pick_context_manager_reader
  2794. def quota_class_get_default(context):
  2795. rows = model_query(context, models.QuotaClass, read_deleted="no").\
  2796. filter_by(class_name=_DEFAULT_QUOTA_NAME).\
  2797. all()
  2798. result = {'class_name': _DEFAULT_QUOTA_NAME}
  2799. for row in rows:
  2800. result[row.resource] = row.hard_limit
  2801. return result
  2802. @require_context
  2803. @pick_context_manager_reader
  2804. def quota_class_get_all_by_name(context, class_name):
  2805. rows = model_query(context, models.QuotaClass, read_deleted="no").\
  2806. filter_by(class_name=class_name).\
  2807. all()
  2808. result = {'class_name': class_name}
  2809. for row in rows:
  2810. result[row.resource] = row.hard_limit
  2811. return result
  2812. @pick_context_manager_writer
  2813. def quota_class_create(context, class_name, resource, limit):
  2814. quota_class_ref = models.QuotaClass()
  2815. quota_class_ref.class_name = class_name
  2816. quota_class_ref.resource = resource
  2817. quota_class_ref.hard_limit = limit
  2818. quota_class_ref.save(context.session)
  2819. return quota_class_ref
  2820. @pick_context_manager_writer
  2821. def quota_class_update(context, class_name, resource, limit):
  2822. result = model_query(context, models.QuotaClass, read_deleted="no").\
  2823. filter_by(class_name=class_name).\
  2824. filter_by(resource=resource).\
  2825. update({'hard_limit': limit})
  2826. if not result:
  2827. raise exception.QuotaClassNotFound(class_name=class_name)
  2828. ###################
  2829. @pick_context_manager_writer
  2830. def quota_destroy_all_by_project_and_user(context, project_id, user_id):
  2831. model_query(context, models.ProjectUserQuota, read_deleted="no").\
  2832. filter_by(project_id=project_id).\
  2833. filter_by(user_id=user_id).\
  2834. soft_delete(synchronize_session=False)
  2835. @pick_context_manager_writer
  2836. def quota_destroy_all_by_project(context, project_id):
  2837. model_query(context, models.Quota, read_deleted="no").\
  2838. filter_by(project_id=project_id).\
  2839. soft_delete(synchronize_session=False)
  2840. model_query(context, models.ProjectUserQuota, read_deleted="no").\
  2841. filter_by(project_id=project_id).\
  2842. soft_delete(synchronize_session=False)
  2843. ###################
  2844. def _ec2_volume_get_query(context):
  2845. return model_query(context, models.VolumeIdMapping, read_deleted='yes')
  2846. def _ec2_snapshot_get_query(context):
  2847. return model_query(context, models.SnapshotIdMapping, read_deleted='yes')
  2848. @require_context
  2849. @pick_context_manager_writer
  2850. def ec2_volume_create(context, volume_uuid, id=None):
  2851. """Create ec2 compatible volume by provided uuid."""
  2852. ec2_volume_ref = models.VolumeIdMapping()
  2853. ec2_volume_ref.update({'uuid': volume_uuid})
  2854. if id is not None:
  2855. ec2_volume_ref.update({'id': id})
  2856. ec2_volume_ref.save(context.session)
  2857. return ec2_volume_ref
  2858. @require_context
  2859. @pick_context_manager_reader
  2860. def ec2_volume_get_by_uuid(context, volume_uuid):
  2861. result = _ec2_volume_get_query(context).\
  2862. filter_by(uuid=volume_uuid).\
  2863. first()
  2864. if not result:
  2865. raise exception.VolumeNotFound(volume_id=volume_uuid)
  2866. return result
  2867. @require_context
  2868. @pick_context_manager_reader
  2869. def ec2_volume_get_by_id(context, volume_id):
  2870. result = _ec2_volume_get_query(context).\
  2871. filter_by(id=volume_id).\
  2872. first()
  2873. if not result:
  2874. raise exception.VolumeNotFound(volume_id=volume_id)
  2875. return result
  2876. @require_context
  2877. @pick_context_manager_writer
  2878. def ec2_snapshot_create(context, snapshot_uuid, id=None):
  2879. """Create ec2 compatible snapshot by provided uuid."""
  2880. ec2_snapshot_ref = models.SnapshotIdMapping()
  2881. ec2_snapshot_ref.update({'uuid': snapshot_uuid})
  2882. if id is not None:
  2883. ec2_snapshot_ref.update({'id': id})
  2884. ec2_snapshot_ref.save(context.session)
  2885. return ec2_snapshot_ref
  2886. @require_context
  2887. @pick_context_manager_reader
  2888. def ec2_snapshot_get_by_ec2_id(context, ec2_id):
  2889. result = _ec2_snapshot_get_query(context).\
  2890. filter_by(id=ec2_id).\
  2891. first()
  2892. if not result:
  2893. raise exception.SnapshotNotFound(snapshot_id=ec2_id)
  2894. return result
  2895. @require_context
  2896. @pick_context_manager_reader
  2897. def ec2_snapshot_get_by_uuid(context, snapshot_uuid):
  2898. result = _ec2_snapshot_get_query(context).\
  2899. filter_by(uuid=snapshot_uuid).\
  2900. first()
  2901. if not result:
  2902. raise exception.SnapshotNotFound(snapshot_id=snapshot_uuid)
  2903. return result
  2904. ###################
  2905. def _block_device_mapping_get_query(context, columns_to_join=None):
  2906. if columns_to_join is None:
  2907. columns_to_join = []
  2908. query = model_query(context, models.BlockDeviceMapping)
  2909. for column in columns_to_join:
  2910. query = query.options(joinedload(column))
  2911. return query
  2912. def _scrub_empty_str_values(dct, keys_to_scrub):
  2913. """Remove any keys found in sequence keys_to_scrub from the dict
  2914. if they have the value ''.
  2915. """
  2916. for key in keys_to_scrub:
  2917. if key in dct and dct[key] == '':
  2918. del dct[key]
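# Illustrative behaviour: only empty-string values are scrubbed; other
# falsy values such as None or 0 are kept:
#
#     >>> d = {'volume_size': '', 'device_name': None, 'boot_index': 0}
#     >>> _scrub_empty_str_values(d, ['volume_size', 'device_name'])
#     >>> d
#     {'device_name': None, 'boot_index': 0}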
  2919. def _from_legacy_values(values, legacy, allow_updates=False):
  2920. if legacy:
  2921. if allow_updates and block_device.is_safe_for_update(values):
  2922. return values
  2923. else:
  2924. return block_device.BlockDeviceDict.from_legacy(values)
  2925. else:
  2926. return values
  2927. def _set_or_validate_uuid(values):
  2928. uuid = values.get('uuid')
  2929. # values doesn't contain uuid, or it's blank
  2930. if not uuid:
  2931. values['uuid'] = uuidutils.generate_uuid()
  2932. # values contains a uuid
  2933. else:
  2934. if not uuidutils.is_uuid_like(uuid):
  2935. raise exception.InvalidUUID(uuid=uuid)
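# Illustrative behaviour: a missing or blank uuid is replaced with a
# freshly generated one, a well-formed uuid is kept, and anything else
# raises InvalidUUID:
#
#     >>> vals = {}
#     >>> _set_or_validate_uuid(vals)
#     >>> uuidutils.is_uuid_like(vals['uuid'])
#     True
#     >>> _set_or_validate_uuid({'uuid': 'not-a-uuid'})
#     Traceback (most recent call last):
#       ...
#     InvalidUUID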
  2936. @require_context
  2937. @pick_context_manager_writer
  2938. def block_device_mapping_create(context, values, legacy=True):
  2939. _scrub_empty_str_values(values, ['volume_size'])
  2940. values = _from_legacy_values(values, legacy)
  2941. convert_objects_related_datetimes(values)
  2942. _set_or_validate_uuid(values)
  2943. bdm_ref = models.BlockDeviceMapping()
  2944. bdm_ref.update(values)
  2945. bdm_ref.save(context.session)
  2946. return bdm_ref
  2947. @require_context
  2948. @pick_context_manager_writer
  2949. def block_device_mapping_update(context, bdm_id, values, legacy=True):
  2950. _scrub_empty_str_values(values, ['volume_size'])
  2951. values = _from_legacy_values(values, legacy, allow_updates=True)
  2952. convert_objects_related_datetimes(values)
  2953. query = _block_device_mapping_get_query(context).filter_by(id=bdm_id)
  2954. query.update(values)
  2955. return query.first()
  2956. @pick_context_manager_writer
  2957. def block_device_mapping_update_or_create(context, values, legacy=True):
  2958. # TODO(mdbooth): Remove this method entirely. Callers should know whether
  2959. # they require update or create, and call the appropriate method.
  2960. _scrub_empty_str_values(values, ['volume_size'])
  2961. values = _from_legacy_values(values, legacy, allow_updates=True)
  2962. convert_objects_related_datetimes(values)
  2963. result = None
  2964. # NOTE(xqueralt,danms): Only update a BDM when device_name or
  2965. # uuid was provided. Prefer the uuid, if available, but fall
  2966. # back to device_name if no uuid is provided, which can happen
  2967. # for BDMs created before we had a uuid. We allow empty device
  2968. # names so they will be set later by the manager.
  2969. if 'uuid' in values:
  2970. query = _block_device_mapping_get_query(context)
  2971. result = query.filter_by(instance_uuid=values['instance_uuid'],
  2972. uuid=values['uuid']).one_or_none()
  2973. if not result and values['device_name']:
  2974. query = _block_device_mapping_get_query(context)
  2975. result = query.filter_by(instance_uuid=values['instance_uuid'],
  2976. device_name=values['device_name']).first()
  2977. if result:
  2978. result.update(values)
  2979. else:
  2980. # Either the device_name or uuid doesn't exist in the database yet, or
  2981. # neither was provided. Both cases mean creating a new BDM.
  2982. _set_or_validate_uuid(values)
  2983. result = models.BlockDeviceMapping(**values)
  2984. result.save(context.session)
  2985. # NOTE(xqueralt): Prevent from having multiple swap devices for the
  2986. # same instance. This will delete all the existing ones.
  2987. if block_device.new_format_is_swap(values):
  2988. query = _block_device_mapping_get_query(context)
  2989. query = query.filter_by(instance_uuid=values['instance_uuid'],
  2990. source_type='blank', guest_format='swap')
  2991. query = query.filter(models.BlockDeviceMapping.id != result.id)
  2992. query.soft_delete()
  2993. return result
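# Lookup precedence above, with hypothetical values: a uuid match wins,
# device_name is only consulted as a fallback, and a miss on both (or
# neither key supplied) creates a new row. Note that the device_name key
# is expected to be present in values, even if empty:
#
#     values = {'instance_uuid': inst_uuid,
#               'uuid': bdm_uuid,           # preferred key, may be absent
#               'device_name': '/dev/vdb'}  # legacy fallback key
#     bdm = block_device_mapping_update_or_create(ctxt, values,
#                                                 legacy=False)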
  2994. @require_context
  2995. @pick_context_manager_reader_allow_async
  2996. def block_device_mapping_get_all_by_instance_uuids(context, instance_uuids):
  2997. if not instance_uuids:
  2998. return []
  2999. return _block_device_mapping_get_query(context).filter(
  3000. models.BlockDeviceMapping.instance_uuid.in_(instance_uuids)).all()
  3001. @require_context
  3002. @pick_context_manager_reader_allow_async
  3003. def block_device_mapping_get_all_by_instance(context, instance_uuid):
  3004. return _block_device_mapping_get_query(context).\
  3005. filter_by(instance_uuid=instance_uuid).\
  3006. all()
  3007. @require_context
  3008. @pick_context_manager_reader
  3009. def block_device_mapping_get_all_by_volume_id(context, volume_id,
  3010. columns_to_join=None):
  3011. return _block_device_mapping_get_query(context,
  3012. columns_to_join=columns_to_join).\
  3013. filter_by(volume_id=volume_id).\
  3014. all()
  3015. @require_context
  3016. @pick_context_manager_reader
  3017. def block_device_mapping_get_by_instance_and_volume_id(context, volume_id,
  3018. instance_uuid,
  3019. columns_to_join=None):
  3020. return _block_device_mapping_get_query(context,
  3021. columns_to_join=columns_to_join).\
  3022. filter_by(volume_id=volume_id).\
  3023. filter_by(instance_uuid=instance_uuid).\
  3024. first()
  3025. @require_context
  3026. @pick_context_manager_writer
  3027. def block_device_mapping_destroy(context, bdm_id):
  3028. _block_device_mapping_get_query(context).\
  3029. filter_by(id=bdm_id).\
  3030. soft_delete()
  3031. @require_context
  3032. @pick_context_manager_writer
  3033. def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
  3034. volume_id):
  3035. _block_device_mapping_get_query(context).\
  3036. filter_by(instance_uuid=instance_uuid).\
  3037. filter_by(volume_id=volume_id).\
  3038. soft_delete()
  3039. @require_context
  3040. @pick_context_manager_writer
  3041. def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
  3042. device_name):
  3043. _block_device_mapping_get_query(context).\
  3044. filter_by(instance_uuid=instance_uuid).\
  3045. filter_by(device_name=device_name).\
  3046. soft_delete()
  3047. ###################
  3048. @require_context
  3049. @pick_context_manager_writer
  3050. def security_group_create(context, values):
  3051. security_group_ref = models.SecurityGroup()
3052. # FIXME(devcamcar): Unless I do this, rules fail with a lazy load exception
3053. # once save() is called. This will get cleaned up in the next orm pass.
  3054. security_group_ref.rules
  3055. security_group_ref.update(values)
  3056. try:
  3057. with get_context_manager(context).writer.savepoint.using(context):
  3058. security_group_ref.save(context.session)
  3059. except db_exc.DBDuplicateEntry:
  3060. raise exception.SecurityGroupExists(
  3061. project_id=values['project_id'],
  3062. security_group_name=values['name'])
  3063. return security_group_ref
  3064. def _security_group_get_query(context, read_deleted=None,
  3065. project_only=False, join_rules=True):
  3066. query = model_query(context, models.SecurityGroup,
  3067. read_deleted=read_deleted, project_only=project_only)
  3068. if join_rules:
  3069. query = query.options(joinedload_all('rules.grantee_group'))
  3070. return query
  3071. def _security_group_get_by_names(context, group_names):
  3072. """Get security group models for a project by a list of names.
  3073. Raise SecurityGroupNotFoundForProject for a name not found.
  3074. """
  3075. query = _security_group_get_query(context, read_deleted="no",
  3076. join_rules=False).\
  3077. filter_by(project_id=context.project_id).\
  3078. filter(models.SecurityGroup.name.in_(group_names))
  3079. sg_models = query.all()
  3080. if len(sg_models) == len(group_names):
  3081. return sg_models
  3082. # Find the first one missing and raise
  3083. group_names_from_models = [x.name for x in sg_models]
  3084. for group_name in group_names:
  3085. if group_name not in group_names_from_models:
  3086. raise exception.SecurityGroupNotFoundForProject(
  3087. project_id=context.project_id, security_group_id=group_name)
  3088. # Not Reached
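# All-or-nothing semantics, illustrated with hypothetical names: every
# requested group must exist for the caller's project, otherwise the first
# missing name (in input order) is reported:
#
#     sg_models = _security_group_get_by_names(ctxt, ['default', 'web'])
#     # either both models are returned, or
#     # SecurityGroupNotFoundForProject is raised for the missing name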
  3089. @require_context
  3090. @pick_context_manager_reader
  3091. def security_group_get_all(context):
  3092. return _security_group_get_query(context).all()
  3093. @require_context
  3094. @pick_context_manager_reader
  3095. def security_group_get(context, security_group_id, columns_to_join=None):
  3096. join_rules = columns_to_join and 'rules' in columns_to_join
  3097. if join_rules:
  3098. columns_to_join.remove('rules')
  3099. query = _security_group_get_query(context, project_only=True,
  3100. join_rules=join_rules).\
  3101. filter_by(id=security_group_id)
  3102. if columns_to_join is None:
  3103. columns_to_join = []
  3104. for column in columns_to_join:
  3105. if column.startswith('instances'):
  3106. query = query.options(joinedload_all(column))
  3107. result = query.first()
  3108. if not result:
  3109. raise exception.SecurityGroupNotFound(
  3110. security_group_id=security_group_id)
  3111. return result
  3112. @require_context
  3113. @pick_context_manager_reader
  3114. def security_group_get_by_name(context, project_id, group_name,
  3115. columns_to_join=None):
  3116. query = _security_group_get_query(context,
  3117. read_deleted="no", join_rules=False).\
  3118. filter_by(project_id=project_id).\
  3119. filter_by(name=group_name)
  3120. if columns_to_join is None:
  3121. columns_to_join = ['instances', 'rules.grantee_group']
  3122. for column in columns_to_join:
  3123. query = query.options(joinedload_all(column))
  3124. result = query.first()
  3125. if not result:
  3126. raise exception.SecurityGroupNotFoundForProject(
  3127. project_id=project_id, security_group_id=group_name)
  3128. return result
  3129. @require_context
  3130. @pick_context_manager_reader
  3131. def security_group_get_by_project(context, project_id):
  3132. return _security_group_get_query(context, read_deleted="no").\
  3133. filter_by(project_id=project_id).\
  3134. all()
  3135. @require_context
  3136. @pick_context_manager_reader
  3137. def security_group_get_by_instance(context, instance_uuid):
  3138. return _security_group_get_query(context, read_deleted="no").\
  3139. join(models.SecurityGroup.instances).\
  3140. filter_by(uuid=instance_uuid).\
  3141. all()
  3142. @require_context
  3143. @pick_context_manager_reader
  3144. def security_group_in_use(context, group_id):
  3145. # Are there any instances that haven't been deleted
  3146. # that include this group?
  3147. inst_assoc = model_query(context,
  3148. models.SecurityGroupInstanceAssociation,
  3149. read_deleted="no").\
  3150. filter_by(security_group_id=group_id).\
  3151. all()
  3152. for ia in inst_assoc:
  3153. num_instances = model_query(context, models.Instance,
  3154. read_deleted="no").\
  3155. filter_by(uuid=ia.instance_uuid).\
  3156. count()
  3157. if num_instances:
  3158. return True
  3159. return False
  3160. @require_context
  3161. @pick_context_manager_writer
  3162. def security_group_update(context, security_group_id, values,
  3163. columns_to_join=None):
  3164. query = model_query(context, models.SecurityGroup).filter_by(
  3165. id=security_group_id)
  3166. if columns_to_join:
  3167. for column in columns_to_join:
  3168. query = query.options(joinedload_all(column))
  3169. security_group_ref = query.first()
  3170. if not security_group_ref:
  3171. raise exception.SecurityGroupNotFound(
  3172. security_group_id=security_group_id)
  3173. security_group_ref.update(values)
  3174. name = security_group_ref['name']
  3175. project_id = security_group_ref['project_id']
  3176. try:
  3177. security_group_ref.save(context.session)
  3178. except db_exc.DBDuplicateEntry:
  3179. raise exception.SecurityGroupExists(
  3180. project_id=project_id,
  3181. security_group_name=name)
  3182. return security_group_ref
  3183. def security_group_ensure_default(context):
  3184. """Ensure default security group exists for a project_id."""
  3185. try:
  3186. # NOTE(rpodolyaka): create the default security group, if it doesn't
  3187. # exist. This must be done in a separate transaction, so that
  3188. # this one is not aborted in case a concurrent one succeeds first
  3189. # and the unique constraint for security group names is violated
  3190. # by a concurrent INSERT
  3191. with get_context_manager(context).writer.independent.using(context):
  3192. return _security_group_ensure_default(context)
  3193. except exception.SecurityGroupExists:
  3194. # NOTE(rpodolyaka): a concurrent transaction has succeeded first,
  3195. # suppress the error and proceed
  3196. return security_group_get_by_name(context, context.project_id,
  3197. 'default')
  3198. @pick_context_manager_writer
  3199. def _security_group_ensure_default(context):
  3200. try:
  3201. default_group = _security_group_get_by_names(context, ['default'])[0]
  3202. except exception.NotFound:
  3203. values = {'name': 'default',
  3204. 'description': 'default',
  3205. 'user_id': context.user_id,
  3206. 'project_id': context.project_id}
  3207. default_group = security_group_create(context, values)
  3208. default_rules = _security_group_rule_get_default_query(context).all()
  3209. for default_rule in default_rules:
3210. # This is suboptimal; the values of the default_rule should be
3211. # determined programmatically rather than copied field by field.
  3212. rule_values = {'protocol': default_rule.protocol,
  3213. 'from_port': default_rule.from_port,
  3214. 'to_port': default_rule.to_port,
  3215. 'cidr': default_rule.cidr,
  3216. 'parent_group_id': default_group.id,
  3217. }
  3218. _security_group_rule_create(context, rule_values)
  3219. return default_group
  3220. @require_context
  3221. @pick_context_manager_writer
  3222. def security_group_destroy(context, security_group_id):
  3223. model_query(context, models.SecurityGroup).\
  3224. filter_by(id=security_group_id).\
  3225. soft_delete()
  3226. model_query(context, models.SecurityGroupInstanceAssociation).\
  3227. filter_by(security_group_id=security_group_id).\
  3228. soft_delete()
  3229. model_query(context, models.SecurityGroupIngressRule).\
  3230. filter_by(group_id=security_group_id).\
  3231. soft_delete()
  3232. model_query(context, models.SecurityGroupIngressRule).\
  3233. filter_by(parent_group_id=security_group_id).\
  3234. soft_delete()
  3235. def _security_group_count_by_project_and_user(context, project_id, user_id):
  3236. nova.context.authorize_project_context(context, project_id)
  3237. return model_query(context, models.SecurityGroup, read_deleted="no").\
  3238. filter_by(project_id=project_id).\
  3239. filter_by(user_id=user_id).\
  3240. count()
  3241. ###################
  3242. def _security_group_rule_create(context, values):
  3243. security_group_rule_ref = models.SecurityGroupIngressRule()
  3244. security_group_rule_ref.update(values)
  3245. security_group_rule_ref.save(context.session)
  3246. return security_group_rule_ref
  3247. def _security_group_rule_get_query(context):
  3248. return model_query(context, models.SecurityGroupIngressRule)
  3249. @require_context
  3250. @pick_context_manager_reader
  3251. def security_group_rule_get(context, security_group_rule_id):
  3252. result = (_security_group_rule_get_query(context).
  3253. filter_by(id=security_group_rule_id).
  3254. first())
  3255. if not result:
  3256. raise exception.SecurityGroupNotFoundForRule(
  3257. rule_id=security_group_rule_id)
  3258. return result
  3259. @require_context
  3260. @pick_context_manager_reader
  3261. def security_group_rule_get_by_security_group(context, security_group_id,
  3262. columns_to_join=None):
  3263. if columns_to_join is None:
  3264. columns_to_join = ['grantee_group.instances.system_metadata',
  3265. 'grantee_group.instances.info_cache']
  3266. query = (_security_group_rule_get_query(context).
  3267. filter_by(parent_group_id=security_group_id))
  3268. for column in columns_to_join:
  3269. query = query.options(joinedload_all(column))
  3270. return query.all()
  3271. @require_context
  3272. @pick_context_manager_reader
  3273. def security_group_rule_get_by_instance(context, instance_uuid):
  3274. return (_security_group_rule_get_query(context).
  3275. join('parent_group', 'instances').
  3276. filter_by(uuid=instance_uuid).
  3277. options(joinedload('grantee_group')).
  3278. all())
  3279. @require_context
  3280. @pick_context_manager_writer
  3281. def security_group_rule_create(context, values):
  3282. return _security_group_rule_create(context, values)
  3283. @require_context
  3284. @pick_context_manager_writer
  3285. def security_group_rule_destroy(context, security_group_rule_id):
  3286. count = (_security_group_rule_get_query(context).
  3287. filter_by(id=security_group_rule_id).
  3288. soft_delete())
  3289. if count == 0:
  3290. raise exception.SecurityGroupNotFoundForRule(
  3291. rule_id=security_group_rule_id)
  3292. @require_context
  3293. @pick_context_manager_reader
  3294. def security_group_rule_count_by_group(context, security_group_id):
  3295. return (model_query(context, models.SecurityGroupIngressRule,
  3296. read_deleted="no").
  3297. filter_by(parent_group_id=security_group_id).
  3298. count())
  3299. ###################
  3300. def _security_group_rule_get_default_query(context):
  3301. return model_query(context, models.SecurityGroupIngressDefaultRule)
  3302. @require_context
  3303. @pick_context_manager_reader
  3304. def security_group_default_rule_get(context, security_group_rule_default_id):
  3305. result = _security_group_rule_get_default_query(context).\
  3306. filter_by(id=security_group_rule_default_id).\
  3307. first()
  3308. if not result:
  3309. raise exception.SecurityGroupDefaultRuleNotFound(
  3310. rule_id=security_group_rule_default_id)
  3311. return result
  3312. @pick_context_manager_writer
  3313. def security_group_default_rule_destroy(context,
  3314. security_group_rule_default_id):
  3315. count = _security_group_rule_get_default_query(context).\
  3316. filter_by(id=security_group_rule_default_id).\
  3317. soft_delete()
  3318. if count == 0:
  3319. raise exception.SecurityGroupDefaultRuleNotFound(
  3320. rule_id=security_group_rule_default_id)
  3321. @pick_context_manager_writer
  3322. def security_group_default_rule_create(context, values):
  3323. security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
  3324. security_group_default_rule_ref.update(values)
  3325. security_group_default_rule_ref.save(context.session)
  3326. return security_group_default_rule_ref
  3327. @require_context
  3328. @pick_context_manager_reader
  3329. def security_group_default_rule_list(context):
  3330. return _security_group_rule_get_default_query(context).all()
  3331. ###################
  3332. @pick_context_manager_writer
  3333. def provider_fw_rule_create(context, rule):
  3334. fw_rule_ref = models.ProviderFirewallRule()
  3335. fw_rule_ref.update(rule)
  3336. fw_rule_ref.save(context.session)
  3337. return fw_rule_ref
  3338. @pick_context_manager_reader
  3339. def provider_fw_rule_get_all(context):
  3340. return model_query(context, models.ProviderFirewallRule).all()
  3341. @pick_context_manager_writer
  3342. def provider_fw_rule_destroy(context, rule_id):
  3343. context.session.query(models.ProviderFirewallRule).\
  3344. filter_by(id=rule_id).\
  3345. soft_delete()
  3346. ###################
  3347. @require_context
  3348. @pick_context_manager_writer
  3349. def project_get_networks(context, project_id, associate=True):
  3350. # NOTE(tr3buchet): as before this function will associate
  3351. # a project with a network if it doesn't have one and
  3352. # associate is true
  3353. result = model_query(context, models.Network, read_deleted="no").\
  3354. filter_by(project_id=project_id).\
  3355. all()
  3356. if not result:
  3357. if not associate:
  3358. return []
  3359. return [network_associate(context, project_id)]
  3360. return result
  3361. ###################
  3362. @pick_context_manager_writer
  3363. def migration_create(context, values):
  3364. migration = models.Migration()
  3365. migration.update(values)
  3366. migration.save(context.session)
  3367. return migration
  3368. @pick_context_manager_writer
  3369. def migration_update(context, id, values):
  3370. migration = migration_get(context, id)
  3371. migration.update(values)
  3372. return migration
  3373. @pick_context_manager_reader
  3374. def migration_get(context, id):
  3375. result = model_query(context, models.Migration, read_deleted="yes").\
  3376. filter_by(id=id).\
  3377. first()
  3378. if not result:
  3379. raise exception.MigrationNotFound(migration_id=id)
  3380. return result
  3381. @pick_context_manager_reader
  3382. def migration_get_by_uuid(context, migration_uuid):
  3383. result = model_query(context, models.Migration, read_deleted="yes").\
  3384. filter_by(uuid=migration_uuid).\
  3385. first()
  3386. if not result:
  3387. raise exception.MigrationNotFound(migration_id=migration_uuid)
  3388. return result
  3389. @pick_context_manager_reader
  3390. def migration_get_by_id_and_instance(context, id, instance_uuid):
  3391. result = model_query(context, models.Migration).\
  3392. filter_by(id=id).\
  3393. filter_by(instance_uuid=instance_uuid).\
  3394. first()
  3395. if not result:
  3396. raise exception.MigrationNotFoundForInstance(migration_id=id,
  3397. instance_id=instance_uuid)
  3398. return result
  3399. @pick_context_manager_reader
  3400. def migration_get_by_instance_and_status(context, instance_uuid, status):
  3401. result = model_query(context, models.Migration, read_deleted="yes").\
  3402. filter_by(instance_uuid=instance_uuid).\
  3403. filter_by(status=status).\
  3404. first()
  3405. if not result:
  3406. raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
  3407. status=status)
  3408. return result
  3409. @pick_context_manager_reader_allow_async
  3410. def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
  3411. dest_compute):
  3412. confirm_window = (timeutils.utcnow() -
  3413. datetime.timedelta(seconds=confirm_window))
  3414. return model_query(context, models.Migration, read_deleted="yes").\
  3415. filter(models.Migration.updated_at <= confirm_window).\
  3416. filter_by(status="finished").\
  3417. filter_by(dest_compute=dest_compute).\
  3418. all()
  3419. @pick_context_manager_reader
  3420. def migration_get_in_progress_by_host_and_node(context, host, node):
  3421. # TODO(mriedem): Tracking what various code flows set for
  3422. # migration status is nutty, since it happens all over the place
  3423. # and several of the statuses are redundant (done and completed).
  3424. # We need to define these in an enum somewhere and just update
  3425. # that one central place that defines what "in progress" means.
  3426. # NOTE(mriedem): The 'finished' status is not in this list because
  3427. # 'finished' means a resize is finished on the destination host
  3428. # and the instance is in VERIFY_RESIZE state, so the end state
  3429. # for a resize is actually 'confirmed' or 'reverted'.
  3430. return model_query(context, models.Migration).\
  3431. filter(or_(and_(models.Migration.source_compute == host,
  3432. models.Migration.source_node == node),
  3433. and_(models.Migration.dest_compute == host,
  3434. models.Migration.dest_node == node))).\
  3435. filter(~models.Migration.status.in_(['accepted', 'confirmed',
  3436. 'reverted', 'error',
  3437. 'failed', 'completed',
  3438. 'cancelled', 'done'])).\
  3439. options(joinedload_all('instance.system_metadata')).\
  3440. all()
  3441. @pick_context_manager_reader
  3442. def migration_get_in_progress_by_instance(context, instance_uuid,
  3443. migration_type=None):
  3444. # TODO(Shaohe Feng) we should share the in-progress list.
3445. # TODO(Shaohe Feng) we will also summarize all statuses into a new
3446. # MigrationStatus class.
  3447. query = model_query(context, models.Migration).\
  3448. filter_by(instance_uuid=instance_uuid).\
  3449. filter(models.Migration.status.in_(['queued', 'preparing',
  3450. 'running',
  3451. 'post-migrating']))
  3452. if migration_type:
  3453. query = query.filter(models.Migration.migration_type == migration_type)
  3454. return query.all()
  3455. @pick_context_manager_reader
  3456. def migration_get_all_by_filters(context, filters,
  3457. sort_keys=None, sort_dirs=None,
  3458. limit=None, marker=None):
  3459. if limit == 0:
  3460. return []
  3461. query = model_query(context, models.Migration)
  3462. if "uuid" in filters:
  3463. # The uuid filter is here for the MigrationLister and multi-cell
  3464. # paging support in the compute API.
  3465. uuid = filters["uuid"]
  3466. uuid = [uuid] if isinstance(uuid, six.string_types) else uuid
  3467. query = query.filter(models.Migration.uuid.in_(uuid))
  3468. if 'changes-since' in filters:
  3469. changes_since = timeutils.normalize_time(filters['changes-since'])
  3470. query = query. \
  3471. filter(models.Migration.updated_at >= changes_since)
  3472. if "status" in filters:
  3473. status = filters["status"]
  3474. status = [status] if isinstance(status, six.string_types) else status
  3475. query = query.filter(models.Migration.status.in_(status))
  3476. if "host" in filters:
  3477. host = filters["host"]
  3478. query = query.filter(or_(models.Migration.source_compute == host,
  3479. models.Migration.dest_compute == host))
  3480. elif "source_compute" in filters:
  3481. host = filters['source_compute']
  3482. query = query.filter(models.Migration.source_compute == host)
  3483. if "migration_type" in filters:
  3484. migtype = filters["migration_type"]
  3485. query = query.filter(models.Migration.migration_type == migtype)
  3486. if "hidden" in filters:
  3487. hidden = filters["hidden"]
  3488. query = query.filter(models.Migration.hidden == hidden)
  3489. if "instance_uuid" in filters:
  3490. instance_uuid = filters["instance_uuid"]
  3491. query = query.filter(models.Migration.instance_uuid == instance_uuid)
  3492. if marker:
  3493. try:
  3494. marker = migration_get_by_uuid(context, marker)
  3495. except exception.MigrationNotFound:
  3496. raise exception.MarkerNotFound(marker=marker)
  3497. if limit or marker or sort_keys or sort_dirs:
  3498. # Default sort by desc(['created_at', 'id'])
  3499. sort_keys, sort_dirs = process_sort_params(sort_keys, sort_dirs,
  3500. default_dir='desc')
  3501. return sqlalchemyutils.paginate_query(query,
  3502. models.Migration,
  3503. limit=limit,
  3504. sort_keys=sort_keys,
  3505. marker=marker,
  3506. sort_dirs=sort_dirs).all()
  3507. else:
  3508. return query.all()
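# Illustrative call combining the filters handled above with uuid-marker
# paging; all values are hypothetical:
#
#     migrations = migration_get_all_by_filters(
#         ctxt,
#         {'status': ['running', 'post-migrating'], 'host': 'compute-1'},
#         sort_keys=['created_at'], sort_dirs=['desc'],
#         limit=50, marker=last_seen_uuid)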
  3509. @require_context
  3510. @pick_context_manager_reader_allow_async
  3511. def migration_get_by_sort_filters(context, sort_keys, sort_dirs, values):
  3512. """Attempt to get a single migration based on a combination of sort
  3513. keys, directions and filter values. This is used to try to find a
  3514. marker migration when we don't have a marker uuid.
3515. This returns just the uuid of the migration that matched.
  3516. """
  3517. model = models.Migration
  3518. return _model_get_uuid_by_sort_filters(context, model, sort_keys,
  3519. sort_dirs, values)
  3520. @pick_context_manager_writer
  3521. def migration_migrate_to_uuid(context, count):
  3522. # Avoid circular import
  3523. from nova import objects
  3524. db_migrations = model_query(context, models.Migration).filter_by(
  3525. uuid=None).limit(count).all()
  3526. done = 0
  3527. for db_migration in db_migrations:
  3528. mig = objects.Migration(context)
  3529. mig._from_db_object(context, mig, db_migration)
  3530. done += 1
  3531. # We don't have any situation where we can (detectably) not
  3532. # migrate a thing, so report anything that matched as "completed".
  3533. return done, done
  3534. ##################
  3535. @pick_context_manager_writer
  3536. def console_pool_create(context, values):
  3537. pool = models.ConsolePool()
  3538. pool.update(values)
  3539. try:
  3540. pool.save(context.session)
  3541. except db_exc.DBDuplicateEntry:
  3542. raise exception.ConsolePoolExists(
  3543. host=values["host"],
  3544. console_type=values["console_type"],
  3545. compute_host=values["compute_host"],
  3546. )
  3547. return pool
  3548. @pick_context_manager_reader
  3549. def console_pool_get_by_host_type(context, compute_host, host,
  3550. console_type):
  3551. result = model_query(context, models.ConsolePool, read_deleted="no").\
  3552. filter_by(host=host).\
  3553. filter_by(console_type=console_type).\
  3554. filter_by(compute_host=compute_host).\
  3555. options(joinedload('consoles')).\
  3556. first()
  3557. if not result:
  3558. raise exception.ConsolePoolNotFoundForHostType(
  3559. host=host, console_type=console_type,
  3560. compute_host=compute_host)
  3561. return result
  3562. @pick_context_manager_reader
  3563. def console_pool_get_all_by_host_type(context, host, console_type):
  3564. return model_query(context, models.ConsolePool, read_deleted="no").\
  3565. filter_by(host=host).\
  3566. filter_by(console_type=console_type).\
  3567. options(joinedload('consoles')).\
  3568. all()
  3569. ##################
  3570. @pick_context_manager_writer
  3571. def console_create(context, values):
  3572. console = models.Console()
  3573. console.update(values)
  3574. console.save(context.session)
  3575. return console
  3576. @pick_context_manager_writer
  3577. def console_delete(context, console_id):
  3578. # NOTE(mdragon): consoles are meant to be transient.
  3579. context.session.query(models.Console).\
  3580. filter_by(id=console_id).\
  3581. delete()
  3582. @pick_context_manager_reader
  3583. def console_get_by_pool_instance(context, pool_id, instance_uuid):
  3584. result = model_query(context, models.Console, read_deleted="yes").\
  3585. filter_by(pool_id=pool_id).\
  3586. filter_by(instance_uuid=instance_uuid).\
  3587. options(joinedload('pool')).\
  3588. first()
  3589. if not result:
  3590. raise exception.ConsoleNotFoundInPoolForInstance(
  3591. pool_id=pool_id, instance_uuid=instance_uuid)
  3592. return result
  3593. @pick_context_manager_reader
  3594. def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
  3595. query = model_query(context, models.Console, read_deleted="yes").\
  3596. filter_by(instance_uuid=instance_uuid)
  3597. if columns_to_join:
  3598. for column in columns_to_join:
  3599. query = query.options(joinedload(column))
  3600. return query.all()
  3601. @pick_context_manager_reader
  3602. def console_get(context, console_id, instance_uuid=None):
  3603. query = model_query(context, models.Console, read_deleted="yes").\
  3604. filter_by(id=console_id).\
  3605. options(joinedload('pool'))
  3606. if instance_uuid is not None:
  3607. query = query.filter_by(instance_uuid=instance_uuid)
  3608. result = query.first()
  3609. if not result:
  3610. if instance_uuid:
  3611. raise exception.ConsoleNotFoundForInstance(
  3612. instance_uuid=instance_uuid)
  3613. else:
  3614. raise exception.ConsoleNotFound(console_id=console_id)
  3615. return result
  3616. ##################
  3617. @pick_context_manager_writer
  3618. def flavor_create(context, values, projects=None):
  3619. """Create a new instance type. In order to pass in extra specs,
3620. the values dict should contain an 'extra_specs' key/value pair:
  3621. {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
  3622. """
  3623. specs = values.get('extra_specs')
  3624. specs_refs = []
  3625. if specs:
  3626. for k, v in specs.items():
  3627. specs_ref = models.InstanceTypeExtraSpecs()
  3628. specs_ref['key'] = k
  3629. specs_ref['value'] = v
  3630. specs_refs.append(specs_ref)
  3631. values['extra_specs'] = specs_refs
  3632. instance_type_ref = models.InstanceTypes()
  3633. instance_type_ref.update(values)
  3634. if projects is None:
  3635. projects = []
  3636. try:
  3637. instance_type_ref.save(context.session)
  3638. except db_exc.DBDuplicateEntry as e:
  3639. if 'flavorid' in e.columns:
  3640. raise exception.FlavorIdExists(flavor_id=values['flavorid'])
  3641. raise exception.FlavorExists(name=values['name'])
  3642. except Exception as e:
  3643. raise db_exc.DBError(e)
  3644. for project in set(projects):
  3645. access_ref = models.InstanceTypeProjects()
  3646. access_ref.update({"instance_type_id": instance_type_ref.id,
  3647. "project_id": project})
  3648. access_ref.save(context.session)
  3649. return _dict_with_extra_specs(instance_type_ref)
  3650. def _dict_with_extra_specs(inst_type_query):
  3651. """Takes an instance or instance type query returned
  3652. by sqlalchemy and returns it as a dictionary, converting the
  3653. extra_specs entry from a list of dicts:
  3654. 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
  3655. to a single dict:
  3656. 'extra_specs' : {'k1': 'v1'}
  3657. """
  3658. inst_type_dict = dict(inst_type_query)
  3659. extra_specs = {x['key']: x['value']
  3660. for x in inst_type_query['extra_specs']}
  3661. inst_type_dict['extra_specs'] = extra_specs
  3662. return inst_type_dict
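# Illustrative example of the conversion performed above:
#
#     >>> row = {'flavorid': 'm1.small',
#     ...        'extra_specs': [{'key': 'hw:cpu_policy',
#     ...                         'value': 'dedicated'}]}
#     >>> _dict_with_extra_specs(row)
#     {'flavorid': 'm1.small', 'extra_specs': {'hw:cpu_policy': 'dedicated'}}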
  3663. def _flavor_get_query(context, read_deleted=None):
  3664. query = model_query(context, models.InstanceTypes,
  3665. read_deleted=read_deleted).\
  3666. options(joinedload('extra_specs'))
  3667. if not context.is_admin:
  3668. the_filter = [models.InstanceTypes.is_public == true()]
  3669. the_filter.extend([
  3670. models.InstanceTypes.projects.any(project_id=context.project_id)
  3671. ])
  3672. query = query.filter(or_(*the_filter))
  3673. return query
  3674. @require_context
  3675. @pick_context_manager_reader
  3676. def flavor_get_all(context, inactive=False, filters=None,
  3677. sort_key='flavorid', sort_dir='asc', limit=None,
  3678. marker=None):
  3679. """Returns all flavors.
  3680. """
  3681. filters = filters or {}
  3682. # FIXME(sirp): now that we have the `disabled` field for flavors, we
  3683. # should probably remove the use of `deleted` to mark inactive. `deleted`
  3684. # should mean truly deleted, e.g. we can safely purge the record out of the
  3685. # database.
  3686. read_deleted = "yes" if inactive else "no"
  3687. query = _flavor_get_query(context, read_deleted=read_deleted)
  3688. if 'min_memory_mb' in filters:
  3689. query = query.filter(
  3690. models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
  3691. if 'min_root_gb' in filters:
  3692. query = query.filter(
  3693. models.InstanceTypes.root_gb >= filters['min_root_gb'])
  3694. if 'disabled' in filters:
  3695. query = query.filter(
  3696. models.InstanceTypes.disabled == filters['disabled'])
  3697. if 'is_public' in filters and filters['is_public'] is not None:
  3698. the_filter = [models.InstanceTypes.is_public == filters['is_public']]
  3699. if filters['is_public'] and context.project_id is not None:
  3700. the_filter.extend([
  3701. models.InstanceTypes.projects.any(
  3702. project_id=context.project_id, deleted=0)
  3703. ])
  3704. if len(the_filter) > 1:
  3705. query = query.filter(or_(*the_filter))
  3706. else:
  3707. query = query.filter(the_filter[0])
  3708. marker_row = None
  3709. if marker is not None:
  3710. marker_row = _flavor_get_query(context, read_deleted=read_deleted).\
  3711. filter_by(flavorid=marker).\
  3712. first()
  3713. if not marker_row:
  3714. raise exception.MarkerNotFound(marker=marker)
  3715. query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit,
  3716. [sort_key, 'id'],
  3717. marker=marker_row,
  3718. sort_dir=sort_dir)
  3719. inst_types = query.all()
  3720. return [_dict_with_extra_specs(i) for i in inst_types]
  3721. def _flavor_get_id_from_flavor_query(context, flavor_id):
  3722. return model_query(context, models.InstanceTypes,
  3723. (models.InstanceTypes.id,),
  3724. read_deleted="no").\
  3725. filter_by(flavorid=flavor_id)
  3726. def _flavor_get_id_from_flavor(context, flavor_id):
  3727. result = _flavor_get_id_from_flavor_query(context, flavor_id).first()
  3728. if not result:
  3729. raise exception.FlavorNotFound(flavor_id=flavor_id)
  3730. return result[0]
  3731. @require_context
  3732. @pick_context_manager_reader
  3733. def flavor_get(context, id):
  3734. """Returns a dict describing specific flavor."""
  3735. result = _flavor_get_query(context).\
  3736. filter_by(id=id).\
  3737. first()
  3738. if not result:
  3739. raise exception.FlavorNotFound(flavor_id=id)
  3740. return _dict_with_extra_specs(result)
  3741. @require_context
  3742. @pick_context_manager_reader
  3743. def flavor_get_by_name(context, name):
  3744. """Returns a dict describing specific flavor."""
  3745. result = _flavor_get_query(context).\
  3746. filter_by(name=name).\
  3747. first()
  3748. if not result:
  3749. raise exception.FlavorNotFoundByName(flavor_name=name)
  3750. return _dict_with_extra_specs(result)
  3751. @require_context
  3752. @pick_context_manager_reader
  3753. def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
  3754. """Returns a dict describing specific flavor_id."""
  3755. result = _flavor_get_query(context, read_deleted=read_deleted).\
  3756. filter_by(flavorid=flavor_id).\
  3757. order_by(asc(models.InstanceTypes.deleted),
  3758. asc(models.InstanceTypes.id)).\
  3759. first()
  3760. if not result:
  3761. raise exception.FlavorNotFound(flavor_id=flavor_id)
  3762. return _dict_with_extra_specs(result)
  3763. @pick_context_manager_writer
  3764. def flavor_destroy(context, flavor_id):
  3765. """Marks specific flavor as deleted."""
  3766. ref = model_query(context, models.InstanceTypes, read_deleted="no").\
  3767. filter_by(flavorid=flavor_id).\
  3768. first()
  3769. if not ref:
  3770. raise exception.FlavorNotFound(flavor_id=flavor_id)
  3771. ref.soft_delete(context.session)
  3772. model_query(context, models.InstanceTypeExtraSpecs, read_deleted="no").\
  3773. filter_by(instance_type_id=ref['id']).\
  3774. soft_delete()
  3775. model_query(context, models.InstanceTypeProjects, read_deleted="no").\
  3776. filter_by(instance_type_id=ref['id']).\
  3777. soft_delete()
  3778. def _flavor_access_query(context):
  3779. return model_query(context, models.InstanceTypeProjects, read_deleted="no")
  3780. @pick_context_manager_reader
  3781. def flavor_access_get_by_flavor_id(context, flavor_id):
  3782. """Get flavor access list by flavor id."""
  3783. instance_type_id_subq = _flavor_get_id_from_flavor_query(context,
  3784. flavor_id)
  3785. access_refs = _flavor_access_query(context).\
  3786. filter_by(instance_type_id=instance_type_id_subq).\
  3787. all()
  3788. return access_refs
  3789. @pick_context_manager_writer
  3790. def flavor_access_add(context, flavor_id, project_id):
  3791. """Add given tenant to the flavor access list."""
  3792. instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
  3793. access_ref = models.InstanceTypeProjects()
  3794. access_ref.update({"instance_type_id": instance_type_id,
  3795. "project_id": project_id})
  3796. try:
  3797. access_ref.save(context.session)
  3798. except db_exc.DBDuplicateEntry:
  3799. raise exception.FlavorAccessExists(flavor_id=flavor_id,
  3800. project_id=project_id)
  3801. return access_ref
  3802. @pick_context_manager_writer
  3803. def flavor_access_remove(context, flavor_id, project_id):
  3804. """Remove given tenant from the flavor access list."""
  3805. instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
  3806. count = _flavor_access_query(context).\
  3807. filter_by(instance_type_id=instance_type_id).\
  3808. filter_by(project_id=project_id).\
  3809. soft_delete(synchronize_session=False)
  3810. if count == 0:
  3811. raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
  3812. project_id=project_id)
  3813. def _flavor_extra_specs_get_query(context, flavor_id):
  3814. instance_type_id_subq = _flavor_get_id_from_flavor_query(context,
  3815. flavor_id)
  3816. return model_query(context, models.InstanceTypeExtraSpecs,
  3817. read_deleted="no").\
  3818. filter_by(instance_type_id=instance_type_id_subq)
  3819. @require_context
  3820. @pick_context_manager_reader
  3821. def flavor_extra_specs_get(context, flavor_id):
  3822. rows = _flavor_extra_specs_get_query(context, flavor_id).all()
  3823. return {row['key']: row['value'] for row in rows}
  3824. @require_context
  3825. @pick_context_manager_writer
  3826. def flavor_extra_specs_delete(context, flavor_id, key):
  3827. result = _flavor_extra_specs_get_query(context, flavor_id).\
  3828. filter(models.InstanceTypeExtraSpecs.key == key).\
  3829. soft_delete(synchronize_session=False)
  3830. # did not find the extra spec
  3831. if result == 0:
  3832. raise exception.FlavorExtraSpecsNotFound(
  3833. extra_specs_key=key, flavor_id=flavor_id)
  3834. @require_context
  3835. @pick_context_manager_writer
  3836. def flavor_extra_specs_update_or_create(context, flavor_id, specs,
  3837. max_retries=10):
  3838. for attempt in range(max_retries):
  3839. try:
  3840. instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
  3841. spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
  3842. read_deleted="no").\
  3843. filter_by(instance_type_id=instance_type_id).\
  3844. filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
  3845. all()
  3846. existing_keys = set()
  3847. for spec_ref in spec_refs:
  3848. key = spec_ref["key"]
  3849. existing_keys.add(key)
  3850. with get_context_manager(context).writer.savepoint.using(
  3851. context):
  3852. spec_ref.update({"value": specs[key]})
  3853. for key, value in specs.items():
  3854. if key in existing_keys:
  3855. continue
  3856. spec_ref = models.InstanceTypeExtraSpecs()
  3857. with get_context_manager(context).writer.savepoint.using(
  3858. context):
  3859. spec_ref.update({"key": key, "value": value,
  3860. "instance_type_id": instance_type_id})
  3861. context.session.add(spec_ref)
  3862. return specs
  3863. except db_exc.DBDuplicateEntry:
  3864. # a concurrent transaction has been committed,
  3865. # try again unless this was the last attempt
  3866. if attempt == max_retries - 1:
  3867. raise exception.FlavorExtraSpecUpdateCreateFailed(
  3868. id=flavor_id, retries=max_retries)
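# Example (editor's sketch): the update-or-create above retries on
# DBDuplicateEntry because a concurrent writer may insert the same key
# between the SELECT and the INSERT; each write runs inside a savepoint so
# a failed attempt does not poison the enclosing transaction. `ctxt` is a
# hypothetical context:
#
#     flavor_extra_specs_update_or_create(
#         ctxt, 'm1.custom', {'hw:cpu_policy': 'dedicated'})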
  3869. ####################
  3870. @pick_context_manager_writer
  3871. def cell_create(context, values):
  3872. cell = models.Cell()
  3873. cell.update(values)
  3874. try:
  3875. cell.save(context.session)
  3876. except db_exc.DBDuplicateEntry:
  3877. raise exception.CellExists(name=values['name'])
  3878. return cell
  3879. def _cell_get_by_name_query(context, cell_name):
  3880. return model_query(context, models.Cell).filter_by(name=cell_name)
  3881. @pick_context_manager_writer
  3882. def cell_update(context, cell_name, values):
  3883. cell_query = _cell_get_by_name_query(context, cell_name)
  3884. if not cell_query.update(values):
  3885. raise exception.CellNotFound(cell_name=cell_name)
  3886. cell = cell_query.first()
  3887. return cell
  3888. @pick_context_manager_writer
  3889. def cell_delete(context, cell_name):
  3890. return _cell_get_by_name_query(context, cell_name).soft_delete()
  3891. @pick_context_manager_reader
  3892. def cell_get(context, cell_name):
  3893. result = _cell_get_by_name_query(context, cell_name).first()
  3894. if not result:
  3895. raise exception.CellNotFound(cell_name=cell_name)
  3896. return result
  3897. @pick_context_manager_reader
  3898. def cell_get_all(context):
  3899. return model_query(context, models.Cell, read_deleted="no").all()
  3900. ########################
  3901. # User-provided metadata
  3902. def _instance_metadata_get_multi(context, instance_uuids):
  3903. if not instance_uuids:
  3904. return []
  3905. return model_query(context, models.InstanceMetadata).filter(
  3906. models.InstanceMetadata.instance_uuid.in_(instance_uuids))
  3907. def _instance_metadata_get_query(context, instance_uuid):
  3908. return model_query(context, models.InstanceMetadata, read_deleted="no").\
  3909. filter_by(instance_uuid=instance_uuid)
  3910. @require_context
  3911. @pick_context_manager_reader
  3912. def instance_metadata_get(context, instance_uuid):
  3913. rows = _instance_metadata_get_query(context, instance_uuid).all()
  3914. return {row['key']: row['value'] for row in rows}
  3915. @require_context
  3916. @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
  3917. @pick_context_manager_writer
  3918. def instance_metadata_delete(context, instance_uuid, key):
  3919. _instance_metadata_get_query(context, instance_uuid).\
  3920. filter_by(key=key).\
  3921. soft_delete()
  3922. @require_context
  3923. @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
  3924. @pick_context_manager_writer
  3925. def instance_metadata_update(context, instance_uuid, metadata, delete):
  3926. all_keys = metadata.keys()
  3927. if delete:
  3928. _instance_metadata_get_query(context, instance_uuid).\
  3929. filter(~models.InstanceMetadata.key.in_(all_keys)).\
  3930. soft_delete(synchronize_session=False)
  3931. already_existing_keys = []
  3932. meta_refs = _instance_metadata_get_query(context, instance_uuid).\
  3933. filter(models.InstanceMetadata.key.in_(all_keys)).\
  3934. all()
  3935. for meta_ref in meta_refs:
  3936. already_existing_keys.append(meta_ref.key)
  3937. meta_ref.update({"value": metadata[meta_ref.key]})
  3938. new_keys = set(all_keys) - set(already_existing_keys)
  3939. for key in new_keys:
  3940. meta_ref = models.InstanceMetadata()
  3941. meta_ref.update({"key": key, "value": metadata[key],
  3942. "instance_uuid": instance_uuid})
  3943. context.session.add(meta_ref)
  3944. return metadata
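# Example (editor's sketch): with delete=True the call replaces the whole
# metadata dict (keys missing from `metadata` are soft-deleted); with
# delete=False it merges. `ctxt` and `uuid` here are hypothetical:
#
#     instance_metadata_update(ctxt, uuid, {'owner': 'qa'}, delete=True)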
  3945. #######################
  3946. # System-owned metadata
  3947. def _instance_system_metadata_get_multi(context, instance_uuids):
  3948. if not instance_uuids:
  3949. return []
  3950. return model_query(context, models.InstanceSystemMetadata,
  3951. read_deleted='yes').filter(
  3952. models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
  3953. def _instance_system_metadata_get_query(context, instance_uuid):
  3954. return model_query(context, models.InstanceSystemMetadata).\
  3955. filter_by(instance_uuid=instance_uuid)
  3956. @require_context
  3957. @pick_context_manager_reader
  3958. def instance_system_metadata_get(context, instance_uuid):
  3959. rows = _instance_system_metadata_get_query(context, instance_uuid).all()
  3960. return {row['key']: row['value'] for row in rows}
  3961. @require_context
  3962. @pick_context_manager_writer
  3963. def instance_system_metadata_update(context, instance_uuid, metadata, delete):
  3964. all_keys = metadata.keys()
  3965. if delete:
  3966. _instance_system_metadata_get_query(context, instance_uuid).\
  3967. filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
  3968. soft_delete(synchronize_session=False)
  3969. already_existing_keys = []
  3970. meta_refs = _instance_system_metadata_get_query(context, instance_uuid).\
  3971. filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
  3972. all()
  3973. for meta_ref in meta_refs:
  3974. already_existing_keys.append(meta_ref.key)
  3975. meta_ref.update({"value": metadata[meta_ref.key]})
  3976. new_keys = set(all_keys) - set(already_existing_keys)
  3977. for key in new_keys:
  3978. meta_ref = models.InstanceSystemMetadata()
  3979. meta_ref.update({"key": key, "value": metadata[key],
  3980. "instance_uuid": instance_uuid})
  3981. context.session.add(meta_ref)
  3982. return metadata
  3983. ####################
  3984. @pick_context_manager_writer
  3985. def agent_build_create(context, values):
  3986. agent_build_ref = models.AgentBuild()
  3987. agent_build_ref.update(values)
  3988. try:
  3989. agent_build_ref.save(context.session)
  3990. except db_exc.DBDuplicateEntry:
  3991. raise exception.AgentBuildExists(hypervisor=values['hypervisor'],
  3992. os=values['os'], architecture=values['architecture'])
  3993. return agent_build_ref
  3994. @pick_context_manager_reader
  3995. def agent_build_get_by_triple(context, hypervisor, os, architecture):
  3996. return model_query(context, models.AgentBuild, read_deleted="no").\
  3997. filter_by(hypervisor=hypervisor).\
  3998. filter_by(os=os).\
  3999. filter_by(architecture=architecture).\
  4000. first()
  4001. @pick_context_manager_reader
  4002. def agent_build_get_all(context, hypervisor=None):
  4003. if hypervisor:
  4004. return model_query(context, models.AgentBuild, read_deleted="no").\
  4005. filter_by(hypervisor=hypervisor).\
  4006. all()
  4007. else:
  4008. return model_query(context, models.AgentBuild, read_deleted="no").\
  4009. all()
  4010. @pick_context_manager_writer
  4011. def agent_build_destroy(context, agent_build_id):
  4012. rows_affected = model_query(context, models.AgentBuild).filter_by(
  4013. id=agent_build_id).soft_delete()
  4014. if rows_affected == 0:
  4015. raise exception.AgentBuildNotFound(id=agent_build_id)
  4016. @pick_context_manager_writer
  4017. def agent_build_update(context, agent_build_id, values):
  4018. rows_affected = model_query(context, models.AgentBuild).\
  4019. filter_by(id=agent_build_id).\
  4020. update(values)
  4021. if rows_affected == 0:
  4022. raise exception.AgentBuildNotFound(id=agent_build_id)
  4023. ####################
  4024. @require_context
  4025. @pick_context_manager_reader_allow_async
  4026. def bw_usage_get(context, uuid, start_period, mac):
  4027. values = {'start_period': start_period}
  4028. values = convert_objects_related_datetimes(values, 'start_period')
  4029. return model_query(context, models.BandwidthUsage, read_deleted="yes").\
  4030. filter_by(start_period=values['start_period']).\
  4031. filter_by(uuid=uuid).\
  4032. filter_by(mac=mac).\
  4033. first()
  4034. @require_context
  4035. @pick_context_manager_reader_allow_async
  4036. def bw_usage_get_by_uuids(context, uuids, start_period):
  4037. values = {'start_period': start_period}
  4038. values = convert_objects_related_datetimes(values, 'start_period')
  4039. return (
  4040. model_query(context, models.BandwidthUsage, read_deleted="yes").
  4041. filter(models.BandwidthUsage.uuid.in_(uuids)).
  4042. filter_by(start_period=values['start_period']).
  4043. all()
  4044. )
  4045. @require_context
  4046. @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
  4047. @pick_context_manager_writer
  4048. def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
  4049. last_ctr_in, last_ctr_out, last_refreshed=None):
  4050. if last_refreshed is None:
  4051. last_refreshed = timeutils.utcnow()
  4052. # NOTE(comstud): More often than not, we'll be updating records vs
  4053. # creating records. Optimize accordingly, trying to update existing
  4054. # records. Fall back to creation when no rows are updated.
  4055. ts_values = {'last_refreshed': last_refreshed,
  4056. 'start_period': start_period}
  4057. ts_keys = ('start_period', 'last_refreshed')
  4058. ts_values = convert_objects_related_datetimes(ts_values, *ts_keys)
  4059. values = {'last_refreshed': ts_values['last_refreshed'],
  4060. 'last_ctr_in': last_ctr_in,
  4061. 'last_ctr_out': last_ctr_out,
  4062. 'bw_in': bw_in,
  4063. 'bw_out': bw_out}
  4064. # NOTE(pkholkin): order_by() is needed here to ensure that the
  4065. # same record is updated every time. It can be removed after adding
  4066. # unique constraint to this model.
  4067. bw_usage = model_query(context, models.BandwidthUsage,
  4068. read_deleted='yes').\
  4069. filter_by(start_period=ts_values['start_period']).\
  4070. filter_by(uuid=uuid).\
  4071. filter_by(mac=mac).\
  4072. order_by(asc(models.BandwidthUsage.id)).first()
  4073. if bw_usage:
  4074. bw_usage.update(values)
  4075. return bw_usage
  4076. bwusage = models.BandwidthUsage()
  4077. bwusage.start_period = ts_values['start_period']
  4078. bwusage.uuid = uuid
  4079. bwusage.mac = mac
  4080. bwusage.last_refreshed = ts_values['last_refreshed']
  4081. bwusage.bw_in = bw_in
  4082. bwusage.bw_out = bw_out
  4083. bwusage.last_ctr_in = last_ctr_in
  4084. bwusage.last_ctr_out = last_ctr_out
  4085. bwusage.save(context.session)
  4086. return bwusage
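# Example (editor's sketch): bw_usage_update() implements update-else-create
# keyed on (uuid, mac, start_period); counters are cumulative, so callers
# pass the latest totals read from the hypervisor. All values below are
# hypothetical:
#
#     bw_usage_update(ctxt, instance_uuid, 'fa:16:3e:00:00:01',
#                     start_period, bw_in=1024, bw_out=2048,
#                     last_ctr_in=10, last_ctr_out=20)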
  4087. ####################
  4088. @require_context
  4089. @pick_context_manager_reader
  4090. def vol_get_usage_by_time(context, begin):
  4091. """Return volumes usage that have been updated after a specified time."""
  4092. return model_query(context, models.VolumeUsage, read_deleted="yes").\
  4093. filter(or_(models.VolumeUsage.tot_last_refreshed == null(),
  4094. models.VolumeUsage.tot_last_refreshed > begin,
  4095. models.VolumeUsage.curr_last_refreshed == null(),
  4096. models.VolumeUsage.curr_last_refreshed > begin,
  4097. )).all()
  4098. @require_context
  4099. @pick_context_manager_writer
  4100. def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
  4101. instance_id, project_id, user_id, availability_zone,
  4102. update_totals=False):
  4103. refreshed = timeutils.utcnow()
  4104. values = {}
  4105. # NOTE(dricco): We will be mostly updating current usage records vs
  4106. # updating total or creating records. Optimize accordingly.
  4107. if not update_totals:
  4108. values = {'curr_last_refreshed': refreshed,
  4109. 'curr_reads': rd_req,
  4110. 'curr_read_bytes': rd_bytes,
  4111. 'curr_writes': wr_req,
  4112. 'curr_write_bytes': wr_bytes,
  4113. 'instance_uuid': instance_id,
  4114. 'project_id': project_id,
  4115. 'user_id': user_id,
  4116. 'availability_zone': availability_zone}
  4117. else:
  4118. values = {'tot_last_refreshed': refreshed,
  4119. 'tot_reads': models.VolumeUsage.tot_reads + rd_req,
  4120. 'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
  4121. rd_bytes,
  4122. 'tot_writes': models.VolumeUsage.tot_writes + wr_req,
  4123. 'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
  4124. wr_bytes,
  4125. 'curr_reads': 0,
  4126. 'curr_read_bytes': 0,
  4127. 'curr_writes': 0,
  4128. 'curr_write_bytes': 0,
  4129. 'instance_uuid': instance_id,
  4130. 'project_id': project_id,
  4131. 'user_id': user_id,
  4132. 'availability_zone': availability_zone}
  4133. current_usage = model_query(context, models.VolumeUsage,
  4134. read_deleted="yes").\
  4135. filter_by(volume_id=id).\
  4136. first()
  4137. if current_usage:
  4138. if (rd_req < current_usage['curr_reads'] or
  4139. rd_bytes < current_usage['curr_read_bytes'] or
  4140. wr_req < current_usage['curr_writes'] or
  4141. wr_bytes < current_usage['curr_write_bytes']):
4142. LOG.info("Volume(%s) has lower stats than what is in "
  4143. "the database. Instance must have been rebooted "
  4144. "or crashed. Updating totals.", id)
  4145. if not update_totals:
  4146. values['tot_reads'] = (models.VolumeUsage.tot_reads +
  4147. current_usage['curr_reads'])
  4148. values['tot_read_bytes'] = (
  4149. models.VolumeUsage.tot_read_bytes +
  4150. current_usage['curr_read_bytes'])
  4151. values['tot_writes'] = (models.VolumeUsage.tot_writes +
  4152. current_usage['curr_writes'])
  4153. values['tot_write_bytes'] = (
  4154. models.VolumeUsage.tot_write_bytes +
  4155. current_usage['curr_write_bytes'])
  4156. else:
  4157. values['tot_reads'] = (models.VolumeUsage.tot_reads +
  4158. current_usage['curr_reads'] +
  4159. rd_req)
  4160. values['tot_read_bytes'] = (
  4161. models.VolumeUsage.tot_read_bytes +
  4162. current_usage['curr_read_bytes'] + rd_bytes)
  4163. values['tot_writes'] = (models.VolumeUsage.tot_writes +
  4164. current_usage['curr_writes'] +
  4165. wr_req)
  4166. values['tot_write_bytes'] = (
  4167. models.VolumeUsage.tot_write_bytes +
  4168. current_usage['curr_write_bytes'] + wr_bytes)
  4169. current_usage.update(values)
  4170. current_usage.save(context.session)
  4171. context.session.refresh(current_usage)
  4172. return current_usage
  4173. vol_usage = models.VolumeUsage()
  4174. vol_usage.volume_id = id
  4175. vol_usage.instance_uuid = instance_id
  4176. vol_usage.project_id = project_id
  4177. vol_usage.user_id = user_id
  4178. vol_usage.availability_zone = availability_zone
  4179. if not update_totals:
  4180. vol_usage.curr_last_refreshed = refreshed
  4181. vol_usage.curr_reads = rd_req
  4182. vol_usage.curr_read_bytes = rd_bytes
  4183. vol_usage.curr_writes = wr_req
  4184. vol_usage.curr_write_bytes = wr_bytes
  4185. else:
  4186. vol_usage.tot_last_refreshed = refreshed
  4187. vol_usage.tot_reads = rd_req
  4188. vol_usage.tot_read_bytes = rd_bytes
  4189. vol_usage.tot_writes = wr_req
  4190. vol_usage.tot_write_bytes = wr_bytes
  4191. vol_usage.save(context.session)
  4192. return vol_usage
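# Example (editor's sketch): per-interval counters live in the curr_*
# columns and are rolled into the tot_* columns when update_totals=True (or
# when a counter reset is detected above). A hypothetical periodic-task
# call:
#
#     vol_usage_update(ctxt, volume_id, rd_req=10, rd_bytes=4096,
#                      wr_req=5, wr_bytes=2048, instance_id=instance_uuid,
#                      project_id=ctxt.project_id, user_id=ctxt.user_id,
#                      availability_zone='nova')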
  4193. ####################
  4194. @pick_context_manager_reader
  4195. def s3_image_get(context, image_id):
  4196. """Find local s3 image represented by the provided id."""
  4197. result = model_query(context, models.S3Image, read_deleted="yes").\
  4198. filter_by(id=image_id).\
  4199. first()
  4200. if not result:
  4201. raise exception.ImageNotFound(image_id=image_id)
  4202. return result
  4203. @pick_context_manager_reader
  4204. def s3_image_get_by_uuid(context, image_uuid):
  4205. """Find local s3 image represented by the provided uuid."""
  4206. result = model_query(context, models.S3Image, read_deleted="yes").\
  4207. filter_by(uuid=image_uuid).\
  4208. first()
  4209. if not result:
  4210. raise exception.ImageNotFound(image_id=image_uuid)
  4211. return result
  4212. @pick_context_manager_writer
  4213. def s3_image_create(context, image_uuid):
  4214. """Create local s3 image represented by provided uuid."""
  4215. try:
  4216. s3_image_ref = models.S3Image()
  4217. s3_image_ref.update({'uuid': image_uuid})
  4218. s3_image_ref.save(context.session)
  4219. except Exception as e:
  4220. raise db_exc.DBError(e)
  4221. return s3_image_ref
  4222. ####################
  4223. def _aggregate_get_query(context, model_class, id_field=None, id=None,
  4224. read_deleted=None):
  4225. columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
  4226. query = model_query(context, model_class, read_deleted=read_deleted)
  4227. for c in columns_to_join.get(model_class, []):
  4228. query = query.options(joinedload(c))
  4229. if id and id_field:
  4230. query = query.filter(id_field == id)
  4231. return query
  4232. @pick_context_manager_writer
  4233. def aggregate_create(context, values, metadata=None):
  4234. query = _aggregate_get_query(context,
  4235. models.Aggregate,
  4236. models.Aggregate.name,
  4237. values['name'],
  4238. read_deleted='no')
  4239. aggregate = query.first()
  4240. if not aggregate:
  4241. aggregate = models.Aggregate()
  4242. aggregate.update(values)
  4243. aggregate.save(context.session)
  4244. # We don't want these to be lazy loaded later. We know there is
  4245. # nothing here since we just created this aggregate.
  4246. aggregate._hosts = []
  4247. aggregate._metadata = []
  4248. else:
  4249. raise exception.AggregateNameExists(aggregate_name=values['name'])
  4250. if metadata:
  4251. aggregate_metadata_add(context, aggregate.id, metadata)
  4252. # NOTE(pkholkin): '_metadata' attribute was updated during
  4253. # 'aggregate_metadata_add' method, so it should be expired and
  4254. # read from db
  4255. context.session.expire(aggregate, ['_metadata'])
  4256. aggregate._metadata
  4257. return aggregate
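# Example (editor's sketch): creating an aggregate and seeding metadata in
# one call; a duplicate name raises AggregateNameExists. `ctxt` is a
# hypothetical admin context:
#
#     agg = aggregate_create(ctxt, {'name': 'rack-1'},
#                            metadata={'availability_zone': 'az1'})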
  4258. @pick_context_manager_reader
  4259. def aggregate_get(context, aggregate_id):
  4260. query = _aggregate_get_query(context,
  4261. models.Aggregate,
  4262. models.Aggregate.id,
  4263. aggregate_id)
  4264. aggregate = query.first()
  4265. if not aggregate:
  4266. raise exception.AggregateNotFound(aggregate_id=aggregate_id)
  4267. return aggregate
  4268. @pick_context_manager_reader
  4269. def aggregate_get_by_uuid(context, uuid):
  4270. query = _aggregate_get_query(context,
  4271. models.Aggregate,
  4272. models.Aggregate.uuid,
  4273. uuid)
  4274. aggregate = query.first()
  4275. if not aggregate:
  4276. raise exception.AggregateNotFound(aggregate_id=uuid)
  4277. return aggregate
  4278. @pick_context_manager_reader
  4279. def aggregate_get_by_host(context, host, key=None):
  4280. """Return rows that match host (mandatory) and metadata key (optional).
4281. :param host: Matches host, and is required.
4282. :param key: Matches metadata key, if not None.
  4283. """
  4284. query = model_query(context, models.Aggregate)
  4285. query = query.options(joinedload('_hosts'))
  4286. query = query.options(joinedload('_metadata'))
  4287. query = query.join('_hosts')
  4288. query = query.filter(models.AggregateHost.host == host)
  4289. if key:
  4290. query = query.join("_metadata").filter(
  4291. models.AggregateMetadata.key == key)
  4292. return query.all()
  4293. @pick_context_manager_reader
  4294. def aggregate_metadata_get_by_host(context, host, key=None):
  4295. query = model_query(context, models.Aggregate)
  4296. query = query.join("_hosts")
  4297. query = query.join("_metadata")
  4298. query = query.filter(models.AggregateHost.host == host)
  4299. query = query.options(contains_eager("_metadata"))
  4300. if key:
  4301. query = query.filter(models.AggregateMetadata.key == key)
  4302. rows = query.all()
  4303. metadata = collections.defaultdict(set)
  4304. for agg in rows:
  4305. for kv in agg._metadata:
  4306. metadata[kv['key']].add(kv['value'])
  4307. return dict(metadata)
  4308. @pick_context_manager_reader
  4309. def aggregate_get_by_metadata_key(context, key):
  4310. """Return rows that match metadata key.
4311. :param key: Matches metadata key.
  4312. """
  4313. query = model_query(context, models.Aggregate)
  4314. query = query.join("_metadata")
  4315. query = query.filter(models.AggregateMetadata.key == key)
  4316. query = query.options(contains_eager("_metadata"))
  4317. query = query.options(joinedload("_hosts"))
  4318. return query.all()
  4319. @pick_context_manager_writer
  4320. def aggregate_update(context, aggregate_id, values):
  4321. if "name" in values:
  4322. aggregate_by_name = (_aggregate_get_query(context,
  4323. models.Aggregate,
  4324. models.Aggregate.name,
  4325. values['name'],
  4326. read_deleted='no').first())
  4327. if aggregate_by_name and aggregate_by_name.id != aggregate_id:
  4328. # there is another aggregate with the new name
  4329. raise exception.AggregateNameExists(aggregate_name=values['name'])
  4330. aggregate = (_aggregate_get_query(context,
  4331. models.Aggregate,
  4332. models.Aggregate.id,
  4333. aggregate_id).first())
  4334. set_delete = True
  4335. if aggregate:
  4336. if "availability_zone" in values:
  4337. az = values.pop('availability_zone')
  4338. if 'metadata' not in values:
  4339. values['metadata'] = {'availability_zone': az}
  4340. set_delete = False
  4341. else:
  4342. values['metadata']['availability_zone'] = az
  4343. metadata = values.get('metadata')
  4344. if metadata is not None:
  4345. aggregate_metadata_add(context,
  4346. aggregate_id,
  4347. values.pop('metadata'),
  4348. set_delete=set_delete)
  4349. aggregate.update(values)
  4350. aggregate.save(context.session)
  4351. return aggregate_get(context, aggregate.id)
  4352. else:
  4353. raise exception.AggregateNotFound(aggregate_id=aggregate_id)
  4354. @pick_context_manager_writer
  4355. def aggregate_delete(context, aggregate_id):
  4356. count = _aggregate_get_query(context,
  4357. models.Aggregate,
  4358. models.Aggregate.id,
  4359. aggregate_id).\
  4360. soft_delete()
  4361. if count == 0:
  4362. raise exception.AggregateNotFound(aggregate_id=aggregate_id)
  4363. # Delete Metadata
  4364. model_query(context, models.AggregateMetadata).\
  4365. filter_by(aggregate_id=aggregate_id).\
  4366. soft_delete()
  4367. @pick_context_manager_reader
  4368. def aggregate_get_all(context):
  4369. return _aggregate_get_query(context, models.Aggregate).all()
  4370. def _aggregate_metadata_get_query(context, aggregate_id, read_deleted="yes"):
  4371. return model_query(context,
  4372. models.AggregateMetadata,
  4373. read_deleted=read_deleted).\
  4374. filter_by(aggregate_id=aggregate_id)
  4375. @require_aggregate_exists
  4376. @pick_context_manager_reader
  4377. def aggregate_metadata_get(context, aggregate_id):
  4378. rows = model_query(context,
  4379. models.AggregateMetadata).\
  4380. filter_by(aggregate_id=aggregate_id).all()
  4381. return {r['key']: r['value'] for r in rows}
  4382. @require_aggregate_exists
  4383. @pick_context_manager_writer
  4384. def aggregate_metadata_delete(context, aggregate_id, key):
  4385. count = _aggregate_get_query(context,
  4386. models.AggregateMetadata,
  4387. models.AggregateMetadata.aggregate_id,
  4388. aggregate_id).\
  4389. filter_by(key=key).\
  4390. soft_delete()
  4391. if count == 0:
  4392. raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
  4393. metadata_key=key)
  4394. @require_aggregate_exists
  4395. @pick_context_manager_writer
  4396. def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
  4397. max_retries=10):
  4398. all_keys = metadata.keys()
  4399. for attempt in range(max_retries):
  4400. try:
  4401. query = _aggregate_metadata_get_query(context, aggregate_id,
  4402. read_deleted='no')
  4403. if set_delete:
  4404. query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
  4405. soft_delete(synchronize_session=False)
  4406. already_existing_keys = set()
  4407. if all_keys:
  4408. query = query.filter(
  4409. models.AggregateMetadata.key.in_(all_keys))
  4410. for meta_ref in query.all():
  4411. key = meta_ref.key
  4412. meta_ref.update({"value": metadata[key]})
  4413. already_existing_keys.add(key)
  4414. new_entries = []
  4415. for key, value in metadata.items():
  4416. if key in already_existing_keys:
  4417. continue
  4418. new_entries.append({"key": key,
  4419. "value": value,
  4420. "aggregate_id": aggregate_id})
  4421. if new_entries:
  4422. context.session.execute(
  4423. models.AggregateMetadata.__table__.insert(),
  4424. new_entries)
  4425. return metadata
  4426. except db_exc.DBDuplicateEntry:
  4427. # a concurrent transaction has been committed,
  4428. # try again unless this was the last attempt
  4429. with excutils.save_and_reraise_exception() as ctxt:
  4430. if attempt < max_retries - 1:
  4431. ctxt.reraise = False
  4432. else:
  4433. LOG.warning("Add metadata failed for aggregate %(id)s "
  4434. "after %(retries)s retries",
  4435. {"id": aggregate_id, "retries": max_retries})
  4436. @require_aggregate_exists
  4437. @pick_context_manager_reader
  4438. def aggregate_host_get_all(context, aggregate_id):
  4439. rows = model_query(context,
  4440. models.AggregateHost).\
  4441. filter_by(aggregate_id=aggregate_id).all()
  4442. return [r.host for r in rows]
  4443. @require_aggregate_exists
  4444. @pick_context_manager_writer
  4445. def aggregate_host_delete(context, aggregate_id, host):
  4446. count = _aggregate_get_query(context,
  4447. models.AggregateHost,
  4448. models.AggregateHost.aggregate_id,
  4449. aggregate_id).\
  4450. filter_by(host=host).\
  4451. soft_delete()
  4452. if count == 0:
  4453. raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
  4454. host=host)
  4455. @require_aggregate_exists
  4456. @pick_context_manager_writer
  4457. def aggregate_host_add(context, aggregate_id, host):
  4458. host_ref = models.AggregateHost()
  4459. host_ref.update({"host": host, "aggregate_id": aggregate_id})
  4460. try:
  4461. host_ref.save(context.session)
  4462. except db_exc.DBDuplicateEntry:
  4463. raise exception.AggregateHostExists(host=host,
  4464. aggregate_id=aggregate_id)
  4465. return host_ref
  4466. ################
  4467. @pick_context_manager_writer
  4468. def instance_fault_create(context, values):
  4469. """Create a new InstanceFault."""
  4470. fault_ref = models.InstanceFault()
  4471. fault_ref.update(values)
  4472. fault_ref.save(context.session)
  4473. return dict(fault_ref)
  4474. @pick_context_manager_reader
  4475. def instance_fault_get_by_instance_uuids(context, instance_uuids,
  4476. latest=False):
  4477. """Get all instance faults for the provided instance_uuids.
  4478. :param instance_uuids: List of UUIDs of instances to grab faults for
  4479. :param latest: Optional boolean indicating we should only return the latest
  4480. fault for the instance
  4481. """
  4482. if not instance_uuids:
  4483. return {}
  4484. faults_tbl = models.InstanceFault.__table__
  4485. # NOTE(rpodolyaka): filtering by instance_uuids is performed in both
  4486. # code branches below for the sake of a better query plan. On change,
  4487. # make sure to update the other one as well.
  4488. query = model_query(context, models.InstanceFault,
  4489. [faults_tbl],
  4490. read_deleted='no')
  4491. if latest:
  4492. # NOTE(jaypipes): We join instance_faults to a derived table of the
  4493. # latest faults per instance UUID. The SQL produced below looks like
  4494. # this:
  4495. #
  4496. # SELECT instance_faults.*
  4497. # FROM instance_faults
  4498. # JOIN (
  4499. # SELECT instance_uuid, MAX(id) AS max_id
  4500. # FROM instance_faults
  4501. # WHERE instance_uuid IN ( ... )
  4502. # AND deleted = 0
  4503. # GROUP BY instance_uuid
  4504. # ) AS latest_faults
  4505. # ON instance_faults.id = latest_faults.max_id;
  4506. latest_faults = model_query(
  4507. context, models.InstanceFault,
  4508. [faults_tbl.c.instance_uuid,
  4509. sql.func.max(faults_tbl.c.id).label('max_id')],
  4510. read_deleted='no'
  4511. ).filter(
  4512. faults_tbl.c.instance_uuid.in_(instance_uuids)
  4513. ).group_by(
  4514. faults_tbl.c.instance_uuid
  4515. ).subquery(name="latest_faults")
  4516. query = query.join(latest_faults,
  4517. faults_tbl.c.id == latest_faults.c.max_id)
  4518. else:
  4519. query = query.filter(models.InstanceFault.instance_uuid.in_(
  4520. instance_uuids)).order_by(desc("id"))
  4521. output = {}
  4522. for instance_uuid in instance_uuids:
  4523. output[instance_uuid] = []
  4524. for row in query:
  4525. output[row.instance_uuid].append(row._asdict())
  4526. return output
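# Example (editor's sketch): the result maps every requested UUID to a list
# (possibly empty), newest fault first; with latest=True each list has at
# most one entry. The UUIDs below are hypothetical:
#
#     faults = instance_fault_get_by_instance_uuids(ctxt, [uuid1, uuid2],
#                                                   latest=True)
#     message = faults[uuid1][0]['message'] if faults[uuid1] else None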
  4527. ##################
  4528. @pick_context_manager_writer
  4529. def action_start(context, values):
  4530. convert_objects_related_datetimes(values, 'start_time', 'updated_at')
  4531. action_ref = models.InstanceAction()
  4532. action_ref.update(values)
  4533. action_ref.save(context.session)
  4534. return action_ref
  4535. @pick_context_manager_writer
  4536. def action_finish(context, values):
  4537. convert_objects_related_datetimes(values, 'start_time', 'finish_time',
  4538. 'updated_at')
  4539. query = model_query(context, models.InstanceAction).\
  4540. filter_by(instance_uuid=values['instance_uuid']).\
  4541. filter_by(request_id=values['request_id'])
  4542. if query.update(values) != 1:
  4543. raise exception.InstanceActionNotFound(
  4544. request_id=values['request_id'],
  4545. instance_uuid=values['instance_uuid'])
  4546. return query.one()
  4547. @pick_context_manager_reader
  4548. def actions_get(context, instance_uuid, limit=None, marker=None,
  4549. filters=None):
  4550. """Get all instance actions for the provided uuid and filters."""
  4551. if limit == 0:
  4552. return []
  4553. sort_keys = ['created_at', 'id']
  4554. sort_dirs = ['desc', 'desc']
  4555. query_prefix = model_query(context, models.InstanceAction).\
  4556. filter_by(instance_uuid=instance_uuid)
  4557. if filters and 'changes-since' in filters:
  4558. changes_since = timeutils.normalize_time(filters['changes-since'])
  4559. query_prefix = query_prefix. \
  4560. filter(models.InstanceAction.updated_at >= changes_since)
  4561. if marker is not None:
  4562. marker = action_get_by_request_id(context, instance_uuid, marker)
  4563. if not marker:
  4564. raise exception.MarkerNotFound(marker=marker)
  4565. actions = sqlalchemyutils.paginate_query(query_prefix,
  4566. models.InstanceAction, limit,
  4567. sort_keys, marker=marker,
  4568. sort_dirs=sort_dirs).all()
  4569. return actions
  4570. @pick_context_manager_reader
  4571. def action_get_by_request_id(context, instance_uuid, request_id):
  4572. """Get the action by request_id and given instance."""
  4573. action = _action_get_by_request_id(context, instance_uuid, request_id)
  4574. return action
  4575. def _action_get_by_request_id(context, instance_uuid, request_id):
  4576. result = model_query(context, models.InstanceAction).\
  4577. filter_by(instance_uuid=instance_uuid).\
  4578. filter_by(request_id=request_id).\
  4579. order_by(desc("created_at"), desc("id")).\
  4580. first()
  4581. return result
  4582. def _action_get_last_created_by_instance_uuid(context, instance_uuid):
  4583. result = (model_query(context, models.InstanceAction).
  4584. filter_by(instance_uuid=instance_uuid).
  4585. order_by(desc("created_at"), desc("id")).
  4586. first())
  4587. return result
  4588. @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
  4589. @pick_context_manager_writer
  4590. def action_event_start(context, values):
  4591. """Start an event on an instance action."""
  4592. convert_objects_related_datetimes(values, 'start_time')
  4593. action = _action_get_by_request_id(context, values['instance_uuid'],
  4594. values['request_id'])
4595. # When nova-compute restarts, the context is regenerated in the
4596. # init_host workflow, so its request_id differs from the request_id
4597. # recorded in InstanceAction and we can't look up the original record
4598. # by request_id. Try to get the last created action so that
4599. # init_instance can continue to finish the recovery action, like:
4600. # powering_off, unpausing, and so on.
  4601. update_action = True
  4602. if not action and not context.project_id:
  4603. action = _action_get_last_created_by_instance_uuid(
  4604. context, values['instance_uuid'])
  4605. # If we couldn't find an action by the request_id, we don't want to
  4606. # update this action since it likely represents an inactive action.
  4607. update_action = False
  4608. if not action:
  4609. raise exception.InstanceActionNotFound(
  4610. request_id=values['request_id'],
  4611. instance_uuid=values['instance_uuid'])
  4612. values['action_id'] = action['id']
  4613. event_ref = models.InstanceActionEvent()
  4614. event_ref.update(values)
  4615. context.session.add(event_ref)
  4616. # Update action updated_at.
  4617. if update_action:
  4618. action.update({'updated_at': values['start_time']})
  4619. action.save(context.session)
  4620. return event_ref
  4621. # NOTE: We need the retry_on_deadlock decorator for cases like resize where
  4622. # a lot of events are happening at once between multiple hosts trying to
  4623. # update the same action record in a small time window.
  4624. @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
  4625. @pick_context_manager_writer
  4626. def action_event_finish(context, values):
  4627. """Finish an event on an instance action."""
  4628. convert_objects_related_datetimes(values, 'start_time', 'finish_time')
  4629. action = _action_get_by_request_id(context, values['instance_uuid'],
  4630. values['request_id'])
4631. # When nova-compute restarts, the context is regenerated in the
4632. # init_host workflow, so its request_id differs from the request_id
4633. # recorded in InstanceAction and we can't look up the original record
4634. # by request_id. Try to get the last created action so that
4635. # init_instance can continue to finish the recovery action, like:
4636. # powering_off, unpausing, and so on.
  4637. update_action = True
  4638. if not action and not context.project_id:
  4639. action = _action_get_last_created_by_instance_uuid(
  4640. context, values['instance_uuid'])
  4641. # If we couldn't find an action by the request_id, we don't want to
  4642. # update this action since it likely represents an inactive action.
  4643. update_action = False
  4644. if not action:
  4645. raise exception.InstanceActionNotFound(
  4646. request_id=values['request_id'],
  4647. instance_uuid=values['instance_uuid'])
  4648. event_ref = model_query(context, models.InstanceActionEvent).\
  4649. filter_by(action_id=action['id']).\
  4650. filter_by(event=values['event']).\
  4651. first()
  4652. if not event_ref:
  4653. raise exception.InstanceActionEventNotFound(action_id=action['id'],
  4654. event=values['event'])
  4655. event_ref.update(values)
  4656. if values['result'].lower() == 'error':
  4657. action.update({'message': 'Error'})
  4658. # Update action updated_at.
  4659. if update_action:
  4660. action.update({'updated_at': values['finish_time']})
  4661. action.save(context.session)
  4662. return event_ref
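# Example (editor's sketch): the usual lifecycle pairs action_start()/
# action_finish() around per-host action_event_start()/action_event_finish()
# calls; an event finishing with result='Error' also marks the parent
# action's message as 'Error'. The values shown are hypothetical:
#
#     action_event_start(ctxt, {'instance_uuid': uuid,
#                               'request_id': ctxt.request_id,
#                               'event': 'compute_reboot_instance',
#                               'start_time': timeutils.utcnow()})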
  4663. @pick_context_manager_reader
  4664. def action_events_get(context, action_id):
  4665. events = model_query(context, models.InstanceActionEvent).\
  4666. filter_by(action_id=action_id).\
  4667. order_by(desc("created_at"), desc("id")).\
  4668. all()
  4669. return events
  4670. @pick_context_manager_reader
  4671. def action_event_get_by_id(context, action_id, event_id):
  4672. event = model_query(context, models.InstanceActionEvent).\
  4673. filter_by(action_id=action_id).\
  4674. filter_by(id=event_id).\
  4675. first()
  4676. return event
  4677. ##################
  4678. @require_context
  4679. @pick_context_manager_writer
  4680. def ec2_instance_create(context, instance_uuid, id=None):
  4681. """Create ec2 compatible instance by provided uuid."""
  4682. ec2_instance_ref = models.InstanceIdMapping()
  4683. ec2_instance_ref.update({'uuid': instance_uuid})
  4684. if id is not None:
  4685. ec2_instance_ref.update({'id': id})
  4686. ec2_instance_ref.save(context.session)
  4687. return ec2_instance_ref
  4688. @require_context
  4689. @pick_context_manager_reader
  4690. def ec2_instance_get_by_uuid(context, instance_uuid):
  4691. result = _ec2_instance_get_query(context).\
  4692. filter_by(uuid=instance_uuid).\
  4693. first()
  4694. if not result:
  4695. raise exception.InstanceNotFound(instance_id=instance_uuid)
  4696. return result
  4697. @require_context
  4698. @pick_context_manager_reader
  4699. def ec2_instance_get_by_id(context, instance_id):
  4700. result = _ec2_instance_get_query(context).\
  4701. filter_by(id=instance_id).\
  4702. first()
  4703. if not result:
  4704. raise exception.InstanceNotFound(instance_id=instance_id)
  4705. return result
  4706. @require_context
  4707. @pick_context_manager_reader
  4708. def get_instance_uuid_by_ec2_id(context, ec2_id):
  4709. result = ec2_instance_get_by_id(context, ec2_id)
  4710. return result['uuid']
  4711. def _ec2_instance_get_query(context):
  4712. return model_query(context, models.InstanceIdMapping, read_deleted='yes')
  4713. ##################
  4714. def _task_log_get_query(context, task_name, period_beginning,
  4715. period_ending, host=None, state=None):
  4716. values = {'period_beginning': period_beginning,
  4717. 'period_ending': period_ending}
  4718. values = convert_objects_related_datetimes(values, *values.keys())
  4719. query = model_query(context, models.TaskLog).\
  4720. filter_by(task_name=task_name).\
  4721. filter_by(period_beginning=values['period_beginning']).\
  4722. filter_by(period_ending=values['period_ending'])
  4723. if host is not None:
  4724. query = query.filter_by(host=host)
  4725. if state is not None:
  4726. query = query.filter_by(state=state)
  4727. return query
  4728. @pick_context_manager_reader
  4729. def task_log_get(context, task_name, period_beginning, period_ending, host,
  4730. state=None):
  4731. return _task_log_get_query(context, task_name, period_beginning,
  4732. period_ending, host, state).first()
  4733. @pick_context_manager_reader
  4734. def task_log_get_all(context, task_name, period_beginning, period_ending,
  4735. host=None, state=None):
  4736. return _task_log_get_query(context, task_name, period_beginning,
  4737. period_ending, host, state).all()
  4738. @pick_context_manager_writer
  4739. def task_log_begin_task(context, task_name, period_beginning, period_ending,
  4740. host, task_items=None, message=None):
  4741. values = {'period_beginning': period_beginning,
  4742. 'period_ending': period_ending}
  4743. values = convert_objects_related_datetimes(values, *values.keys())
  4744. task = models.TaskLog()
  4745. task.task_name = task_name
  4746. task.period_beginning = values['period_beginning']
  4747. task.period_ending = values['period_ending']
  4748. task.host = host
  4749. task.state = "RUNNING"
  4750. if message:
  4751. task.message = message
  4752. if task_items:
  4753. task.task_items = task_items
  4754. try:
  4755. task.save(context.session)
  4756. except db_exc.DBDuplicateEntry:
  4757. raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
  4758. @pick_context_manager_writer
  4759. def task_log_end_task(context, task_name, period_beginning, period_ending,
  4760. host, errors, message=None):
  4761. values = dict(state="DONE", errors=errors)
  4762. if message:
  4763. values["message"] = message
  4764. rows = _task_log_get_query(context, task_name, period_beginning,
  4765. period_ending, host).update(values)
  4766. if rows == 0:
  4767. # It's not running!
  4768. raise exception.TaskNotRunning(task_name=task_name, host=host)
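# Example (editor's sketch): a periodic task brackets its work with
# task_log_begin_task()/task_log_end_task(); starting a task that is already
# RUNNING raises TaskAlreadyRunning, and ending one that is not raises
# TaskNotRunning. The period bounds below are hypothetical datetimes:
#
#     task_log_begin_task(ctxt, 'instance_usage_audit', begin, end,
#                         CONF.host, message='audit started')
#     ...
#     task_log_end_task(ctxt, 'instance_usage_audit', begin, end,
#                       CONF.host, errors=0, message='audit done')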
  4769. ##################
  4770. def _archive_if_instance_deleted(table, shadow_table, instances, conn,
  4771. max_rows):
  4772. """Look for records that pertain to deleted instances, but may not be
  4773. deleted themselves. This catches cases where we delete an instance,
  4774. but leave some residue because of a failure in a cleanup path or
  4775. similar.
  4776. Logic is: if I have a column called instance_uuid, and that instance
  4777. is deleted, then I can be deleted.
  4778. """
  4779. query_insert = shadow_table.insert(inline=True).\
  4780. from_select(
  4781. [c.name for c in table.c],
  4782. sql.select(
  4783. [table],
  4784. and_(instances.c.deleted != instances.c.deleted.default.arg,
  4785. instances.c.uuid == table.c.instance_uuid)).
  4786. order_by(table.c.id).limit(max_rows))
  4787. query_delete = sql.select(
  4788. [table.c.id],
  4789. and_(instances.c.deleted != instances.c.deleted.default.arg,
  4790. instances.c.uuid == table.c.instance_uuid)).\
  4791. order_by(table.c.id).limit(max_rows)
  4792. delete_statement = DeleteFromSelect(table, query_delete,
  4793. table.c.id)
  4794. try:
  4795. with conn.begin():
  4796. conn.execute(query_insert)
  4797. result_delete = conn.execute(delete_statement)
  4798. return result_delete.rowcount
  4799. except db_exc.DBReferenceError as ex:
  4800. LOG.warning('Failed to archive %(table)s: %(error)s',
  4801. {'table': table.name,
  4802. 'error': six.text_type(ex)})
  4803. return 0
  4804. def _archive_deleted_rows_for_table(tablename, max_rows):
  4805. """Move up to max_rows rows from one tables to the corresponding
  4806. shadow table.
  4807. :returns: number of rows archived
  4808. """
  4809. engine = get_engine()
  4810. conn = engine.connect()
  4811. metadata = MetaData()
  4812. metadata.bind = engine
  4813. # NOTE(tdurakov): table metadata should be received
  4814. # from models, not db tables. Default value specified by SoftDeleteMixin
  4815. # is known only by models, not DB layer.
  4816. # IMPORTANT: please do not change source of metadata information for table.
  4817. table = models.BASE.metadata.tables[tablename]
  4818. shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
  4819. rows_archived = 0
  4820. deleted_instance_uuids = []
  4821. try:
  4822. shadow_table = Table(shadow_tablename, metadata, autoload=True)
  4823. except NoSuchTableError:
  4824. # No corresponding shadow table; skip it.
  4825. return rows_archived, deleted_instance_uuids
  4826. if tablename == "dns_domains":
  4827. # We have one table (dns_domains) where the key is called
  4828. # "domain" rather than "id"
  4829. column = table.c.domain
  4830. else:
  4831. column = table.c.id
4832. # NOTE(guochbo): Use DeleteFromSelect to avoid the
4833. # database's limit on the number of parameters in one SQL statement.
  4834. deleted_column = table.c.deleted
  4835. columns = [c.name for c in table.c]
4836. # NOTE(clecomte): Tables instance_actions and instance_actions_events
4837. # have to be managed differently, so we soft-delete them here to let
4838. # the archive work the same way for all tables
  4839. # NOTE(takashin): The record in table migrations should be
  4840. # soft deleted when the instance is deleted.
  4841. # This is just for upgrading.
  4842. if tablename in ("instance_actions", "migrations"):
  4843. instances = models.BASE.metadata.tables["instances"]
  4844. deleted_instances = sql.select([instances.c.uuid]).\
  4845. where(instances.c.deleted != instances.c.deleted.default.arg)
  4846. update_statement = table.update().values(deleted=table.c.id).\
  4847. where(table.c.instance_uuid.in_(deleted_instances))
  4848. conn.execute(update_statement)
  4849. elif tablename == "instance_actions_events":
4850. # NOTE(clecomte): we have to follow the relations from
4851. # instances because instance_actions_events relies on
4852. # action_id and not uuid
  4853. instances = models.BASE.metadata.tables["instances"]
  4854. instance_actions = models.BASE.metadata.tables["instance_actions"]
  4855. deleted_instances = sql.select([instances.c.uuid]).\
  4856. where(instances.c.deleted != instances.c.deleted.default.arg)
  4857. deleted_actions = sql.select([instance_actions.c.id]).\
  4858. where(instance_actions.c.instance_uuid.in_(deleted_instances))
  4859. update_statement = table.update().values(deleted=table.c.id).\
  4860. where(table.c.action_id.in_(deleted_actions))
  4861. conn.execute(update_statement)
  4862. select = sql.select([column],
  4863. deleted_column != deleted_column.default.arg).\
  4864. order_by(column).limit(max_rows)
  4865. rows = conn.execute(select).fetchall()
  4866. records = [r[0] for r in rows]
  4867. if records:
  4868. insert = shadow_table.insert(inline=True).\
  4869. from_select(columns, sql.select([table], column.in_(records)))
  4870. delete = table.delete().where(column.in_(records))
  4871. # NOTE(tssurya): In order to facilitate the deletion of records from
  4872. # instance_mappings and request_specs tables in the nova_api DB, the
  4873. # rows of deleted instances from the instances table are stored prior
  4874. # to their deletion. Basically the uuids of the archived instances
  4875. # are queried and returned.
  4876. if tablename == "instances":
  4877. query_select = sql.select([table.c.uuid], table.c.id.in_(records))
  4878. rows = conn.execute(query_select).fetchall()
  4879. deleted_instance_uuids = [r[0] for r in rows]
  4880. try:
  4881. # Group the insert and delete in a transaction.
  4882. with conn.begin():
  4883. conn.execute(insert)
  4884. result_delete = conn.execute(delete)
  4885. rows_archived = result_delete.rowcount
  4886. except db_exc.DBReferenceError as ex:
  4887. # A foreign key constraint keeps us from deleting some of
  4888. # these rows until we clean up a dependent table. Just
  4889. # skip this table for now; we'll come back to it later.
  4890. LOG.warning("IntegrityError detected when archiving table "
  4891. "%(tablename)s: %(error)s",
  4892. {'tablename': tablename, 'error': six.text_type(ex)})
  4893. if ((max_rows is None or rows_archived < max_rows)
  4894. and 'instance_uuid' in columns):
  4895. instances = models.BASE.metadata.tables['instances']
  4896. limit = max_rows - rows_archived if max_rows is not None else None
  4897. extra = _archive_if_instance_deleted(table, shadow_table, instances,
  4898. conn, limit)
  4899. rows_archived += extra
  4900. return rows_archived, deleted_instance_uuids
  4901. def archive_deleted_rows(max_rows=None):
  4902. """Move up to max_rows rows from production tables to the corresponding
  4903. shadow tables.
  4904. :returns: dict that maps table name to number of rows archived from that
  4905. table, for example:
  4906. ::
  4907. {
  4908. 'instances': 5,
  4909. 'block_device_mapping': 5,
  4910. 'pci_devices': 2,
  4911. }
  4912. """
  4913. table_to_rows_archived = {}
  4914. deleted_instance_uuids = []
  4915. total_rows_archived = 0
  4916. meta = MetaData(get_engine(use_slave=True))
  4917. meta.reflect()
  4918. # Reverse sort the tables so we get the leaf nodes first for processing.
  4919. for table in reversed(meta.sorted_tables):
  4920. tablename = table.name
  4921. rows_archived = 0
  4922. # skip the special sqlalchemy-migrate migrate_version table and any
  4923. # shadow tables
  4924. if (tablename == 'migrate_version' or
  4925. tablename.startswith(_SHADOW_TABLE_PREFIX)):
  4926. continue
  4927. rows_archived,\
  4928. deleted_instance_uuid = _archive_deleted_rows_for_table(
  4929. tablename, max_rows=max_rows - total_rows_archived)
  4930. total_rows_archived += rows_archived
  4931. if tablename == 'instances':
  4932. deleted_instance_uuids = deleted_instance_uuid
  4933. # Only report results for tables that had updates.
  4934. if rows_archived:
  4935. table_to_rows_archived[tablename] = rows_archived
  4936. if total_rows_archived >= max_rows:
  4937. break
  4938. return table_to_rows_archived, deleted_instance_uuids
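# Example (editor's sketch): `nova-manage db archive_deleted_rows` is the
# usual caller; a loop like the hypothetical one below drains all
# soft-deleted rows in batches:
#
#     while True:
#         archived, _uuids = archive_deleted_rows(max_rows=1000)
#         if not archived:
#             break  # nothing left to archive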
  4939. @pick_context_manager_writer
  4940. def service_uuids_online_data_migration(context, max_count):
  4941. from nova.objects import service
  4942. count_all = 0
  4943. count_hit = 0
  4944. db_services = model_query(context, models.Service).filter_by(
  4945. uuid=None).limit(max_count)
  4946. for db_service in db_services:
  4947. count_all += 1
  4948. service_obj = service.Service._from_db_object(
  4949. context, service.Service(), db_service)
  4950. if 'uuid' in service_obj:
  4951. count_hit += 1
  4952. return count_all, count_hit
  4953. ####################
  4954. def _instance_group_get_query(context, model_class, id_field=None, id=None,
  4955. read_deleted=None):
  4956. columns_to_join = {models.InstanceGroup: ['_policies', '_members']}
  4957. query = model_query(context, model_class, read_deleted=read_deleted,
  4958. project_only=True)
  4959. for c in columns_to_join.get(model_class, []):
  4960. query = query.options(joinedload(c))
  4961. if id and id_field:
  4962. query = query.filter(id_field == id)
  4963. return query
  4964. @pick_context_manager_writer
  4965. def instance_group_create(context, values, policies=None, members=None):
  4966. """Create a new group."""
  4967. uuid = values.get('uuid', None)
  4968. if uuid is None:
  4969. uuid = uuidutils.generate_uuid()
  4970. values['uuid'] = uuid
  4971. try:
  4972. group = models.InstanceGroup()
  4973. group.update(values)
  4974. group.save(context.session)
  4975. except db_exc.DBDuplicateEntry:
  4976. raise exception.InstanceGroupIdExists(group_uuid=uuid)
  4977. # We don't want '_policies' and '_members' attributes to be lazy loaded
  4978. # later. We know there is nothing here since we just created this
  4979. # instance group.
  4980. if policies:
  4981. _instance_group_policies_add(context, group.id, policies)
  4982. else:
  4983. group._policies = []
  4984. if members:
  4985. _instance_group_members_add(context, group.id, members)
  4986. else:
  4987. group._members = []
  4988. return instance_group_get(context, uuid)
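# Example (editor's sketch): creating a server group with a policy; the
# uuid is generated when not supplied, and a duplicate uuid raises
# InstanceGroupIdExists. `ctxt` is a hypothetical context:
#
#     group = instance_group_create(ctxt, {'name': 'ha-group',
#                                          'project_id': ctxt.project_id,
#                                          'user_id': ctxt.user_id},
#                                   policies=['anti-affinity'])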
  4989. @pick_context_manager_reader
  4990. def instance_group_get(context, group_uuid):
  4991. """Get a specific group by uuid."""
  4992. group = _instance_group_get_query(context,
  4993. models.InstanceGroup,
  4994. models.InstanceGroup.uuid,
  4995. group_uuid).\
  4996. first()
  4997. if not group:
  4998. raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
  4999. return group
  5000. @pick_context_manager_reader
  5001. def instance_group_get_by_instance(context, instance_uuid):
  5002. group_member = model_query(context, models.InstanceGroupMember).\
  5003. filter_by(instance_id=instance_uuid).\
  5004. first()
  5005. if not group_member:
  5006. raise exception.InstanceGroupNotFound(group_uuid='')
  5007. group = _instance_group_get_query(context, models.InstanceGroup,
  5008. models.InstanceGroup.id,
  5009. group_member.group_id).first()
  5010. if not group:
  5011. raise exception.InstanceGroupNotFound(
  5012. group_uuid=group_member.group_id)
  5013. return group
  5014. @pick_context_manager_writer
  5015. def instance_group_update(context, group_uuid, values):
  5016. """Update the attributes of a group.
5017. If values contains a 'policies' key, the group's policies are updated
5018. too; similarly for a 'members' key.
  5019. """
  5020. group = model_query(context, models.InstanceGroup).\
  5021. filter_by(uuid=group_uuid).\
  5022. first()
  5023. if not group:
  5024. raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
  5025. policies = values.get('policies')
  5026. if policies is not None:
  5027. _instance_group_policies_add(context,
  5028. group.id,
  5029. values.pop('policies'),
  5030. set_delete=True)
  5031. members = values.get('members')
  5032. if members is not None:
  5033. _instance_group_members_add(context,
  5034. group.id,
  5035. values.pop('members'),
  5036. set_delete=True)
  5037. group.update(values)
  5038. if policies:
  5039. values['policies'] = policies
  5040. if members:
  5041. values['members'] = members
  5042. @pick_context_manager_writer
  5043. def instance_group_delete(context, group_uuid):
  5044. """Delete a group."""
  5045. group_id = _instance_group_id(context, group_uuid)
  5046. count = _instance_group_get_query(context,
  5047. models.InstanceGroup,
  5048. models.InstanceGroup.uuid,
  5049. group_uuid).soft_delete()
  5050. if count == 0:
  5051. raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
  5052. # Delete policies, metadata and members
  5053. instance_models = [models.InstanceGroupPolicy,
  5054. models.InstanceGroupMember]
  5055. for model in instance_models:
  5056. model_query(context, model).filter_by(group_id=group_id).soft_delete()
  5057. @pick_context_manager_reader
  5058. def instance_group_get_all(context):
  5059. """Get all groups."""
  5060. return _instance_group_get_query(context, models.InstanceGroup).all()
  5061. @pick_context_manager_reader
  5062. def instance_group_get_all_by_project_id(context, project_id):
  5063. """Get all groups."""
  5064. return _instance_group_get_query(context, models.InstanceGroup).\
  5065. filter_by(project_id=project_id).\
  5066. all()
  5067. def _instance_group_count_by_project_and_user(context, project_id, user_id):
  5068. return model_query(context, models.InstanceGroup, read_deleted="no").\
  5069. filter_by(project_id=project_id).\
  5070. filter_by(user_id=user_id).\
  5071. count()
  5072. def _instance_group_model_get_query(context, model_class, group_id,
  5073. read_deleted='no'):
  5074. return model_query(context,
  5075. model_class,
  5076. read_deleted=read_deleted).\
  5077. filter_by(group_id=group_id)
  5078. def _instance_group_id(context, group_uuid):
  5079. """Returns the group database ID for the group UUID."""
  5080. result = model_query(context,
  5081. models.InstanceGroup,
  5082. (models.InstanceGroup.id,)).\
  5083. filter_by(uuid=group_uuid).\
  5084. first()
  5085. if not result:
  5086. raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
  5087. return result.id
  5088. def _instance_group_members_add(context, id, members, set_delete=False):
  5089. all_members = set(members)
  5090. query = _instance_group_model_get_query(context,
  5091. models.InstanceGroupMember, id)
  5092. if set_delete:
  5093. query.filter(~models.InstanceGroupMember.instance_id.in_(
  5094. all_members)).\
  5095. soft_delete(synchronize_session=False)
  5096. query = query.filter(
  5097. models.InstanceGroupMember.instance_id.in_(all_members))
  5098. already_existing = set()
  5099. for member_ref in query.all():
  5100. already_existing.add(member_ref.instance_id)
  5101. for instance_id in members:
  5102. if instance_id in already_existing:
  5103. continue
  5104. member_ref = models.InstanceGroupMember()
  5105. member_ref.update({'instance_id': instance_id,
  5106. 'group_id': id})
  5107. context.session.add(member_ref)
  5108. return members
  5109. @pick_context_manager_writer
  5110. def instance_group_members_add(context, group_uuid, members,
  5111. set_delete=False):
  5112. id = _instance_group_id(context, group_uuid)
  5113. return _instance_group_members_add(context, id, members,
  5114. set_delete=set_delete)
  5115. @pick_context_manager_writer
  5116. def instance_group_member_delete(context, group_uuid, instance_id):
  5117. id = _instance_group_id(context, group_uuid)
  5118. count = _instance_group_model_get_query(context,
  5119. models.InstanceGroupMember,
  5120. id).\
  5121. filter_by(instance_id=instance_id).\
  5122. soft_delete()
  5123. if count == 0:
  5124. raise exception.InstanceGroupMemberNotFound(group_uuid=group_uuid,
  5125. instance_id=instance_id)


@pick_context_manager_reader
def instance_group_members_get(context, group_uuid):
    id = _instance_group_id(context, group_uuid)
    instances = model_query(context,
                            models.InstanceGroupMember,
                            (models.InstanceGroupMember.instance_id,)).\
        filter_by(group_id=id).all()
    # Querying a single column yields one-element row tuples; unpack them.
    return [instance[0] for instance in instances]


def _instance_group_policies_add(context, id, policies, set_delete=False):
    allpols = set(policies)
    query = _instance_group_model_get_query(context,
                                            models.InstanceGroupPolicy, id)
    if set_delete:
        # Treat ``policies`` as the complete policy list: soft-delete any
        # existing policy that is not in it.
        query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\
            soft_delete(synchronize_session=False)

    # Only add policies that are not already attached to the group.
    query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols))
    already_existing = set()
    for policy_ref in query.all():
        already_existing.add(policy_ref.policy)

    for policy in policies:
        if policy in already_existing:
            continue
        policy_ref = models.InstanceGroupPolicy()
        policy_ref.update({'policy': policy,
                           'group_id': id})
        context.session.add(policy_ref)

    return policies
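

# Usage sketch (illustrative, not part of the original module): policies
# follow the same replace semantics as members; ``set_delete=True`` makes
# the passed list authoritative. Assumes an active writer transaction on
# the context; the policy name is hypothetical.
#
#     _instance_group_policies_add(context, group_id,
#                                  ['anti-affinity'], set_delete=True)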


####################


@pick_context_manager_reader
def pci_device_get_by_addr(context, node_id, dev_addr):
    pci_dev_ref = model_query(context, models.PciDevice).\
        filter_by(compute_node_id=node_id).\
        filter_by(address=dev_addr).\
        first()
    if not pci_dev_ref:
        raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr)
    return pci_dev_ref


@pick_context_manager_reader
def pci_device_get_by_id(context, id):
    pci_dev_ref = model_query(context, models.PciDevice).\
        filter_by(id=id).\
        first()
    if not pci_dev_ref:
        raise exception.PciDeviceNotFoundById(id=id)
    return pci_dev_ref


@pick_context_manager_reader
def pci_device_get_all_by_node(context, node_id):
    return model_query(context, models.PciDevice).\
        filter_by(compute_node_id=node_id).\
        all()


@pick_context_manager_reader
def pci_device_get_all_by_parent_addr(context, node_id, parent_addr):
    return model_query(context, models.PciDevice).\
        filter_by(compute_node_id=node_id).\
        filter_by(parent_addr=parent_addr).\
        all()


@require_context
@pick_context_manager_reader
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
    return model_query(context, models.PciDevice).\
        filter_by(status='allocated').\
        filter_by(instance_uuid=instance_uuid).\
        all()
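

# Usage sketch (illustrative, not part of the original module): list the
# PCI devices currently allocated to an instance. The UUID variable is
# hypothetical.
#
#     devs = pci_device_get_all_by_instance_uuid(context, instance_uuid)
#     addresses = [dev.address for dev in devs]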


@pick_context_manager_reader
def _instance_pcidevs_get_multi(context, instance_uuids):
    if not instance_uuids:
        return []
    # Unlike the empty-input case above, this returns a Query object rather
    # than a list; callers can iterate it or refine it further.
    return model_query(context, models.PciDevice).\
        filter_by(status='allocated').\
        filter(models.PciDevice.instance_uuid.in_(instance_uuids))


@pick_context_manager_writer
def pci_device_destroy(context, node_id, address):
    result = model_query(context, models.PciDevice).\
        filter_by(compute_node_id=node_id).\
        filter_by(address=address).\
        soft_delete()
    if not result:
        raise exception.PciDeviceNotFound(node_id=node_id, address=address)


@pick_context_manager_writer
def pci_device_update(context, node_id, address, values):
    query = model_query(context, models.PciDevice, read_deleted="no").\
        filter_by(compute_node_id=node_id).\
        filter_by(address=address)
    if query.update(values) == 0:
        # No existing row matched; create the device instead.
        device = models.PciDevice()
        device.update(values)
        context.session.add(device)
    # The query re-runs here; the session autoflushes first, so a device
    # added above is found as well.
    return query.one()
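

# Usage sketch (illustrative, not part of the original module): the update
# doubles as an upsert, so a device can be registered and later mutated
# with the same call. The address and field values below are hypothetical.
#
#     pci_device_update(context, node_id, '0000:04:00.1',
#                       {'compute_node_id': node_id,
#                        'address': '0000:04:00.1',
#                        'status': 'available'})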


####################


@pick_context_manager_writer
def instance_tag_add(context, instance_uuid, tag):
    tag_ref = models.Tag()
    tag_ref.resource_id = instance_uuid
    tag_ref.tag = tag

    try:
        _check_instance_exists_in_project(context, instance_uuid)
        with get_context_manager(context).writer.savepoint.using(context):
            context.session.add(tag_ref)
    except db_exc.DBDuplicateEntry:
        # NOTE(snikitin): We should ignore duplicate tags.
        pass

    return tag_ref


@pick_context_manager_writer
def instance_tag_set(context, instance_uuid, tags):
    _check_instance_exists_in_project(context, instance_uuid)

    existing = context.session.query(models.Tag.tag).filter_by(
        resource_id=instance_uuid).all()

    existing = set(row.tag for row in existing)
    tags = set(tags)
    # Compute the delta so only rows that actually change are touched.
    to_delete = existing - tags
    to_add = tags - existing

    if to_delete:
        context.session.query(models.Tag).filter_by(
            resource_id=instance_uuid).filter(
            models.Tag.tag.in_(to_delete)).delete(
            synchronize_session=False)

    if to_add:
        data = [
            {'resource_id': instance_uuid, 'tag': tag} for tag in to_add]
        context.session.execute(models.Tag.__table__.insert(), data)

    return context.session.query(models.Tag).filter_by(
        resource_id=instance_uuid).all()
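

# Usage sketch (illustrative, not part of the original module): the set
# operation is a full replacement computed as a delta against the existing
# rows. Tag names below are hypothetical.
#
#     instance_tag_set(context, instance_uuid, ['db', 'prod'])
#     # -> tags outside ['db', 'prod'] are deleted, missing ones inserted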


@pick_context_manager_reader
def instance_tag_get_by_instance_uuid(context, instance_uuid):
    _check_instance_exists_in_project(context, instance_uuid)
    return context.session.query(models.Tag).filter_by(
        resource_id=instance_uuid).all()


@pick_context_manager_writer
def instance_tag_delete(context, instance_uuid, tag):
    _check_instance_exists_in_project(context, instance_uuid)
    result = context.session.query(models.Tag).filter_by(
        resource_id=instance_uuid, tag=tag).delete()

    if not result:
        raise exception.InstanceTagNotFound(instance_id=instance_uuid,
                                            tag=tag)


@pick_context_manager_writer
def instance_tag_delete_all(context, instance_uuid):
    _check_instance_exists_in_project(context, instance_uuid)
    context.session.query(models.Tag).filter_by(
        resource_id=instance_uuid).delete()


@pick_context_manager_reader
def instance_tag_exists(context, instance_uuid, tag):
    _check_instance_exists_in_project(context, instance_uuid)
    q = context.session.query(models.Tag).filter_by(
        resource_id=instance_uuid, tag=tag)
    # Emit an EXISTS subquery rather than fetching the row itself.
    return context.session.query(q.exists()).scalar()
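

# Usage sketch (illustrative, not part of the original module): a typical
# check-then-delete flow over the tag helpers above. The tag name is
# hypothetical.
#
#     if instance_tag_exists(context, instance_uuid, 'prod'):
#         instance_tag_delete(context, instance_uuid, 'prod')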


####################


@pick_context_manager_writer
def console_auth_token_create(context, values):
    instance_uuid = values.get('instance_uuid')
    _check_instance_exists_in_project(context, instance_uuid)
    token_ref = models.ConsoleAuthToken()
    token_ref.update(values)
    context.session.add(token_ref)
    return token_ref


@pick_context_manager_reader
def console_auth_token_get_valid(context, token_hash, instance_uuid=None):
    if instance_uuid is not None:
        _check_instance_exists_in_project(context, instance_uuid)
    query = context.session.query(models.ConsoleAuthToken).\
        filter_by(token_hash=token_hash)
    if instance_uuid is not None:
        query = query.filter_by(instance_uuid=instance_uuid)
    # Only a token that has not yet expired is considered valid.
    return query.filter(
        models.ConsoleAuthToken.expires > timeutils.utcnow_ts()).first()
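

# Usage sketch (illustrative, not part of the original module): tokens are
# looked up by their hash, never by the raw token. The sha256 hashing step
# below is an assumption for illustration only (requires hashlib).
#
#     token_hash = hashlib.sha256(token.encode()).hexdigest()
#     valid = console_auth_token_get_valid(context, token_hash,
#                                          instance_uuid=instance_uuid)
#     if valid is None:
#         # token is unknown or expired; reject the console connection
#         ...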


@pick_context_manager_writer
def console_auth_token_destroy_all_by_instance(context, instance_uuid):
    context.session.query(models.ConsoleAuthToken).\
        filter_by(instance_uuid=instance_uuid).delete()


@pick_context_manager_writer
def console_auth_token_destroy_expired_by_host(context, host):
    context.session.query(models.ConsoleAuthToken).\
        filter_by(host=host).\
        filter(models.ConsoleAuthToken.expires <= timeutils.utcnow_ts()).\
        delete()
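

# Usage sketch (illustrative, not part of the original module): a periodic
# cleanup task on a compute host might purge that host's expired tokens.
#
#     console_auth_token_destroy_expired_by_host(context, CONF.host)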