# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import inspect
import traceback
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import update_match
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import exc as sqla_exc
from sqlalchemy import orm
from sqlalchemy import schema
from sqlalchemy import sql
from sqlalchemy.sql import expression
from sqlalchemy.sql import func
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
import nova.context
from nova.db.main import models
from nova.db import utils as db_utils
from nova.db.utils import require_context
from nova import exception
from nova.i18n import _
from nova import safe_utils
profiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
DISABLE_DB_ACCESS = False
context_manager = enginefacade.transaction_context()
def _get_db_conf(conf_group, connection=None):
kw = dict(conf_group.items())
if connection is not None:
kw['connection'] = connection
return kw
def _context_manager_from_context(context):
if context:
try:
return context.db_connection
except AttributeError:
pass
def _joinedload_all(lead_entity, column):
"""Do a nested load.
For example, resolve the following::
_joinedload_all(models.SecurityGroup, 'instances.info_cache')
to:
orm.joinedload(
models.SecurityGroup.instances
).joinedload(
Instance.info_cache
)
"""
elements = column.split('.')
relationship_attr = getattr(lead_entity, elements.pop(0))
joined = orm.joinedload(relationship_attr)
for element in elements:
relationship_entity = relationship_attr.entity.class_
relationship_attr = getattr(relationship_entity, element)
joined = joined.joinedload(relationship_attr)
return joined
def configure(conf):
context_manager.configure(**_get_db_conf(conf.database))
if (
profiler_sqlalchemy and
CONF.profiler.enabled and
CONF.profiler.trace_sqlalchemy
):
context_manager.append_on_engine_create(
lambda eng: profiler_sqlalchemy.add_tracing(sa, eng, "db"))
def create_context_manager(connection=None):
"""Create a database context manager object for a cell database connection.
:param connection: The database connection string
"""
ctxt_mgr = enginefacade.transaction_context()
ctxt_mgr.configure(**_get_db_conf(CONF.database, connection=connection))
return ctxt_mgr
def get_context_manager(context):
"""Get a database context manager object.
:param context: The request context that can contain a context manager
"""
return _context_manager_from_context(context) or context_manager
def get_engine(use_slave=False, context=None):
"""Get a database engine object.
:param use_slave: Whether to use the slave connection
:param context: The request context that can contain a context manager
"""
ctxt_mgr = get_context_manager(context)
if use_slave:
return ctxt_mgr.reader.get_engine()
return ctxt_mgr.writer.get_engine()
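# NOTE: an illustrative sketch (not part of the DB API) of how the helpers
# above combine; the connection URL is a placeholder.
def _example_cell_engine():
    # Build a transaction context manager for one cell database, then pull
    # the writer engine from it -- the same thing get_engine() does when the
    # RequestContext carries a db_connection.
    ctxt_mgr = create_context_manager(
        'mysql+pymysql://nova:secret@dbhost/nova_cell1')
    return ctxt_mgr.writer.get_engine()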
_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
def select_db_reader_mode(f):
"""Decorator to select synchronous or asynchronous reader mode.
    The kwarg argument 'use_slave' defines the reader mode. The asynchronous
    reader will be used if 'use_slave' is True and the synchronous reader
    otherwise. If 'use_slave' is not specified, the default value 'False'
    will be used.
    Wrapped function must have a context in the arguments.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
wrapped_func = safe_utils.get_wrapped_function(f)
keyed_args = inspect.getcallargs(wrapped_func, *args, **kwargs)
context = keyed_args['context']
use_slave = keyed_args.get('use_slave', False)
if use_slave:
reader_mode = get_context_manager(context).async_
else:
reader_mode = get_context_manager(context).reader
with reader_mode.using(context):
return f(*args, **kwargs)
wrapper.__signature__ = inspect.signature(f)
return wrapper
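# NOTE: an illustrative sketch of the decorator above; ``_example_service_count``
# is hypothetical and not part of the DB API.
@select_db_reader_mode
def _example_service_count(context, use_slave=False):
    # use_slave=True enters the async_ (slave) reader; otherwise the
    # synchronous reader is used for the duration of the call.
    return context.session.query(models.Service).count()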
def _check_db_access():
# disable all database access if required
if DISABLE_DB_ACCESS:
service_name = 'nova-compute'
stacktrace = ''.join(traceback.format_stack())
LOG.error(
'No DB access allowed in %(service_name)s: %(stacktrace)s',
{'service_name': service_name, 'stacktrace': stacktrace})
raise exception.DBNotAllowed(binary=service_name)
def pick_context_manager_writer(f):
"""Decorator to use a writer db context manager.
The db context manager will be picked from the RequestContext.
Wrapped function must have a RequestContext in the arguments.
"""
@functools.wraps(f)
def wrapper(context, *args, **kwargs):
_check_db_access()
ctxt_mgr = get_context_manager(context)
with ctxt_mgr.writer.using(context):
return f(context, *args, **kwargs)
wrapper.__signature__ = inspect.signature(f)
return wrapper
def pick_context_manager_reader(f):
"""Decorator to use a reader db context manager.
The db context manager will be picked from the RequestContext.
Wrapped function must have a RequestContext in the arguments.
"""
@functools.wraps(f)
def wrapper(context, *args, **kwargs):
_check_db_access()
ctxt_mgr = get_context_manager(context)
with ctxt_mgr.reader.using(context):
return f(context, *args, **kwargs)
wrapper.__signature__ = inspect.signature(f)
return wrapper
def pick_context_manager_reader_allow_async(f):
"""Decorator to use a reader.allow_async db context manager.
The db context manager will be picked from the RequestContext.
Wrapped function must have a RequestContext in the arguments.
"""
@functools.wraps(f)
def wrapper(context, *args, **kwargs):
_check_db_access()
ctxt_mgr = get_context_manager(context)
with ctxt_mgr.reader.allow_async.using(context):
return f(context, *args, **kwargs)
wrapper.__signature__ = inspect.signature(f)
return wrapper
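# NOTE: a minimal hypothetical example of how the DB API functions below are
# declared; every public function pairs one of these decorators with a
# RequestContext first argument.
@pick_context_manager_reader
def _example_service_ids(context):
    # context.session is only valid inside the reader transaction opened by
    # the decorator.
    return [row.id for row in context.session.query(models.Service.id)]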
def model_query(
context, model, args=None, read_deleted=None, project_only=False,
):
"""Query helper that accounts for context's `read_deleted` field.
:param context: The request context that can contain a context manager
:param model: Model to query. Must be a subclass of ModelBase.
:param args: Arguments to query. If None - model is used.
:param read_deleted: If not None, overrides context's read_deleted field.
Permitted values are 'no', which does not return deleted values;
'only', which only returns deleted values; and 'yes', which does not
filter deleted values.
:param project_only: If set and context is user-type, then restrict
query to match the context's project_id. If set to 'allow_none',
restriction includes project_id = None.
"""
if read_deleted is None:
read_deleted = context.read_deleted
query_kwargs = {}
if 'no' == read_deleted:
query_kwargs['deleted'] = False
elif 'only' == read_deleted:
query_kwargs['deleted'] = True
elif 'yes' == read_deleted:
pass
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
query = sqlalchemyutils.model_query(
model, context.session, args, **query_kwargs)
# We can't use oslo.db model_query's project_id here, as it doesn't allow
# us to return both our projects and unowned projects.
if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.filter(sql.or_(
model.project_id == context.project_id,
model.project_id == sql.null()
))
else:
query = query.filter_by(project_id=context.project_id)
return query
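# NOTE: a hedged usage sketch for model_query(); the filter values are
# illustrative and ``_example_soft_deleted`` is not part of the DB API.
@pick_context_manager_reader
def _example_soft_deleted(context):
    # read_deleted='only' returns just soft-deleted rows, while
    # project_only=True scopes the query to the caller's project when the
    # context is a user (non-admin) context.
    return model_query(
        context, models.Instance, read_deleted='only', project_only=True,
    ).all()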
def convert_objects_related_datetimes(values, *datetime_keys):
if not datetime_keys:
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
for key in datetime_keys:
if key in values and values[key]:
if isinstance(values[key], str):
try:
values[key] = timeutils.parse_strtime(values[key])
except ValueError:
                    # Try alternate parsing since parse_strtime will fail on
                    # ISO 8601 values such as '2015-05-28T19:59:38+00:00'
                    values[key] = timeutils.parse_isotime(values[key])
# NOTE(danms): Strip UTC timezones from datetimes, since they're
# stored that way in the database
values[key] = values[key].replace(tzinfo=None)
return values
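# NOTE: illustrative only -- both strtime and ISO 8601 strings are accepted
# by convert_objects_related_datetimes() and the timezone is stripped.
def _example_normalize_datetimes():
    values = {'created_at': '2015-05-28T19:59:38+00:00'}
    convert_objects_related_datetimes(values, 'created_at')
    # values['created_at'] is now a naive datetime.datetime (tzinfo=None)
    return values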
###################
def constraint(**conditions):
"""Return a constraint object suitable for use with some updates."""
return Constraint(conditions)
def equal_any(*values):
"""Return an equality condition object suitable for use in a constraint.
Equal_any conditions require that a model object's attribute equal any
one of the given values.
"""
return EqualityCondition(values)
def not_equal(*values):
"""Return an inequality condition object suitable for use in a constraint.
Not_equal conditions require that a model object's attribute differs from
all of the given values.
"""
return InequalityCondition(values)
class Constraint(object):
def __init__(self, conditions):
self.conditions = conditions
def apply(self, model, query):
for key, condition in self.conditions.items():
for clause in condition.clauses(getattr(model, key)):
query = query.filter(clause)
return query
class EqualityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
        # The method signature requires us to return an iterable even though,
        # for the OR operator, this will actually be a single clause.
return [sql.or_(*[field == value for value in self.values])]
class InequalityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return [field != value for value in self.values]
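# NOTE: an illustrative sketch of composing the condition helpers above; the
# states chosen are placeholders. instance_destroy() below accepts such an
# object via its ``constraint`` parameter.
def _example_constraint():
    # Match rows whose task_state equals any listed value AND whose vm_state
    # differs from every listed value.
    return constraint(
        task_state=equal_any(None, task_states.DELETING),
        vm_state=not_equal(vm_states.SOFT_DELETED),
    )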
###################
@pick_context_manager_writer
def service_destroy(context, service_id):
"""Destroy the service or raise if it does not exist."""
service = service_get(context, service_id)
model_query(context, models.Service).\
filter_by(id=service_id).\
soft_delete(synchronize_session=False)
if service.binary == 'nova-compute':
# TODO(sbauza): Remove the service_id filter in a later release
# once we are sure that all compute nodes report the host field
model_query(context, models.ComputeNode).\
filter(sql.or_(
models.ComputeNode.service_id == service_id,
models.ComputeNode.host == service['host'])).\
soft_delete(synchronize_session=False)
@pick_context_manager_reader
def service_get(context, service_id):
"""Get a service or raise if it does not exist."""
query = model_query(context, models.Service).filter_by(id=service_id)
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@pick_context_manager_reader
def service_get_by_uuid(context, service_uuid):
"""Get a service by it's uuid or raise ServiceNotFound if it does not
exist.
"""
query = model_query(context, models.Service).filter_by(uuid=service_uuid)
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_uuid)
return result
@pick_context_manager_reader_allow_async
def service_get_minimum_version(context, binaries):
"""Get the minimum service version in the database."""
min_versions = context.session.query(
models.Service.binary,
func.min(models.Service.version)).\
filter(models.Service.binary.in_(binaries)).\
filter(models.Service.deleted == 0).\
filter(models.Service.forced_down == sql.false()).\
group_by(models.Service.binary)
return dict(min_versions)
@pick_context_manager_reader
def service_get_all(context, disabled=None):
"""Get all services."""
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@pick_context_manager_reader
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@pick_context_manager_reader
def service_get_by_host_and_topic(context, host, topic):
"""Get a service by hostname and topic it listens to."""
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
@pick_context_manager_reader
def service_get_all_by_binary(context, binary, include_disabled=False):
"""Get services for a given binary.
Includes disabled services if 'include_disabled' parameter is True
"""
query = model_query(context, models.Service).filter_by(binary=binary)
if not include_disabled:
query = query.filter_by(disabled=False)
return query.all()
@pick_context_manager_reader
def service_get_all_computes_by_hv_type(context, hv_type,
include_disabled=False):
"""Get all compute services for a given hypervisor type.
Includes disabled services if 'include_disabled' parameter is True.
"""
query = model_query(context, models.Service, read_deleted="no").\
filter_by(binary='nova-compute')
if not include_disabled:
query = query.filter_by(disabled=False)
query = query.join(models.ComputeNode,
models.Service.host == models.ComputeNode.host).\
filter(models.ComputeNode.hypervisor_type == hv_type).\
distinct()
return query.all()
@pick_context_manager_reader
def service_get_by_host_and_binary(context, host, binary):
"""Get a service by hostname and binary."""
result = model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@pick_context_manager_reader
def service_get_all_by_host(context, host):
"""Get all services for a given host."""
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
@pick_context_manager_reader_allow_async
def service_get_by_compute_host(context, host):
"""Get the service entry for a given compute host.
Returns the service entry joined with the compute_node entry.
"""
result = model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
filter_by(binary='nova-compute').\
first()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@pick_context_manager_writer
def service_create(context, values):
"""Create a service from the values dictionary."""
service_ref = models.Service()
service_ref.update(values)
# We only auto-disable nova-compute services since those are the only
# ones that can be enabled using the os-services REST API and they are
# the only ones where being disabled means anything. It does
# not make sense to be able to disable non-compute services like
# nova-scheduler or nova-osapi_compute since that does nothing.
if not CONF.enable_new_services and values.get('binary') == 'nova-compute':
msg = _("New compute service disabled due to config option.")
service_ref.disabled = True
service_ref.disabled_reason = msg
try:
service_ref.save(context.session)
except db_exc.DBDuplicateEntry as e:
if 'binary' in e.columns:
raise exception.ServiceBinaryExists(host=values.get('host'),
binary=values.get('binary'))
raise exception.ServiceTopicExists(host=values.get('host'),
topic=values.get('topic'))
return service_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def service_update(context, service_id, values):
"""Set the given properties on a service and update it.
:raises: NotFound if service does not exist.
"""
service_ref = service_get(context, service_id)
# Only servicegroup.drivers.db.DbDriver._report_state() updates
# 'report_count', so if that value changes then store the timestamp
# as the last time we got a state report.
if 'report_count' in values:
if values['report_count'] > service_ref.report_count:
service_ref.last_seen_up = timeutils.utcnow()
service_ref.update(values)
return service_ref
###################
def _compute_node_select(context, filters=None, limit=None, marker=None):
if filters is None:
filters = {}
cn_tbl = models.ComputeNode.__table__.alias('cn')
select = sa.select(cn_tbl)
if context.read_deleted == "no":
select = select.where(cn_tbl.c.deleted == 0)
if "compute_id" in filters:
select = select.where(cn_tbl.c.id == filters["compute_id"])
if "service_id" in filters:
select = select.where(cn_tbl.c.service_id == filters["service_id"])
if "host" in filters:
select = select.where(cn_tbl.c.host == filters["host"])
if "hypervisor_hostname" in filters:
hyp_hostname = filters["hypervisor_hostname"]
select = select.where(cn_tbl.c.hypervisor_hostname == hyp_hostname)
if "mapped" in filters:
select = select.where(cn_tbl.c.mapped < filters['mapped'])
if marker is not None:
try:
compute_node_get(context, marker)
except exception.ComputeHostNotFound:
raise exception.MarkerNotFound(marker=marker)
select = select.where(cn_tbl.c.id > marker)
if limit is not None:
select = select.limit(limit)
# Explicitly order by id, so we're not dependent on the native sort
# order of the underlying DB.
select = select.order_by(expression.asc("id"))
return select
def _compute_node_fetchall(context, filters=None, limit=None, marker=None):
select = _compute_node_select(context, filters, limit=limit, marker=marker)
engine = get_engine(context=context)
with engine.connect() as conn, conn.begin():
results = conn.execute(select).fetchall()
    # Callers expect dict-like objects, not SQLAlchemy RowProxy objects; the
    # connection is closed automatically when the ``with`` block exits.
    results = [dict(r._mapping) for r in results]
return results
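# NOTE: a hypothetical illustration of the Core-level fetch helper above; the
# filter values are placeholders.
def _example_unmapped_nodes(context):
    # 'mapped' filters on ``cn.mapped < value``, so 1 returns compute nodes
    # not yet mapped to a cell; rows come back as plain dicts, not ORM
    # objects.
    return _compute_node_fetchall(context, {'mapped': 1}, limit=50)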
@pick_context_manager_reader
def compute_node_get(context, compute_id):
"""Get a compute node by its id.
:param context: The security context
:param compute_id: ID of the compute node
:returns: Dictionary-like object containing properties of the compute node
:raises: ComputeHostNotFound if compute node with the given ID doesn't
exist.
"""
results = _compute_node_fetchall(context, {"compute_id": compute_id})
if not results:
raise exception.ComputeHostNotFound(host=compute_id)
return results[0]
# TODO(edleafe): remove once the compute node resource provider migration is
# complete, and this distinction is no longer necessary.
@pick_context_manager_reader
def compute_node_get_model(context, compute_id):
"""Get a compute node sqlalchemy model object by its id.
:param context: The security context
:param compute_id: ID of the compute node
:returns: Sqlalchemy model object containing properties of the compute node
:raises: ComputeHostNotFound if compute node with the given ID doesn't
exist.
"""
result = model_query(context, models.ComputeNode).\
filter_by(id=compute_id).\
first()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
return result
@pick_context_manager_reader
def compute_nodes_get_by_service_id(context, service_id):
"""Get a list of compute nodes by their associated service id.
:param context: The security context
:param service_id: ID of the associated service
:returns: List of dictionary-like objects, each containing properties of
the compute node, including its corresponding service and statistics
:raises: ServiceNotFound if service with the given ID doesn't exist.
"""
results = _compute_node_fetchall(context, {"service_id": service_id})
if not results:
raise exception.ServiceNotFound(service_id=service_id)
return results
@pick_context_manager_reader
def compute_node_get_by_host_and_nodename(context, host, nodename):
"""Get a compute node by its associated host and nodename.
:param context: The security context (admin)
:param host: Name of the host
:param nodename: Name of the node
:returns: Dictionary-like object containing properties of the compute node,
including its statistics
:raises: ComputeHostNotFound if host with the given name doesn't exist.
"""
results = _compute_node_fetchall(context,
{"host": host, "hypervisor_hostname": nodename})
if not results:
raise exception.ComputeHostNotFound(host=host)
return results[0]
@pick_context_manager_reader
def compute_node_get_by_nodename(context, hypervisor_hostname):
"""Get a compute node by hypervisor_hostname.
:param context: The security context (admin)
:param hypervisor_hostname: Name of the node
:returns: Dictionary-like object containing properties of the compute node,
including its statistics
    :raises: ComputeHostNotFound if a node with the given hypervisor
        hostname doesn't exist.
"""
results = _compute_node_fetchall(context,
{"hypervisor_hostname": hypervisor_hostname})
if not results:
raise exception.ComputeHostNotFound(host=hypervisor_hostname)
return results[0]
@pick_context_manager_reader
def compute_node_get_all(context):
"""Get all compute nodes.
:param context: The security context
:returns: List of dictionaries each containing compute node properties
"""
return _compute_node_fetchall(context)
@pick_context_manager_reader_allow_async
def compute_node_get_all_by_host(context, host):
"""Get all compute nodes by host name.
:param context: The security context (admin)
:param host: Name of the host
:returns: List of dictionaries each containing compute node properties
"""
results = _compute_node_fetchall(context, {"host": host})
if not results:
raise exception.ComputeHostNotFound(host=host)
return results
@pick_context_manager_reader
def compute_node_get_all_mapped_less_than(context, mapped_less_than):
"""Get all compute nodes with specific mapped values.
:param context: The security context
:param mapped_less_than: Get compute nodes with mapped less than this value
:returns: List of dictionaries each containing compute node properties
"""
return _compute_node_fetchall(context,
{'mapped': mapped_less_than})
@pick_context_manager_reader
def compute_node_get_all_by_pagination(context, limit=None, marker=None):
"""Get all compute nodes by pagination.
:param context: The security context
:param limit: Maximum number of items to return
:param marker: The last item of the previous page, the next results after
this value will be returned
:returns: List of dictionaries each containing compute node properties
"""
return _compute_node_fetchall(context, limit=limit, marker=marker)
@pick_context_manager_reader
def compute_node_search_by_hypervisor(context, hypervisor_match):
"""Get all compute nodes by hypervisor hostname.
:param context: The security context
:param hypervisor_match: The hypervisor hostname
:returns: List of dictionary-like objects each containing compute node
properties
"""
field = models.ComputeNode.hypervisor_hostname
return model_query(context, models.ComputeNode).\
filter(field.like('%%%s%%' % hypervisor_match)).\
all()
@pick_context_manager_writer
def _compute_node_create(context, values):
"""Create a compute node from the values dictionary.
:param context: The security context
:param values: Dictionary containing compute node properties
:returns: Dictionary-like object containing the properties of the created
node, including its corresponding service and statistics
"""
convert_objects_related_datetimes(values)
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
compute_node_ref.save(context.session)
return compute_node_ref
# NOTE(mgoddard): We avoid decorating this with @pick_context_manager_writer,
# so that we get a separate transaction in the exception handler. This avoids
# an error message about inactive DB sessions during a transaction rollback.
# See https://bugs.launchpad.net/nova/+bug/1853159.
def compute_node_create(context, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data. Will restore a soft deleted compute node if a
UUID has been explicitly requested.
"""
try:
compute_node_ref = _compute_node_create(context, values)
except db_exc.DBDuplicateEntry:
with excutils.save_and_reraise_exception(logger=LOG) as err_ctx:
# Check to see if we have a (soft) deleted ComputeNode with the
# same UUID and if so just update it and mark as no longer (soft)
# deleted. See bug 1839560 for details.
if 'uuid' in values:
# Get a fresh context for a new DB session and allow it to
# get a deleted record.
ctxt = nova.context.get_admin_context(read_deleted='yes')
compute_node_ref = _compute_node_get_and_update_deleted(
ctxt, values)
# If we didn't get anything back we failed to find the node
# by uuid and update it so re-raise the DBDuplicateEntry.
if compute_node_ref:
err_ctx.reraise = False
return compute_node_ref
@pick_context_manager_writer
def _compute_node_get_and_update_deleted(context, values):
"""Find a compute node by uuid, update and un-delete it.
This is a special case from the ``compute_node_create`` method which
needs to be separate to get a new Session.
This method will update the ComputeNode, if found, to have deleted=0 and
deleted_at=None values.
:param context: request auth context which should be able to read deleted
records
:param values: values used to update the ComputeNode record - must include
uuid
:return: updated ComputeNode sqlalchemy model object if successfully found
and updated, None otherwise
"""
cn = model_query(
context, models.ComputeNode).filter_by(uuid=values['uuid']).first()
if cn:
# Update with the provided values but un-soft-delete.
update_values = copy.deepcopy(values)
update_values['deleted'] = 0
update_values['deleted_at'] = None
return compute_node_update(context, cn.id, update_values)
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def compute_node_update(context, compute_id, values):
"""Set the given properties on a compute node and update it.
:param context: The security context
:param compute_id: ID of the compute node
:param values: Dictionary containing compute node properties to be updated
:returns: Dictionary-like object containing the properties of the updated
compute node, including its corresponding service and statistics
:raises: ComputeHostNotFound if compute node with the given ID doesn't
exist.
"""
compute_ref = compute_node_get_model(context, compute_id)
# Always update this, even if there's going to be no other
# changes in data. This ensures that we invalidate the
# scheduler cache of compute node data in case of races.
values['updated_at'] = timeutils.utcnow()
convert_objects_related_datetimes(values)
compute_ref.update(values)
return compute_ref
@pick_context_manager_writer
def compute_node_delete(context, compute_id, constraint=None):
"""Delete a compute node from the database.
:param context: The security context
:param compute_id: ID of the compute node
:param constraint: a constraint object
:raises: ComputeHostNotFound if compute node with the given ID doesn't
exist.
:raises: ConstraintNotMet if a constraint was specified and it was not met.
"""
query = model_query(context, models.ComputeNode).filter_by(id=compute_id)
if constraint is not None:
query = constraint.apply(models.ComputeNode, query)
result = query.soft_delete(synchronize_session=False)
if not result:
# The soft_delete could fail for one of two reasons:
# 1) The compute node no longer exists
# 2) The constraint, if specified, was not met
# Try to read the compute node and let it raise ComputeHostNotFound if
# 1) happened.
compute_node_get(context, compute_id)
# Else, raise ConstraintNotMet if 2) happened.
raise exception.ConstraintNotMet()
@pick_context_manager_reader
def compute_node_statistics(context):
"""Get aggregate statistics over all compute nodes.
:param context: The security context
:returns: Dictionary containing compute node characteristics summed up
over all the compute nodes, e.g. 'vcpus', 'free_ram_mb' etc.
"""
engine = get_engine(context=context)
services_tbl = models.Service.__table__
inner_sel = _compute_node_select(context).alias('inner_sel')
# TODO(sbauza): Remove the service_id filter in a later release
# once we are sure that all compute nodes report the host field
j = sa.join(
inner_sel, services_tbl,
sql.and_(
sql.or_(
inner_sel.c.host == services_tbl.c.host,
inner_sel.c.service_id == services_tbl.c.id
),
services_tbl.c.disabled == sql.false(),
services_tbl.c.binary == 'nova-compute',
services_tbl.c.deleted == 0
)
)
# NOTE(jaypipes): This COALESCE() stuff is temporary while the data
# migration to the new resource providers inventories and allocations
# tables is completed.
agg_cols = [
func.count().label('count'),
sql.func.sum(
inner_sel.c.vcpus
).label('vcpus'),
sql.func.sum(
inner_sel.c.memory_mb
).label('memory_mb'),
sql.func.sum(
inner_sel.c.local_gb
).label('local_gb'),
sql.func.sum(
inner_sel.c.vcpus_used
).label('vcpus_used'),
sql.func.sum(
inner_sel.c.memory_mb_used
).label('memory_mb_used'),
sql.func.sum(
inner_sel.c.local_gb_used
).label('local_gb_used'),
sql.func.sum(
inner_sel.c.free_ram_mb
).label('free_ram_mb'),
sql.func.sum(
inner_sel.c.free_disk_gb
).label('free_disk_gb'),
sql.func.sum(
inner_sel.c.current_workload
).label('current_workload'),
sql.func.sum(
inner_sel.c.running_vms
).label('running_vms'),
sql.func.sum(
inner_sel.c.disk_available_least
).label('disk_available_least'),
]
select = sql.select(*agg_cols).select_from(j)
with engine.connect() as conn, conn.begin():
results = conn.execute(select).fetchone()
# Build a dict of the info--making no assumptions about result
fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
'current_workload', 'running_vms', 'disk_available_least')
results = {field: int(results[idx] or 0)
for idx, field in enumerate(fields)}
return results
###################
@pick_context_manager_writer
def certificate_create(context, values):
"""Create a certificate from the values dictionary."""
certificate_ref = models.Certificate()
for (key, value) in values.items():
certificate_ref[key] = value
certificate_ref.save(context.session)
return certificate_ref
@pick_context_manager_reader
def certificate_get_all_by_project(context, project_id):
"""Get all certificates for a project."""
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@pick_context_manager_reader
def certificate_get_all_by_user(context, user_id):
"""Get all certificates for a user."""
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@pick_context_manager_reader
def certificate_get_all_by_user_and_project(context, user_id, project_id):
"""Get all certificates for a user and project."""
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
###################
@require_context
@pick_context_manager_writer
def virtual_interface_create(context, values):
"""Create a new virtual interface record.
:param values: Dict containing column values.
"""
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save(context.session)
except db_exc.DBError:
LOG.exception("VIF creation failed with a database error.")
raise exception.VirtualInterfaceCreateException()
return vif_ref
def _virtual_interface_query(context):
return model_query(context, models.VirtualInterface, read_deleted="no")
@require_context
@pick_context_manager_writer
def virtual_interface_update(context, address, values):
"""Create a virtual interface record in the database."""
vif_ref = virtual_interface_get_by_address(context, address)
vif_ref.update(values)
vif_ref.save(context.session)
return vif_ref
@require_context
@pick_context_manager_reader
def virtual_interface_get(context, vif_id):
"""Get a virtual interface by ID.
:param vif_id: ID of the virtual interface.
"""
vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@require_context
@pick_context_manager_reader
def virtual_interface_get_by_address(context, address):
"""Get a virtual interface by address.
:param address: The address of the interface you're looking to get.
"""
try:
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
except db_exc.DBError:
msg = _("Invalid virtual interface address %s in request") % address
LOG.warning(msg)
raise exception.InvalidIpAddressError(msg)
return vif_ref
@require_context
@pick_context_manager_reader
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Get a virtual interface by UUID.
:param vif_uuid: The uuid of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(uuid=vif_uuid).\
first()
return vif_ref
@require_context
@pick_context_manager_reader_allow_async
def virtual_interface_get_by_instance(context, instance_uuid):
"""Gets all virtual interfaces for instance.
:param instance_uuid: UUID of the instance to filter on.
"""
vif_refs = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
order_by(expression.asc("created_at"), expression.asc("id")).\
all()
return vif_refs
@require_context
@pick_context_manager_reader
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
network_id):
"""Get all virtual interface for instance that's associated with
network.
"""
vif_ref = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(network_id=network_id).\
first()
return vif_ref
@require_context
@pick_context_manager_writer
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records associated with instance.
:param instance_uuid: UUID of the instance to filter on.
"""
_virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
@require_context
@pick_context_manager_writer
def virtual_interface_delete(context, id):
"""Delete a virtual interface records.
:param id: ID of the interface.
"""
_virtual_interface_query(context).\
filter_by(id=id).\
soft_delete()
@require_context
@pick_context_manager_reader
def virtual_interface_get_all(context):
"""Get all virtual interface records."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.items():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
def _validate_unique_server_name(context, name):
if not CONF.osapi_compute_unique_server_name_scope:
return
lowername = name.lower()
base_query = model_query(context, models.Instance, read_deleted='no').\
filter(func.lower(models.Instance.hostname) == lowername)
if CONF.osapi_compute_unique_server_name_scope == 'project':
instance_with_same_name = base_query.\
filter_by(project_id=context.project_id).\
count()
elif CONF.osapi_compute_unique_server_name_scope == 'global':
instance_with_same_name = base_query.count()
else:
return
if instance_with_same_name > 0:
raise exception.InstanceExists(name=lowername)
def _handle_objects_related_type_conversions(values):
"""Make sure that certain things in values (which may have come from
an objects.instance.Instance object) are in suitable form for the
database.
"""
# NOTE(danms): Make sure IP addresses are passed as strings to
# the database engine
for key in ('access_ip_v4', 'access_ip_v6'):
if key in values and values[key] is not None:
values[key] = str(values[key])
datetime_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at')
convert_objects_related_datetimes(values, *datetime_keys)
def _check_instance_exists_in_project(context, instance_uuid):
if not model_query(context, models.Instance, read_deleted="no",
project_only=True).filter_by(
uuid=instance_uuid).first():
raise exception.InstanceNotFound(instance_id=instance_uuid)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_create(context, values):
"""Create an instance from the values dictionary.
:param context: Request context object
:param values: Dict containing column values.
"""
default_group = security_group_ensure_default(context)
values = values.copy()
values['metadata'] = _metadata_refs(
values.get('metadata'), models.InstanceMetadata)
values['system_metadata'] = _metadata_refs(
values.get('system_metadata'), models.InstanceSystemMetadata)
_handle_objects_related_type_conversions(values)
instance_ref = models.Instance()
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
instance_ref['info_cache'].update(info_cache)
security_groups = values.pop('security_groups', [])
instance_ref['extra'] = models.InstanceExtra()
instance_ref['extra'].update(
{'numa_topology': None,
'pci_requests': None,
'vcpu_model': None,
'trusted_certs': None,
'resources': None,
})
instance_ref['extra'].update(values.pop('extra', {}))
instance_ref.update(values)
# Gather the security groups for the instance
sg_models = []
if 'default' in security_groups:
sg_models.append(default_group)
# Generate a new list, so we don't modify the original
security_groups = [x for x in security_groups if x != 'default']
if security_groups:
sg_models.extend(_security_group_get_by_names(
context, security_groups))
if 'hostname' in values:
_validate_unique_server_name(context, values['hostname'])
instance_ref.security_groups = sg_models
context.session.add(instance_ref)
# create the instance uuid to ec2_id mapping entry for instance
ec2_instance_create(context, instance_ref['uuid'])
# Parity with the return value of instance_get_all_by_filters_sort()
# Obviously a newly-created instance record can't already have a fault
# record because of the FK constraint, so this is fine.
instance_ref.fault = None
return instance_ref
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_destroy(
context, instance_uuid, constraint=None, hard_delete=False,
):
"""Destroy the instance or raise if it does not exist.
:param context: request context object
:param instance_uuid: uuid of the instance to delete
:param constraint: a constraint object
:param hard_delete: when set to True, removes all records related to the
instance
"""
if uuidutils.is_uuid_like(instance_uuid):
instance_ref = _instance_get_by_uuid(context, instance_uuid)
else:
raise exception.InvalidUUID(uuid=instance_uuid)
query = model_query(context, models.Instance).\
filter_by(uuid=instance_uuid)
if constraint is not None:
query = constraint.apply(models.Instance, query)
# Either in hard or soft delete, we soft delete the instance first
# to make sure that the constraints were met.
count = query.soft_delete()
if count == 0:
# The failure to soft delete could be due to one of two things:
# 1) A racing request has deleted the instance out from under us
# 2) A constraint was not met
# Try to read the instance back once more and let it raise
# InstanceNotFound if 1) happened. This will give the caller an error
# that more accurately reflects the reason for the failure.
_instance_get_by_uuid(context, instance_uuid)
# Else, raise ConstraintNotMet if 2) happened.
raise exception.ConstraintNotMet()
models_to_delete = [
models.SecurityGroupInstanceAssociation, models.InstanceInfoCache,
models.InstanceMetadata, models.InstanceFault, models.InstanceExtra,
models.InstanceSystemMetadata, models.BlockDeviceMapping,
models.Migration, models.VirtualInterface
]
# For most referenced models we filter by the instance_uuid column, but for
# these models we filter by the uuid column.
filtered_by_uuid = [models.InstanceIdMapping]
for model in models_to_delete + filtered_by_uuid:
key = 'instance_uuid' if model not in filtered_by_uuid else 'uuid'
filter_ = {key: instance_uuid}
if hard_delete:
# We need to read any soft-deleted related records to make sure
# and clean those up as well otherwise we can fail with ForeignKey
# constraint errors when hard deleting the instance.
model_query(context, model, read_deleted='yes').filter_by(
**filter_).delete()
else:
model_query(context, model).filter_by(**filter_).soft_delete()
# NOTE(snikitin): We can't use model_query here, because there is no
# column 'deleted' in 'tags' or 'console_auth_tokens' tables.
context.session.query(models.Tag).filter_by(
resource_id=instance_uuid).delete()
context.session.query(models.ConsoleAuthToken).filter_by(
instance_uuid=instance_uuid).delete()
# NOTE(cfriesen): We intentionally do not soft-delete entries in the
# instance_actions or instance_actions_events tables because they
# can be used by operators to find out what actions were performed on a
# deleted instance. Both of these tables are special-cased in
# _archive_deleted_rows_for_table().
if hard_delete:
# NOTE(ttsiousts): In case of hard delete, we need to remove the
# instance actions too since instance_uuid is a foreign key and
# for this we need to delete the corresponding InstanceActionEvents
actions = context.session.query(models.InstanceAction).filter_by(
instance_uuid=instance_uuid).all()
for action in actions:
context.session.query(models.InstanceActionEvent).filter_by(
action_id=action.id).delete()
context.session.query(models.InstanceAction).filter_by(
instance_uuid=instance_uuid).delete()
# NOTE(ttsiouts): The instance is the last thing to be deleted in
# order to respect all constraints
context.session.query(models.Instance).filter_by(
uuid=instance_uuid).delete()
return instance_ref
@require_context
@pick_context_manager_reader_allow_async
def instance_get_by_uuid(context, uuid, columns_to_join=None):
"""Get an instance or raise if it does not exist."""
return _instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join)
def _instance_get_by_uuid(context, uuid, columns_to_join=None):
result = _build_instance_get(
context, columns_to_join=columns_to_join
).filter_by(uuid=uuid).first()
if not result:
raise exception.InstanceNotFound(instance_id=uuid)
return result
@require_context
@pick_context_manager_reader
def instance_get(context, instance_id, columns_to_join=None):
"""Get an instance or raise if it does not exist."""
try:
result = _build_instance_get(context, columns_to_join=columns_to_join
).filter_by(id=instance_id).first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
except db_exc.DBError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
LOG.warning("Invalid instance id %s in request", instance_id)
raise exception.InvalidID(id=instance_id)
def _build_instance_get(context, columns_to_join=None):
query = model_query(
context, models.Instance, project_only=True,
).options(
orm.joinedload(
models.Instance.security_groups
).joinedload(models.SecurityGroup.rules)
).options(orm.joinedload(models.Instance.info_cache))
if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join:
if column in ['info_cache', 'security_groups']:
# Already always joined above
continue
if 'extra.' in column:
column_ref = getattr(models.InstanceExtra, column.split('.')[1])
query = query.options(
orm.joinedload(models.Instance.extra).undefer(column_ref)
)
elif column in ['metadata', 'system_metadata']:
# NOTE(melwitt): We use subqueryload() instead of joinedload() for
# metadata and system_metadata because of the one-to-many
# relationship of the data. Directly joining these columns can
# result in a large number of additional rows being queried if an
# instance has a large number of (system_)metadata items, resulting
# in a large data transfer. Instead, the subqueryload() will
# perform additional queries to obtain metadata and system_metadata
# for the instance.
column_ref = getattr(models.Instance, column)
query = query.options(orm.subqueryload(column_ref))
else:
column_ref = getattr(models.Instance, column)
query = query.options(orm.joinedload(column_ref))
# NOTE(alaski) Stop lazy loading of columns not needed.
for column in ['metadata', 'system_metadata']:
if column not in columns_to_join:
column_ref = getattr(models.Instance, column)
query = query.options(orm.noload(column_ref))
# NOTE(melwitt): We need to use order_by(<unique column>) so that the
# additional queries emitted by subqueryload() include the same ordering as
# used by the parent query.
# https://docs.sqlalchemy.org/en/13/orm/loading_relationships.html#the-importance-of-ordering
return query.order_by(models.Instance.id)
def _instances_fill_metadata(context, instances, manual_joins=None):
"""Selectively fill instances with manually-joined metadata. Note that
instance will be converted to a dict.
:param context: security context
:param instances: list of instances to fill
:param manual_joins: list of tables to manually join (can be any
combination of 'metadata' and 'system_metadata' or
None to take the default of both)
"""
uuids = [inst['uuid'] for inst in instances]
if manual_joins is None:
manual_joins = ['metadata', 'system_metadata']
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
for row in _instance_metadata_get_multi(context, uuids):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
for row in _instance_system_metadata_get_multi(context, uuids):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
if 'pci_devices' in manual_joins:
for row in _instance_pcidevs_get_multi(context, uuids):
pcidevs[row['instance_uuid']].append(row)
if 'fault' in manual_joins:
faults = instance_fault_get_by_instance_uuids(context, uuids,
latest=True)
else:
faults = {}
filled_instances = []
for inst in instances:
inst = dict(inst)
inst['system_metadata'] = sys_meta[inst['uuid']]
inst['metadata'] = meta[inst['uuid']]
if 'pci_devices' in manual_joins:
inst['pci_devices'] = pcidevs[inst['uuid']]
inst_faults = faults.get(inst['uuid'])
inst['fault'] = inst_faults and inst_faults[0] or None
filled_instances.append(inst)
return filled_instances
def _manual_join_columns(columns_to_join):
"""Separate manually joined columns from columns_to_join
If columns_to_join contains 'metadata', 'system_metadata', 'fault', or
'pci_devices' those columns are removed from columns_to_join and added
to a manual_joins list to be used with the _instances_fill_metadata method.
    The columns_to_join formal parameter is copied and not modified; the
    returned tuple has the modified columns_to_join list to be used with
    joinedload in a model query.
    :param columns_to_join: List of columns to join in a model query.
    :return: tuple of (manual_joins, columns_to_join)
"""
manual_joins = []
columns_to_join_new = copy.copy(columns_to_join)
for column in ('metadata', 'system_metadata', 'pci_devices', 'fault'):
if column in columns_to_join_new:
columns_to_join_new.remove(column)
manual_joins.append(column)
return manual_joins, columns_to_join_new
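# NOTE: illustrative only. Given a caller's columns_to_join, the helper above
# separates the relationships that are loaded manually:
def _example_split_joins():
    manual, to_join = _manual_join_columns(
        ['metadata', 'system_metadata', 'info_cache', 'fault'])
    # manual == ['metadata', 'system_metadata', 'fault']
    # to_join == ['info_cache']
    return manual, to_join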
@require_context
@pick_context_manager_reader
def instance_get_all(context, columns_to_join=None):
"""Get all instances."""
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
query = model_query(context, models.Instance)
for column in columns_to_join_new:
column_ref = getattr(models.Instance, column)
query = query.options(orm.joinedload(column_ref))
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
instances = query.all()
return _instances_fill_metadata(context, instances, manual_joins)
@require_context
@pick_context_manager_reader_allow_async
def instance_get_all_by_filters(
context, filters, sort_key='created_at', sort_dir='desc', limit=None,
marker=None, columns_to_join=None,
):
"""Get all instances matching all filters sorted by the primary key.
See instance_get_all_by_filters_sort for more information.
"""
# Invoke the API with the multiple sort keys and directions using the
# single sort key/direction
return instance_get_all_by_filters_sort(context, filters, limit=limit,
marker=marker,
columns_to_join=columns_to_join,
sort_keys=[sort_key],
sort_dirs=[sort_dir])
def _get_query_nova_resource_by_changes_time(query, filters, model_object):
"""Filter resources by changes-since or changes-before.
Special keys are used to tweak the query further::
| 'changes-since' - only return resources updated after
| 'changes-before' - only return resources updated before
Return query results.
:param query: query to apply filters to.
:param filters: dictionary of filters with regex values.
:param model_object: object of the operation target.
"""
for change_filter in ['changes-since', 'changes-before']:
if filters and filters.get(change_filter):
changes_filter_time = timeutils.normalize_time(
filters.get(change_filter))
updated_at = getattr(model_object, 'updated_at')
if change_filter == 'changes-since':
query = query.filter(updated_at >= changes_filter_time)
else:
query = query.filter(updated_at <= changes_filter_time)
return query
@require_context
@pick_context_manager_reader_allow_async
def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
columns_to_join=None, sort_keys=None,
sort_dirs=None):
"""Get all instances that match all filters sorted by the given keys.
Deleted instances will be returned by default, unless there's a filter that
says otherwise.
Depending on the name of a filter, matching for that filter is
performed using either exact matching or as regular expression
    matching. Exact matching is applied for the following filters::
    | ['project_id', 'user_id', 'image_ref',
    |  'vm_state', 'instance_type_id', 'uuid',
    |  'metadata', 'host', 'task_state',
    |  'system_metadata', 'locked', 'hidden']
Hidden instances will *not* be returned by default, unless there's a
filter that says otherwise.
A third type of filter (also using exact matching), filters
based on instance metadata tags when supplied under a special
key named 'filter'::
| filters = {
| 'filter': [
| {'name': 'tag-key', 'value': '<metakey>'},
| {'name': 'tag-value', 'value': '<metaval>'},
| {'name': 'tag:<metakey>', 'value': '<metaval>'}
| ]
| }
Special keys are used to tweak the query further::
| 'changes-since' - only return instances updated after
| 'changes-before' - only return instances updated before
| 'deleted' - only return (or exclude) deleted instances
| 'soft_deleted' - modify behavior of 'deleted' to either
| include or exclude instances whose
| vm_state is SOFT_DELETED.
    A fourth type of filter (also using exact matching), filters
    based on instance tags (not metadata tags). There are four types
    of these tags:
    `tags` -- One or more strings that will be used to filter results
    in an AND expression: T1 AND T2
    `tags-any` -- One or more strings that will be used to filter results in
    an OR expression: T1 OR T2
    `not-tags` -- One or more strings that will be used to filter results in
    a NOT AND expression: NOT (T1 AND T2)
    `not-tags-any` -- One or more strings that will be used to filter results
    in a NOT OR expression: NOT (T1 OR T2)
    Tags should be represented as a list::
    | filters = {
    |     'tags': [some-tag, some-another-tag],
    |     'tags-any': [some-any-tag, some-another-any-tag],
    |     'not-tags': [some-not-tag, some-another-not-tag],
    |     'not-tags-any': [some-not-any-tag, some-another-not-any-tag]
    | }
"""
# NOTE(mriedem): If the limit is 0 there is no point in even going
# to the database since nothing is going to be returned anyway.
if limit == 0:
return []
sort_keys, sort_dirs = db_utils.process_sort_params(
sort_keys, sort_dirs, default_dir='desc')
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
query_prefix = context.session.query(models.Instance)
for column in columns_to_join_new:
if 'extra.' in column:
column_ref = getattr(models.InstanceExtra, column.split('.')[1])
query_prefix = query_prefix.options(
orm.joinedload(models.Instance.extra).undefer(column_ref)
)
else:
column_ref = getattr(models.Instance, column)
query_prefix = query_prefix.options(orm.joinedload(column_ref))
    # NOTE: order_by is done in oslo_db.sqlalchemy.utils.paginate_query();
    # no need to do it here as well.
# Make a copy of the filters dictionary to use going forward, as we'll
# be modifying it and we shouldn't affect the caller's use of it.
filters = copy.deepcopy(filters)
model_object = models.Instance
query_prefix = _get_query_nova_resource_by_changes_time(
query_prefix, filters, model_object,
)
if 'deleted' in filters:
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
deleted = filters.pop('deleted')
if deleted:
if filters.pop('soft_deleted', True):
delete = sql.or_(
models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED
)
query_prefix = query_prefix.filter(delete)
else:
query_prefix = query_prefix.\
filter(models.Instance.deleted == models.Instance.id)
else:
query_prefix = query_prefix.filter_by(deleted=0)
if not filters.pop('soft_deleted', False):
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
not_soft_deleted = sql.or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == sql.null()
)
query_prefix = query_prefix.filter(not_soft_deleted)
if 'cleaned' in filters:
cleaned = 1 if filters.pop('cleaned') else 0
query_prefix = query_prefix.filter(models.Instance.cleaned == cleaned)
if 'tags' in filters:
tags = filters.pop('tags')
# We build a JOIN ladder expression for each tag, JOIN'ing
# the first tag to the instances table, and each subsequent
# tag to the last JOIN'd tags table
first_tag = tags.pop(0)
query_prefix = query_prefix.join(models.Instance.tags)
query_prefix = query_prefix.filter(models.Tag.tag == first_tag)
for tag in tags:
tag_alias = orm.aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias,
models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag == tag)
if 'tags-any' in filters:
tags = filters.pop('tags-any')
tag_alias = orm.aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias, models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag.in_(tags))
if 'not-tags' in filters:
tags = filters.pop('not-tags')
first_tag = tags.pop(0)
subq = query_prefix.session.query(models.Tag.resource_id)
subq = subq.join(models.Instance.tags)
subq = subq.filter(models.Tag.tag == first_tag)
for tag in tags:
tag_alias = orm.aliased(models.Tag)
subq = subq.join(tag_alias, models.Instance.tags)
subq = subq.filter(tag_alias.tag == tag)
query_prefix = query_prefix.filter(~models.Instance.uuid.in_(subq))
if 'not-tags-any' in filters:
tags = filters.pop('not-tags-any')
query_prefix = query_prefix.filter(~models.Instance.tags.any(
models.Tag.tag.in_(tags)))
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
filters['project_id'] = context.project_id
else:
filters['user_id'] = context.user_id
if filters.pop('hidden', False):
query_prefix = query_prefix.filter(
models.Instance.hidden == sql.true())
else:
# If the query should not include hidden instances, then
# filter instances with hidden=False or hidden=NULL because
# older records may have no value set.
query_prefix = query_prefix.filter(sql.or_(
models.Instance.hidden == sql.false(),
models.Instance.hidden == sql.null()))
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'task_state',
'system_metadata', 'locked', 'hidden']
# Filter the query
query_prefix = _exact_instance_filter(query_prefix,
filters, exact_match_filter_names)
if query_prefix is None:
return []
query_prefix = _regex_instance_filter(query_prefix, filters)
# paginate query
if marker is not None:
try:
marker = _instance_get_by_uuid(
context.elevated(read_deleted='yes'), marker,
)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker=marker)
try:
query_prefix = sqlalchemyutils.paginate_query(
query_prefix,
models.Instance,
limit,
sort_keys,
marker=marker,
sort_dirs=sort_dirs,
)
except db_exc.InvalidSortKey:
raise exception.InvalidSortKey()
instances = query_prefix.all()
return _instances_fill_metadata(context, instances, manual_joins)
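# NOTE: a hedged example of the filter syntax documented above; the tag names
# and time window are placeholders.
def _example_filtered_instances(context):
    filters = {
        'deleted': False,
        'tags': ['prod', 'web'],  # AND-matched instance tags
        'changes-since': timeutils.utcnow() - datetime.timedelta(hours=1),
    }
    return instance_get_all_by_filters_sort(
        context, filters, limit=100,
        sort_keys=['created_at'], sort_dirs=['desc'])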
@require_context
@pick_context_manager_reader_allow_async
def instance_get_by_sort_filters(context, sort_keys, sort_dirs, values):
"""Get the UUID of the first instance in a sort order.
Attempt to get a single instance based on a combination of sort
keys, directions and filter values. This is used to try to find a
marker instance when we don't have a marker uuid.
:returns: The UUID of the instance that matched, if any.
"""
model = models.Instance
return _model_get_uuid_by_sort_filters(context, model, sort_keys,
sort_dirs, values)
def _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs,
values):
query = context.session.query(model.uuid)
# NOTE(danms): Below is a re-implementation of our
# oslo_db.sqlalchemy.utils.paginate_query() utility. We can't use that
# directly because it does not return the marker and we need it to.
# The below is basically the same algorithm, stripped down to just what
# we need, and augmented with the filter criteria required for us to
# get back the instance that would correspond to our query.
# This is our position in sort_keys,sort_dirs,values for the loop below
key_index = 0
# We build a list of criteria to apply to the query, which looks
# approximately like this (assuming all ascending):
#
# OR(row.key1 > val1,
# AND(row.key1 == val1, row.key2 > val2),
# AND(row.key1 == val1, row.key2 == val2, row.key3 >= val3),
# )
#
# The final key is compared with the "or equal" variant so that
# a complete match instance is still returned.
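    # For example (illustrative), with sort_keys=['created_at', 'uuid'],
    # both ascending, and values=[T, U], the filter built below is:
    #
    #   OR(created_at > T,
    #      AND(created_at == T, uuid >= U))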
criteria = []
for skey, sdir, val in zip(sort_keys, sort_dirs, values):
# Apply ordering to our query for the key, direction we're processing
if sdir == 'desc':
query = query.order_by(expression.desc(getattr(model, skey)))
else:
query = query.order_by(expression.asc(getattr(model, skey)))
# Build a list of equivalence requirements on keys we've already
# processed through the loop. In other words, if we're adding
# key2 > val2, make sure that key1 == val1
crit_attrs = []
for equal_attr in range(0, key_index):
crit_attrs.append(
(getattr(model, sort_keys[equal_attr]) == values[equal_attr]))
model_attr = getattr(model, skey)
if isinstance(model_attr.type, sa.Boolean):
model_attr = expression.cast(model_attr, sa.Integer)
val = int(val)
if skey == sort_keys[-1]:
# If we are the last key, then we should use or-equal to
# allow a complete match to be returned
if sdir == 'asc':
crit = (model_attr >= val)
else:
crit = (model_attr <= val)
else:
# If we're not the last key, then strict greater or less than
# so we order strictly.
if sdir == 'asc':
crit = (model_attr > val)
else:
crit = (model_attr < val)
# AND together all the above
crit_attrs.append(crit)
criteria.append(sql.and_(*crit_attrs))
key_index += 1
# OR together all the ANDs
query = query.filter(sql.or_(*criteria))
# We can't raise InstanceNotFound because we don't have a uuid to
# be looking for, so just return nothing if no match.
    result = query.first()
    if result:
        # We're querying for a single column, which means we get back a
        # tuple of one thing. Strip that out and just return the uuid
        # for our caller.
        return result[0]
    return None
def _db_connection_type(db_connection):
"""Returns a lowercase symbol for the db type.
This is useful when we need to change what we are doing per DB
(like handling regexes). In a CellsV2 world it probably needs to
do something better than use the database configuration string.
"""
db_string = db_connection.split(':')[0].split('+')[0]
return db_string.lower()
def _safe_regex_mysql(raw_string):
"""Make regex safe to mysql.
Certain items like '|' are interpreted raw by mysql REGEX. If you
search for a single | then you trigger an error because it's
expecting content on either side.
For consistency sake we escape all '|'. This does mean we wouldn't
support something like foo|bar to match completely different
things, however, one can argue putting such complicated regex into
name search probably means you are doing this wrong.
"""
return raw_string.replace('|', '\\|')
def _get_regexp_ops(connection):
"""Return safety filter and db opts for regex."""
regexp_op_map = {
'postgresql': '~',
'mysql': 'REGEXP',
'sqlite': 'REGEXP'
}
regex_safe_filters = {
'mysql': _safe_regex_mysql
}
db_type = _db_connection_type(connection)
return (regex_safe_filters.get(db_type, lambda x: x),
regexp_op_map.get(db_type, 'LIKE'))
def _regex_instance_filter(query, filters):
"""Applies regular expression filtering to an Instance query.
Returns the updated query.
:param query: query to apply filters to
:param filters: dictionary of filters with regex values
"""
model = models.Instance
safe_regex_filter, db_regexp_op = _get_regexp_ops(CONF.database.connection)
for filter_name in filters:
try:
column_attr = getattr(model, filter_name)
except AttributeError:
continue
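        # Skip non-column attributes such as plain Python properties;
        # they cannot be used in a filter expression.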
if 'property' == type(column_attr).__name__:
continue
filter_val = filters[filter_name]
# Sometimes the REGEX filter value is not a string
if not isinstance(filter_val, str):
filter_val = str(filter_val)
if db_regexp_op == 'LIKE':
query = query.filter(column_attr.op(db_regexp_op)(
u'%' + filter_val + u'%'))
else:
filter_val = safe_regex_filter(filter_val)
query = query.filter(column_attr.op(db_regexp_op)(
filter_val))
return query
def _exact_instance_filter(query, filters, legal_keys):
"""Applies exact match filtering to an Instance query.
Returns the updated query. Modifies filters argument to remove
filters consumed.
:param query: query to apply filters to
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:param legal_keys: list of keys to apply exact filtering to
"""
filter_dict = {}
model = models.Instance
# Walk through all the keys
for key in legal_keys:
# Skip ones we're not filtering on
if key not in filters:
continue
# OK, filtering on this key; what value do we search for?
value = filters.pop(key)
if key in ('metadata', 'system_metadata'):
column_attr = getattr(model, key)
if isinstance(value, list):
for item in value:
for k, v in item.items():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
else:
for k, v in value.items():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
elif isinstance(value, (list, tuple, set, frozenset)):
if not value:
return None # empty IN-predicate; short circuit
# Looking for values in a list; apply to query directly
column_attr = getattr(model, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter(*[getattr(models.Instance, k) == v
for k, v in filter_dict.items()])
return query
@require_context
@pick_context_manager_reader_allow_async
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None,
columns_to_join=None, limit=None,
marker=None):
"""Get instances and joins active during a certain time window.
Specifying a project_id will filter for a certain project.
Specifying a host will filter for instances on a given compute host.
"""
query = context.session.query(models.Instance)
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
for column in columns_to_join_new:
if 'extra.' in column:
column_ref = getattr(models.InstanceExtra, column.split('.')[1])
query = query.options(
orm.joinedload(models.Instance.extra).undefer(column_ref)
)
else:
column_ref = getattr(models.Instance, column)
query = query.options(orm.joinedload(column_ref))
query = query.filter(sql.or_(
models.Instance.terminated_at == sql.null(),
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
if host:
query = query.filter_by(host=host)
if marker is not None:
try:
marker = _instance_get_by_uuid(
context.elevated(read_deleted='yes'), marker)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker=marker)
query = sqlalchemyutils.paginate_query(
query, models.Instance, limit, ['project_id', 'uuid'], marker=marker,
)
instances = query.all()
return _instances_fill_metadata(context, instances, manual_joins)
def _instance_get_all_query(context, project_only=False, joins=None):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(
context,
models.Instance,
project_only=project_only,
)
for column in joins:
if 'extra.' in column:
column_ref = getattr(models.InstanceExtra, column.split('.')[1])
query = query.options(
orm.joinedload(models.Instance.extra).undefer(column_ref)
)
else:
column_ref = getattr(models.Instance, column)
query = query.options(orm.joinedload(column_ref))
return query
@pick_context_manager_reader_allow_async
def instance_get_all_by_host(context, host, columns_to_join=None):
"""Get all instances belonging to a host."""
query = _instance_get_all_query(context, joins=columns_to_join)
instances = query.filter_by(host=host).all()
return _instances_fill_metadata(
context,
instances,
manual_joins=columns_to_join,
)
def _instance_get_all_uuids_by_hosts(context, hosts):
itbl = models.Instance.__table__
default_deleted_value = itbl.c.deleted.default.arg
sel = sql.select(itbl.c.host, itbl.c.uuid)
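    # The expanding bindparam renders as an IN clause whose element list is
    # bound at execution time from the 'hosts' parameter passed to execute().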
sel = sel.where(sql.and_(
itbl.c.deleted == default_deleted_value,
itbl.c.host.in_(sa.bindparam('hosts', expanding=True))))
# group the instance UUIDs by hostname
res = collections.defaultdict(list)
for rec in context.session.execute(sel, {'hosts': hosts}).fetchall():
res[rec[0]].append(rec[1])
return res
@pick_context_manager_reader
def instance_get_all_uuids_by_hosts(context, hosts):
"""Get a dict, keyed by hostname, of a list of the instance UUIDs on the
host for each supplied hostname, not Instance model objects.
The dict is a defaultdict of list, thus inspecting the dict for a host not
in the dict will return an empty list not a KeyError.
"""
return _instance_get_all_uuids_by_hosts(context, hosts)
@pick_context_manager_reader
def instance_get_all_by_host_and_node(
context, host, node, columns_to_join=None,
):
"""Get all instances belonging to a node."""
if columns_to_join is None:
manual_joins = []
else:
candidates = ['system_metadata', 'metadata']
manual_joins = [x for x in columns_to_join if x in candidates]
columns_to_join = list(set(columns_to_join) - set(candidates))
instances = _instance_get_all_query(
context,
joins=columns_to_join,
).filter_by(host=host).filter_by(node=node).all()
return _instances_fill_metadata(
context,
instances,
manual_joins=manual_joins,
)
@pick_context_manager_reader
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
"""Get all instances belonging to a host with a different type_id."""
instances = _instance_get_all_query(context).filter_by(
host=host,
).filter(
models.Instance.instance_type_id != type_id
).all()
return _instances_fill_metadata(context, instances)
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
@pick_context_manager_reader
def instance_get_all_hung_in_rebooting(context, reboot_window):
"""Get all instances stuck in a rebooting state."""
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
# NOTE(danms): this is only used in the _poll_rebooting_instances()
# call in compute/manager, so we can avoid the metadata lookups
# explicitly
instances = model_query(context, models.Instance).filter(
models.Instance.updated_at <= reboot_window
).filter_by(task_state=task_states.REBOOTING).all()
return _instances_fill_metadata(
context,
instances,
manual_joins=[],
)
def _retry_instance_update():
"""Wrap with oslo_db_api.wrap_db_retry, and also retry on
UnknownInstanceUpdateConflict.
"""
    def exception_checker(exc):
        return isinstance(exc, exception.UnknownInstanceUpdateConflict)

    return oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
                                     exception_checker=exception_checker)
@require_context
@_retry_instance_update()
@pick_context_manager_writer
def instance_update(context, instance_uuid, values, expected=None):
"""Set the given properties on an instance and update it.
:raises: NotFound if instance does not exist.
"""
return _instance_update(context, instance_uuid, values, expected)
@require_context
@_retry_instance_update()
@pick_context_manager_writer
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None, expected=None):
"""Set the given properties on an instance and update it.
Return a shallow copy of the original instance reference, as well as the
updated one.
If "expected_task_state" exists in values, the update can only happen
when the task state before update matches expected_task_state. Otherwise
a UnexpectedTaskStateError is thrown.
:param context: request context object
:param instance_uuid: instance uuid
:param values: dict containing column values
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
:raises: NotFound if instance does not exist.
"""
instance_ref = _instance_get_by_uuid(context, instance_uuid,
columns_to_join=columns_to_join)
return (copy.copy(instance_ref), _instance_update(
context, instance_uuid, values, expected, original=instance_ref))
# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type, model,
metadata):
metadata = dict(metadata)
to_delete = []
for keyvalue in instance[metadata_type]:
key = keyvalue['key']
        if key in metadata:
            keyvalue['value'] = metadata.pop(key)
        else:
            to_delete.append(keyvalue)
# NOTE: we have to hard_delete here otherwise we will get more than one
# system_metadata record when we read deleted for an instance;
# regular metadata doesn't have the same problem because we don't
# allow reading deleted regular metadata anywhere.
if metadata_type == 'system_metadata':
for condemned in to_delete:
context.session.delete(condemned)
instance[metadata_type].remove(condemned)
else:
for condemned in to_delete:
condemned.soft_delete(context.session)
for key, value in metadata.items():
newitem = model()
newitem.update({'key': key, 'value': value,
'instance_uuid': instance['uuid']})
context.session.add(newitem)
instance[metadata_type].append(newitem)
def _instance_update(context, instance_uuid, values, expected, original=None):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
# NOTE(mdbooth): We pop values from this dict below, so we copy it here to
# ensure there are no side effects for the caller or if we retry the
# function due to a db conflict.
updates = copy.copy(values)
if expected is None:
expected = {}
else:
# Coerce all single values to singleton lists
expected = {k: [None] if v is None else sqlalchemyutils.to_list(v)
for (k, v) in expected.items()}
# Extract 'expected_' values from values dict, as these aren't actually
# updates
for field in ('task_state', 'vm_state'):
expected_field = 'expected_%s' % field
if expected_field in updates:
value = updates.pop(expected_field, None)
# Coerce all single values to singleton lists
if value is None:
expected[field] = [None]
else:
expected[field] = sqlalchemyutils.to_list(value)
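    # For example (illustrative), values={'expected_task_state': 'spawning',
    # 'display_name': 'vm-1'} leaves updates={'display_name': 'vm-1'} and
    # adds expected={'task_state': ['spawning']}.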
# Values which need to be updated separately
metadata = updates.pop('metadata', None)
system_metadata = updates.pop('system_metadata', None)
_handle_objects_related_type_conversions(updates)
# Hostname is potentially unique, but this is enforced in code rather
# than the DB. The query below races, but the number of users of
# osapi_compute_unique_server_name_scope is small, and a robust fix
# will be complex. This is intentionally left as is for the moment.
if 'hostname' in updates:
_validate_unique_server_name(context, updates['hostname'])
compare = models.Instance(uuid=instance_uuid, **expected)
try:
instance_ref = model_query(context, models.Instance,
project_only=True).\
update_on_match(compare, 'uuid', updates)
except update_match.NoRowsMatched:
        # Update failed. Try to find why and raise a specific error.
        #
        # We should get here only because our expected values were not current
        # when update_on_match executed. Having failed, we now have a hint that
        # the values are out of date and should check them.
        #
        # This code is made more complex because we are using repeatable reads.
        # If we have previously read the original instance in the current
        # transaction, reading it again will return the same data, even though
        # the above update failed because it has changed: it is not possible to
        # determine what has changed in this transaction. In this case we raise
        # UnknownInstanceUpdateConflict, which will cause the operation to be
        # retried in a new transaction.
        #
        # Because of the above, if we have previously read the instance in the
        # current transaction it will have been passed as 'original', and there
        # is no point refreshing it. If we have not previously read the
        # instance, we can fetch it here and we will get fresh data.
if original is None:
original = _instance_get_by_uuid(context, instance_uuid)
conflicts_expected = {}
conflicts_actual = {}
for (field, expected_values) in expected.items():
actual = original[field]
if actual not in expected_values:
conflicts_expected[field] = expected_values
conflicts_actual[field] = actual
# Exception properties
exc_props = {
'instance_uuid': instance_uuid,
'expected': conflicts_expected,
'actual': conflicts_actual
}
# There was a conflict, but something (probably the MySQL read view,
# but possibly an exceptionally unlikely second race) is preventing us
# from seeing what it is. When we go round again we'll get a fresh
# transaction and a fresh read view.
if len(conflicts_actual) == 0:
raise exception.UnknownInstanceUpdateConflict(**exc_props)
# Task state gets special handling for convenience. We raise the
# specific error UnexpectedDeletingTaskStateError or
# UnexpectedTaskStateError as appropriate
if 'task_state' in conflicts_actual:
conflict_task_state = conflicts_actual['task_state']
if conflict_task_state == task_states.DELETING:
exc = exception.UnexpectedDeletingTaskStateError
else:
exc = exception.UnexpectedTaskStateError
# Everything else is an InstanceUpdateConflict
else:
exc = exception.InstanceUpdateConflict
raise exc(**exc_props)
if metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'metadata',
models.InstanceMetadata,
metadata)
if system_metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'system_metadata',
models.InstanceSystemMetadata,
system_metadata)
return instance_ref
@pick_context_manager_writer
def instance_add_security_group(context, instance_uuid, security_group_id):
"""Associate the given security group with the given instance."""
sec_group_ref = models.SecurityGroupInstanceAssociation()
sec_group_ref.update({'instance_uuid': instance_uuid,
'security_group_id': security_group_id})
sec_group_ref.save(context.session)
@require_context
@pick_context_manager_writer
def instance_remove_security_group(context, instance_uuid, security_group_id):
"""Disassociate the given security group from the given instance."""
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
filter_by(security_group_id=security_group_id).\
soft_delete()
###################
@require_context
@pick_context_manager_reader
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
"""
return model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
convert_objects_related_datetimes(values)
info_cache = model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
needs_create = False
if info_cache and info_cache['deleted']:
raise exception.InstanceInfoCacheNotFound(
instance_uuid=instance_uuid)
elif not info_cache:
# NOTE(tr3buchet): just in case someone blows away an instance's
# cache entry, re-create it.
values['instance_uuid'] = instance_uuid
info_cache = models.InstanceInfoCache(**values)
needs_create = True
try:
with get_context_manager(context).writer.savepoint.using(context):
if needs_create:
info_cache.save(context.session)
else:
info_cache.update(values)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to
# recreate the instance cache entry at the same time. First one
# wins.
pass
return info_cache
@require_context
@pick_context_manager_writer
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
"""
model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
###################
def _instance_extra_create(context, values):
inst_extra_ref = models.InstanceExtra()
inst_extra_ref.update(values)
inst_extra_ref.save(context.session)
return inst_extra_ref
@pick_context_manager_writer
def instance_extra_update_by_uuid(context, instance_uuid, updates):
"""Update the instance extra record by instance uuid
:param instance_uuid: UUID of the instance tied to the record
:param updates: A dict of updates to apply
"""
rows_updated = model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=instance_uuid).\
update(updates)
if not rows_updated:
LOG.debug("Created instance_extra for %s", instance_uuid)
create_values = copy.copy(updates)
create_values["instance_uuid"] = instance_uuid
_instance_extra_create(context, create_values)
rows_updated = 1
return rows_updated
@pick_context_manager_reader
def instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=None,
):
"""Get the instance extra record
:param instance_uuid: UUID of the instance tied to the topology record
:param columns: A list of the columns to load, or None for 'all of them'
"""
query = model_query(context, models.InstanceExtra).filter_by(
instance_uuid=instance_uuid,
)
if columns is None:
columns = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model',
'trusted_certs', 'resources', 'migration_context']
for column in columns:
column_ref = getattr(models.InstanceExtra, column)
query = query.options(orm.undefer(column_ref))
instance_extra = query.first()
return instance_extra
###################
@require_context
@pick_context_manager_reader
def quota_get(context, project_id, resource, user_id=None):
"""Retrieve a quota or raise if it does not exist."""
model = models.ProjectUserQuota if user_id else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
query = query.filter_by(user_id=user_id)
result = query.first()
if not result:
if user_id:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
@pick_context_manager_reader
def quota_get_all_by_project_and_user(context, project_id, user_id):
"""Retrieve all quotas associated with a given project and user."""
user_quotas = model_query(context, models.ProjectUserQuota,
(models.ProjectUserQuota.resource,
models.ProjectUserQuota.hard_limit)).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
all()
result = {'project_id': project_id, 'user_id': user_id}
for user_quota in user_quotas:
result[user_quota.resource] = user_quota.hard_limit
return result
@require_context
@pick_context_manager_reader
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
@pick_context_manager_reader
def quota_get_all(context, project_id):
"""Retrieve all user quotas associated with a given project."""
result = model_query(context, models.ProjectUserQuota).\
filter_by(project_id=project_id).\
all()
return result
def quota_get_per_project_resources():
"""Retrieve the names of resources whose quotas are calculated on a
per-project rather than a per-user basis.
"""
return PER_PROJECT_QUOTAS
@pick_context_manager_writer
def quota_create(context, project_id, resource, limit, user_id=None):
"""Create a quota for the given project and resource."""
per_user = user_id and resource not in PER_PROJECT_QUOTAS
quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
if per_user:
quota_ref.user_id = user_id
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
try:
quota_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.QuotaExists(project_id=project_id, resource=resource)
return quota_ref
@pick_context_manager_writer
def quota_update(context, project_id, resource, limit, user_id=None):
"""Update a quota or raise if it does not exist."""
per_user = user_id and resource not in PER_PROJECT_QUOTAS
model = models.ProjectUserQuota if per_user else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if per_user:
query = query.filter_by(user_id=user_id)
result = query.update({'hard_limit': limit})
if not result:
if per_user:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
@pick_context_manager_reader
def quota_class_get(context, class_name, resource):
"""Retrieve a quota class or raise if it does not exist."""
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
@pick_context_manager_reader
def quota_class_get_default(context):
"""Retrieve all default quotas."""
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).\
all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
@pick_context_manager_reader
def quota_class_get_all_by_name(context, class_name):
"""Retrieve all quotas associated with a given quota class."""
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@pick_context_manager_writer
def quota_class_create(context, class_name, resource, limit):
"""Create a quota class for the given name and resource.