Fetch specific columns rather than full ORM entities

While analysing a neutron process function call trace, Michael Bayer
suggested running queries against specific columns rather than full
ORM entities: this reduces load at both the DB level and the Python
level, since plain column rows are much faster to fetch than mapped
ORM objects. This patch applies that approach to the simpler queries
to improve neutron performance.
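
As a minimal, self-contained sketch of the pattern (illustrative only,
not code from this patch; the Port model below is hypothetical):

    # Querying the full entity builds identity-tracked ORM objects;
    # querying a single column returns plain keyed row tuples instead.
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class Port(Base):
        __tablename__ = 'ports'
        id = Column(Integer, primary_key=True)
        device_id = Column(String(36))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)

    with Session(engine) as session:
        session.add(Port(device_id='router-1'))
        session.commit()

        ports = session.query(Port).all()            # heavy: ORM entities
        rows = session.query(Port.device_id).all()   # light: row tuples
        device_ids = [row.device_id for row in rows]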
Co-Authored-By: Joe Talerico <jtaleric@redhat.com>
Change-Id: I6a41e9487a4427f876442bbeeae61974e892225e
(cherry picked from commit 72ef0e7814)
@@ -589,16 +589,16 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
         with db_api.context_manager.reader.using(context):
             # TODO(electrocucaracha): Look a solution for Join in OVO
             ipal = models_v2.IPAllocation
-            alloc_qry = context.session.query(ipal)
+            alloc_qry = context.session.query(ipal.port_id)
             alloc_qry = alloc_qry.join("port", "routerport")
             gateway_ip = str(cur_subnet['gateway_ip'])
             allocated = alloc_qry.filter(
                 ipal.ip_address == gateway_ip,
                 ipal.subnet_id == cur_subnet['id']).first()
-            if allocated and allocated['port_id']:
+            if allocated and allocated.port_id:
                 raise n_exc.GatewayIpInUse(
                     ip_address=gateway_ip,
-                    port_id=allocated['port_id'])
+                    port_id=allocated.port_id)
 
         if validators.is_attr_set(s.get('dns_nameservers')):
             if len(s['dns_nameservers']) > cfg.CONF.max_dns_nameservers:
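The switch from allocated['port_id'] to allocated.port_id in this hunk
follows from the query change: once only ipal.port_id is selected,
.first() returns a lightweight keyed row rather than an IPAllocation
object, and its columns are read by attribute (the dict-style subscript
is supported by neutron's ORM model base, not by plain rows). A minimal
sketch of the access pattern, reusing the hypothetical Port model above:

    # Single-column .first() yields a keyed row tuple, or None.
    row = session.query(Port.device_id).first()
    if row and row.device_id:
        print(row.device_id)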
@@ -127,10 +127,9 @@ class External_net_db_mixin(object):
             # must make sure we do not have any external gateway ports
             # (and thus, possible floating IPs) on this network before
             # allow it to be update to external=False
-            port = context.session.query(models_v2.Port).filter_by(
-                device_owner=DEVICE_OWNER_ROUTER_GW,
-                network_id=net_data['id']).first()
-            if port:
+            if context.session.query(models_v2.Port.id).filter_by(
+                    device_owner=constants.DEVICE_OWNER_ROUTER_GW,
+                    network_id=net_data['id']).first():
                 raise extnet_exc.ExternalNetworkInUse(net_id=net_id)
 
             net_obj.ExternalNetwork.delete_objects(
@@ -175,12 +174,11 @@ class External_net_db_mixin(object):
         if (object_type != 'network' or
                 policy['action'] != 'access_as_external'):
             return
-        net_as_external = context.session.query(rbac_db.NetworkRBAC).filter(
-            rbac_db.NetworkRBAC.object_id == policy['object_id'],
-            rbac_db.NetworkRBAC.action == 'access_as_external').count()
         # If the network still have rbac policies, we should not
         # update external attribute.
-        if net_as_external:
+        if context.session.query(rbac_db.NetworkRBAC.object_id).filter(
+                rbac_db.NetworkRBAC.object_id == policy['object_id'],
+                rbac_db.NetworkRBAC.action == 'access_as_external').count():
             return
         net = self.get_network(context, policy['object_id'])
         self._process_l3_update(context, net,
@@ -211,7 +209,7 @@ class External_net_db_mixin(object):
             l3_models.Router.tenant_id == policy['target_tenant'])
         # if there is a wildcard entry we can safely proceed without the
         # router lookup because they will have access either way
-        if context.session.query(rbac_db.NetworkRBAC).filter(
+        if context.session.query(rbac_db.NetworkRBAC.object_id).filter(
                 rbac.object_id == policy['object_id'],
                 rbac.action == 'access_as_external',
                 rbac.target_tenant == '*').count():
@@ -1873,12 +1873,12 @@ class L3RpcNotifierMixin(object):
             return
         network_id = updated['network_id']
         subnet_id = updated['id']
-        query = context.session.query(models_v2.Port).filter_by(
+        query = context.session.query(models_v2.Port.device_id).filter_by(
             network_id=network_id,
             device_owner=DEVICE_OWNER_ROUTER_GW)
         query = query.join(models_v2.Port.fixed_ips).filter(
             models_v2.IPAllocation.subnet_id == subnet_id)
-        router_ids = set(port['device_id'] for port in query)
+        router_ids = set(port.device_id for port in query)
         for router_id in router_ids:
             l3plugin.notify_router_updated(context, router_id)
 
@@ -87,7 +87,7 @@ class PortBindingMixin(portbindings_base.PortBindingBaseMixin):
     def get_port_host(self, context, port_id):
         with db_api.context_manager.reader.using(context):
             bind_port = (
-                context.session.query(pmodels.PortBindingPort).
+                context.session.query(pmodels.PortBindingPort.host).
                 filter_by(port_id=port_id).
                 first()
             )
@@ -165,7 +165,8 @@ def _get_standard_attr_id(context, object_id, object_type):
                 "adding provisioning blocks for a new resource "
                 "you must call add_model_for_resource during "
                 "initialization for your type." % object_type)
-    obj = (context.session.query(model).enable_eagerloads(False).
+    obj = (context.session.query(model.standard_attr_id).
+           enable_eagerloads(False).
            filter_by(id=object_id).first())
     if not obj:
         # concurrent delete
@@ -141,7 +141,7 @@ class RbacPluginMixin(common_db_mixin.CommonDbMixin):
         if entry_id in self.object_type_cache:
             return self.object_type_cache[entry_id]
         for otype, model in models.get_type_model_map().items():
-            if (context.session.query(model).
+            if (context.session.query(model.id).
                     filter(model.id == entry_id).first()):
                 self.object_type_cache[entry_id] = otype
                 return otype
@@ -75,7 +75,7 @@ class SubnetAllocator(driver.Pool):
 
     def _get_allocated_cidrs(self):
         with db_api.context_manager.reader.using(self._context):
-            query = self._context.session.query(models_v2.Subnet)
+            query = self._context.session.query(models_v2.Subnet.cidr)
             subnets = query.filter_by(subnetpool_id=self._subnetpool['id'])
             return (x.cidr for x in subnets)
 
@@ -97,7 +97,7 @@ class SubnetAllocator(driver.Pool):
         subnetpool_id = self._subnetpool['id']
         tenant_id = self._subnetpool['tenant_id']
         with db_api.context_manager.reader.using(self._context):
-            qry = self._context.session.query(models_v2.Subnet)
+            qry = self._context.session.query(models_v2.Subnet.cidr)
             allocations = qry.filter_by(subnetpool_id=subnetpool_id,
                                         tenant_id=tenant_id)
             value = 0
@@ -214,9 +214,10 @@ def make_port_dict_with_security_groups(port, sec_groups):
 def get_port_binding_host(context, port_id):
     try:
         with db_api.context_manager.reader.using(context):
-            query = (context.session.query(models.PortBinding).
-                     filter(models.PortBinding.port_id.startswith(port_id)).
-                     one())
+            query = (context.session.query(models.PortBinding.host).
+                     filter(models.PortBinding.port_id.startswith(port_id)))
+            query = query.filter(
+                models.PortBinding.status == n_const.ACTIVE).one()
     except exc.NoResultFound:
         LOG.debug("No binding found for port %(port_id)s",
                   {'port_id': port_id})
@@ -232,7 +233,7 @@ def get_port_binding_host(context, port_id):
 def generate_distributed_port_status(context, port_id):
     # an OR'ed value of status assigned to parent port from the
     # distributedportbinding bucket
-    query = context.session.query(models.DistributedPortBinding)
+    query = context.session.query(models.DistributedPortBinding.status)
     final_status = n_const.PORT_STATUS_BUILD
     for bind in query.filter(models.DistributedPortBinding.port_id == port_id):
         if bind.status == n_const.PORT_STATUS_ACTIVE:
@@ -307,7 +308,7 @@ def get_port_db_objects(context, port_ids):
 def is_dhcp_active_on_any_subnet(context, subnet_ids):
     if not subnet_ids:
         return False
-    return bool(context.session.query(models_v2.Subnet).
+    return bool(context.session.query(models_v2.Subnet.id).
                 enable_eagerloads(False).filter_by(enable_dhcp=True).
                 filter(models_v2.Subnet.id.in_(subnet_ids)).count())
 
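Several of these hunks share the same shape: rather than materializing a
full entity just to test for existence or to count rows, they select a
single column and finish with .first() or .count(). A minimal sketch of
that shape, again with the hypothetical Port model from the commit
message:

    # Existence check: at most one single-column row is fetched.
    exists = session.query(Port.id).filter_by(device_id='router-1').first()
    # Count: computed in the database, no rows materialized in Python.
    n = session.query(Port.id).filter_by(device_id='router-1').count()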
@@ -322,7 +323,7 @@ def _prevent_segment_delete_with_port_bound(resource, event, trigger,
 
     with db_api.context_manager.reader.using(context):
         segment_id = segment['id']
-        query = context.session.query(models_v2.Port)
+        query = context.session.query(models_v2.Port.id)
        query = query.join(
             models.PortBindingLevel,
             models.PortBindingLevel.port_id == models_v2.Port.id)
@@ -240,7 +240,8 @@ class TrackedResource(BaseResource):
         LOG.debug(("Synchronizing usage tracker for tenant:%(tenant_id)s on "
                    "resource:%(resource)s"),
                   {'tenant_id': tenant_id, 'resource': self.name})
-        in_use = context.session.query(self._model_class).filter_by(
+        in_use = context.session.query(
+            self._model_class.tenant_id).filter_by(
             tenant_id=tenant_id).count()
         # Update quota usage
         return self._resync(context, tenant_id, in_use)
@@ -269,7 +270,8 @@ class TrackedResource(BaseResource):
             "%(tenant_id)s is out of sync, need to count used "
             "quota"), {'resource': self.name,
                        'tenant_id': tenant_id})
-        in_use = context.session.query(self._model_class).filter_by(
+        in_use = context.session.query(
+            self._model_class.tenant_id).filter_by(
             tenant_id=tenant_id).count()
 
         # Update quota usage, if requested (by default do not do that, as
@@ -55,7 +55,7 @@ class L3Scheduler(object):
     def _router_has_binding(self, context, router_id, l3_agent_id):
         router_binding_model = rb_model.RouterL3AgentBinding
 
-        query = context.session.query(router_binding_model)
+        query = context.session.query(router_binding_model.router_id)
         query = query.filter(router_binding_model.router_id == router_id,
                              router_binding_model.l3_agent_id == l3_agent_id)
 