Since 2013 there had been an extra variable for the so-called "missing" quotas. This patch merges them into the normal Nova quotas, cleans up the tests, removes all usage of that special variable and, as a side effect, adds the missing keys to the project quota update page.

Change-Id: Icaac1ebb1749e13fdfc307e89dcc6bbd64bef922
Closes-Bug: 1655393
454 lines | 17 KiB | Python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from collections import defaultdict
import itertools
import logging

from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon.utils.memoized import memoized  # noqa

from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import network
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.contrib.developer.profiler import api as profiler


LOG = logging.getLogger(__name__)

NOVA_QUOTA_FIELDS = ("metadata_items",
                     "cores",
                     "instances",
                     "injected_files",
                     "injected_file_content_bytes",
                     "ram",
                     "floating_ips",
                     "fixed_ips",
                     "security_groups",
                     "security_group_rules",
                     "key_pairs",
                     "injected_file_path_bytes",
                     )

CINDER_QUOTA_FIELDS = ("volumes",
                       "snapshots",
                       "gigabytes",)

NEUTRON_QUOTA_FIELDS = ("network",
                        "subnet",
                        "port",
                        "router",
                        "floatingip",
                        "security_group",
                        "security_group_rule",
                        )

QUOTA_FIELDS = NOVA_QUOTA_FIELDS + CINDER_QUOTA_FIELDS + NEUTRON_QUOTA_FIELDS

QUOTA_NAMES = {
    "metadata_items": _('Metadata Items'),
    "cores": _('VCPUs'),
    "instances": _('Instances'),
    "injected_files": _('Injected Files'),
    "injected_file_content_bytes": _('Injected File Content Bytes'),
    "ram": _('RAM (MB)'),
    "floating_ips": _('Floating IPs'),
    "fixed_ips": _('Fixed IPs'),
    "security_groups": _('Security Groups'),
    "security_group_rules": _('Security Group Rules'),
    "key_pairs": _('Key Pairs'),
    "injected_file_path_bytes": _('Injected File Path Bytes'),
    "volumes": _('Volumes'),
    "snapshots": _('Volume Snapshots'),
    "gigabytes": _('Total Size of Volumes and Snapshots (GB)'),
    "network": _("Networks"),
    "subnet": _("Subnets"),
    "port": _("Ports"),
    "router": _("Routers"),
    "floatingip": _('Floating IPs'),
    "security_group": _("Security Groups"),
    "security_group_rule": _("Security Group Rules"),
}

class QuotaUsage(dict):
    """Tracks quota limit, used, and available for a given set of quotas."""

    def __init__(self):
        self.usages = defaultdict(dict)

    def __contains__(self, key):
        return key in self.usages

    def __getitem__(self, key):
        return self.usages[key]

    def __setitem__(self, key, value):
        raise NotImplementedError("Directly setting QuotaUsage values is not "
                                  "supported. Please use the add_quota and "
                                  "tally methods.")

    def __repr__(self):
        return repr(dict(self.usages))

    def get(self, key, default=None):
        return self.usages.get(key, default)

    def add_quota(self, quota):
        """Adds an internal tracking reference for the given quota."""
        if quota.limit is None or quota.limit == -1:
            # Handle "unlimited" quotas.
            self.usages[quota.name]['quota'] = float("inf")
            self.usages[quota.name]['available'] = float("inf")
        else:
            self.usages[quota.name]['quota'] = int(quota.limit)

    def tally(self, name, value):
        """Adds to the "used" metric for the given quota."""
        value = value or 0  # Protection against None.
        # Start at 0 if this is the first value.
        if 'used' not in self.usages[name]:
            self.usages[name]['used'] = 0
        # Increment our usage and update the "available" metric.
        self.usages[name]['used'] += int(value)  # Fail if can't coerce to int.
        self.update_available(name)

    def update_available(self, name):
        """Updates the "available" metric for the given quota."""
        quota = self.usages.get(name, {}).get('quota', float('inf'))
        available = quota - self.usages[name]['used']
        if available < 0:
            available = 0
        self.usages[name]['available'] = available
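
# Illustrative sketch (not executed): how QuotaUsage accumulates values.
# The quota object is assumed to expose ``name`` and ``limit`` attributes,
# as the items of a ``base.QuotaSet`` do:
#
#     usage = QuotaUsage()
#     usage.add_quota(quota)     # e.g. quota.name='cores', quota.limit=20
#     usage.tally('cores', 4)    # used=4, available=16
#     usage.tally('cores', 2)    # used=6, available=14
#     usage['cores']             # {'quota': 20, 'used': 6, 'available': 14}
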
def _get_quota_data(request, tenant_mode=True, disabled_quotas=None,
                    tenant_id=None):
    quotasets = []
    if not tenant_id:
        tenant_id = request.user.tenant_id
    if disabled_quotas is None:
        disabled_quotas = get_disabled_quotas(request)

    qs = base.QuotaSet()

    if 'instances' not in disabled_quotas:
        if tenant_mode:
            quotasets.append(nova.tenant_quota_get(request, tenant_id))
        else:
            quotasets.append(nova.default_quota_get(request, tenant_id))

    if 'volumes' not in disabled_quotas:
        try:
            if tenant_mode:
                quotasets.append(cinder.tenant_quota_get(request, tenant_id))
            else:
                quotasets.append(cinder.default_quota_get(request, tenant_id))
        except cinder.cinder_exception.ClientException:
            disabled_quotas.update(CINDER_QUOTA_FIELDS)
            msg = _("Unable to retrieve volume limit information.")
            exceptions.handle(request, msg)

    for quota in itertools.chain(*quotasets):
        if quota.name not in disabled_quotas:
            qs[quota.name] = quota.limit
    return qs
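
# Illustrative sketch (not executed): the returned ``base.QuotaSet`` holds
# one entry per enabled quota; for a project with default limits it would
# contain items such as ``instances=10`` or ``cores=20`` (the exact values
# depend on the deployment).
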
@profiler.trace
def get_default_quota_data(request, disabled_quotas=None, tenant_id=None):
    return _get_quota_data(request,
                           tenant_mode=False,
                           disabled_quotas=disabled_quotas,
                           tenant_id=tenant_id)

@profiler.trace
def get_tenant_quota_data(request, disabled_quotas=None, tenant_id=None):
    qs = _get_quota_data(request,
                         tenant_mode=True,
                         disabled_quotas=disabled_quotas,
                         tenant_id=tenant_id)

    # TODO(jpichon): There is no API to get the default system quotas
    # in Neutron (cf. LP#1204956), so for now handle tenant quotas here.
    # This should be handled in _get_quota_data() eventually.
    if not disabled_quotas:
        return qs

    # Check if Neutron is enabled by looking for 'network'.
    if 'network' not in disabled_quotas:
        tenant_id = tenant_id or request.user.tenant_id
        neutron_quotas = neutron.tenant_quota_get(request, tenant_id)

    if 'floating_ips' in disabled_quotas:
        # Neutron with quota extension disabled
        if 'floatingip' in disabled_quotas:
            qs.add(base.QuotaSet({'floating_ips': -1}))
        # Neutron with quota extension enabled
        else:
            # Rename floatingip to floating_ips since that's how it's
            # expected in some places (e.g. Security & Access' Floating IPs)
            fips_quota = neutron_quotas.get('floatingip').limit
            qs.add(base.QuotaSet({'floating_ips': fips_quota}))

    if 'security_groups' in disabled_quotas:
        # Neutron with quota extension disabled
        if 'security_group' in disabled_quotas:
            qs.add(base.QuotaSet({'security_groups': -1}))
        # Neutron with quota extension enabled
        else:
            # Rename security_group to security_groups since that's how it's
            # expected in some places (e.g. Security & Access' Security Groups)
            sec_quota = neutron_quotas.get('security_group').limit
            qs.add(base.QuotaSet({'security_groups': sec_quota}))

    if 'network' in disabled_quotas:
        for item in qs.items:
            if item.name == 'networks':
                qs.items.remove(item)
                break
    else:
        net_quota = neutron_quotas.get('network').limit
        qs.add(base.QuotaSet({'networks': net_quota}))

    if 'subnet' in disabled_quotas:
        for item in qs.items:
            if item.name == 'subnets':
                qs.items.remove(item)
                break
    else:
        subnet_quota = neutron_quotas.get('subnet').limit
        qs.add(base.QuotaSet({'subnets': subnet_quota}))

    if 'router' in disabled_quotas:
        for item in qs.items:
            if item.name == 'routers':
                qs.items.remove(item)
                break
    else:
        router_quota = neutron_quotas.get('router').limit
        qs.add(base.QuotaSet({'routers': router_quota}))

    return qs
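
# Illustrative sketch (not executed): with Neutron and its quota extension
# enabled, the Neutron limits end up in the set under their Nova-style keys:
#
#     qs = get_tenant_quota_data(request)
#     qs.get('floating_ips').limit     # from Neutron's 'floatingip' quota
#     qs.get('security_groups').limit  # from Neutron's 'security_group' quota
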
@profiler.trace
def get_disabled_quotas(request):
    disabled_quotas = set()

    # Cinder
    if not cinder.is_volume_service_enabled(request):
        disabled_quotas.update(CINDER_QUOTA_FIELDS)

    # Neutron
    if not base.is_service_enabled(request, 'network'):
        disabled_quotas.update(NEUTRON_QUOTA_FIELDS)
    else:
        # Remove the nova network quotas
        disabled_quotas.update(['floating_ips', 'fixed_ips'])

        if neutron.is_extension_supported(request, 'security-group'):
            # If Neutron security group is supported, disable Nova quotas
            disabled_quotas.update(['security_groups',
                                    'security_group_rules'])
        else:
            # If Nova security group is used, disable Neutron quotas
            disabled_quotas.update(['security_group', 'security_group_rule'])

        if not neutron.is_router_enabled(request):
            disabled_quotas.update(['router', 'floatingip'])

        try:
            if not neutron.is_quotas_extension_supported(request):
                disabled_quotas.update(NEUTRON_QUOTA_FIELDS)
        except Exception:
            LOG.exception("There was an error checking if the Neutron "
                          "quotas extension is enabled.")

    # Nova
    if not (base.is_service_enabled(request, 'compute') and
            nova.can_set_quotas()):
        disabled_quotas.update(NOVA_QUOTA_FIELDS)

    # There appear to be no glance quota fields currently
    return disabled_quotas
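
# Illustrative sketch (not executed): a deployment running Neutron (with the
# security-group and quotas extensions) but no Cinder would typically yield
# something like:
#
#     get_disabled_quotas(request)
#     # {'volumes', 'snapshots', 'gigabytes',        # no Cinder
#     #  'floating_ips', 'fixed_ips',                # nova-network quotas
#     #  'security_groups', 'security_group_rules'}  # Nova secgroup quotas
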
@profiler.trace
def _get_tenant_compute_usages(request, usages, disabled_quotas, tenant_id):
    # Unlike the other services, nova may be enabled but not support quotas,
    # in which case we still want usage info, so don't rely on
    # '"instances" in disabled_quotas' as elsewhere.
    if not base.is_service_enabled(request, 'compute'):
        return

    if tenant_id:
        instances, has_more = nova.server_list(
            request, search_opts={'tenant_id': tenant_id})
    else:
        instances, has_more = nova.server_list(request)

    # Fetch deleted flavors if necessary.
    flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
    missing_flavors = [instance.flavor['id'] for instance in instances
                       if instance.flavor['id'] not in flavors]
    for missing in missing_flavors:
        if missing not in flavors:
            try:
                flavors[missing] = nova.flavor_get(request, missing)
            except Exception:
                flavors[missing] = {}
                exceptions.handle(request, ignore=True)

    usages.tally('instances', len(instances))

    # Sum our usage based on the flavors of the instances.
    for flavor in [flavors[instance.flavor['id']] for instance in instances]:
        usages.tally('cores', getattr(flavor, 'vcpus', None))
        usages.tally('ram', getattr(flavor, 'ram', None))

    # Initialize the tally if no instances have been launched yet.
    if len(instances) == 0:
        usages.tally('cores', 0)
        usages.tally('ram', 0)

@profiler.trace
def _get_tenant_network_usages(request, usages, disabled_quotas, tenant_id):
    floating_ips = []
    try:
        if network.floating_ip_supported(request):
            floating_ips = network.tenant_floating_ip_list(request)
    except Exception:
        pass
    usages.tally('floating_ips', len(floating_ips))

    if 'security_group' not in disabled_quotas:
        security_groups = network.security_group_list(request)
        usages.tally('security_groups', len(security_groups))

    if 'network' not in disabled_quotas:
        networks = neutron.network_list(request, shared=False)
        if tenant_id:
            networks = [net for net in networks if net.tenant_id == tenant_id]
        usages.tally('networks', len(networks))
        # Add shared networks; tally() adds to the running total.
        shared_networks = neutron.network_list(request, shared=True)
        if tenant_id:
            shared_networks = [net for net in shared_networks
                               if net.tenant_id == tenant_id]
        usages.tally('networks', len(shared_networks))

    if 'subnet' not in disabled_quotas:
        subnets = neutron.subnet_list(request, shared=False)
        if tenant_id:
            subnets = [sub for sub in subnets if sub.tenant_id == tenant_id]
        # Shared subnets are counted in the same tally.
        shared_subnets = neutron.subnet_list(request, shared=True)
        if tenant_id:
            shared_subnets = [subnet for subnet in shared_subnets
                              if subnet.tenant_id == tenant_id]
        usages.tally('subnets', len(subnets) + len(shared_subnets))

    if 'router' not in disabled_quotas:
        routers = neutron.router_list(request)
        if tenant_id:
            routers = [rou for rou in routers if rou.tenant_id == tenant_id]
        usages.tally('routers', len(routers))

@profiler.trace
def _get_tenant_volume_usages(request, usages, disabled_quotas, tenant_id):
    if 'volumes' not in disabled_quotas:
        try:
            if tenant_id:
                opts = {'all_tenants': 1, 'project_id': tenant_id}
                volumes = cinder.volume_list(request, opts)
                snapshots = cinder.volume_snapshot_list(request, opts)
            else:
                volumes = cinder.volume_list(request)
                snapshots = cinder.volume_snapshot_list(request)
            volume_usage = sum([int(v.size) for v in volumes])
            snapshot_usage = sum([int(s.size) for s in snapshots])
            usages.tally('gigabytes', (snapshot_usage + volume_usage))
            usages.tally('volumes', len(volumes))
            usages.tally('snapshots', len(snapshots))
        except cinder.cinder_exception.ClientException:
            msg = _("Unable to retrieve volume limit information.")
            exceptions.handle(request, msg)

@profiler.trace
@memoized
def tenant_quota_usages(request, tenant_id=None):
    """Get our quotas and construct our usage object.

    If no tenant_id is provided, request.user.project_id is used.
    """
    if not tenant_id:
        tenant_id = request.user.project_id

    disabled_quotas = get_disabled_quotas(request)
    usages = QuotaUsage()

    for quota in get_tenant_quota_data(request,
                                       disabled_quotas=disabled_quotas,
                                       tenant_id=tenant_id):
        usages.add_quota(quota)

    # Get our usages.
    _get_tenant_compute_usages(request, usages, disabled_quotas, tenant_id)
    _get_tenant_network_usages(request, usages, disabled_quotas, tenant_id)
    _get_tenant_volume_usages(request, usages, disabled_quotas, tenant_id)

    return usages
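
# Illustrative sketch (not executed): typical use from a panel or workflow,
# assuming ``request`` is an authenticated Django request:
#
#     usages = tenant_quota_usages(request)
#     if usages['instances']['available'] <= 0:
#         ...  # e.g. disable the "Launch Instance" action
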
@profiler.trace
def tenant_limit_usages(request):
    # TODO(licostan): This method should be removed from the Quota module.
    # ProjectUsage/BaseUsage may be used instead on volume/image dashboards.
    limits = {}

    try:
        if base.is_service_enabled(request, 'compute'):
            limits.update(nova.tenant_absolute_limits(request, reserved=True))
    except Exception:
        msg = _("Unable to retrieve compute limit information.")
        exceptions.handle(request, msg)

    if cinder.is_volume_service_enabled(request):
        try:
            limits.update(cinder.tenant_absolute_limits(request))
            volumes = cinder.volume_list(request)
            snapshots = cinder.volume_snapshot_list(request)
            # gigabytesUsed should be the total of volumes and snapshots.
            vol_size = sum([getattr(volume, 'size', 0) for volume
                            in volumes])
            snap_size = sum([getattr(snap, 'size', 0) for snap
                             in snapshots])
            limits['gigabytesUsed'] = vol_size + snap_size
            limits['volumesUsed'] = len(volumes)
            limits['snapshotsUsed'] = len(snapshots)
        except cinder.cinder_exception.ClientException:
            msg = _("Unable to retrieve volume limit information.")
            exceptions.handle(request, msg)

    return limits

def enabled_quotas(request):
    """Returns the set of quota fields available minus those disabled."""
    return set(QUOTA_FIELDS) - get_disabled_quotas(request)