Wrap API calls with tracing decorators

Since Horizon is going to be the first (or one of the first) OpenStack
components to use the new MongoDB driver, for a while it won't be able
to retrieve traces produced by other components through that driver
(Horizon uses the MongoDB driver because the Ceilometer driver was too
slow). This means that Horizon itself is responsible for tracing the
rendering of its pages down to the layer where the flow of control
leaves the Horizon domain. To achieve this, many of the API wrappers in
openstack_dashboard.api are augmented with a tracing decorator.

Co-Authored-By: Timur Sufiev <tsufiev@mirantis.com>
Implements-blueprint: openstack-profiler-at-developer-dashboard
Change-Id: Ib36692f0e9e68ed7fa0cd47919ba6581c9c8ab57
Paul Karikh 2016-07-08 19:04:42 +03:00 committed by Timur Sufiev
parent 4ceeef5376
commit 97945b5f6a
12 changed files with 445 additions and 3 deletions
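
Throughout openstack_dashboard.api below, the decorator is imported as
"from openstack_dashboard.contrib.developer.profiler import api as profiler"
and applied bare, as @profiler.trace, on each wrapper. That profiler module
itself is not part of this excerpt; purely as an illustration of the pattern,
a minimal decorator with equivalent behaviour might look like the following
sketch. It only reuses the OPENSTACK_PROFILER setting and the osprofiler call
introduced in the first file of this diff; the names are illustrative, not
the actual implementation.

    # Hypothetical sketch of a bare tracing decorator for API wrappers.
    # Not the real profiler module from this commit; it reuses the
    # OPENSTACK_PROFILER setting and the osprofiler call shown below.
    from django.conf import settings
    from osprofiler import profiler as osprofiler


    def trace(func):
        """Trace an API call when the developer profiler is enabled."""
        profiler_settings = getattr(settings, 'OPENSTACK_PROFILER', {})
        if not profiler_settings.get('enabled', False):
            # Profiling disabled: return the wrapper untouched.
            return func
        # Name the trace point after the wrapped call, e.g.
        # "openstack_dashboard.api.cinder.volume_get".
        name = '%s.%s' % (func.__module__, func.__name__)
        return osprofiler.trace(name, info=None, hide_args=False,
                                allow_multiple_trace=True)(func)

Applied as a bare @trace (or, in the modules below, @profiler.trace) line
above a wrapper such as volume_get(), it records a span in the current
osprofiler trace and is a no-op when profiling is turned off.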


@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django import shortcuts
from django import template
from django.utils import encoding
@ -20,6 +21,8 @@ from django.views import generic
import horizon
from horizon import exceptions
from osprofiler import profiler
class PageTitleMixin(object):
"""A mixin that renders out a page title into a view.
@ -65,8 +68,20 @@ class PageTitleMixin(object):
return super(PageTitleMixin, self).render_to_response(context)
def trace(name):
def decorator(func):
if getattr(settings, 'OPENSTACK_PROFILER', {}).get('enabled', False):
return profiler.trace(name, info=None, hide_args=False,
allow_multiple_trace=True)(func)
else:
return func
return decorator
class HorizonTemplateView(PageTitleMixin, generic.TemplateView):
pass
@trace('horizon.render_to_response')
def render_to_response(self, context):
return super(HorizonTemplateView, self).render_to_response(context)
class HorizonFormView(PageTitleMixin, generic.FormView):


@ -36,6 +36,7 @@ from horizon.utils.memoized import memoized_with_request # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import nova
from openstack_dashboard.contrib.developer.profiler import api as profiler
LOG = logging.getLogger(__name__)
@ -249,6 +250,7 @@ def update_pagination(entities, page_size, marker, sort_dir):
return entities, has_more_data, has_prev_data
@profiler.trace
def volume_list_paged(request, search_opts=None, marker=None, paginate=False,
sort_dir="desc"):
"""To see all volumes in the cloud as an admin you can pass in a special
@ -288,6 +290,7 @@ def volume_list_paged(request, search_opts=None, marker=None, paginate=False,
return volumes, has_more_data, has_prev_data
@profiler.trace
def volume_get(request, volume_id):
volume_data = cinderclient(request).volumes.get(volume_id)
@ -311,6 +314,7 @@ def volume_get(request, volume_id):
return Volume(volume_data)
@profiler.trace
def volume_create(request, size, name, description, volume_type,
snapshot_id=None, metadata=None, image_id=None,
availability_zone=None, source_volid=None):
@ -328,25 +332,30 @@ def volume_create(request, size, name, description, volume_type,
return Volume(volume)
@profiler.trace
def volume_extend(request, volume_id, new_size):
return cinderclient(request).volumes.extend(volume_id, new_size)
@profiler.trace
def volume_delete(request, volume_id):
return cinderclient(request).volumes.delete(volume_id)
@profiler.trace
def volume_retype(request, volume_id, new_type, migration_policy):
return cinderclient(request).volumes.retype(volume_id,
new_type,
migration_policy)
@profiler.trace
def volume_set_bootable(request, volume_id, bootable):
return cinderclient(request).volumes.set_bootable(volume_id,
bootable)
@profiler.trace
def volume_update(request, volume_id, name, description):
vol_data = {'name': name,
'description': description}
@ -355,18 +364,22 @@ def volume_update(request, volume_id, name, description):
**vol_data)
@profiler.trace
def volume_set_metadata(request, volume_id, metadata):
return cinderclient(request).volumes.set_metadata(volume_id, metadata)
@profiler.trace
def volume_delete_metadata(request, volume_id, keys):
return cinderclient(request).volumes.delete_metadata(volume_id, keys)
@profiler.trace
def volume_reset_state(request, volume_id, state):
return cinderclient(request).volumes.reset_state(volume_id, state)
@profiler.trace
def volume_upload_to_image(request, volume_id, force, image_name,
container_format, disk_format):
return cinderclient(request).volumes.upload_to_image(volume_id,
@ -376,10 +389,12 @@ def volume_upload_to_image(request, volume_id, force, image_name,
disk_format)
@profiler.trace
def volume_get_encryption_metadata(request, volume_id):
return cinderclient(request).volumes.get_encryption_metadata(volume_id)
@profiler.trace
def volume_migrate(request, volume_id, host, force_host_copy=False,
lock_volume=False):
return cinderclient(request).volumes.migrate_volume(volume_id,
@ -388,11 +403,13 @@ def volume_migrate(request, volume_id, host, force_host_copy=False,
lock_volume)
@profiler.trace
def volume_snapshot_get(request, snapshot_id):
snapshot = cinderclient(request).volume_snapshots.get(snapshot_id)
return VolumeSnapshot(snapshot)
@profiler.trace
def volume_snapshot_list(request, search_opts=None):
snapshots, _, __ = volume_snapshot_list_paged(request,
search_opts=search_opts,
@ -400,6 +417,7 @@ def volume_snapshot_list(request, search_opts=None):
return snapshots
@profiler.trace
def volume_snapshot_list_paged(request, search_opts=None, marker=None,
paginate=False, sort_dir="desc"):
has_more_data = False
@ -430,6 +448,7 @@ def volume_snapshot_list_paged(request, search_opts=None, marker=None,
return snapshots, has_more_data, has_prev_data
@profiler.trace
def volume_snapshot_create(request, volume_id, name,
description=None, force=False):
data = {'name': name,
@ -441,10 +460,12 @@ def volume_snapshot_create(request, volume_id, name,
volume_id, **data))
@profiler.trace
def volume_snapshot_delete(request, snapshot_id):
return cinderclient(request).volume_snapshots.delete(snapshot_id)
@profiler.trace
def volume_snapshot_update(request, snapshot_id, name, description):
snapshot_data = {'name': name,
'description': description}
@ -453,26 +474,31 @@ def volume_snapshot_update(request, snapshot_id, name, description):
**snapshot_data)
@profiler.trace
def volume_snapshot_set_metadata(request, snapshot_id, metadata):
return cinderclient(request).volume_snapshots.set_metadata(
snapshot_id, metadata)
@profiler.trace
def volume_snapshot_delete_metadata(request, snapshot_id, keys):
return cinderclient(request).volume_snapshots.delete_metadata(
snapshot_id, keys)
@profiler.trace
def volume_snapshot_reset_state(request, snapshot_id, state):
return cinderclient(request).volume_snapshots.reset_state(
snapshot_id, state)
@profiler.trace
def volume_cgroup_get(request, cgroup_id):
cgroup = cinderclient(request).consistencygroups.get(cgroup_id)
return VolumeConsistencyGroup(cgroup)
@profiler.trace
def volume_cgroup_get_with_vol_type_names(request, cgroup_id):
cgroup = volume_cgroup_get(request, cgroup_id)
vol_types = volume_type_list(request)
@ -485,6 +511,7 @@ def volume_cgroup_get_with_vol_type_names(request, cgroup_id):
return cgroup
@profiler.trace
def volume_cgroup_list(request, search_opts=None):
c_client = cinderclient(request)
if c_client is None:
@ -493,6 +520,7 @@ def volume_cgroup_list(request, search_opts=None):
search_opts=search_opts)]
@profiler.trace
def volume_cgroup_list_with_vol_type_names(request, search_opts=None):
cgroups = volume_cgroup_list(request, search_opts)
vol_types = volume_type_list(request)
@ -507,6 +535,7 @@ def volume_cgroup_list_with_vol_type_names(request, search_opts=None):
return cgroups
@profiler.trace
def volume_cgroup_create(request, volume_types, name,
description=None, availability_zone=None):
data = {'name': name,
@ -518,6 +547,7 @@ def volume_cgroup_create(request, volume_types, name,
return VolumeConsistencyGroup(cgroup)
@profiler.trace
def volume_cgroup_create_from_source(request, name, cg_snapshot_id=None,
source_cgroup_id=None,
description=None,
@ -532,10 +562,12 @@ def volume_cgroup_create_from_source(request, name, cg_snapshot_id=None,
project_id))
@profiler.trace
def volume_cgroup_delete(request, cgroup_id, force=False):
return cinderclient(request).consistencygroups.delete(cgroup_id, force)
@profiler.trace
def volume_cgroup_update(request, cgroup_id, name=None, description=None,
add_vols=None, remove_vols=None):
cgroup_data = {}
@ -589,6 +621,7 @@ def volume_backup_supported(request):
return cinder_config.get('enable_backup', False)
@profiler.trace
def volume_backup_get(request, backup_id):
backup = cinderclient(request).backups.get(backup_id)
return VolumeBackup(backup)
@ -599,6 +632,7 @@ def volume_backup_list(request):
return backups
@profiler.trace
def volume_backup_list_paged(request, marker=None, paginate=False,
sort_dir="desc"):
has_more_data = False
@ -629,6 +663,7 @@ def volume_backup_list_paged(request, marker=None, paginate=False,
return backups, has_more_data, has_prev_data
@profiler.trace
def volume_backup_create(request,
volume_id,
container_name,
@ -642,15 +677,18 @@ def volume_backup_create(request,
return VolumeBackup(backup)
@profiler.trace
def volume_backup_delete(request, backup_id):
return cinderclient(request).backups.delete(backup_id)
@profiler.trace
def volume_backup_restore(request, backup_id, volume_id):
return cinderclient(request).restores.restore(backup_id=backup_id,
volume_id=volume_id)
@profiler.trace
def volume_manage(request,
host,
identifier,
@ -673,10 +711,12 @@ def volume_manage(request,
bootable=bootable)
@profiler.trace
def volume_unmanage(request, volume_id):
return cinderclient(request).volumes.unmanage(volume=volume_id)
@profiler.trace
def tenant_quota_get(request, tenant_id):
c_client = cinderclient(request)
if c_client is None:
@ -684,10 +724,12 @@ def tenant_quota_get(request, tenant_id):
return base.QuotaSet(c_client.quotas.get(tenant_id))
@profiler.trace
def tenant_quota_update(request, tenant_id, **kwargs):
return cinderclient(request).quotas.update(tenant_id, **kwargs)
@profiler.trace
def default_quota_get(request, tenant_id):
return base.QuotaSet(cinderclient(request).quotas.defaults(tenant_id))
@ -732,19 +774,23 @@ def volume_type_get_with_qos_association(request, volume_type_id):
return vol_type
@profiler.trace
def default_quota_update(request, **kwargs):
cinderclient(request).quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)
@profiler.trace
def volume_type_list(request):
return cinderclient(request).volume_types.list()
@profiler.trace
def volume_type_create(request, name, description=None, is_public=True):
return cinderclient(request).volume_types.create(name, description,
is_public)
@profiler.trace
def volume_type_update(request, volume_type_id, name=None, description=None,
is_public=None):
return cinderclient(request).volume_types.update(volume_type_id,
@ -753,11 +799,13 @@ def volume_type_update(request, volume_type_id, name=None, description=None,
is_public)
@profiler.trace
@memoized
def volume_type_default(request):
return cinderclient(request).volume_types.default()
@profiler.trace
def volume_type_delete(request, volume_type_id):
try:
return cinderclient(request).volume_types.delete(volume_type_id)
@ -766,32 +814,39 @@ def volume_type_delete(request, volume_type_id):
"This volume type is used by one or more volumes."))
@profiler.trace
def volume_type_get(request, volume_type_id):
return cinderclient(request).volume_types.get(volume_type_id)
@profiler.trace
def volume_encryption_type_create(request, volume_type_id, data):
return cinderclient(request).volume_encryption_types.create(volume_type_id,
specs=data)
@profiler.trace
def volume_encryption_type_delete(request, volume_type_id):
return cinderclient(request).volume_encryption_types.delete(volume_type_id)
@profiler.trace
def volume_encryption_type_get(request, volume_type_id):
return cinderclient(request).volume_encryption_types.get(volume_type_id)
@profiler.trace
def volume_encryption_type_list(request):
return cinderclient(request).volume_encryption_types.list()
@profiler.trace
def volume_encryption_type_update(request, volume_type_id, data):
return cinderclient(request).volume_encryption_types.update(volume_type_id,
specs=data)
@profiler.trace
def volume_type_extra_get(request, type_id, raw=False):
vol_type = volume_type_get(request, type_id)
extras = vol_type.get_keys()
@ -813,18 +868,22 @@ def volume_type_extra_delete(request, type_id, keys):
return vol_type.unset_keys(keys)
@profiler.trace
def qos_spec_list(request):
return cinderclient(request).qos_specs.list()
@profiler.trace
def qos_spec_get(request, qos_spec_id):
return cinderclient(request).qos_specs.get(qos_spec_id)
@profiler.trace
def qos_spec_delete(request, qos_spec_id):
return cinderclient(request).qos_specs.delete(qos_spec_id, force=True)
@profiler.trace
def qos_spec_create(request, name, specs):
return cinderclient(request).qos_specs.create(name, specs)
@ -838,22 +897,27 @@ def qos_spec_get_keys(request, qos_spec_id, raw=False):
key, value in qos_specs.items()]
@profiler.trace
def qos_spec_set_keys(request, qos_spec_id, specs):
return cinderclient(request).qos_specs.set_keys(qos_spec_id, specs)
@profiler.trace
def qos_spec_unset_keys(request, qos_spec_id, specs):
return cinderclient(request).qos_specs.unset_keys(qos_spec_id, specs)
@profiler.trace
def qos_spec_associate(request, qos_specs, vol_type_id):
return cinderclient(request).qos_specs.associate(qos_specs, vol_type_id)
@profiler.trace
def qos_spec_disassociate(request, qos_specs, vol_type_id):
return cinderclient(request).qos_specs.disassociate(qos_specs, vol_type_id)
@profiler.trace
def qos_spec_get_associations(request, qos_spec_id):
return cinderclient(request).qos_specs.get_associations(qos_spec_id)
@ -862,6 +926,7 @@ def qos_specs_list(request):
return [QosSpecs(s) for s in qos_spec_list(request)]
@profiler.trace
@memoized
def tenant_absolute_limits(request):
limits = cinderclient(request).limits.get().absolute
@ -882,14 +947,17 @@ def tenant_absolute_limits(request):
return limits_dict
@profiler.trace
def service_list(request):
return cinderclient(request).services.list()
@profiler.trace
def availability_zone_list(request, detailed=False):
return cinderclient(request).availability_zones.list(detailed=detailed)
@profiler.trace
@memoized_with_request(cinderclient)
def list_extensions(cinder_api):
return tuple(cinder_list_extensions.ListExtManager(cinder_api).show_all())
@ -905,6 +973,7 @@ def extension_supported(extensions, extension_name):
return False
@profiler.trace
def transfer_list(request, detailed=True, search_opts=None):
"""To see all volumes transfers as an admin pass in a special
search option: {'all_tenants': 1}
@ -918,24 +987,29 @@ def transfer_list(request, detailed=True, search_opts=None):
return []
@profiler.trace
def transfer_get(request, transfer_id):
transfer_data = cinderclient(request).transfers.get(transfer_id)
return VolumeTransfer(transfer_data)
@profiler.trace
def transfer_create(request, transfer_id, name):
volume = cinderclient(request).transfers.create(transfer_id, name)
return VolumeTransfer(volume)
@profiler.trace
def transfer_accept(request, transfer_id, auth_key):
return cinderclient(request).transfers.accept(transfer_id, auth_key)
@profiler.trace
def transfer_delete(request, transfer_id):
return cinderclient(request).transfers.delete(transfer_id)
@profiler.trace
def pool_list(request, detailed=False):
c_client = cinderclient(request)
if c_client is None:


@ -19,6 +19,7 @@ from collections import OrderedDict
from horizon.utils import memoized
from openstack_dashboard.api import neutron
from openstack_dashboard.contrib.developer.profiler import api as profiler
neutronclient = neutron.neutronclient
@ -77,10 +78,12 @@ def rule_create(request, **kwargs):
return Rule(rule)
@profiler.trace
def rule_list(request, **kwargs):
return _rule_list(request, expand_policy=True, **kwargs)
@profiler.trace
def rule_list_for_tenant(request, tenant_id, **kwargs):
"""Return a rule list available for the tenant.
@ -104,6 +107,7 @@ def _rule_list(request, expand_policy, **kwargs):
return [Rule(r) for r in rules]
@profiler.trace
def rule_get(request, rule_id):
return _rule_get(request, rule_id, expand_policy=True)
@ -120,10 +124,12 @@ def _rule_get(request, rule_id, expand_policy):
return Rule(rule)
@profiler.trace
def rule_delete(request, rule_id):
neutronclient(request).delete_firewall_rule(rule_id)
@profiler.trace
def rule_update(request, rule_id, **kwargs):
body = {'firewall_rule': kwargs}
rule = neutronclient(request).update_firewall_rule(
@ -131,6 +137,7 @@ def rule_update(request, rule_id, **kwargs):
return Rule(rule)
@profiler.trace
def policy_create(request, **kwargs):
"""Create a firewall policy
@ -148,10 +155,12 @@ def policy_create(request, **kwargs):
return Policy(policy)
@profiler.trace
def policy_list(request, **kwargs):
return _policy_list(request, expand_rule=True, **kwargs)
@profiler.trace
def policy_list_for_tenant(request, tenant_id, **kwargs):
"""Return a policy list available for the tenant.
@ -176,6 +185,7 @@ def _policy_list(request, expand_rule, **kwargs):
return [Policy(p) for p in policies]
@profiler.trace
def policy_get(request, policy_id):
return _policy_get(request, policy_id, expand_rule=True)
@ -195,10 +205,12 @@ def _policy_get(request, policy_id, expand_rule):
return Policy(policy)
@profiler.trace
def policy_delete(request, policy_id):
neutronclient(request).delete_firewall_policy(policy_id)
@profiler.trace
def policy_update(request, policy_id, **kwargs):
body = {'firewall_policy': kwargs}
policy = neutronclient(request).update_firewall_policy(
@ -206,18 +218,21 @@ def policy_update(request, policy_id, **kwargs):
return Policy(policy)
@profiler.trace
def policy_insert_rule(request, policy_id, **kwargs):
policy = neutronclient(request).firewall_policy_insert_rule(
policy_id, kwargs)
return Policy(policy)
@profiler.trace
def policy_remove_rule(request, policy_id, **kwargs):
policy = neutronclient(request).firewall_policy_remove_rule(
policy_id, kwargs)
return Policy(policy)
@profiler.trace
def firewall_create(request, **kwargs):
"""Create a firewall for specified policy
@ -234,10 +249,12 @@ def firewall_create(request, **kwargs):
return Firewall(firewall)
@profiler.trace
def firewall_list(request, **kwargs):
return _firewall_list(request, expand_policy=True, **kwargs)
@profiler.trace
def firewall_list_for_tenant(request, tenant_id, **kwargs):
"""Return a firewall list available for the tenant.
@ -264,6 +281,7 @@ def _firewall_list(request, expand_policy, **kwargs):
return [Firewall(f) for f in firewalls]
@profiler.trace
def firewall_get(request, firewall_id):
return _firewall_get(request, firewall_id, expand_policy=True)
@ -281,10 +299,12 @@ def _firewall_get(request, firewall_id, expand_policy):
return Firewall(firewall)
@profiler.trace
def firewall_delete(request, firewall_id):
neutronclient(request).delete_firewall(firewall_id)
@profiler.trace
def firewall_update(request, firewall_id, **kwargs):
body = {'firewall': kwargs}
firewall = neutronclient(request).update_firewall(
@ -292,6 +312,7 @@ def firewall_update(request, firewall_id, **kwargs):
return Firewall(firewall)
@profiler.trace
@memoized.memoized
def firewall_unassociated_routers_list(request, tenant_id):
all_routers = neutron.router_list(request, tenant_id=tenant_id)


@ -36,6 +36,7 @@ from six.moves import _thread as thread
from horizon.utils import functions as utils
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.contrib.developer.profiler import api as profiler
LOG = logging.getLogger(__name__)
@ -209,10 +210,12 @@ def _normalize_list_input(filters, **kwargs):
kwargs['filters'] = filters
@profiler.trace
def image_delete(request, image_id):
return glanceclient(request).images.delete(image_id)
@profiler.trace
def image_get(request, image_id):
"""Returns an Image object populated with metadata for image
with supplied identifier.
@ -221,6 +224,7 @@ def image_get(request, image_id):
return Image(image)
@profiler.trace
def image_list_detailed(request, marker=None, sort_dir='desc',
sort_key='created_at', filters=None, paginate=False,
reversed_order=False, **kwargs):
@ -327,6 +331,7 @@ def image_list_detailed(request, marker=None, sort_dir='desc',
return wrapped_images, has_more_data, has_prev_data
@profiler.trace
def image_update(request, image_id, **kwargs):
image_data = kwargs.get('data', None)
try:
@ -391,6 +396,7 @@ class ExternallyUploadedImage(Image):
return self._token_id
@profiler.trace
def image_create(request, **kwargs):
"""Create image.
@ -442,6 +448,7 @@ def image_create(request, **kwargs):
return Image(image)
@profiler.trace
def image_update_properties(request, image_id, remove_props=None, **kwargs):
"""Add or update a custom property of an image."""
return glanceclient(request, '2').images.update(image_id,
@ -449,6 +456,7 @@ def image_update_properties(request, image_id, remove_props=None, **kwargs):
**kwargs)
@profiler.trace
def image_delete_properties(request, image_id, keys):
"""Delete custom properties for an image."""
return glanceclient(request, '2').images.update(image_id, keys)
@ -525,6 +533,7 @@ def metadefs_namespace_get(request, namespace, resource_type=None, wrap=False):
return namespace
@profiler.trace
def metadefs_namespace_list(request,
filters=None,
sort_dir='asc',
@ -611,6 +620,7 @@ def metadefs_namespace_list(request,
return namespaces, has_more_data, has_prev_data
@profiler.trace
def metadefs_namespace_full_list(request, resource_type, filters=None,
*args, **kwargs):
filters = filters or {}
@ -624,20 +634,24 @@ def metadefs_namespace_full_list(request, resource_type, filters=None,
], has_more_data, has_prev_data
@profiler.trace
def metadefs_namespace_create(request, namespace):
return glanceclient(request, '2').metadefs_namespace.create(**namespace)
@profiler.trace
def metadefs_namespace_update(request, namespace_name, **properties):
return glanceclient(request, '2').metadefs_namespace.update(
namespace_name,
**properties)
@profiler.trace
def metadefs_namespace_delete(request, namespace_name):
return glanceclient(request, '2').metadefs_namespace.delete(namespace_name)
@profiler.trace
def metadefs_resource_types_list(request):
# Listing Resource Types requires the v2 API. If not supported we return
# an empty array so callers don't need to worry about version checking.
@ -647,6 +661,7 @@ def metadefs_resource_types_list(request):
return glanceclient(request, '2').metadefs_resource_type.list()
@profiler.trace
def metadefs_namespace_resource_types(request, namespace_name):
resource_types = glanceclient(request, '2').metadefs_resource_type.get(
namespace_name)
@ -655,6 +670,7 @@ def metadefs_namespace_resource_types(request, namespace_name):
return list(resource_types)
@profiler.trace
def metadefs_namespace_add_resource_type(request,
namespace_name,
resource_type):
@ -662,6 +678,7 @@ def metadefs_namespace_add_resource_type(request,
namespace_name, **resource_type)
@profiler.trace
def metadefs_namespace_remove_resource_type(request,
namespace_name,
resource_type_name):


@ -25,6 +25,7 @@ from horizon import exceptions
from horizon.utils import functions as utils
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.contrib.developer.profiler import api as profiler
def format_parameters(params):
@ -57,6 +58,7 @@ def heatclient(request, password=None):
return client
@profiler.trace
def stacks_list(request, marker=None, sort_dir='desc', sort_key='created_at',
paginate=False, filters=None):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
@ -107,6 +109,7 @@ def _ignore_if(key, value):
return False
@profiler.trace
def get_template_files(template_data=None, template_url=None):
if template_data:
tpl = template_data
@ -147,93 +150,116 @@ def _get_file_contents(from_data, files):
_get_file_contents(value, files)
@profiler.trace
def stack_delete(request, stack_id):
return heatclient(request).stacks.delete(stack_id)
@profiler.trace
def stack_get(request, stack_id):
return heatclient(request).stacks.get(stack_id)
@profiler.trace
def template_get(request, stack_id):
return heatclient(request).stacks.template(stack_id)
@profiler.trace
def stack_create(request, password=None, **kwargs):
return heatclient(request, password).stacks.create(**kwargs)
@profiler.trace
def stack_preview(request, password=None, **kwargs):
return heatclient(request, password).stacks.preview(**kwargs)
@profiler.trace
def stack_update(request, stack_id, password=None, **kwargs):
return heatclient(request, password).stacks.update(stack_id, **kwargs)
@profiler.trace
def snapshot_create(request, stack_id):
return heatclient(request).stacks.snapshot(stack_id)
@profiler.trace
def snapshot_list(request, stack_id):
return heatclient(request).stacks.snapshot_list(stack_id)
@profiler.trace
def snapshot_show(request, stack_id, snapshot_id):
return heatclient(request).stacks.snapshot_show(stack_id, snapshot_id)
@profiler.trace
def snapshot_delete(request, stack_id, snapshot_id):
return heatclient(request).stacks.snapshot_delete(stack_id, snapshot_id)
@profiler.trace
def events_list(request, stack_name):
return heatclient(request).events.list(stack_name)
@profiler.trace
def resources_list(request, stack_name):
return heatclient(request).resources.list(stack_name)
@profiler.trace
def resource_get(request, stack_id, resource_name):
return heatclient(request).resources.get(stack_id, resource_name)
@profiler.trace
def resource_metadata_get(request, stack_id, resource_name):
return heatclient(request).resources.metadata(stack_id, resource_name)
@profiler.trace
def template_validate(request, **kwargs):
return heatclient(request).stacks.validate(**kwargs)
@profiler.trace
def action_check(request, stack_id):
return heatclient(request).actions.check(stack_id)
@profiler.trace
def action_suspend(request, stack_id):
return heatclient(request).actions.suspend(stack_id)
@profiler.trace
def action_resume(request, stack_id):
return heatclient(request).actions.resume(stack_id)
@profiler.trace
def resource_types_list(request, filters=None):
return heatclient(request).resource_types.list(filters=filters)
@profiler.trace
def resource_type_get(request, resource_type):
return heatclient(request).resource_types.get(resource_type)
@profiler.trace
def service_list(request):
return heatclient(request).services.list()
@profiler.trace
def template_version_list(request):
return heatclient(request).template_versions.list()
@profiler.trace
def template_function_list(request, template_version):
return heatclient(request).template_versions.get(template_version)


@ -35,6 +35,7 @@ from horizon import messages
from horizon.utils import functions as utils
from openstack_dashboard.api import base
from openstack_dashboard.contrib.developer.profiler import api as profiler
from openstack_dashboard import policy
@ -190,6 +191,7 @@ def keystoneclient(request, admin=False):
return conn
@profiler.trace
def domain_create(request, name, description=None, enabled=None):
manager = keystoneclient(request, admin=True).domains
return manager.create(name=name,
@ -197,16 +199,19 @@ def domain_create(request, name, description=None, enabled=None):
enabled=enabled)
@profiler.trace
def domain_get(request, domain_id):
manager = keystoneclient(request, admin=True).domains
return manager.get(domain_id)
@profiler.trace
def domain_delete(request, domain_id):
manager = keystoneclient(request, admin=True).domains
return manager.delete(domain_id)
@profiler.trace
def domain_list(request):
manager = keystoneclient(request, admin=True).domains
return manager.list()
@ -226,6 +231,7 @@ def domain_lookup(request):
return {domain.id: domain.name}
@profiler.trace
def domain_update(request, domain_id, name=None, description=None,
enabled=None):
manager = keystoneclient(request, admin=True).domains
@ -238,6 +244,7 @@ def domain_update(request, domain_id, name=None, description=None,
return response
@profiler.trace
def tenant_create(request, name, description=None, enabled=None,
domain=None, **kwargs):
manager = VERSIONS.get_project_manager(request, admin=True)
@ -315,16 +322,19 @@ def is_domain_admin(request):
# A quick search through the codebase reveals that it's always called with
# admin=true so I suspect we could eliminate it entirely as with the other
# tenant commands.
@profiler.trace
def tenant_get(request, project, admin=True):
manager = VERSIONS.get_project_manager(request, admin=admin)
return manager.get(project)
@profiler.trace
def tenant_delete(request, project):
manager = VERSIONS.get_project_manager(request, admin=True)
return manager.delete(project)
@profiler.trace
def tenant_list(request, paginate=False, marker=None, domain=None, user=None,
admin=True, filters=None):
manager = VERSIONS.get_project_manager(request, admin=admin)
@ -367,6 +377,7 @@ def tenant_list(request, paginate=False, marker=None, domain=None, user=None,
return tenants, has_more_data
@profiler.trace
def tenant_update(request, project, name=None, description=None,
enabled=None, domain=None, **kwargs):
manager = VERSIONS.get_project_manager(request, admin=True)
@ -381,6 +392,7 @@ def tenant_update(request, project, name=None, description=None,
raise exceptions.Conflict()
@profiler.trace
def user_list(request, project=None, domain=None, group=None, filters=None):
users = []
if VERSIONS.active < 3:
@ -403,6 +415,7 @@ def user_list(request, project=None, domain=None, group=None, filters=None):
return [VERSIONS.upgrade_v2_user(user) for user in users]
@profiler.trace
def user_create(request, name=None, email=None, password=None, project=None,
enabled=None, domain=None, description=None, **data):
manager = keystoneclient(request, admin=True).users
@ -419,15 +432,18 @@ def user_create(request, name=None, email=None, password=None, project=None,
raise exceptions.Conflict()
@profiler.trace
def user_delete(request, user_id):
return keystoneclient(request, admin=True).users.delete(user_id)
@profiler.trace
def user_get(request, user_id, admin=True):
user = keystoneclient(request, admin=admin).users.get(user_id)
return VERSIONS.upgrade_v2_user(user)
@profiler.trace
def user_update(request, user, **data):
manager = keystoneclient(request, admin=True).users
error = None
@ -476,6 +492,7 @@ def user_update(request, user, **data):
raise exceptions.Conflict()
@profiler.trace
def user_update_enabled(request, user, enabled):
manager = keystoneclient(request, admin=True).users
if VERSIONS.active < 3:
@ -484,6 +501,7 @@ def user_update_enabled(request, user, enabled):
return manager.update(user, enabled=enabled)
@profiler.trace
def user_update_password(request, user, password, admin=True):
if not keystone_can_edit_user():
@ -518,6 +536,7 @@ def user_verify_admin_password(request, admin_password):
return False
@profiler.trace
def user_update_own_password(request, origpassword, password):
client = keystoneclient(request, admin=False)
client.user_id = request.user.id
@ -527,6 +546,7 @@ def user_update_own_password(request, origpassword, password):
return client.users.update_password(origpassword, password)
@profiler.trace
def user_update_tenant(request, user, project, admin=True):
manager = keystoneclient(request, admin=admin).users
if VERSIONS.active < 3:
@ -535,6 +555,7 @@ def user_update_tenant(request, user, project, admin=True):
return manager.update(user, project=project)
@profiler.trace
def group_create(request, domain_id, name, description=None):
manager = keystoneclient(request, admin=True).groups
return manager.create(domain=domain_id,
@ -542,16 +563,19 @@ def group_create(request, domain_id, name, description=None):
description=description)
@profiler.trace
def group_get(request, group_id, admin=True):
manager = keystoneclient(request, admin=admin).groups
return manager.get(group_id)
@profiler.trace
def group_delete(request, group_id):
manager = keystoneclient(request, admin=True).groups
return manager.delete(group_id)
@profiler.trace
def group_list(request, domain=None, project=None, user=None, filters=None):
manager = keystoneclient(request, admin=True).groups
groups = []
@ -580,6 +604,7 @@ def group_list(request, domain=None, project=None, user=None, filters=None):
return groups
@profiler.trace
def group_update(request, group_id, name=None, description=None):
manager = keystoneclient(request, admin=True).groups
return manager.update(group=group_id,
@ -587,11 +612,13 @@ def group_update(request, group_id, name=None, description=None):
description=description)
@profiler.trace
def add_group_user(request, group_id, user_id):
manager = keystoneclient(request, admin=True).users
return manager.add_to_group(group=group_id, user=user_id)
@profiler.trace
def remove_group_user(request, group_id, user_id):
manager = keystoneclient(request, admin=True).users
return manager.remove_from_group(group=group_id, user=user_id)
@ -625,6 +652,7 @@ def get_project_groups_roles(request, project):
return groups_roles
@profiler.trace
def role_assignments_list(request, project=None, user=None, role=None,
group=None, domain=None, effective=False,
include_subtree=True):
@ -641,26 +669,31 @@ def role_assignments_list(request, project=None, user=None, role=None,
include_subtree=include_subtree)
@profiler.trace
def role_create(request, name):
manager = keystoneclient(request, admin=True).roles
return manager.create(name)
@profiler.trace
def role_get(request, role_id):
manager = keystoneclient(request, admin=True).roles
return manager.get(role_id)
@profiler.trace
def role_update(request, role_id, name=None):
manager = keystoneclient(request, admin=True).roles
return manager.update(role_id, name)
@profiler.trace
def role_delete(request, role_id):
manager = keystoneclient(request, admin=True).roles
return manager.delete(role_id)
@profiler.trace
def role_list(request, filters=None):
"""Returns a global list of available roles."""
manager = keystoneclient(request, admin=True).roles
@ -680,6 +713,7 @@ def role_list(request, filters=None):
return roles
@profiler.trace
def roles_for_user(request, user, project=None, domain=None):
"""Returns a list of user roles scoped to a project or domain."""
manager = keystoneclient(request, admin=True).roles
@ -689,6 +723,7 @@ def roles_for_user(request, user, project=None, domain=None):
return manager.list(user=user, domain=domain, project=project)
@profiler.trace
def get_domain_users_roles(request, domain):
users_roles = collections.defaultdict(list)
domain_role_assignments = role_assignments_list(request,
@ -707,18 +742,21 @@ def get_domain_users_roles(request, domain):
return users_roles
@profiler.trace
def add_domain_user_role(request, user, role, domain):
"""Adds a role for a user on a domain."""
manager = keystoneclient(request, admin=True).roles
return manager.grant(role, user=user, domain=domain)
@profiler.trace
def remove_domain_user_role(request, user, role, domain=None):
"""Removes a given single role for a user from a domain."""
manager = keystoneclient(request, admin=True).roles
return manager.revoke(role, user=user, domain=domain)
@profiler.trace
def get_project_users_roles(request, project):
users_roles = collections.defaultdict(list)
if VERSIONS.active < 3:
@ -744,6 +782,7 @@ def get_project_users_roles(request, project):
return users_roles
@profiler.trace
def add_tenant_user_role(request, project=None, user=None, role=None,
group=None, domain=None):
"""Adds a role for a user on a tenant."""
@ -755,6 +794,7 @@ def add_tenant_user_role(request, project=None, user=None, role=None,
group=group, domain=domain)
@profiler.trace
def remove_tenant_user_role(request, project=None, user=None, role=None,
group=None, domain=None):
"""Removes a given single role for a user from a tenant."""
@ -775,11 +815,13 @@ def remove_tenant_user(request, project=None, user=None, domain=None):
project=project, domain=domain)
@profiler.trace
def roles_for_group(request, group, domain=None, project=None):
manager = keystoneclient(request, admin=True).roles
return manager.list(group=group, domain=domain, project=project)
@profiler.trace
def add_group_role(request, role, group, domain=None, project=None):
"""Adds a role for a group on a domain or project."""
manager = keystoneclient(request, admin=True).roles
@ -787,6 +829,7 @@ def add_group_role(request, role, group, domain=None, project=None):
project=project)
@profiler.trace
def remove_group_role(request, role, group, domain=None, project=None):
"""Removes a given single role for a group from a domain or project."""
manager = keystoneclient(request, admin=True).roles
@ -794,6 +837,7 @@ def remove_group_role(request, role, group, domain=None, project=None):
domain=domain)
@profiler.trace
def remove_group_roles(request, group, domain=None, project=None):
"""Removes all roles from a group on a domain or project."""
client = keystoneclient(request, admin=True)
@ -834,18 +878,22 @@ def ec2_manager(request):
return ec2.CredentialsManager(client)
@profiler.trace
def list_ec2_credentials(request, user_id):
return ec2_manager(request).list(user_id)
@profiler.trace
def create_ec2_credentials(request, user_id, tenant_id):
return ec2_manager(request).create(user_id, tenant_id)
@profiler.trace
def get_user_ec2_credentials(request, user_id, access_token):
return ec2_manager(request).get(user_id, access_token)
@profiler.trace
def delete_user_ec2_credentials(request, user_id, access_token):
return ec2_manager(request).delete(user_id, access_token)
@ -911,11 +959,13 @@ def identity_provider_create(request, idp_id, description=None,
raise exceptions.Conflict()
@profiler.trace
def identity_provider_get(request, idp_id):
manager = keystoneclient(request, admin=True).federation.identity_providers
return manager.get(idp_id)
@profiler.trace
def identity_provider_update(request, idp_id, description=None,
enabled=False, remote_ids=None):
manager = keystoneclient(request, admin=True).federation.identity_providers
@ -928,16 +978,19 @@ def identity_provider_update(request, idp_id, description=None,
raise exceptions.Conflict()
@profiler.trace
def identity_provider_delete(request, idp_id):
manager = keystoneclient(request, admin=True).federation.identity_providers
return manager.delete(idp_id)
@profiler.trace
def identity_provider_list(request):
manager = keystoneclient(request, admin=True).federation.identity_providers
return manager.list()
@profiler.trace
def mapping_create(request, mapping_id, rules):
manager = keystoneclient(request, admin=True).federation.mappings
try:
@ -946,26 +999,31 @@ def mapping_create(request, mapping_id, rules):
raise exceptions.Conflict()
@profiler.trace
def mapping_get(request, mapping_id):
manager = keystoneclient(request, admin=True).federation.mappings
return manager.get(mapping_id)
@profiler.trace
def mapping_update(request, mapping_id, rules):
manager = keystoneclient(request, admin=True).federation.mappings
return manager.update(mapping_id, rules=rules)
@profiler.trace
def mapping_delete(request, mapping_id):
manager = keystoneclient(request, admin=True).federation.mappings
return manager.delete(mapping_id)
@profiler.trace
def mapping_list(request):
manager = keystoneclient(request, admin=True).federation.mappings
return manager.list()
@profiler.trace
def protocol_create(request, protocol_id, identity_provider, mapping):
manager = keystoneclient(request).federation.protocols
try:
@ -974,21 +1032,25 @@ def protocol_create(request, protocol_id, identity_provider, mapping):
raise exceptions.Conflict()
@profiler.trace
def protocol_get(request, identity_provider, protocol):
manager = keystoneclient(request).federation.protocols
return manager.get(identity_provider, protocol)
@profiler.trace
def protocol_update(request, identity_provider, protocol, mapping):
manager = keystoneclient(request).federation.protocols
return manager.update(identity_provider, protocol, mapping)
@profiler.trace
def protocol_delete(request, identity_provider, protocol):
manager = keystoneclient(request).federation.protocols
return manager.delete(identity_provider, protocol)
@profiler.trace
def protocol_list(request, identity_provider):
manager = keystoneclient(request).federation.protocols
return manager.list(identity_provider)


@ -37,6 +37,7 @@ from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
from openstack_dashboard.api import nova
from openstack_dashboard.contrib.developer.profiler import api as profiler
from openstack_dashboard import policy
@ -258,6 +259,7 @@ class SecurityGroupManager(network_base.SecurityGroupManager):
secgroups = self.client.list_security_groups(**filters)
return [SecurityGroup(sg) for sg in secgroups.get('security_groups')]
@profiler.trace
def list(self):
tenant_id = self.request.user.tenant_id
return self._list(tenant_id=tenant_id)
@ -271,11 +273,13 @@ class SecurityGroupManager(network_base.SecurityGroupManager):
related_sgs = related_sgs.get('security_groups')
return dict((sg['id'], sg['name']) for sg in related_sgs)
@profiler.trace
def get(self, sg_id):
secgroup = self.client.show_security_group(sg_id).get('security_group')
sg_dict = self._sg_name_dict(sg_id, secgroup['security_group_rules'])
return SecurityGroup(secgroup, sg_dict)
@profiler.trace
def create(self, name, desc):
body = {'security_group': {'name': name,
'description': desc,
@ -283,15 +287,18 @@ class SecurityGroupManager(network_base.SecurityGroupManager):
secgroup = self.client.create_security_group(body)
return SecurityGroup(secgroup.get('security_group'))
@profiler.trace
def update(self, sg_id, name, desc):
body = {'security_group': {'name': name,
'description': desc}}
secgroup = self.client.update_security_group(sg_id, body)
return SecurityGroup(secgroup.get('security_group'))
@profiler.trace
def delete(self, sg_id):
self.client.delete_security_group(sg_id)
@profiler.trace
def rule_create(self, parent_group_id,
direction=None, ethertype=None,
ip_protocol=None, from_port=None, to_port=None,
@ -322,9 +329,11 @@ class SecurityGroupManager(network_base.SecurityGroupManager):
sg_dict = self._sg_name_dict(parent_group_id, [rule])
return SecurityGroupRule(rule, sg_dict)
@profiler.trace
def rule_delete(self, sgr_id):
self.client.delete_security_group_rule(sgr_id)
@profiler.trace
def list_by_instance(self, instance_id):
"""Gets security groups of an instance."""
ports = port_list(self.request, device_id=instance_id)
@ -333,6 +342,7 @@ class SecurityGroupManager(network_base.SecurityGroupManager):
sg_ids += p.security_groups
return self._list(id=set(sg_ids)) if sg_ids else []
@profiler.trace
def update_instance_security_group(self, instance_id,
new_security_group_ids):
ports = port_list(self.request, device_id=instance_id)
@ -371,6 +381,7 @@ class FloatingIpManager(network_base.FloatingIpManager):
self.request = request
self.client = neutronclient(request)
@profiler.trace
def list_pools(self):
search_opts = {'router:external': True}
return [FloatingIpPool(pool) for pool
@ -393,6 +404,7 @@ class FloatingIpManager(network_base.FloatingIpManager):
fip['instance_id'] = None
fip['instance_type'] = None
@profiler.trace
def list(self, all_tenants=False, **search_opts):
if not all_tenants:
tenant_id = self.request.user.tenant_id
@ -413,11 +425,13 @@ class FloatingIpManager(network_base.FloatingIpManager):
self._set_instance_info(fip, port_dict.get(fip['port_id']))
return [FloatingIp(fip) for fip in fips]
@profiler.trace
def get(self, floating_ip_id):
fip = self.client.show_floatingip(floating_ip_id).get('floatingip')
self._set_instance_info(fip)
return FloatingIp(fip)
@profiler.trace
def allocate(self, pool, tenant_id=None, **params):
if not tenant_id:
tenant_id = self.request.user.project_id
@ -430,9 +444,11 @@ class FloatingIpManager(network_base.FloatingIpManager):
self._set_instance_info(fip)
return FloatingIp(fip)
@profiler.trace
def release(self, floating_ip_id):
self.client.delete_floatingip(floating_ip_id)
@profiler.trace
def associate(self, floating_ip_id, port_id):
# NOTE: In Neutron Horizon floating IP support, port_id is
# "<port_id>_<ip_address>" format to identify multiple ports.
@ -442,6 +458,7 @@ class FloatingIpManager(network_base.FloatingIpManager):
self.client.update_floatingip(floating_ip_id,
{'floatingip': update_dict})
@profiler.trace
def disassociate(self, floating_ip_id):
update_dict = {'port_id': None}
self.client.update_floatingip(floating_ip_id,
@ -469,6 +486,7 @@ class FloatingIpManager(network_base.FloatingIpManager):
for s in n.subnets])
return reachable_subnets | shared
@profiler.trace
def list_targets(self):
tenant_id = self.request.user.tenant_id
ports = port_list(self.request, tenant_id=tenant_id)
@ -501,6 +519,7 @@ class FloatingIpManager(network_base.FloatingIpManager):
search_opts = {'device_id': instance_id}
return port_list(self.request, **search_opts)
@profiler.trace
def get_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
targets = [target for target in target_list
@ -517,6 +536,7 @@ class FloatingIpManager(network_base.FloatingIpManager):
return '{0}_{1}'.format(ports[0].id,
ports[0].fixed_ips[0]['ip_address'])
@profiler.trace
def list_target_id_by_instance(self, instance_id, target_list=None):
if target_list is not None:
return [target['id'] for target in target_list
@ -557,6 +577,7 @@ def neutronclient(request):
return c
@profiler.trace
def list_resources_with_long_filters(list_method,
filter_attr, filter_values, **params):
"""List neutron resources with handling RequestURITooLong exception.
@ -611,6 +632,7 @@ def list_resources_with_long_filters(list_method,
return resources
@profiler.trace
def network_list(request, **params):
LOG.debug("network_list(): params=%s", params)
networks = neutronclient(request).list_networks(**params).get('networks')
@ -626,6 +648,7 @@ def network_list(request, **params):
return [Network(n) for n in networks]
@profiler.trace
def network_list_for_tenant(request, tenant_id, include_external=False,
**params):
"""Return a network list available for the tenant.
@ -668,6 +691,7 @@ def network_list_for_tenant(request, tenant_id, include_external=False,
return networks
@profiler.trace
def network_get(request, network_id, expand_subnet=True, **params):
LOG.debug("network_get(): netid=%s, params=%s" % (network_id, params))
network = neutronclient(request).show_network(network_id,
@ -682,6 +706,7 @@ def network_get(request, network_id, expand_subnet=True, **params):
return Network(network)
@profiler.trace
def network_create(request, **kwargs):
"""Create a network object.
@ -701,6 +726,7 @@ def network_create(request, **kwargs):
return Network(network)
@profiler.trace
def network_update(request, network_id, **kwargs):
LOG.debug("network_update(): netid=%s, params=%s" % (network_id, kwargs))
body = {'network': kwargs}
@ -709,17 +735,20 @@ def network_update(request, network_id, **kwargs):
return Network(network)
@profiler.trace
def network_delete(request, network_id):
LOG.debug("network_delete(): netid=%s" % network_id)
neutronclient(request).delete_network(network_id)
@profiler.trace
def subnet_list(request, **params):
LOG.debug("subnet_list(): params=%s" % (params))
subnets = neutronclient(request).list_subnets(**params).get('subnets')
return [Subnet(s) for s in subnets]
@profiler.trace
def subnet_get(request, subnet_id, **params):
LOG.debug("subnet_get(): subnetid=%s, params=%s" % (subnet_id, params))
subnet = neutronclient(request).show_subnet(subnet_id,
@ -727,6 +756,7 @@ def subnet_get(request, subnet_id, **params):
return Subnet(subnet)
@profiler.trace
def subnet_create(request, network_id, **kwargs):
"""Create a subnet on a specified network.
@ -755,6 +785,7 @@ def subnet_create(request, network_id, **kwargs):
return Subnet(subnet)
@profiler.trace
def subnet_update(request, subnet_id, **kwargs):
LOG.debug("subnet_update(): subnetid=%s, kwargs=%s" % (subnet_id, kwargs))
body = {'subnet': kwargs}
@ -763,11 +794,13 @@ def subnet_update(request, subnet_id, **kwargs):
return Subnet(subnet)
@profiler.trace
def subnet_delete(request, subnet_id):
LOG.debug("subnet_delete(): subnetid=%s" % subnet_id)
neutronclient(request).delete_subnet(subnet_id)
@profiler.trace
def subnetpool_list(request, **params):
LOG.debug("subnetpool_list(): params=%s" % (params))
subnetpools = \
@ -775,6 +808,7 @@ def subnetpool_list(request, **params):
return [SubnetPool(s) for s in subnetpools]
@profiler.trace
def subnetpool_get(request, subnetpool_id, **params):
LOG.debug("subnetpool_get(): subnetpoolid=%s, params=%s" %
(subnetpool_id, params))
@ -784,6 +818,7 @@ def subnetpool_get(request, subnetpool_id, **params):
return SubnetPool(subnetpool)
@profiler.trace
def subnetpool_create(request, name, prefixes, **kwargs):
"""Create a subnetpool.
@ -820,6 +855,7 @@ def subnetpool_create(request, name, prefixes, **kwargs):
return SubnetPool(subnetpool)
@profiler.trace
def subnetpool_update(request, subnetpool_id, **kwargs):
LOG.debug("subnetpool_update(): subnetpoolid=%s, kwargs=%s" %
(subnetpool_id, kwargs))
@ -830,17 +866,20 @@ def subnetpool_update(request, subnetpool_id, **kwargs):
return SubnetPool(subnetpool)
@profiler.trace
def subnetpool_delete(request, subnetpool_id):
LOG.debug("subnetpool_delete(): subnetpoolid=%s" % subnetpool_id)
return neutronclient(request).delete_subnetpool(subnetpool_id)
@profiler.trace
def port_list(request, **params):
LOG.debug("port_list(): params=%s" % (params))
ports = neutronclient(request).list_ports(**params).get('ports')
return [Port(p) for p in ports]
@profiler.trace
def port_get(request, port_id, **params):
LOG.debug("port_get(): portid=%s, params=%s" % (port_id, params))
port = neutronclient(request).show_port(port_id, **params).get('port')
@ -854,6 +893,7 @@ def unescape_port_kwargs(**kwargs):
return kwargs
@profiler.trace
def port_create(request, network_id, **kwargs):
"""Create a port on a specified network.
@ -877,11 +917,13 @@ def port_create(request, network_id, **kwargs):
return Port(port)
@profiler.trace
def port_delete(request, port_id):
LOG.debug("port_delete(): portid=%s" % port_id)
neutronclient(request).delete_port(port_id)
@profiler.trace
def port_update(request, port_id, **kwargs):
LOG.debug("port_update(): portid=%s, kwargs=%s" % (port_id, kwargs))
kwargs = unescape_port_kwargs(**kwargs)
@ -890,6 +932,7 @@ def port_update(request, port_id, **kwargs):
return Port(port)
@profiler.trace
def profile_list(request, type_p, **params):
LOG.debug("profile_list(): "
"profile_type=%(profile_type)s, params=%(params)s",
@ -903,6 +946,7 @@ def profile_list(request, type_p, **params):
return [Profile(n) for n in profiles]
@profiler.trace
def profile_get(request, profile_id, **params):
LOG.debug("profile_get(): "
"profileid=%(profileid)s, params=%(params)s",
@ -912,6 +956,7 @@ def profile_get(request, profile_id, **params):
return Profile(profile)
@profiler.trace
def profile_create(request, **kwargs):
LOG.debug("profile_create(): kwargs=%s", kwargs)
body = {'network_profile': {}}
@ -921,11 +966,13 @@ def profile_create(request, **kwargs):
return Profile(profile)
@profiler.trace
def profile_delete(request, profile_id):
LOG.debug("profile_delete(): profile_id=%s", profile_id)
neutronclient(request).delete_network_profile(profile_id)
@profiler.trace
def profile_update(request, profile_id, **kwargs):
LOG.debug("profile_update(): "
"profileid=%(profileid)s, kwargs=%(kwargs)s",
@ -936,6 +983,7 @@ def profile_update(request, profile_id, **kwargs):
return Profile(profile)
@profiler.trace
def profile_bindings_list(request, type_p, **params):
LOG.debug("profile_bindings_list(): "
"profile_type=%(profile_type)s params=%(params)s",
@ -949,6 +997,7 @@ def profile_bindings_list(request, type_p, **params):
return [Profile(n) for n in bindings]
@profiler.trace
def router_create(request, **kwargs):
LOG.debug("router_create():, kwargs=%s" % kwargs)
body = {'router': {}}
@ -959,6 +1008,7 @@ def router_create(request, **kwargs):
return Router(router)
@profiler.trace
def router_update(request, r_id, **kwargs):
LOG.debug("router_update(): router_id=%s, kwargs=%s" % (r_id, kwargs))
body = {'router': {}}
@ -967,17 +1017,20 @@ def router_update(request, r_id, **kwargs):
return Router(router['router'])
@profiler.trace
def router_get(request, router_id, **params):
router = neutronclient(request).show_router(router_id,
**params).get('router')
return Router(router)
@profiler.trace
def router_list(request, **params):
routers = neutronclient(request).list_routers(**params).get('routers')
return [Router(r) for r in routers]
@profiler.trace
def router_list_on_l3_agent(request, l3_agent_id, **params):
routers = neutronclient(request).\
list_routers_on_l3_agent(l3_agent_id,
@ -985,10 +1038,12 @@ def router_list_on_l3_agent(request, l3_agent_id, **params):
return [Router(r) for r in routers]
@profiler.trace
def router_delete(request, router_id):
neutronclient(request).delete_router(router_id)
@profiler.trace
def router_add_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
@ -999,6 +1054,7 @@ def router_add_interface(request, router_id, subnet_id=None, port_id=None):
return client.add_interface_router(router_id, body)
@profiler.trace
def router_remove_interface(request, router_id, subnet_id=None, port_id=None):
body = {}
if subnet_id:
@ -1008,15 +1064,18 @@ def router_remove_interface(request, router_id, subnet_id=None, port_id=None):
neutronclient(request).remove_interface_router(router_id, body)
@profiler.trace
def router_add_gateway(request, router_id, network_id):
body = {'network_id': network_id}
neutronclient(request).add_gateway_router(router_id, body)
@profiler.trace
def router_remove_gateway(request, router_id):
neutronclient(request).remove_gateway_router(router_id)
@profiler.trace
def router_static_route_list(request, router_id=None):
router = router_get(request, router_id)
try:
@ -1028,6 +1087,7 @@ def router_static_route_list(request, router_id=None):
return routes
@profiler.trace
def router_static_route_remove(request, router_id, route_ids):
currentroutes = router_static_route_list(request, router_id=router_id)
newroutes = []
@ -1040,6 +1100,7 @@ def router_static_route_remove(request, router_id, route_ids):
return new
@profiler.trace
def router_static_route_add(request, router_id, newroute):
body = {}
currentroutes = router_static_route_list(request, router_id=router_id)
@ -1050,53 +1111,66 @@ def router_static_route_add(request, router_id, newroute):
return new
@profiler.trace
def tenant_quota_get(request, tenant_id):
return base.QuotaSet(neutronclient(request).show_quota(tenant_id)['quota'])
@profiler.trace
def tenant_quota_update(request, tenant_id, **kwargs):
quotas = {'quota': kwargs}
return neutronclient(request).update_quota(tenant_id, quotas)
@profiler.trace
def agent_list(request, **params):
agents = neutronclient(request).list_agents(**params)
return [Agent(a) for a in agents['agents']]
@profiler.trace
def list_dhcp_agent_hosting_networks(request, network, **params):
agents = neutronclient(request).list_dhcp_agent_hosting_networks(network,
**params)
return [Agent(a) for a in agents['agents']]
@profiler.trace
def list_l3_agent_hosting_router(request, router, **params):
agents = neutronclient(request).list_l3_agent_hosting_routers(router,
**params)
return [Agent(a) for a in agents['agents']]
@profiler.trace
def show_network_ip_availability(request, network_id):
ip_availability = neutronclient(request).show_network_ip_availability(
network_id)
return ip_availability
@profiler.trace
def add_network_to_dhcp_agent(request, dhcp_agent, network_id):
body = {'network_id': network_id}
return neutronclient(request).add_network_to_dhcp_agent(dhcp_agent, body)
@profiler.trace
def remove_network_from_dhcp_agent(request, dhcp_agent, network_id):
return neutronclient(request).remove_network_from_dhcp_agent(dhcp_agent,
network_id)
@profiler.trace
def provider_list(request):
providers = neutronclient(request).list_service_providers()
return providers['service_providers']
# TODO(pkarikh) need to uncomment when osprofiler will have no
# issues with unicode in:
# openstack_dashboard/test/test_data/nova_data.py#L470 data
# @profiler.trace
def servers_update_addresses(request, servers, all_tenants=False):
"""Retrieve servers networking information from Neutron if enabled.
@ -1186,6 +1260,7 @@ def _server_get_addresses(request, server, ports, floating_ips, network_names):
return dict(addresses)
@profiler.trace
@memoized
def list_extensions(request):
extensions_list = neutronclient(request).list_extensions()
@ -1195,6 +1270,7 @@ def list_extensions(request):
return ()
@profiler.trace
@memoized
def is_extension_supported(request, extension_alias):
extensions = list_extensions(request)


@ -42,7 +42,7 @@ from horizon.utils.memoized import memoized_with_request # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
from openstack_dashboard.contrib.developer.profiler import api as profiler
LOG = logging.getLogger(__name__)
@ -398,32 +398,39 @@ class FloatingIpManager(network_base.FloatingIpManager):
return [FloatingIpPool(pool)
for pool in self.client.floating_ip_pools.list()]
@profiler.trace
def list(self, all_tenants=False):
return [FloatingIp(fip) for fip in
self.client.floating_ips.list(
all_tenants=all_tenants)]
@profiler.trace
def get(self, floating_ip_id):
return FloatingIp(self.client.floating_ips.get(floating_ip_id))
@profiler.trace
def allocate(self, pool, tenant_id=None, **params):
# NOTE: tenant_id will never be used here.
return FloatingIp(self.client.floating_ips.create(pool=pool))
@profiler.trace
def release(self, floating_ip_id):
self.client.floating_ips.delete(floating_ip_id)
@profiler.trace
def associate(self, floating_ip_id, port_id):
        # In Nova, the implied port_id is actually the instance_id
server = self.client.servers.get(port_id)
fip = self.client.floating_ips.get(floating_ip_id)
self.client.servers.add_floating_ip(server.id, fip.ip)
@profiler.trace
def disassociate(self, floating_ip_id):
fip = self.client.floating_ips.get(floating_ip_id)
server = self.client.servers.get(fip.instance_id)
self.client.servers.remove_floating_ip(server.id, fip.ip)
@profiler.trace
def list_targets(self):
return [FloatingIpTarget(s) for s in self.client.servers.list()]
@ -469,26 +476,31 @@ def novaclient(request_auth_params):
return c
@profiler.trace
def server_vnc_console(request, instance_id, console_type='novnc'):
return VNCConsole(novaclient(request).servers.get_vnc_console(
instance_id, console_type)['console'])
@profiler.trace
def server_spice_console(request, instance_id, console_type='spice-html5'):
return SPICEConsole(novaclient(request).servers.get_spice_console(
instance_id, console_type)['console'])
@profiler.trace
def server_rdp_console(request, instance_id, console_type='rdp-html5'):
return RDPConsole(novaclient(request).servers.get_rdp_console(
instance_id, console_type)['console'])
@profiler.trace
def server_serial_console(request, instance_id, console_type='serial'):
return SerialConsole(novaclient(request).servers.get_serial_console(
instance_id, console_type)['console'])
@profiler.trace
def flavor_create(request, name, memory, vcpu, disk, flavorid='auto',
ephemeral=0, swap=0, metadata=None, is_public=True,
rxtx_factor=1):
@ -502,10 +514,12 @@ def flavor_create(request, name, memory, vcpu, disk, flavorid='auto',
return flavor
@profiler.trace
def flavor_delete(request, flavor_id):
novaclient(request).flavors.delete(flavor_id)
@profiler.trace
def flavor_get(request, flavor_id, get_extras=False):
flavor = novaclient(request).flavors.get(flavor_id)
if get_extras:
@ -513,6 +527,7 @@ def flavor_get(request, flavor_id, get_extras=False):
return flavor
@profiler.trace
@memoized
def flavor_list(request, is_public=True, get_extras=False):
"""Get the list of available instance sizes (flavors)."""
@ -523,6 +538,7 @@ def flavor_list(request, is_public=True, get_extras=False):
return flavors
@profiler.trace
def update_pagination(entities, page_size, marker, sort_dir, sort_key,
reversed_order):
has_more_data = has_prev_data = False
@ -547,6 +563,7 @@ def update_pagination(entities, page_size, marker, sort_dir, sort_key,
return entities, has_more_data, has_prev_data
@profiler.trace
@memoized
def flavor_list_paged(request, is_public=True, get_extras=False, marker=None,
paginate=False, sort_key="name", sort_dir="desc",
@ -576,24 +593,28 @@ def flavor_list_paged(request, is_public=True, get_extras=False, marker=None,
return (flavors, has_more_data, has_prev_data)
@profiler.trace
@memoized_with_request(novaclient)
def flavor_access_list(nova_api, flavor=None):
"""Get the list of access instance sizes (flavors)."""
return nova_api.flavor_access.list(flavor=flavor)
@profiler.trace
def add_tenant_to_flavor(request, flavor, tenant):
"""Add a tenant to the given flavor access list."""
return novaclient(request).flavor_access.add_tenant_access(
flavor=flavor, tenant=tenant)
@profiler.trace
def remove_tenant_from_flavor(request, flavor, tenant):
"""Remove a tenant from the given flavor access list."""
return novaclient(request).flavor_access.remove_tenant_access(
flavor=flavor, tenant=tenant)
@profiler.trace
def flavor_get_extras(request, flavor_id, raw=False, flavor=None):
"""Get flavor extra specs."""
if flavor is None:
@ -605,12 +626,14 @@ def flavor_get_extras(request, flavor_id, raw=False, flavor=None):
key, value in extras.items()]
@profiler.trace
def flavor_extra_delete(request, flavor_id, keys):
"""Unset the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
return flavor.unset_keys(keys)
@profiler.trace
def flavor_extra_set(request, flavor_id, metadata):
"""Set the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
@ -619,30 +642,37 @@ def flavor_extra_set(request, flavor_id, metadata):
return flavor.set_keys(metadata)
@profiler.trace
def snapshot_create(request, instance_id, name):
return novaclient(request).servers.create_image(instance_id, name)
@profiler.trace
def keypair_create(request, name):
return novaclient(request).keypairs.create(name)
@profiler.trace
def keypair_import(request, name, public_key):
return novaclient(request).keypairs.create(name, public_key)
@profiler.trace
def keypair_delete(request, keypair_id):
novaclient(request).keypairs.delete(keypair_id)
@profiler.trace
def keypair_list(request):
return novaclient(request).keypairs.list()
@profiler.trace
def keypair_get(request, keypair_id):
return novaclient(request).keypairs.get(keypair_id)
@profiler.trace
def server_create(request, name, image, flavor, key_name, user_data,
security_groups, block_device_mapping=None,
block_device_mapping_v2=None, nics=None,
@ -660,14 +690,17 @@ def server_create(request, name, image, flavor, key_name, user_data,
meta=meta, scheduler_hints=scheduler_hints), request)
@profiler.trace
def server_delete(request, instance_id):
novaclient(request).servers.delete(instance_id)
@profiler.trace
def server_get(request, instance_id):
return Server(novaclient(request).servers.get(instance_id), request)
@profiler.trace
def server_list(request, search_opts=None, all_tenants=False):
page_size = utils.get_page_size(request)
c = novaclient(request)
@ -696,36 +729,44 @@ def server_list(request, search_opts=None, all_tenants=False):
return (servers, has_more_data)
@profiler.trace
def server_console_output(request, instance_id, tail_length=None):
"""Gets console output of an instance."""
return novaclient(request).servers.get_console_output(instance_id,
length=tail_length)
@profiler.trace
def server_pause(request, instance_id):
novaclient(request).servers.pause(instance_id)
@profiler.trace
def server_unpause(request, instance_id):
novaclient(request).servers.unpause(instance_id)
@profiler.trace
def server_suspend(request, instance_id):
novaclient(request).servers.suspend(instance_id)
@profiler.trace
def server_resume(request, instance_id):
novaclient(request).servers.resume(instance_id)
@profiler.trace
def server_shelve(request, instance_id):
novaclient(request).servers.shelve(instance_id)
@profiler.trace
def server_unshelve(request, instance_id):
novaclient(request).servers.unshelve(instance_id)
@profiler.trace
def server_reboot(request, instance_id, soft_reboot=False):
hardness = nova_servers.REBOOT_HARD
if soft_reboot:
@ -733,20 +774,24 @@ def server_reboot(request, instance_id, soft_reboot=False):
novaclient(request).servers.reboot(instance_id, hardness)
@profiler.trace
def server_rebuild(request, instance_id, image_id, password=None,
disk_config=None):
return novaclient(request).servers.rebuild(instance_id, image_id,
password, disk_config)
@profiler.trace
def server_update(request, instance_id, name):
return novaclient(request).servers.update(instance_id, name=name.strip())
@profiler.trace
def server_migrate(request, instance_id):
novaclient(request).servers.migrate(instance_id)
@profiler.trace
def server_live_migrate(request, instance_id, host, block_migration=False,
disk_over_commit=False):
novaclient(request).servers.live_migrate(instance_id, host,
@ -754,96 +799,118 @@ def server_live_migrate(request, instance_id, host, block_migration=False,
disk_over_commit)
@profiler.trace
def server_resize(request, instance_id, flavor, disk_config=None, **kwargs):
novaclient(request).servers.resize(instance_id, flavor,
disk_config, **kwargs)
@profiler.trace
def server_confirm_resize(request, instance_id):
novaclient(request).servers.confirm_resize(instance_id)
@profiler.trace
def server_revert_resize(request, instance_id):
novaclient(request).servers.revert_resize(instance_id)
@profiler.trace
def server_start(request, instance_id):
novaclient(request).servers.start(instance_id)
@profiler.trace
def server_stop(request, instance_id):
novaclient(request).servers.stop(instance_id)
@profiler.trace
def server_lock(request, instance_id):
novaclient(request).servers.lock(instance_id)
@profiler.trace
def server_unlock(request, instance_id):
novaclient(request).servers.unlock(instance_id)
@profiler.trace
def server_metadata_update(request, instance_id, metadata):
novaclient(request).servers.set_meta(instance_id, metadata)
@profiler.trace
def server_metadata_delete(request, instance_id, keys):
novaclient(request).servers.delete_meta(instance_id, keys)
@profiler.trace
def tenant_quota_get(request, tenant_id):
return base.QuotaSet(novaclient(request).quotas.get(tenant_id))
@profiler.trace
def tenant_quota_update(request, tenant_id, **kwargs):
if kwargs:
novaclient(request).quotas.update(tenant_id, **kwargs)
@profiler.trace
def default_quota_get(request, tenant_id):
return base.QuotaSet(novaclient(request).quotas.defaults(tenant_id))
@profiler.trace
def default_quota_update(request, **kwargs):
novaclient(request).quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)
@profiler.trace
def usage_get(request, tenant_id, start, end):
return NovaUsage(novaclient(request).usage.get(tenant_id, start, end))
@profiler.trace
def usage_list(request, start, end):
return [NovaUsage(u) for u in
novaclient(request).usage.list(start, end, True)]
@profiler.trace
def virtual_interfaces_list(request, instance_id):
return novaclient(request).virtual_interfaces.list(instance_id)
@profiler.trace
def get_x509_credentials(request):
return novaclient(request).certs.create()
@profiler.trace
def get_x509_root_certificate(request):
return novaclient(request).certs.get()
@profiler.trace
def get_password(request, instance_id, private_key=None):
return novaclient(request).servers.get_password(instance_id, private_key)
@profiler.trace
def instance_volume_attach(request, volume_id, instance_id, device):
return novaclient(request).volumes.create_server_volume(instance_id,
volume_id,
device)
@profiler.trace
def instance_volume_detach(request, instance_id, att_id):
return novaclient(request).volumes.delete_server_volume(instance_id,
att_id)
@profiler.trace
def instance_volumes_list(request, instance_id):
from openstack_dashboard.api import cinder
@ -856,18 +923,22 @@ def instance_volumes_list(request, instance_id):
return volumes
@profiler.trace
def hypervisor_list(request):
return novaclient(request).hypervisors.list()
@profiler.trace
def hypervisor_stats(request):
return novaclient(request).hypervisors.statistics()
@profiler.trace
def hypervisor_search(request, query, servers=True):
return novaclient(request).hypervisors.search(query, servers)
@profiler.trace
def evacuate_host(request, host, target=None, on_shared_storage=False):
    # TODO(jmolle): this should be changed to the nova atomic API host_evacuate
hypervisors = novaclient(request).hypervisors.search(host, True)
@ -894,6 +965,7 @@ def evacuate_host(request, host, target=None, on_shared_storage=False):
return True
@profiler.trace
def migrate_host(request, host, live_migrate=False, disk_over_commit=False,
block_migration=False):
hypervisors = novaclient(request).hypervisors.search(host, True)
@ -930,6 +1002,7 @@ def migrate_host(request, host, live_migrate=False, disk_over_commit=False,
return True
@profiler.trace
def tenant_absolute_limits(request, reserved=False):
limits = novaclient(request).limits.get(reserved=reserved).absolute
limits_dict = {}
@ -948,22 +1021,27 @@ def tenant_absolute_limits(request, reserved=False):
return limits_dict
@profiler.trace
def availability_zone_list(request, detailed=False):
return novaclient(request).availability_zones.list(detailed=detailed)
@profiler.trace
def server_group_list(request):
return novaclient(request).server_groups.list()
@profiler.trace
def service_list(request, binary=None):
return novaclient(request).services.list(binary=binary)
@profiler.trace
def service_enable(request, host, binary):
return novaclient(request).services.enable(host, binary)
@profiler.trace
def service_disable(request, host, binary, reason=None):
if reason:
return novaclient(request).services.disable_log_reason(host,
@ -972,6 +1050,7 @@ def service_disable(request, host, binary, reason=None):
return novaclient(request).services.disable(host, binary)
@profiler.trace
def aggregate_details_list(request):
result = []
c = novaclient(request)
@ -980,38 +1059,47 @@ def aggregate_details_list(request):
return result
@profiler.trace
def aggregate_create(request, name, availability_zone=None):
return novaclient(request).aggregates.create(name, availability_zone)
@profiler.trace
def aggregate_delete(request, aggregate_id):
return novaclient(request).aggregates.delete(aggregate_id)
@profiler.trace
def aggregate_get(request, aggregate_id):
return novaclient(request).aggregates.get(aggregate_id)
@profiler.trace
def aggregate_update(request, aggregate_id, values):
return novaclient(request).aggregates.update(aggregate_id, values)
@profiler.trace
def aggregate_set_metadata(request, aggregate_id, metadata):
return novaclient(request).aggregates.set_metadata(aggregate_id, metadata)
@profiler.trace
def host_list(request):
return novaclient(request).hosts.list()
@profiler.trace
def add_host_to_aggregate(request, aggregate_id, host):
return novaclient(request).aggregates.add_host(aggregate_id, host)
@profiler.trace
def remove_host_from_aggregate(request, aggregate_id, host):
return novaclient(request).aggregates.remove_host(aggregate_id, host)
@profiler.trace
def interface_attach(request,
server, port_id=None, net_id=None, fixed_ip=None):
return novaclient(request).servers.interface_attach(server,
@ -1020,10 +1108,12 @@ def interface_attach(request,
fixed_ip)
@profiler.trace
def interface_detach(request, server, port_id):
return novaclient(request).servers.interface_detach(server, port_id)
@profiler.trace
@memoized_with_request(novaclient)
def list_extensions(nova_api):
"""List all nova extensions, except the ones in the blacklist."""
@ -1036,6 +1126,7 @@ def list_extensions(nova_api):
)
@profiler.trace
@memoized_with_request(list_extensions, 1)
def extension_supported(extension_name, extensions):
"""Determine if nova supports a given extension name.
@ -1049,16 +1140,19 @@ def extension_supported(extension_name, extensions):
return False
@profiler.trace
def can_set_server_password():
features = getattr(settings, 'OPENSTACK_HYPERVISOR_FEATURES', {})
return features.get('can_set_password', False)
@profiler.trace
def instance_action_list(request, instance_id):
return nova_instance_action.InstanceActionManager(
novaclient(request)).list(instance_id)
@profiler.trace
def can_set_mount_point():
"""Return the Hypervisor's capability of setting mount points."""
hypervisor_features = getattr(
@ -1066,6 +1160,7 @@ def can_set_mount_point():
return hypervisor_features.get("can_set_mount_point", False)
@profiler.trace
def requires_keypair():
features = getattr(settings, 'OPENSTACK_HYPERVISOR_FEATURES', {})
return features.get('requires_keypair', False)
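# For reference, a sketch of the settings block the feature checks above read.
# The key names come from the helpers in this file; the False values are only
# illustrative defaults one might put in a local_settings.py.
OPENSTACK_HYPERVISOR_FEATURES = {
    'can_set_password': False,
    'can_set_mount_point': False,
    'requires_keypair': False,
}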


@ -26,7 +26,7 @@ from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from openstack_dashboard.api import base
from openstack_dashboard.contrib.developer.profiler import api as profiler
FOLDER_DELIMITER = "/"
CHUNK_SIZE = getattr(settings, 'SWIFT_FILE_TRANSFER_CHUNK_SIZE', 512 * 1024)
@ -116,6 +116,7 @@ def swift_api(request):
auth_version="2.0")
@profiler.trace
def swift_container_exists(request, container_name):
try:
swift_api(request).head_container(container_name)
@ -124,6 +125,7 @@ def swift_container_exists(request, container_name):
return False
@profiler.trace
def swift_object_exists(request, container_name, object_name):
try:
swift_api(request).head_object(container_name, object_name)
@ -132,6 +134,7 @@ def swift_object_exists(request, container_name, object_name):
return False
@profiler.trace
def swift_get_containers(request, marker=None):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
headers, containers = swift_api(request).get_account(limit=limit + 1,
@ -144,6 +147,7 @@ def swift_get_containers(request, marker=None):
return (container_objs, False)
@profiler.trace
def swift_get_container(request, container_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name, "")
@ -177,6 +181,7 @@ def swift_get_container(request, container_name, with_data=True):
return Container(container_info)
@profiler.trace
def swift_create_container(request, name, metadata=({})):
if swift_container_exists(request, name):
raise exceptions.AlreadyExists(name, 'container')
@ -185,12 +190,14 @@ def swift_create_container(request, name, metadata=({})):
return Container({'name': name})
@profiler.trace
def swift_update_container(request, name, metadata=({})):
headers = _metadata_to_header(metadata)
swift_api(request).post_container(name, headers=headers)
return Container({'name': name})
@profiler.trace
def swift_delete_container(request, name):
    # It cannot be deleted if it's not empty. The batch removal of objects
    # should be done in swiftclient instead of Horizon.
@ -204,6 +211,7 @@ def swift_delete_container(request, name):
return True
@profiler.trace
def swift_get_objects(request, container_name, prefix=None, marker=None,
limit=None):
limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
@ -222,6 +230,7 @@ def swift_get_objects(request, container_name, prefix=None, marker=None,
return (object_objs, False)
@profiler.trace
def swift_filter_objects(request, filter_string, container_name, prefix=None,
marker=None):
# FIXME(kewu): Swift currently has no real filtering API, thus the marker
@ -257,6 +266,7 @@ def wildcard_search(string, q):
return wildcard_search(tail, '*'.join(q_list[1:]))
@profiler.trace
def swift_copy_object(request, orig_container_name, orig_object_name,
new_container_name, new_object_name):
if swift_object_exists(request, new_container_name, new_object_name):
@ -270,6 +280,7 @@ def swift_copy_object(request, orig_container_name, orig_object_name,
headers=headers)
@profiler.trace
def swift_upload_object(request, container_name, object_name,
object_file=None):
headers = {}
@ -288,6 +299,7 @@ def swift_upload_object(request, container_name, object_name,
return StorageObject(obj_info, container_name)
@profiler.trace
def swift_create_pseudo_folder(request, container_name, pseudo_folder_name):
# Make sure the folder name doesn't already exist.
if swift_object_exists(request, container_name, pseudo_folder_name):
@ -306,11 +318,13 @@ def swift_create_pseudo_folder(request, container_name, pseudo_folder_name):
return PseudoFolder(obj_info, container_name)
@profiler.trace
def swift_delete_object(request, container_name, object_name):
swift_api(request).delete_object(container_name, object_name)
return True
@profiler.trace
def swift_delete_folder(request, container_name, object_name):
objects, more = swift_get_objects(request, container_name,
prefix=object_name)
@ -330,6 +344,7 @@ def swift_delete_folder(request, container_name, object_name):
return True
@profiler.trace
def swift_get_object(request, container_name, object_name, with_data=True,
resp_chunk_size=CHUNK_SIZE):
if with_data:
@ -359,6 +374,7 @@ def swift_get_object(request, container_name, object_name, with_data=True,
data=data)
@profiler.trace
def swift_get_capabilities(request):
try:
return swift_api(request).get_capabilities()


@ -19,6 +19,7 @@ from collections import OrderedDict
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import neutron
from openstack_dashboard.contrib.developer.profiler import api as profiler
neutronclient = neutron.neutronclient
@ -55,6 +56,7 @@ class VPNService(neutron.NeutronAPIDictWrapper):
super(VPNService, self).__init__(apiresource)
@profiler.trace
def vpnservice_create(request, **kwargs):
"""Create VPNService
@ -77,6 +79,7 @@ def vpnservice_create(request, **kwargs):
return VPNService(vpnservice)
@profiler.trace
def vpnservice_list(request, **kwargs):
return _vpnservice_list(request, expand_subnet=True, expand_router=True,
expand_conns=True, **kwargs)
@ -104,6 +107,7 @@ def _vpnservice_list(request, expand_subnet=False, expand_router=False,
return [VPNService(v) for v in vpnservices]
@profiler.trace
def vpnservice_get(request, vpnservice_id):
return _vpnservice_get(request, vpnservice_id, expand_subnet=True,
expand_router=True, expand_conns=True)
@ -126,16 +130,19 @@ def _vpnservice_get(request, vpnservice_id, expand_subnet=False,
return VPNService(vpnservice)
@profiler.trace
def vpnservice_update(request, vpnservice_id, **kwargs):
vpnservice = neutronclient(request).update_vpnservice(
vpnservice_id, kwargs).get('vpnservice')
return VPNService(vpnservice)
@profiler.trace
def vpnservice_delete(request, vpnservice_id):
neutronclient(request).delete_vpnservice(vpnservice_id)
@profiler.trace
def ikepolicy_create(request, **kwargs):
"""Create IKEPolicy
@ -164,6 +171,7 @@ def ikepolicy_create(request, **kwargs):
return IKEPolicy(ikepolicy)
@profiler.trace
def ikepolicy_list(request, **kwargs):
return _ikepolicy_list(request, expand_conns=True, **kwargs)
@ -179,6 +187,7 @@ def _ikepolicy_list(request, expand_conns=False, **kwargs):
return [IKEPolicy(v) for v in ikepolicies]
@profiler.trace
def ikepolicy_get(request, ikepolicy_id):
return _ikepolicy_get(request, ikepolicy_id, expand_conns=True)
@ -193,16 +202,19 @@ def _ikepolicy_get(request, ikepolicy_id, expand_conns=False):
return IKEPolicy(ikepolicy)
@profiler.trace
def ikepolicy_update(request, ikepolicy_id, **kwargs):
ikepolicy = neutronclient(request).update_ikepolicy(
ikepolicy_id, kwargs).get('ikepolicy')
return IKEPolicy(ikepolicy)
@profiler.trace
def ikepolicy_delete(request, ikepolicy_id):
neutronclient(request).delete_ikepolicy(ikepolicy_id)
@profiler.trace
def ipsecpolicy_create(request, **kwargs):
"""Create IPSecPolicy
@ -231,6 +243,7 @@ def ipsecpolicy_create(request, **kwargs):
return IPSecPolicy(ipsecpolicy)
@profiler.trace
def ipsecpolicy_list(request, **kwargs):
return _ipsecpolicy_list(request, expand_conns=True, **kwargs)
@ -246,6 +259,7 @@ def _ipsecpolicy_list(request, expand_conns=False, **kwargs):
return [IPSecPolicy(v) for v in ipsecpolicies]
@profiler.trace
def ipsecpolicy_get(request, ipsecpolicy_id):
return _ipsecpolicy_get(request, ipsecpolicy_id, expand_conns=True)
@ -261,16 +275,19 @@ def _ipsecpolicy_get(request, ipsecpolicy_id, expand_conns=False):
return IPSecPolicy(ipsecpolicy)
@profiler.trace
def ipsecpolicy_update(request, ipsecpolicy_id, **kwargs):
ipsecpolicy = neutronclient(request).update_ipsecpolicy(
ipsecpolicy_id, kwargs).get('ipsecpolicy')
return IPSecPolicy(ipsecpolicy)
@profiler.trace
def ipsecpolicy_delete(request, ipsecpolicy_id):
neutronclient(request).delete_ipsecpolicy(ipsecpolicy_id)
@profiler.trace
def ipsecsiteconnection_create(request, **kwargs):
"""Create IPSecSiteConnection
@ -309,6 +326,7 @@ def ipsecsiteconnection_create(request, **kwargs):
return IPSecSiteConnection(ipsecsiteconnection)
@profiler.trace
@memoized
def ipsecsiteconnection_list(request, **kwargs):
return _ipsecsiteconnection_list(request, expand_ikepolicies=True,
@ -342,6 +360,7 @@ def _ipsecsiteconnection_list(request, expand_ikepolicies=False,
return [IPSecSiteConnection(v) for v in ipsecsiteconnections]
@profiler.trace
def ipsecsiteconnection_get(request, ipsecsiteconnection_id):
return _ipsecsiteconnection_get(request, ipsecsiteconnection_id,
expand_ikepolicies=True,
@ -366,11 +385,13 @@ def _ipsecsiteconnection_get(request, ipsecsiteconnection_id,
return IPSecSiteConnection(ipsecsiteconnection)
@profiler.trace
def ipsecsiteconnection_update(request, ipsecsiteconnection_id, **kwargs):
ipsecsiteconnection = neutronclient(request).update_ipsec_site_connection(
ipsecsiteconnection_id, kwargs).get('ipsec_site_connection')
return IPSecSiteConnection(ipsecsiteconnection)
@profiler.trace
def ipsecsiteconnection_delete(request, ipsecsiteconnection_id):
neutronclient(request).delete_ipsec_site_connection(ipsecsiteconnection_id)


@ -97,3 +97,13 @@ def get_trace(request, trace_id):
    # Throw away the top-level node, which is a dummy that doesn't contain any
    # info; use its first and only child as the top-level node instead.
return rec(trace['children'][0])
if not PROFILER_SETTINGS.get('enabled', False):
def trace(function):
return function
else:
def trace(function):
func_name = function.__module__ + '.' + function.__name__
decorator = profiler.trace(func_name)
return decorator(function)
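# Illustrative usage of the decorator defined above: a caller module traces an
# API wrapper simply by stacking @profiler.trace on top of it. The wrapper
# below is a hypothetical stand-in mirroring the nova wrappers in this change;
# when profiling is disabled in PROFILER_SETTINGS, the function is returned
# unchanged, so there is no runtime overhead.
from openstack_dashboard.api import nova
from openstack_dashboard.contrib.developer.profiler import api as profiler


@profiler.trace
def server_show(request, instance_id):
    # Reported under the point name '<module>.server_show' while tracing is on.
    return nova.novaclient(request).servers.get(instance_id)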


@ -24,6 +24,7 @@ from openstack_dashboard.api import cinder
from openstack_dashboard.api import network
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.contrib.developer.profiler import api as profiler
LOG = logging.getLogger(__name__)
@ -167,6 +168,7 @@ def _get_quota_data(request, tenant_mode=True, disabled_quotas=None,
return qs
@profiler.trace
def get_default_quota_data(request, disabled_quotas=None, tenant_id=None):
return _get_quota_data(request,
tenant_mode=False,
@ -174,6 +176,7 @@ def get_default_quota_data(request, disabled_quotas=None, tenant_id=None):
tenant_id=tenant_id)
@profiler.trace
def get_tenant_quota_data(request, disabled_quotas=None, tenant_id=None):
qs = _get_quota_data(request,
tenant_mode=True,
@ -237,6 +240,7 @@ def get_tenant_quota_data(request, disabled_quotas=None, tenant_id=None):
return qs
@profiler.trace
def get_disabled_quotas(request):
disabled_quotas = set([])
@ -280,6 +284,7 @@ def get_disabled_quotas(request):
return disabled_quotas
@profiler.trace
def _get_tenant_compute_usages(request, usages, disabled_quotas, tenant_id):
    # Unlike the other services, it can be the case that nova is enabled but
# doesn't support quotas, in which case we still want to get usage info,
@ -318,6 +323,7 @@ def _get_tenant_compute_usages(request, usages, disabled_quotas, tenant_id):
usages.tally('ram', 0)
@profiler.trace
def _get_tenant_network_usages(request, usages, disabled_quotas, tenant_id):
floating_ips = []
try:
@ -364,6 +370,7 @@ def _get_tenant_network_usages(request, usages, disabled_quotas, tenant_id):
usages.tally('routers', len(routers))
@profiler.trace
def _get_tenant_volume_usages(request, usages, disabled_quotas, tenant_id):
if 'volumes' not in disabled_quotas:
try:
@ -382,6 +389,7 @@ def _get_tenant_volume_usages(request, usages, disabled_quotas, tenant_id):
exceptions.handle(request, msg)
@profiler.trace
@memoized
def tenant_quota_usages(request, tenant_id=None):
"""Get our quotas and construct our usage object.
@ -407,6 +415,7 @@ def tenant_quota_usages(request, tenant_id=None):
return usages
@profiler.trace
def tenant_limit_usages(request):
    # TODO(licostan): This method should be removed from the Quota module.
    # ProjectUsage/BaseUsage may be used instead on the volume/image dashboards.