Split large methods into submethods to reduce max-complexity

With this refactoring, the pep8 max-complexity limit is reduced from 33 to 19.

Closes-Bug: #1399523
Change-Id: I7e9e5cefd3e72e322d8b327f495ce7fc863c3678
This commit is contained in:
parent
d7ad91e8a8
commit
3b24bca074
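
At its core, the horizon exceptions change below replaces one long chain of issubclass() branches inside handle() with small per-category handler functions selected from the HANDLE_EXC_METHODS dispatch table. A minimal, self-contained sketch of that pattern (illustrative names only, not the exact Horizon code):

    def handle_not_found(request, message):
        # Each extracted handler deals with exactly one error category.
        return "NotFound"

    def handle_recoverable(request, message):
        return "RecoverableError"

    HANDLERS = [
        {'exc': LookupError, 'handler': handle_not_found},
        {'exc': RuntimeError, 'handler': handle_recoverable},
    ]

    def handle(request, exc, message=None):
        # Dispatch on exception type instead of one long if/elif chain.
        for entry in HANDLERS:
            if issubclass(type(exc), entry['exc']):
                return entry['handler'](request, message)
        raise exc  # nothing matched, so re-raise

    print(handle(None, KeyError("missing")))  # NotFound (KeyError is a LookupError)

Each extracted handler stays well under the complexity threshold on its own, and handle() collapses to a short dispatch loop.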
@@ -218,6 +218,70 @@ def check_message(keywords, message):
        raise


def handle_unauthorized(request, message, redirect, ignore, escalate, handled,
                        force_silence, force_log,
                        log_method, log_entry, log_level):
    if ignore:
        return NotAuthorized
    if not force_silence and not handled:
        log_method(error_color("Unauthorized: %s" % log_entry))
    if not handled:
        if message:
            message = _("Unauthorized: %s") % message
        # We get some pretty useless error messages back from
        # some clients, so let's define our own fallback.
        fallback = _("Unauthorized. Please try logging in again.")
        messages.error(request, message or fallback)
    # Escalation means logging the user out and raising NotAuthorized
    # so the middleware will redirect them appropriately.
    if escalate:
        # Prevents creation of circular import. django.contrib.auth
        # requires openstack_dashboard.settings to be loaded (by trying to
        # access settings.CACHES in in django.core.caches) while
        # openstack_dashboard.settings requires django.contrib.auth to be
        # loaded while importing openstack_auth.utils
        from django.contrib.auth import logout  # noqa
        logout(request)
        raise NotAuthorized
    # Otherwise continue and present our "unauthorized" error message.
    return NotAuthorized


def handle_notfound(request, message, redirect, ignore, escalate, handled,
                    force_silence, force_log,
                    log_method, log_entry, log_level):
    if not force_silence and not handled and (not ignore or force_log):
        log_method(error_color("Not Found: %s" % log_entry))
    if not ignore and not handled:
        messages.error(request, message or log_entry)
    if redirect:
        raise Http302(redirect)
    if not escalate:
        return NotFound  # return to normal code flow


def handle_recoverable(request, message, redirect, ignore, escalate, handled,
                       force_silence, force_log,
                       log_method, log_entry, log_level):
    if not force_silence and not handled and (not ignore or force_log):
        # Default recoverable error to WARN log level
        log_method = getattr(LOG, log_level or "warning")
        log_method(error_color("Recoverable error: %s" % log_entry))
    if not ignore and not handled:
        messages.error(request, message or log_entry)
    if redirect:
        raise Http302(redirect)
    if not escalate:
        return RecoverableError  # return to normal code flow


HANDLE_EXC_METHODS = [
    {'exc': UNAUTHORIZED, 'handler': handle_unauthorized, 'set_wrap': False},
    {'exc': NOT_FOUND, 'handler': handle_notfound, 'set_wrap': True},
    {'exc': RECOVERABLE, 'handler': handle_recoverable, 'set_wrap': True},
]


def handle(request, message=None, redirect=None, ignore=False,
           escalate=False, log_level=None, force_log=None):
    """Centralized error handling for Horizon.
@@ -277,55 +341,16 @@ def handle(request, message=None, redirect=None, ignore=False,
    if message:
        message = encoding.force_text(message)

    if issubclass(exc_type, UNAUTHORIZED):
        if ignore:
            return NotAuthorized
        if not force_silence and not handled:
            log_method(error_color("Unauthorized: %s" % log_entry))
        if not handled:
            if message:
                message = _("Unauthorized: %s") % message
            # We get some pretty useless error messages back from
            # some clients, so let's define our own fallback.
            fallback = _("Unauthorized. Please try logging in again.")
            messages.error(request, message or fallback)
        # Escalation means logging the user out and raising NotAuthorized
        # so the middleware will redirect them appropriately.
        if escalate:
            # Prevents creation of circular import. django.contrib.auth
            # requires openstack_dashboard.settings to be loaded (by trying to
            # access settings.CACHES in in django.core.caches) while
            # openstack_dashboard.settings requires django.contrib.auth to be
            # loaded while importing openstack_auth.utils
            from django.contrib.auth import logout  # noqa
            logout(request)
            raise NotAuthorized
        # Otherwise continue and present our "unauthorized" error message.
        return NotAuthorized

    if issubclass(exc_type, NOT_FOUND):
        wrap = True
        if not force_silence and not handled and (not ignore or force_log):
            log_method(error_color("Not Found: %s" % log_entry))
        if not ignore and not handled:
            messages.error(request, message or log_entry)
        if redirect:
            raise Http302(redirect)
        if not escalate:
            return NotFound  # return to normal code flow

    if issubclass(exc_type, RECOVERABLE):
        wrap = True
        if not force_silence and not handled and (not ignore or force_log):
            # Default recoverable error to WARN log level
            log_method = getattr(LOG, log_level or "warning")
            log_method(error_color("Recoverable error: %s" % log_entry))
        if not ignore and not handled:
            messages.error(request, message or log_entry)
        if redirect:
            raise Http302(redirect)
        if not escalate:
            return RecoverableError  # return to normal code flow
    for exc_handler in HANDLE_EXC_METHODS:
        if issubclass(exc_type, exc_handler['exc']):
            if exc_handler['set_wrap']:
                wrap = True
            handler = exc_handler['handler']
            ret = handler(request, message, redirect, ignore, escalate,
                          handled, force_silence, force_log,
                          log_method, log_entry, log_level)
            if ret:
                return ret  # return to normal code flow

    # If we've gotten here, time to wrap and/or raise our exception.
    if wrap:
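
A practical side effect of the split above is that the handlers are now plain module-level functions that can be exercised in isolation. A hypothetical test sketch (the names handle_notfound and NotFound come from the diff; the module path and the fake arguments are assumptions):

    from horizon.exceptions import NotFound, handle_notfound

    def test_handle_notfound_silenced():
        logged = []
        result = handle_notfound(
            request=None, message=None, redirect=None, ignore=True,
            escalate=False, handled=True, force_silence=True, force_log=False,
            log_method=logged.append, log_entry='missing', log_level=None)
        assert result is NotFound  # escalate=False returns to normal code flow
        assert logged == []        # force_silence=True suppresses logging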
@@ -24,6 +24,7 @@ from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import memoized
from horizon import workflows

from openstack_dashboard import api
@ -396,22 +397,21 @@ class CreateProject(workflows.Workflow):
|
||||
def format_status_message(self, message):
|
||||
return message % self.context.get('name', 'unknown project')
|
||||
|
||||
def handle(self, request, data):
|
||||
def _create_project(self, request, data):
|
||||
# create the project
|
||||
domain_id = data['domain_id']
|
||||
try:
|
||||
desc = data['description']
|
||||
self.object = api.keystone.tenant_create(request,
|
||||
name=data['name'],
|
||||
description=desc,
|
||||
enabled=data['enabled'],
|
||||
domain=domain_id)
|
||||
return api.keystone.tenant_create(request,
|
||||
name=data['name'],
|
||||
description=desc,
|
||||
enabled=data['enabled'],
|
||||
domain=domain_id)
|
||||
except Exception:
|
||||
exceptions.handle(request, ignore=True)
|
||||
return False
|
||||
|
||||
project_id = self.object.id
|
||||
return
|
||||
|
||||
def _update_project_members(self, request, data, project_id):
|
||||
# update project members
|
||||
users_to_add = 0
|
||||
try:
|
||||
@ -445,36 +445,37 @@ class CreateProject(workflows.Workflow):
|
||||
% {'users_to_add': users_to_add,
|
||||
'group_msg': group_msg})
|
||||
|
||||
if PROJECT_GROUP_ENABLED:
|
||||
# update project groups
|
||||
groups_to_add = 0
|
||||
try:
|
||||
available_roles = api.keystone.role_list(request)
|
||||
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
|
||||
def _update_project_groups(self, request, data, project_id):
|
||||
# update project groups
|
||||
groups_to_add = 0
|
||||
try:
|
||||
available_roles = api.keystone.role_list(request)
|
||||
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
|
||||
|
||||
# count how many groups are to be added
|
||||
for role in available_roles:
|
||||
field_name = member_step.get_member_field_name(role.id)
|
||||
role_list = data[field_name]
|
||||
groups_to_add += len(role_list)
|
||||
# add new groups to project
|
||||
for role in available_roles:
|
||||
field_name = member_step.get_member_field_name(role.id)
|
||||
role_list = data[field_name]
|
||||
groups_added = 0
|
||||
for group in role_list:
|
||||
api.keystone.add_group_role(request,
|
||||
role=role.id,
|
||||
group=group,
|
||||
project=project_id)
|
||||
groups_added += 1
|
||||
groups_to_add -= groups_added
|
||||
except Exception:
|
||||
exceptions.handle(request,
|
||||
_('Failed to add %s project groups '
|
||||
'and update project quotas.')
|
||||
% groups_to_add)
|
||||
# count how many groups are to be added
|
||||
for role in available_roles:
|
||||
field_name = member_step.get_member_field_name(role.id)
|
||||
role_list = data[field_name]
|
||||
groups_to_add += len(role_list)
|
||||
# add new groups to project
|
||||
for role in available_roles:
|
||||
field_name = member_step.get_member_field_name(role.id)
|
||||
role_list = data[field_name]
|
||||
groups_added = 0
|
||||
for group in role_list:
|
||||
api.keystone.add_group_role(request,
|
||||
role=role.id,
|
||||
group=group,
|
||||
project=project_id)
|
||||
groups_added += 1
|
||||
groups_to_add -= groups_added
|
||||
except Exception:
|
||||
exceptions.handle(request,
|
||||
_('Failed to add %s project groups '
|
||||
'and update project quotas.')
|
||||
% groups_to_add)
|
||||
|
||||
def _update_project_quota(self, request, data, project_id):
|
||||
# Update the project quota.
|
||||
nova_data = dict(
|
||||
[(key, data[key]) for key in quotas.NOVA_QUOTA_FIELDS])
|
||||
@ -500,6 +501,16 @@ class CreateProject(workflows.Workflow):
|
||||
**neutron_data)
|
||||
except Exception:
|
||||
exceptions.handle(request, _('Unable to set project quotas.'))
|
||||
|
||||
def handle(self, request, data):
|
||||
project = self._create_project(request, data)
|
||||
if not project:
|
||||
return False
|
||||
project_id = project.id
|
||||
self._update_project_members(request, data, project_id)
|
||||
if PROJECT_GROUP_ENABLED:
|
||||
self._update_project_groups(request, data, project_id)
|
||||
self._update_project_quota(request, data, project_id)
|
||||
return True
|
||||
|
||||
|
||||
@ -550,34 +561,90 @@ class UpdateProject(workflows.Workflow):
|
||||
def format_status_message(self, message):
|
||||
return message % self.context.get('name', 'unknown project')
|
||||
|
||||
def handle(self, request, data):
|
||||
# FIXME(gabriel): This should be refactored to use Python's built-in
|
||||
# sets and do this all in a single "roles to add" and "roles to remove"
|
||||
# pass instead of the multi-pass thing happening now.
|
||||
@memoized.memoized_method
|
||||
def _get_available_roles(self, request):
|
||||
return api.keystone.role_list(request)
|
||||
|
||||
project_id = data['project_id']
|
||||
domain_id = ''
|
||||
def _update_project(self, request, data):
|
||||
# update project info
|
||||
try:
|
||||
project = api.keystone.tenant_update(
|
||||
project_id = data['project_id']
|
||||
return api.keystone.tenant_update(
|
||||
request,
|
||||
project_id,
|
||||
name=data['name'],
|
||||
description=data['description'],
|
||||
enabled=data['enabled'])
|
||||
# Use the domain_id from the project if available
|
||||
domain_id = getattr(project, "domain_id", None)
|
||||
except Exception:
|
||||
exceptions.handle(request, ignore=True)
|
||||
return
|
||||
|
||||
def _add_roles_to_users(self, request, data, project_id, user,
|
||||
current_roles, available_roles):
|
||||
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
|
||||
current_role_ids = [role.id for role in current_roles]
|
||||
|
||||
for role in available_roles:
|
||||
field_name = member_step.get_member_field_name(role.id)
|
||||
# Check if the user is in the list of users with this role.
|
||||
if user.id in data[field_name]:
|
||||
# Add it if necessary
|
||||
if role.id not in current_role_ids:
|
||||
# user role has changed
|
||||
api.keystone.add_tenant_user_role(
|
||||
request,
|
||||
project=project_id,
|
||||
user=user.id,
|
||||
role=role.id)
|
||||
else:
|
||||
# User role is unchanged, so remove it from the
|
||||
# remaining roles list to avoid removing it later.
|
||||
index = current_role_ids.index(role.id)
|
||||
current_role_ids.pop(index)
|
||||
|
||||
return current_role_ids
|
||||
|
||||
def _remove_roles_from_user(self, request, project_id, user,
|
||||
current_role_ids):
|
||||
for id_to_delete in current_role_ids:
|
||||
api.keystone.remove_tenant_user_role(
|
||||
request,
|
||||
project=project_id,
|
||||
user=user.id,
|
||||
role=id_to_delete)
|
||||
|
||||
def _is_removing_self_admin_role(self, request, project_id, user,
|
||||
current_roles, current_role_ids):
|
||||
is_current_user = user.id == request.user.id
|
||||
is_current_project = project_id == request.user.tenant_id
|
||||
admin_roles = [role for role in current_roles
|
||||
if role.name.lower() == 'admin']
|
||||
if len(admin_roles):
|
||||
removing_admin = any([role.id in current_role_ids
|
||||
for role in admin_roles])
|
||||
else:
|
||||
removing_admin = False
|
||||
|
||||
if is_current_user and is_current_project and removing_admin:
|
||||
# Cannot remove "admin" role on current(admin) project
|
||||
msg = _('You cannot revoke your administrative privileges '
|
||||
'from the project you are currently logged into. '
|
||||
'Please switch to another project with '
|
||||
'administrative privileges or remove the '
|
||||
'administrative role manually via the CLI.')
|
||||
messages.warning(request, msg)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def _update_project_members(self, request, data, project_id):
|
||||
# update project members
|
||||
users_to_modify = 0
|
||||
# Project-user member step
|
||||
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
|
||||
try:
|
||||
# Get our role options
|
||||
available_roles = api.keystone.role_list(request)
|
||||
available_roles = self._get_available_roles(request)
|
||||
# Get the users currently associated with this project so we
|
||||
# can diff against it.
|
||||
project_members = api.keystone.user_list(request,
|
||||
@ -587,56 +654,18 @@ class UpdateProject(workflows.Workflow):
|
||||
for user in project_members:
|
||||
# Check if there have been any changes in the roles of
|
||||
# Existing project members.
|
||||
current_roles = api.keystone.roles_for_user(self.request,
|
||||
user.id,
|
||||
project_id)
|
||||
current_role_ids = [role.id for role in current_roles]
|
||||
|
||||
for role in available_roles:
|
||||
field_name = member_step.get_member_field_name(role.id)
|
||||
# Check if the user is in the list of users with this role.
|
||||
if user.id in data[field_name]:
|
||||
# Add it if necessary
|
||||
if role.id not in current_role_ids:
|
||||
# user role has changed
|
||||
api.keystone.add_tenant_user_role(
|
||||
request,
|
||||
project=project_id,
|
||||
user=user.id,
|
||||
role=role.id)
|
||||
else:
|
||||
# User role is unchanged, so remove it from the
|
||||
# remaining roles list to avoid removing it later.
|
||||
index = current_role_ids.index(role.id)
|
||||
current_role_ids.pop(index)
|
||||
|
||||
current_roles = api.keystone.roles_for_user(
|
||||
self.request, user.id, project_id)
|
||||
current_role_ids = self._add_roles_to_users(
|
||||
request, data, project_id, user,
|
||||
current_roles, available_roles)
|
||||
# Prevent admins from doing stupid things to themselves.
|
||||
is_current_user = user.id == request.user.id
|
||||
is_current_project = project_id == request.user.tenant_id
|
||||
admin_roles = [role for role in current_roles
|
||||
if role.name.lower() == 'admin']
|
||||
if len(admin_roles):
|
||||
removing_admin = any([role.id in current_role_ids
|
||||
for role in admin_roles])
|
||||
else:
|
||||
removing_admin = False
|
||||
if is_current_user and is_current_project and removing_admin:
|
||||
# Cannot remove "admin" role on current(admin) project
|
||||
msg = _('You cannot revoke your administrative privileges '
|
||||
'from the project you are currently logged into. '
|
||||
'Please switch to another project with '
|
||||
'administrative privileges or remove the '
|
||||
'administrative role manually via the CLI.')
|
||||
messages.warning(request, msg)
|
||||
|
||||
removing_admin = self._is_removing_self_admin_role(
|
||||
request, project_id, user, current_roles, current_role_ids)
|
||||
# Otherwise go through and revoke any removed roles.
|
||||
else:
|
||||
for id_to_delete in current_role_ids:
|
||||
api.keystone.remove_tenant_user_role(
|
||||
request,
|
||||
project=project_id,
|
||||
user=user.id,
|
||||
role=id_to_delete)
|
||||
if not removing_admin:
|
||||
self._remove_roles_from_user(request, project_id, user,
|
||||
current_role_ids)
|
||||
users_to_modify -= 1
|
||||
|
||||
# Grant new roles on the project.
|
||||
@ -655,6 +684,7 @@ class UpdateProject(workflows.Workflow):
|
||||
role=role.id)
|
||||
users_added += 1
|
||||
users_to_modify -= users_added
|
||||
return True
|
||||
except Exception:
|
||||
if PROJECT_GROUP_ENABLED:
|
||||
group_msg = _(", update project groups")
|
||||
@ -668,78 +698,81 @@ class UpdateProject(workflows.Workflow):
|
||||
'group_msg': group_msg})
|
||||
return False
|
||||
|
||||
if PROJECT_GROUP_ENABLED:
|
||||
# update project groups
|
||||
groups_to_modify = 0
|
||||
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
|
||||
try:
|
||||
# Get the groups currently associated with this project so we
|
||||
# can diff against it.
|
||||
project_groups = api.keystone.group_list(request,
|
||||
domain=domain_id,
|
||||
project=project_id)
|
||||
groups_to_modify = len(project_groups)
|
||||
for group in project_groups:
|
||||
# Check if there have been any changes in the roles of
|
||||
# Existing project members.
|
||||
current_roles = api.keystone.roles_for_group(
|
||||
self.request,
|
||||
group=group.id,
|
||||
project=project_id)
|
||||
current_role_ids = [role.id for role in current_roles]
|
||||
for role in available_roles:
|
||||
# Check if the group is in the list of groups with
|
||||
# this role.
|
||||
field_name = member_step.get_member_field_name(role.id)
|
||||
if group.id in data[field_name]:
|
||||
# Add it if necessary
|
||||
if role.id not in current_role_ids:
|
||||
# group role has changed
|
||||
api.keystone.add_group_role(
|
||||
request,
|
||||
role=role.id,
|
||||
group=group.id,
|
||||
project=project_id)
|
||||
else:
|
||||
# Group role is unchanged, so remove it from
|
||||
# the remaining roles list to avoid removing it
|
||||
# later.
|
||||
index = current_role_ids.index(role.id)
|
||||
current_role_ids.pop(index)
|
||||
|
||||
# Revoke any removed roles.
|
||||
for id_to_delete in current_role_ids:
|
||||
api.keystone.remove_group_role(request,
|
||||
role=id_to_delete,
|
||||
group=group.id,
|
||||
project=project_id)
|
||||
groups_to_modify -= 1
|
||||
|
||||
# Grant new roles on the project.
|
||||
def _update_project_groups(self, request, data, project_id, domain_id):
|
||||
# update project groups
|
||||
groups_to_modify = 0
|
||||
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
|
||||
try:
|
||||
available_roles = self._get_available_roles(request)
|
||||
# Get the groups currently associated with this project so we
|
||||
# can diff against it.
|
||||
project_groups = api.keystone.group_list(request,
|
||||
domain=domain_id,
|
||||
project=project_id)
|
||||
groups_to_modify = len(project_groups)
|
||||
for group in project_groups:
|
||||
# Check if there have been any changes in the roles of
|
||||
# Existing project members.
|
||||
current_roles = api.keystone.roles_for_group(
|
||||
self.request,
|
||||
group=group.id,
|
||||
project=project_id)
|
||||
current_role_ids = [role.id for role in current_roles]
|
||||
for role in available_roles:
|
||||
# Check if the group is in the list of groups with
|
||||
# this role.
|
||||
field_name = member_step.get_member_field_name(role.id)
|
||||
# Count how many groups may be added for error handling.
|
||||
groups_to_modify += len(data[field_name])
|
||||
for role in available_roles:
|
||||
groups_added = 0
|
||||
field_name = member_step.get_member_field_name(role.id)
|
||||
for group_id in data[field_name]:
|
||||
if not filter(lambda x: group_id == x.id,
|
||||
project_groups):
|
||||
api.keystone.add_group_role(request,
|
||||
role=role.id,
|
||||
group=group_id,
|
||||
project=project_id)
|
||||
groups_added += 1
|
||||
groups_to_modify -= groups_added
|
||||
except Exception:
|
||||
exceptions.handle(request,
|
||||
_('Failed to modify %s project '
|
||||
'members, update project groups '
|
||||
'and update project quotas.')
|
||||
% groups_to_modify)
|
||||
return False
|
||||
if group.id in data[field_name]:
|
||||
# Add it if necessary
|
||||
if role.id not in current_role_ids:
|
||||
# group role has changed
|
||||
api.keystone.add_group_role(
|
||||
request,
|
||||
role=role.id,
|
||||
group=group.id,
|
||||
project=project_id)
|
||||
else:
|
||||
# Group role is unchanged, so remove it from
|
||||
# the remaining roles list to avoid removing it
|
||||
# later.
|
||||
index = current_role_ids.index(role.id)
|
||||
current_role_ids.pop(index)
|
||||
|
||||
# Revoke any removed roles.
|
||||
for id_to_delete in current_role_ids:
|
||||
api.keystone.remove_group_role(request,
|
||||
role=id_to_delete,
|
||||
group=group.id,
|
||||
project=project_id)
|
||||
groups_to_modify -= 1
|
||||
|
||||
# Grant new roles on the project.
|
||||
for role in available_roles:
|
||||
field_name = member_step.get_member_field_name(role.id)
|
||||
# Count how many groups may be added for error handling.
|
||||
groups_to_modify += len(data[field_name])
|
||||
for role in available_roles:
|
||||
groups_added = 0
|
||||
field_name = member_step.get_member_field_name(role.id)
|
||||
for group_id in data[field_name]:
|
||||
if not filter(lambda x: group_id == x.id,
|
||||
project_groups):
|
||||
api.keystone.add_group_role(request,
|
||||
role=role.id,
|
||||
group=group_id,
|
||||
project=project_id)
|
||||
groups_added += 1
|
||||
groups_to_modify -= groups_added
|
||||
return True
|
||||
except Exception:
|
||||
exceptions.handle(request,
|
||||
_('Failed to modify %s project '
|
||||
'members, update project groups '
|
||||
'and update project quotas.')
|
||||
% groups_to_modify)
|
||||
return False
|
||||
|
||||
def _update_project_quota(self, request, data, project_id):
|
||||
# update the project quota
|
||||
nova_data = dict(
|
||||
[(key, data[key]) for key in quotas.NOVA_QUOTA_FIELDS])
|
||||
@ -771,3 +804,32 @@ class UpdateProject(workflows.Workflow):
|
||||
'members, but unable to modify '
|
||||
'project quotas.'))
|
||||
return False
|
||||
|
||||
def handle(self, request, data):
|
||||
# FIXME(gabriel): This should be refactored to use Python's built-in
|
||||
# sets and do this all in a single "roles to add" and "roles to remove"
|
||||
# pass instead of the multi-pass thing happening now.
|
||||
|
||||
project = self._update_project(request, data)
|
||||
if not project:
|
||||
return False
|
||||
|
||||
project_id = data['project_id']
|
||||
# Use the domain_id from the project if available
|
||||
domain_id = getattr(project, "domain_id", '')
|
||||
|
||||
ret = self._update_project_members(request, data, project_id)
|
||||
if not ret:
|
||||
return False
|
||||
|
||||
if PROJECT_GROUP_ENABLED:
|
||||
ret = self._update_project_groups(request, data,
|
||||
project_id, domain_id)
|
||||
if not ret:
|
||||
return False
|
||||
|
||||
ret = self._update_project_quota(request, data, project_id)
|
||||
if not ret:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
@@ -273,72 +273,82 @@ class AddRule(forms.SelfHandlingForm):
            # and it is available only for neutron security group.
            self.fields['ip_protocol'].widget = forms.HiddenInput()

    def clean(self):
        cleaned_data = super(AddRule, self).clean()

        def update_cleaned_data(key, value):
            cleaned_data[key] = value
            self.errors.pop(key, None)

        rule_menu = cleaned_data.get('rule_menu')
        port_or_range = cleaned_data.get("port_or_range")
        remote = cleaned_data.get("remote")
    def _update_and_pop_error(self, cleaned_data, key, value):
        cleaned_data[key] = value
        self.errors.pop(key, None)

    def _clean_rule_icmp(self, cleaned_data, rule_menu):
        icmp_type = cleaned_data.get("icmp_type", None)
        icmp_code = cleaned_data.get("icmp_code", None)

        self._update_and_pop_error(cleaned_data, 'ip_protocol', rule_menu)
        if icmp_type is None:
            msg = _('The ICMP type is invalid.')
            raise ValidationError(msg)
        if icmp_code is None:
            msg = _('The ICMP code is invalid.')
            raise ValidationError(msg)
        if icmp_type not in range(-1, 256):
            msg = _('The ICMP type not in range (-1, 255)')
            raise ValidationError(msg)
        if icmp_code not in range(-1, 256):
            msg = _('The ICMP code not in range (-1, 255)')
            raise ValidationError(msg)
        self._update_and_pop_error(cleaned_data, 'from_port', icmp_type)
        self._update_and_pop_error(cleaned_data, 'to_port', icmp_code)
        self._update_and_pop_error(cleaned_data, 'port', None)

    def _clean_rule_tcp_udp(self, cleaned_data, rule_menu):
        port_or_range = cleaned_data.get("port_or_range")
        from_port = cleaned_data.get("from_port", None)
        to_port = cleaned_data.get("to_port", None)
        port = cleaned_data.get("port", None)

        self._update_and_pop_error(cleaned_data, 'ip_protocol', rule_menu)
        self._update_and_pop_error(cleaned_data, 'icmp_code', None)
        self._update_and_pop_error(cleaned_data, 'icmp_type', None)
        if port_or_range == "port":
            self._update_and_pop_error(cleaned_data, 'from_port', port)
            self._update_and_pop_error(cleaned_data, 'to_port', port)
            if port is None:
                msg = _('The specified port is invalid.')
                raise ValidationError(msg)
        else:
            self._update_and_pop_error(cleaned_data, 'port', None)
            if from_port is None:
                msg = _('The "from" port number is invalid.')
                raise ValidationError(msg)
            if to_port is None:
                msg = _('The "to" port number is invalid.')
                raise ValidationError(msg)
            if to_port < from_port:
                msg = _('The "to" port number must be greater than '
                        'or equal to the "from" port number.')
                raise ValidationError(msg)

    def _apply_rule_menu(self, cleaned_data, rule_menu):
        cleaned_data['ip_protocol'] = self.rules[rule_menu]['ip_protocol']
        cleaned_data['from_port'] = int(self.rules[rule_menu]['from_port'])
        cleaned_data['to_port'] = int(self.rules[rule_menu]['to_port'])
        if rule_menu not in ['all_tcp', 'all_udp', 'all_icmp']:
            direction = self.rules[rule_menu].get('direction')
            cleaned_data['direction'] = direction

    def _clean_rule_menu(self, cleaned_data):
        rule_menu = cleaned_data.get('rule_menu')
        if rule_menu == 'icmp':
            update_cleaned_data('ip_protocol', rule_menu)
            if icmp_type is None:
                msg = _('The ICMP type is invalid.')
                raise ValidationError(msg)
            if icmp_code is None:
                msg = _('The ICMP code is invalid.')
                raise ValidationError(msg)
            if icmp_type not in range(-1, 256):
                msg = _('The ICMP type not in range (-1, 255)')
                raise ValidationError(msg)
            if icmp_code not in range(-1, 256):
                msg = _('The ICMP code not in range (-1, 255)')
                raise ValidationError(msg)
            update_cleaned_data('from_port', icmp_type)
            update_cleaned_data('to_port', icmp_code)
            update_cleaned_data('port', None)
            self._clean_rule_icmp(cleaned_data, rule_menu)
        elif rule_menu == 'tcp' or rule_menu == 'udp':
            update_cleaned_data('ip_protocol', rule_menu)
            update_cleaned_data('icmp_code', None)
            update_cleaned_data('icmp_type', None)
            if port_or_range == "port":
                update_cleaned_data('from_port', port)
                update_cleaned_data('to_port', port)
                if port is None:
                    msg = _('The specified port is invalid.')
                    raise ValidationError(msg)
            else:
                update_cleaned_data('port', None)
                if from_port is None:
                    msg = _('The "from" port number is invalid.')
                    raise ValidationError(msg)
                if to_port is None:
                    msg = _('The "to" port number is invalid.')
                    raise ValidationError(msg)
                if to_port < from_port:
                    msg = _('The "to" port number must be greater than '
                            'or equal to the "from" port number.')
                    raise ValidationError(msg)
            self._clean_rule_tcp_udp(cleaned_data, rule_menu)
        elif rule_menu == 'custom':
            pass
        else:
            cleaned_data['ip_protocol'] = self.rules[rule_menu]['ip_protocol']
            cleaned_data['from_port'] = int(self.rules[rule_menu]['from_port'])
            cleaned_data['to_port'] = int(self.rules[rule_menu]['to_port'])
            if rule_menu not in ['all_tcp', 'all_udp', 'all_icmp']:
                direction = self.rules[rule_menu].get('direction')
                cleaned_data['direction'] = direction
            self._apply_rule_menu(cleaned_data, rule_menu)

    def clean(self):
        cleaned_data = super(AddRule, self).clean()

        self._clean_rule_menu(cleaned_data)

        # NOTE(amotoki): There are two cases where cleaned_data['direction']
        # is empty: (1) Nova Security Group is used. Since "direction" is
@@ -349,13 +359,14 @@ class AddRule(forms.SelfHandlingForm):
        if not cleaned_data['direction']:
            cleaned_data['direction'] = 'ingress'

        remote = cleaned_data.get("remote")
        if remote == "cidr":
            update_cleaned_data('security_group', None)
            self._update_and_pop_error(cleaned_data, 'security_group', None)
        else:
            update_cleaned_data('cidr', None)
            self._update_and_pop_error(cleaned_data, 'cidr', None)

        # If cleaned_data does not contain cidr, cidr is already marked
        # as invalid, so skip the further validation for cidr.
        # If cleaned_data does not contain a non-empty value, IPField already
        # has validated it, so skip the further validation for cidr.
        # In addition cleaned_data['cidr'] is None means source_group is used.
        if 'cidr' in cleaned_data and cleaned_data['cidr'] is not None:
            cidr = cleaned_data['cidr']
@@ -29,6 +29,7 @@ from django.views.decorators.debug import sensitive_variables  # noqa
from horizon import exceptions
from horizon import forms
from horizon.utils import functions
from horizon.utils import memoized
from horizon.utils import validators
from horizon import workflows

@ -170,28 +171,9 @@ class SetInstanceDetailsAction(workflows.Action):
|
||||
_("Boot from volume snapshot (creates a new volume)")))
|
||||
self.fields['source_type'].choices = source_type_choices
|
||||
|
||||
def clean(self):
|
||||
cleaned_data = super(SetInstanceDetailsAction, self).clean()
|
||||
|
||||
count = cleaned_data.get('count', 1)
|
||||
# Prevent launching more instances than the quota allows
|
||||
usages = quotas.tenant_quota_usages(self.request)
|
||||
available_count = usages['instances']['available']
|
||||
if available_count < count:
|
||||
error_message = ungettext_lazy('The requested instance '
|
||||
'cannot be launched as you only '
|
||||
'have %(avail)i of your quota '
|
||||
'available. ',
|
||||
'The requested %(req)i instances '
|
||||
'cannot be launched as you only '
|
||||
'have %(avail)i of your quota '
|
||||
'available.',
|
||||
count)
|
||||
params = {'req': count,
|
||||
'avail': available_count}
|
||||
raise forms.ValidationError(error_message % params)
|
||||
@memoized.memoized_method
|
||||
def _get_flavor(self, flavor_id):
|
||||
try:
|
||||
flavor_id = cleaned_data.get('flavor')
|
||||
# We want to retrieve details for a given flavor,
|
||||
# however flavor_list uses a memoized decorator
|
||||
# so it is used instead of flavor_get to reduce the number
|
||||
@ -200,6 +182,43 @@ class SetInstanceDetailsAction(workflows.Action):
|
||||
flavor = [x for x in flavors if x.id == flavor_id][0]
|
||||
except IndexError:
|
||||
flavor = None
|
||||
return flavor
|
||||
|
||||
@memoized.memoized_method
|
||||
def _get_image(self, image_id):
|
||||
try:
|
||||
# We want to retrieve details for a given image,
|
||||
# however get_available_images uses a cache of image list,
|
||||
# so it is used instead of image_get to reduce the number
|
||||
# of API calls.
|
||||
images = image_utils.get_available_images(
|
||||
self.request,
|
||||
self.context.get('project_id'),
|
||||
self._images_cache)
|
||||
image = [x for x in images if x.id == image_id][0]
|
||||
except IndexError:
|
||||
image = None
|
||||
return image
|
||||
|
||||
def _check_quotas(self, cleaned_data):
|
||||
count = cleaned_data.get('count', 1)
|
||||
|
||||
# Prevent launching more instances than the quota allows
|
||||
usages = quotas.tenant_quota_usages(self.request)
|
||||
available_count = usages['instances']['available']
|
||||
if available_count < count:
|
||||
error_message = ungettext_lazy(
|
||||
'The requested instance cannot be launched as you only '
|
||||
'have %(avail)i of your quota available. ',
|
||||
'The requested %(req)i instances cannot be launched as you '
|
||||
'only have %(avail)i of your quota available.',
|
||||
count)
|
||||
params = {'req': count,
|
||||
'avail': available_count}
|
||||
raise forms.ValidationError(error_message % params)
|
||||
|
||||
flavor_id = cleaned_data.get('flavor')
|
||||
flavor = self._get_flavor(flavor_id)
|
||||
|
||||
count_error = []
|
||||
# Validate cores and ram.
|
||||
@ -227,91 +246,114 @@ class SetInstanceDetailsAction(workflows.Action):
|
||||
else:
|
||||
self._errors['count'] = self.error_class([msg])
|
||||
|
||||
def _check_flavor_for_image(self, cleaned_data):
|
||||
# Prevents trying to launch an image needing more resources.
|
||||
image_id = cleaned_data.get('image_id')
|
||||
image = self._get_image(image_id)
|
||||
flavor_id = cleaned_data.get('flavor')
|
||||
flavor = self._get_flavor(flavor_id)
|
||||
if not image or not flavor:
|
||||
return
|
||||
props_mapping = (("min_ram", "ram"), ("min_disk", "disk"))
|
||||
for iprop, fprop in props_mapping:
|
||||
if getattr(image, iprop) > 0 and \
|
||||
getattr(image, iprop) > getattr(flavor, fprop):
|
||||
msg = (_("The flavor '%(flavor)s' is too small "
|
||||
"for requested image.\n"
|
||||
"Minimum requirements: "
|
||||
"%(min_ram)s MB of RAM and "
|
||||
"%(min_disk)s GB of Root Disk.") %
|
||||
{'flavor': flavor.name,
|
||||
'min_ram': image.min_ram,
|
||||
'min_disk': image.min_disk})
|
||||
self._errors['image_id'] = self.error_class([msg])
|
||||
break # Not necessary to continue the tests.
|
||||
|
||||
def _check_volume_for_image(self, cleaned_data):
|
||||
image_id = cleaned_data.get('image_id')
|
||||
image = self._get_image(image_id)
|
||||
volume_size = cleaned_data.get('volume_size')
|
||||
if not image or not volume_size:
|
||||
return
|
||||
volume_size = int(volume_size)
|
||||
img_gigs = functions.bytes_to_gigabytes(image.size)
|
||||
smallest_size = max(img_gigs, image.min_disk)
|
||||
if volume_size < smallest_size:
|
||||
msg = (_("The Volume size is too small for the"
|
||||
" '%(image_name)s' image and has to be"
|
||||
" greater than or equal to "
|
||||
"'%(smallest_size)d' GB.") %
|
||||
{'image_name': image.name,
|
||||
'smallest_size': smallest_size})
|
||||
self._errors['volume_size'] = self.error_class([msg])
|
||||
|
||||
def _check_source_image(self, cleaned_data):
|
||||
if not cleaned_data.get('image_id'):
|
||||
msg = _("You must select an image.")
|
||||
self._errors['image_id'] = self.error_class([msg])
|
||||
else:
|
||||
self._check_flavor_for_image(cleaned_data)
|
||||
|
||||
def _check_source_volume_image(self, cleaned_data):
|
||||
volume_size = self.data.get('volume_size', None)
|
||||
if not volume_size:
|
||||
msg = _("You must set volume size")
|
||||
self._errors['volume_size'] = self.error_class([msg])
|
||||
if float(volume_size) <= 0:
|
||||
msg = _("Volume size must be greater than 0")
|
||||
self._errors['volume_size'] = self.error_class([msg])
|
||||
if not cleaned_data.get('image_id'):
|
||||
msg = _("You must select an image.")
|
||||
self._errors['image_id'] = self.error_class([msg])
|
||||
return
|
||||
else:
|
||||
self._check_flavor_for_image(cleaned_data)
|
||||
self._check_volume_for_image(cleaned_data)
|
||||
|
||||
def _check_source_instance_snapshot(self, cleaned_data):
|
||||
# using the array form of get blows up with KeyError
|
||||
# if instance_snapshot_id is nil
|
||||
if not cleaned_data.get('instance_snapshot_id'):
|
||||
msg = _("You must select a snapshot.")
|
||||
self._errors['instance_snapshot_id'] = self.error_class([msg])
|
||||
|
||||
def _check_source_volume(self, cleaned_data):
|
||||
if not cleaned_data.get('volume_id'):
|
||||
msg = _("You must select a volume.")
|
||||
self._errors['volume_id'] = self.error_class([msg])
|
||||
# Prevent launching multiple instances with the same volume.
|
||||
# TODO(gabriel): is it safe to launch multiple instances with
|
||||
# a snapshot since it should be cloned to new volumes?
|
||||
count = cleaned_data.get('count', 1)
|
||||
if count > 1:
|
||||
msg = _('Launching multiple instances is only supported for '
|
||||
'images and instance snapshots.')
|
||||
raise forms.ValidationError(msg)
|
||||
|
||||
def _check_source_volume_snapshot(self, cleaned_data):
|
||||
if not cleaned_data.get('volume_snapshot_id'):
|
||||
msg = _("You must select a snapshot.")
|
||||
self._errors['volume_snapshot_id'] = self.error_class([msg])
|
||||
|
||||
def _check_source(self, cleaned_data):
|
||||
# Validate our instance source.
|
||||
source_type = self.data.get('source_type', None)
|
||||
source_check_methods = {
|
||||
'image_id': self._check_source_image,
|
||||
'volume_image_id': self._check_source_volume_image,
|
||||
'instance_snapshot_id': self._check_source_instance_snapshot,
|
||||
'volume_id': self._check_source_volume,
|
||||
'volume_snapshot_id': self._check_source_volume_snapshot
|
||||
}
|
||||
check_method = source_check_methods.get(source_type)
|
||||
if check_method:
|
||||
check_method(cleaned_data)
|
||||
|
||||
if source_type in ('image_id', 'volume_image_id'):
|
||||
if source_type == 'volume_image_id':
|
||||
volume_size = self.data.get('volume_size', None)
|
||||
if not volume_size:
|
||||
msg = _("You must set volume size")
|
||||
self._errors['volume_size'] = self.error_class([msg])
|
||||
if float(volume_size) <= 0:
|
||||
msg = _("Volume size must be greater than 0")
|
||||
self._errors['volume_size'] = self.error_class([msg])
|
||||
if not cleaned_data.get('image_id'):
|
||||
msg = _("You must select an image.")
|
||||
self._errors['image_id'] = self.error_class([msg])
|
||||
else:
|
||||
# Prevents trying to launch an image needing more resources.
|
||||
try:
|
||||
image_id = cleaned_data.get('image_id')
|
||||
# We want to retrieve details for a given image,
|
||||
# however get_available_images uses a cache of image list,
|
||||
# so it is used instead of image_get to reduce the number
|
||||
# of API calls.
|
||||
images = image_utils.get_available_images(
|
||||
self.request,
|
||||
self.context.get('project_id'),
|
||||
self._images_cache)
|
||||
image = [x for x in images if x.id == image_id][0]
|
||||
except IndexError:
|
||||
image = None
|
||||
def clean(self):
|
||||
cleaned_data = super(SetInstanceDetailsAction, self).clean()
|
||||
|
||||
if image and flavor:
|
||||
props_mapping = (("min_ram", "ram"), ("min_disk", "disk"))
|
||||
for iprop, fprop in props_mapping:
|
||||
if getattr(image, iprop) > 0 and \
|
||||
getattr(image, iprop) > getattr(flavor, fprop):
|
||||
msg = (_("The flavor '%(flavor)s' is too small "
|
||||
"for requested image.\n"
|
||||
"Minimum requirements: "
|
||||
"%(min_ram)s MB of RAM and "
|
||||
"%(min_disk)s GB of Root Disk.") %
|
||||
{'flavor': flavor.name,
|
||||
'min_ram': image.min_ram,
|
||||
'min_disk': image.min_disk})
|
||||
self._errors['image_id'] = self.error_class([msg])
|
||||
break # Not necessary to continue the tests.
|
||||
|
||||
volume_size = cleaned_data.get('volume_size')
|
||||
if volume_size and source_type == 'volume_image_id':
|
||||
volume_size = int(volume_size)
|
||||
img_gigs = functions.bytes_to_gigabytes(image.size)
|
||||
smallest_size = max(img_gigs, image.min_disk)
|
||||
if volume_size < smallest_size:
|
||||
msg = (_("The Volume size is too small for the"
|
||||
" '%(image_name)s' image and has to be"
|
||||
" greater than or equal to "
|
||||
"'%(smallest_size)d' GB.") %
|
||||
{'image_name': image.name,
|
||||
'smallest_size': smallest_size})
|
||||
self._errors['volume_size'] = self.error_class(
|
||||
[msg])
|
||||
|
||||
elif source_type == 'instance_snapshot_id':
|
||||
# using the array form of get blows up with KeyError
|
||||
# if instance_snapshot_id is nil
|
||||
if not cleaned_data.get('instance_snapshot_id'):
|
||||
msg = _("You must select a snapshot.")
|
||||
self._errors['instance_snapshot_id'] = self.error_class([msg])
|
||||
|
||||
elif source_type == 'volume_id':
|
||||
if not cleaned_data.get('volume_id'):
|
||||
msg = _("You must select a volume.")
|
||||
self._errors['volume_id'] = self.error_class([msg])
|
||||
# Prevent launching multiple instances with the same volume.
|
||||
# TODO(gabriel): is it safe to launch multiple instances with
|
||||
# a snapshot since it should be cloned to new volumes?
|
||||
if count > 1:
|
||||
msg = _('Launching multiple instances is only supported for '
|
||||
'images and instance snapshots.')
|
||||
raise forms.ValidationError(msg)
|
||||
|
||||
elif source_type == 'volume_snapshot_id':
|
||||
if not cleaned_data.get('volume_snapshot_id'):
|
||||
msg = _("You must select a snapshot.")
|
||||
self._errors['volume_snapshot_id'] = self.error_class([msg])
|
||||
self._check_quotas(cleaned_data)
|
||||
self._check_source(cleaned_data)
|
||||
|
||||
return cleaned_data
|
||||
|
||||
|
@ -95,6 +95,141 @@ class CreateForm(forms.SelfHandlingForm):
|
||||
'data-source-no_source_type': _('Availability Zone'),
|
||||
'data-source-image_source': _('Availability Zone')}))
|
||||
|
||||
def prepare_source_fields_if_snapshot_specified(self, request):
|
||||
try:
|
||||
snapshot = self.get_snapshot(request,
|
||||
request.GET["snapshot_id"])
|
||||
self.fields['name'].initial = snapshot.name
|
||||
self.fields['size'].initial = snapshot.size
|
||||
self.fields['snapshot_source'].choices = ((snapshot.id,
|
||||
snapshot),)
|
||||
try:
|
||||
# Set the volume type from the original volume
|
||||
orig_volume = cinder.volume_get(request,
|
||||
snapshot.volume_id)
|
||||
self.fields['type'].initial = orig_volume.volume_type
|
||||
except Exception:
|
||||
pass
|
||||
self.fields['size'].help_text = (
|
||||
_('Volume size must be equal to or greater than the '
|
||||
'snapshot size (%sGB)') % snapshot.size)
|
||||
del self.fields['image_source']
|
||||
del self.fields['volume_source']
|
||||
del self.fields['volume_source_type']
|
||||
del self.fields['availability_zone']
|
||||
except Exception:
|
||||
exceptions.handle(request,
|
||||
_('Unable to load the specified snapshot.'))
|
||||
|
||||
def prepare_source_fields_if_image_specified(self, request):
|
||||
self.fields['availability_zone'].choices = \
|
||||
self.availability_zones(request)
|
||||
try:
|
||||
image = self.get_image(request,
|
||||
request.GET["image_id"])
|
||||
image.bytes = image.size
|
||||
self.fields['name'].initial = image.name
|
||||
min_vol_size = functions.bytes_to_gigabytes(
|
||||
image.size)
|
||||
size_help_text = (_('Volume size must be equal to or greater '
|
||||
'than the image size (%s)')
|
||||
% filesizeformat(image.size))
|
||||
properties = getattr(image, 'properties', {})
|
||||
min_disk_size = (getattr(image, 'min_disk', 0) or
|
||||
properties.get('min_disk', 0))
|
||||
if (min_disk_size > min_vol_size):
|
||||
min_vol_size = min_disk_size
|
||||
size_help_text = (_('Volume size must be equal to or '
|
||||
'greater than the image minimum '
|
||||
'disk size (%sGB)')
|
||||
% min_disk_size)
|
||||
self.fields['size'].initial = min_vol_size
|
||||
self.fields['size'].help_text = size_help_text
|
||||
self.fields['image_source'].choices = ((image.id, image),)
|
||||
del self.fields['snapshot_source']
|
||||
del self.fields['volume_source']
|
||||
del self.fields['volume_source_type']
|
||||
except Exception:
|
||||
msg = _('Unable to load the specified image. %s')
|
||||
exceptions.handle(request, msg % request.GET['image_id'])
|
||||
|
||||
def prepare_source_fields_if_volume_specified(self, request):
|
||||
self.fields['availability_zone'].choices = \
|
||||
self.availability_zones(request)
|
||||
volume = None
|
||||
try:
|
||||
volume = self.get_volume(request, request.GET["volume_id"])
|
||||
except Exception:
|
||||
msg = _('Unable to load the specified volume. %s')
|
||||
exceptions.handle(request, msg % request.GET['volume_id'])
|
||||
|
||||
if volume is not None:
|
||||
self.fields['name'].initial = volume.name
|
||||
self.fields['description'].initial = volume.description
|
||||
min_vol_size = volume.size
|
||||
size_help_text = (_('Volume size must be equal to or greater '
|
||||
'than the origin volume size (%s)')
|
||||
% filesizeformat(volume.size))
|
||||
self.fields['size'].initial = min_vol_size
|
||||
self.fields['size'].help_text = size_help_text
|
||||
self.fields['volume_source'].choices = ((volume.id, volume),)
|
||||
self.fields['type'].initial = volume.type
|
||||
del self.fields['snapshot_source']
|
||||
del self.fields['image_source']
|
||||
del self.fields['volume_source_type']
|
||||
|
||||
def prepare_source_fields_default(self, request):
|
||||
source_type_choices = []
|
||||
self.fields['availability_zone'].choices = \
|
||||
self.availability_zones(request)
|
||||
|
||||
try:
|
||||
snapshot_list = cinder.volume_snapshot_list(request)
|
||||
snapshots = [s for s in snapshot_list
|
||||
if s.status == 'available']
|
||||
if snapshots:
|
||||
source_type_choices.append(("snapshot_source",
|
||||
_("Snapshot")))
|
||||
choices = [('', _("Choose a snapshot"))] + \
|
||||
[(s.id, s) for s in snapshots]
|
||||
self.fields['snapshot_source'].choices = choices
|
||||
else:
|
||||
del self.fields['snapshot_source']
|
||||
except Exception:
|
||||
exceptions.handle(request,
|
||||
_("Unable to retrieve volume snapshots."))
|
||||
|
||||
images = utils.get_available_images(request,
|
||||
request.user.tenant_id)
|
||||
if images:
|
||||
source_type_choices.append(("image_source", _("Image")))
|
||||
choices = [('', _("Choose an image"))]
|
||||
for image in images:
|
||||
image.bytes = image.size
|
||||
image.size = functions.bytes_to_gigabytes(image.bytes)
|
||||
choices.append((image.id, image))
|
||||
self.fields['image_source'].choices = choices
|
||||
else:
|
||||
del self.fields['image_source']
|
||||
|
||||
volumes = self.get_volumes(request)
|
||||
if volumes:
|
||||
source_type_choices.append(("volume_source", _("Volume")))
|
||||
choices = [('', _("Choose a volume"))]
|
||||
for volume in volumes:
|
||||
choices.append((volume.id, volume))
|
||||
self.fields['volume_source'].choices = choices
|
||||
else:
|
||||
del self.fields['volume_source']
|
||||
|
||||
if source_type_choices:
|
||||
choices = ([('no_source_type',
|
||||
_("No source, empty volume"))] +
|
||||
source_type_choices)
|
||||
self.fields['volume_source_type'].choices = choices
|
||||
else:
|
||||
del self.fields['volume_source_type']
|
||||
|
||||
def __init__(self, request, *args, **kwargs):
|
||||
super(CreateForm, self).__init__(request, *args, **kwargs)
|
||||
volume_types = cinder.volume_type_list(request)
|
||||
@ -103,136 +238,13 @@ class CreateForm(forms.SelfHandlingForm):
|
||||
for type in volume_types]
|
||||
|
||||
if "snapshot_id" in request.GET:
|
||||
try:
|
||||
snapshot = self.get_snapshot(request,
|
||||
request.GET["snapshot_id"])
|
||||
self.fields['name'].initial = snapshot.name
|
||||
self.fields['size'].initial = snapshot.size
|
||||
self.fields['snapshot_source'].choices = ((snapshot.id,
|
||||
snapshot),)
|
||||
try:
|
||||
# Set the volume type from the original volume
|
||||
orig_volume = cinder.volume_get(request,
|
||||
snapshot.volume_id)
|
||||
self.fields['type'].initial = orig_volume.volume_type
|
||||
except Exception:
|
||||
pass
|
||||
self.fields['size'].help_text = (
|
||||
_('Volume size must be equal to or greater than the '
|
||||
'snapshot size (%sGB)') % snapshot.size)
|
||||
del self.fields['image_source']
|
||||
del self.fields['volume_source']
|
||||
del self.fields['volume_source_type']
|
||||
del self.fields['availability_zone']
|
||||
except Exception:
|
||||
exceptions.handle(request,
|
||||
_('Unable to load the specified snapshot.'))
|
||||
self.prepare_source_fields_if_snapshot_specified(request)
|
||||
elif 'image_id' in request.GET:
|
||||
self.fields['availability_zone'].choices = \
|
||||
self.availability_zones(request)
|
||||
try:
|
||||
image = self.get_image(request,
|
||||
request.GET["image_id"])
|
||||
image.bytes = image.size
|
||||
self.fields['name'].initial = image.name
|
||||
min_vol_size = functions.bytes_to_gigabytes(
|
||||
image.size)
|
||||
size_help_text = (_('Volume size must be equal to or greater '
|
||||
'than the image size (%s)')
|
||||
% filesizeformat(image.size))
|
||||
properties = getattr(image, 'properties', {})
|
||||
min_disk_size = (getattr(image, 'min_disk', 0) or
|
||||
properties.get('min_disk', 0))
|
||||
if (min_disk_size > min_vol_size):
|
||||
min_vol_size = min_disk_size
|
||||
size_help_text = (_('Volume size must be equal to or '
|
||||
'greater than the image minimum '
|
||||
'disk size (%sGB)')
|
||||
% min_disk_size)
|
||||
self.fields['size'].initial = min_vol_size
|
||||
self.fields['size'].help_text = size_help_text
|
||||
self.fields['image_source'].choices = ((image.id, image),)
|
||||
del self.fields['snapshot_source']
|
||||
del self.fields['volume_source']
|
||||
del self.fields['volume_source_type']
|
||||
except Exception:
|
||||
msg = _('Unable to load the specified image. %s')
|
||||
exceptions.handle(request, msg % request.GET['image_id'])
|
||||
self.prepare_source_fields_if_image_specified(request)
|
||||
elif 'volume_id' in request.GET:
|
||||
self.fields['availability_zone'].choices = \
|
||||
self.availability_zones(request)
|
||||
volume = None
|
||||
try:
|
||||
volume = self.get_volume(request, request.GET["volume_id"])
|
||||
except Exception:
|
||||
msg = _('Unable to load the specified volume. %s')
|
||||
exceptions.handle(request, msg % request.GET['volume_id'])
|
||||
|
||||
if volume is not None:
|
||||
self.fields['name'].initial = volume.name
|
||||
self.fields['description'].initial = volume.description
|
||||
min_vol_size = volume.size
|
||||
size_help_text = (_('Volume size must be equal to or greater '
|
||||
'than the origin volume size (%s)')
|
||||
% filesizeformat(volume.size))
|
||||
self.fields['size'].initial = min_vol_size
|
||||
self.fields['size'].help_text = size_help_text
|
||||
self.fields['volume_source'].choices = ((volume.id, volume),)
|
||||
self.fields['type'].initial = volume.type
|
||||
del self.fields['snapshot_source']
|
||||
del self.fields['image_source']
|
||||
del self.fields['volume_source_type']
|
||||
self.prepare_source_fields_if_volume_specified(request)
|
||||
else:
|
||||
source_type_choices = []
|
||||
self.fields['availability_zone'].choices = \
|
||||
self.availability_zones(request)
|
||||
|
||||
try:
|
||||
snapshot_list = cinder.volume_snapshot_list(request)
|
||||
snapshots = [s for s in snapshot_list
|
||||
if s.status == 'available']
|
||||
if snapshots:
|
||||
source_type_choices.append(("snapshot_source",
|
||||
_("Snapshot")))
|
||||
choices = [('', _("Choose a snapshot"))] + \
|
||||
[(s.id, s) for s in snapshots]
|
||||
self.fields['snapshot_source'].choices = choices
|
||||
else:
|
||||
del self.fields['snapshot_source']
|
||||
except Exception:
|
||||
exceptions.handle(request,
|
||||
_("Unable to retrieve volume snapshots."))
|
||||
|
||||
images = utils.get_available_images(request,
|
||||
request.user.tenant_id)
|
||||
if images:
|
||||
source_type_choices.append(("image_source", _("Image")))
|
||||
choices = [('', _("Choose an image"))]
|
||||
for image in images:
|
||||
image.bytes = image.size
|
||||
image.size = functions.bytes_to_gigabytes(image.bytes)
|
||||
choices.append((image.id, image))
|
||||
self.fields['image_source'].choices = choices
|
||||
else:
|
||||
del self.fields['image_source']
|
||||
|
||||
volumes = self.get_volumes(request)
|
||||
if volumes:
|
||||
source_type_choices.append(("volume_source", _("Volume")))
|
||||
choices = [('', _("Choose a volume"))]
|
||||
for volume in volumes:
|
||||
choices.append((volume.id, volume))
|
||||
self.fields['volume_source'].choices = choices
|
||||
else:
|
||||
del self.fields['volume_source']
|
||||
|
||||
if source_type_choices:
|
||||
choices = ([('no_source_type',
|
||||
_("No source, empty volume"))] +
|
||||
source_type_choices)
|
||||
self.fields['volume_source_type'].choices = choices
|
||||
else:
|
||||
del self.fields['volume_source_type']
|
||||
self.prepare_source_fields_default(request)
|
||||
|
||||
def clean(self):
|
||||
cleaned_data = super(CreateForm, self).clean()
|
||||
|
@@ -252,32 +252,7 @@ def get_disabled_quotas(request):
    return disabled_quotas


@memoized
def tenant_quota_usages(request, tenant_id=None):
    """Get our quotas and construct our usage object.
    If no tenant_id is provided, a the request.user.project_id
    is assumed to be used
    """
    if not tenant_id:
        tenant_id = request.user.project_id

    disabled_quotas = get_disabled_quotas(request)

    usages = QuotaUsage()
    for quota in get_tenant_quota_data(request,
                                       disabled_quotas=disabled_quotas,
                                       tenant_id=tenant_id):
        usages.add_quota(quota)

    # Get our usages.
    floating_ips = []
    try:
        if network.floating_ip_supported(request):
            floating_ips = network.tenant_floating_ip_list(request)
    except Exception:
        pass
    flavors = dict([(f.id, f) for f in nova.flavor_list(request)])


def _get_tenant_compute_usages(request, usages, disabled_quotas, tenant_id):
    if tenant_id:
        instances, has_more = nova.server_list(
            request, search_opts={'tenant_id': tenant_id}, all_tenants=True)
@@ -285,6 +260,7 @@ def tenant_quota_usages(request, tenant_id=None):
        instances, has_more = nova.server_list(request)

    # Fetch deleted flavors if necessary.
    flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
    missing_flavors = [instance.flavor['id'] for instance in instances
                       if instance.flavor['id'] not in flavors]
    for missing in missing_flavors:
@@ -296,6 +272,25 @@ def tenant_quota_usages(request, tenant_id=None):
            exceptions.handle(request, ignore=True)

    usages.tally('instances', len(instances))

    # Sum our usage based on the flavors of the instances.
    for flavor in [flavors[instance.flavor['id']] for instance in instances]:
        usages.tally('cores', getattr(flavor, 'vcpus', None))
        usages.tally('ram', getattr(flavor, 'ram', None))

    # Initialise the tally if no instances have been launched yet
    if len(instances) == 0:
        usages.tally('cores', 0)
        usages.tally('ram', 0)


def _get_tenant_network_usages(request, usages, disabled_quotas, tenant_id):
    floating_ips = []
    try:
        if network.floating_ip_supported(request):
            floating_ips = network.tenant_floating_ip_list(request)
    except Exception:
        pass
    usages.tally('floating_ips', len(floating_ips))

    if 'security_group' not in disabled_quotas:
@@ -322,6 +317,8 @@ def tenant_quota_usages(request, tenant_id=None):
            routers = filter(lambda rou: rou.tenant_id == tenant_id, routers)
        usages.tally('routers', len(routers))


def _get_tenant_volume_usages(request, usages, disabled_quotas, tenant_id):
    if 'volumes' not in disabled_quotas:
        if tenant_id:
            opts = {'alltenants': 1, 'tenant_id': tenant_id}
@@ -334,15 +331,28 @@ def tenant_quota_usages(request, tenant_id=None):
        usages.tally('volumes', len(volumes))
        usages.tally('snapshots', len(snapshots))

    # Sum our usage based on the flavors of the instances.
    for flavor in [flavors[instance.flavor['id']] for instance in instances]:
        usages.tally('cores', getattr(flavor, 'vcpus', None))
        usages.tally('ram', getattr(flavor, 'ram', None))

    # Initialise the tally if no instances have been launched yet
    if len(instances) == 0:
        usages.tally('cores', 0)
        usages.tally('ram', 0)
@memoized
def tenant_quota_usages(request, tenant_id=None):
    """Get our quotas and construct our usage object.
    If no tenant_id is provided, a the request.user.project_id
    is assumed to be used
    """
    if not tenant_id:
        tenant_id = request.user.project_id

    disabled_quotas = get_disabled_quotas(request)
    usages = QuotaUsage()

    for quota in get_tenant_quota_data(request,
                                       disabled_quotas=disabled_quotas,
                                       tenant_id=tenant_id):
        usages.add_quota(quota)

    # Get our usages.
    _get_tenant_compute_usages(request, usages, disabled_quotas, tenant_id)
    _get_tenant_network_usages(request, usages, disabled_quotas, tenant_id)
    _get_tenant_volume_usages(request, usages, disabled_quotas, tenant_id)

    return usages
tox.ini
@@ -67,7 +67,7 @@ exclude = .venv,.git,.tox,dist,*openstack/common*,*lib/python*,*egg,build,panel_
# H803 git commit title should not end with period (disabled on purpose, see bug #1236621)
# H904 Wrap long lines in parentheses instead of a backslash
ignore = H307,H405,H803,H904
max-complexity=33
max-complexity = 19

[hacking]
import_exceptions = collections.defaultdict,
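
The lowered ceiling can be reproduced locally with flake8's bundled mccabe checker, for example by running flake8 with --max-complexity=19 over the source tree; any function still above the limit should be reported as a C901 "too complex" error, which is what the tox pep8 job now enforces.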