Quota management for Nova-APIGW (part 4: metadata and injected files)

Quota control in the Nova APIGW for a VM's metadata and injected
files. These checks work directly on the API side; there is no need
to gather usage from the bottom pod.
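As a rough illustration (not part of this change), an API-side check of
this kind only needs the incoming request body and the quota engine. The
helper below is hypothetical and only reuses the QUOTAS.limit_check call
and the resource names introduced here:

    import tricircle.common.exceptions as t_exceptions
    from tricircle.common.quota import QUOTAS

    def precheck_server_request(context, server_dict):
        # Hypothetical helper: everything it needs (metadata items,
        # injected files) is already in the incoming request body, so
        # no usage has to be collected from the bottom pods.
        metadata = server_dict.get('metadata') or {}
        injected_files = server_dict.get('injected_files') or []
        try:
            QUOTAS.limit_check(context, metadata_items=len(metadata))
            QUOTAS.limit_check(context,
                               injected_files=len(injected_files))
        except t_exceptions.OverQuota:
            return False
        return True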

The quota management and control in Tricircle is described in the
design doc:
https://docs.google.com/document/d/18kZZ1snMOCD9IQvUKI5NVDzSASpw-QKj7l2zNqMEd3g/

BP: https://blueprints.launchpad.net/tricircle/+spec/implement-stateless

Change-Id: Ib3f0a908118df4a9eb98d9bf56ebd9be48ebde10
Signed-off-by: Chaoyi Huang <joehuang@huawei.com>
Chaoyi Huang 2016-04-01 16:00:42 +08:00
parent d317e3895b
commit c130571d98
4 changed files with 464 additions and 145 deletions

View File

@ -139,6 +139,7 @@ cfg.CONF.register_group(quota_group)
CONF.register_opts(quota_opts, quota_group)
NON_QUOTA_KEYS = ['tenant_id', 'id']
DEFAULT_PROJECT = 'default'
class BaseResource(object):
@ -1294,8 +1295,16 @@ class QuotaSetOperation(object):
project = keystone.projects.get(id,
subtree_as_ids=subtree_as_ids)
generic_project.parent_id = project.parent_id
# all projects in Keystone are put under the parent 'default'
# if the parent project id is not specified when creating the
# project
if generic_project.parent_id == DEFAULT_PROJECT:
generic_project.parent_id = None
generic_project.subtree = (
project.subtree if subtree_as_ids else None)
except k_exceptions.NotFound:
msg = _("Tenant ID: %s does not exist.") % id
LOG.error(msg=msg)
@ -1310,7 +1319,7 @@ class QuotaSetOperation(object):
quota_set = kw.get('quota_set')
if not quota_set:
- raise t_exceptions.InvalidInput(reason=_('no quota_set'))
+ raise t_exceptions.InvalidInput(reason='no quota_set')
# TODO(joehuang): process is_force flag here

View File

@ -15,6 +15,7 @@
import six
import tricircle.common.exceptions as t_exceptions
from tricircle.common.i18n import _
@ -81,3 +82,33 @@ def bool_from_string(subject, strict=False, default=False):
raise ValueError(msg)
else:
return default
def check_string_length(value, name=None, min_len=0, max_len=None):
"""Check the length of specified string
:param value: the value of the string
:param name: the name of the string
:param min_len: the minimum length of the string
:param max_len: the maximum length of the string
"""
if not isinstance(value, six.string_types):
if name is None:
msg = _("The input is not a string or unicode")
else:
msg = _("%s is not a string or unicode") % name
raise t_exceptions.InvalidInput(message=msg)
if name is None:
name = value
if len(value) < min_len:
msg = _("%(name)s has a minimum character requirement of "
"%(min_length)s.") % {'name': name, 'min_length': min_len}
raise t_exceptions.InvalidInput(message=msg)
if max_len and len(value) > max_len:
msg = _("%(name)s has more than %(max_length)s "
"characters.") % {'name': name, 'max_length': max_len}
raise t_exceptions.InvalidInput(message=msg)
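For illustration only, a minimal usage sketch of the helper above (the
values are made up; it assumes the tricircle exception class already
referenced in the code):

    import tricircle.common.exceptions as t_exceptions
    from tricircle.common import utils

    # A key within the bounds used for metadata validation passes.
    utils.check_string_length('image_type', name='key', min_len=1,
                              max_len=255)

    # An empty key violates min_len=1 and raises InvalidInput.
    try:
        utils.check_string_length('', name='key', min_len=1, max_len=255)
    except t_exceptions.InvalidInput:
        pass  # rejected as expected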

View File

@ -17,6 +17,9 @@ import netaddr
import pecan
from pecan import expose
from pecan import rest
import six
import oslo_log.log as logging
import neutronclient.common.exceptions as q_exceptions
@ -24,11 +27,21 @@ from tricircle.common import az_ag
import tricircle.common.client as t_client
from tricircle.common import constants
import tricircle.common.context as t_context
import tricircle.common.exceptions as t_exceptions
from tricircle.common.i18n import _
from tricircle.common.i18n import _LE
import tricircle.common.lock_handle as t_lock
from tricircle.common.quota import QUOTAS
from tricircle.common import utils
import tricircle.db.api as db_api
from tricircle.db import core
from tricircle.db import models
LOG = logging.getLogger(__name__)
MAX_METADATA_KEY_LENGTH = 255
MAX_METADATA_VALUE_LENGTH = 255
class ServerController(rest.RestController):
@ -41,6 +54,167 @@ class ServerController(rest.RestController):
self.clients[pod_name] = t_client.Client(pod_name)
return self.clients[pod_name]
def _get_all(self, context):
ret = []
pods = db_api.list_pods(context)
for pod in pods:
if not pod['az_name']:
continue
client = self._get_client(pod['pod_name'])
servers = client.list_servers(context)
self._remove_fip_info(servers)
ret.extend(servers)
return ret
@expose(generic=True, template='json')
def get_one(self, _id):
context = t_context.extract_context_from_environ()
if _id == 'detail':
return {'servers': self._get_all(context)}
mappings = db_api.get_bottom_mappings_by_top_id(
context, _id, constants.RT_SERVER)
if not mappings:
pecan.abort(404, 'Server not found')
return
pod, bottom_id = mappings[0]
client = self._get_client(pod['pod_name'])
server = client.get_servers(context, bottom_id)
if not server:
pecan.abort(404, 'Server not found')
return
else:
return {'server': server}
@expose(generic=True, template='json')
def get_all(self):
context = t_context.extract_context_from_environ()
return {'servers': self._get_all(context)}
@expose(generic=True, template='json')
def post(self, **kw):
context = t_context.extract_context_from_environ()
if 'server' not in kw:
pecan.abort(400, 'Request body not found')
return
if 'availability_zone' not in kw['server']:
pecan.abort(400, 'Availability zone not set')
return
pod, b_az = az_ag.get_pod_by_az_tenant(
context, kw['server']['availability_zone'], self.project_id)
if not pod:
pecan.abort(400, 'No pod bound to availability zone')
return
t_server_dict = kw['server']
self._process_metadata_quota(context, t_server_dict)
self._process_injected_file_quota(context, t_server_dict)
server_body = self._get_create_server_body(kw['server'], b_az)
top_client = self._get_client()
sg_filters = [{'key': 'tenant_id', 'comparator': 'eq',
'value': self.project_id}]
top_sgs = top_client.list_security_groups(context, sg_filters)
top_sg_map = dict((sg['name'], sg) for sg in top_sgs)
if 'security_groups' not in kw['server']:
security_groups = ['default']
else:
security_groups = []
for sg in kw['server']['security_groups']:
if 'name' not in sg:
pecan.abort(404, 'Security group name not specified')
return
if sg['name'] not in top_sg_map:
pecan.abort(404,
'Security group %s not found' % sg['name'])
return
security_groups.append(sg['name'])
t_sg_ids, b_sg_ids, is_news = self._handle_security_group(
context, pod, top_sg_map, security_groups)
if 'networks' in kw['server']:
server_body['networks'] = []
for net_info in kw['server']['networks']:
if 'uuid' in net_info:
network = top_client.get_networks(context,
net_info['uuid'])
if not network:
pecan.abort(400, 'Network not found')
return
if not self._check_network_server_the_same_az(
network, kw['server']['availability_zone']):
pecan.abort(400, 'Network and server not in the same '
'availability zone')
return
subnets = top_client.list_subnets(
context, [{'key': 'network_id',
'comparator': 'eq',
'value': network['id']}])
if not subnets:
pecan.abort(400, 'Network does not contain subnets')
return
t_port_id, b_port_id = self._handle_network(
context, pod, network, subnets,
top_sg_ids=t_sg_ids, bottom_sg_ids=b_sg_ids)
elif 'port' in net_info:
port = top_client.get_ports(context, net_info['port'])
if not port:
pecan.abort(400, 'Port not found')
return
t_port_id, b_port_id = self._handle_port(
context, pod, port)
server_body['networks'].append({'port': b_port_id})
# only when a security group is first created in a pod do we invoke
# _handle_sg_rule_for_new_group to initialize rules in that group; this
# method removes all the rules in the new group and then adds new rules
top_sg_id_map = dict((sg['id'], sg) for sg in top_sgs)
new_top_sgs = []
new_bottom_sg_ids = []
default_sg = None
for t_id, b_id, is_new in zip(t_sg_ids, b_sg_ids, is_news):
sg_name = top_sg_id_map[t_id]['name']
if sg_name == 'default':
default_sg = top_sg_id_map[t_id]
continue
if not is_new:
continue
new_top_sgs.append(top_sg_id_map[t_id])
new_bottom_sg_ids.append(b_id)
self._handle_sg_rule_for_new_group(context, pod, new_top_sgs,
new_bottom_sg_ids)
if default_sg:
self._handle_sg_rule_for_default_group(
context, pod, default_sg, self.project_id)
client = self._get_client(pod['pod_name'])
nics = [
{'port-id': _port['port']} for _port in server_body['networks']]
server = client.create_servers(context,
name=server_body['name'],
image=server_body['imageRef'],
flavor=server_body['flavorRef'],
nics=nics,
security_groups=b_sg_ids)
with context.session.begin():
core.create_resource(context, models.ResourceRouting,
{'top_id': server['id'],
'bottom_id': server['id'],
'pod_id': pod['pod_id'],
'project_id': self.project_id,
'resource_type': constants.RT_SERVER})
return {'server': server}
def _get_or_create_route(self, context, pod, _id, _type):
def list_resources(t_ctx, q_ctx, pod_, _id_, _type_):
client = self._get_client(pod_['pod_name'])
@ -475,159 +649,100 @@ class ServerController(rest.RestController):
else:
return False

(Removed from this location: the original definitions of _get_all, get_one, get_all and post; their updated versions now appear earlier in the class, in the hunk above.)

def _process_injected_file_quota(self, context, t_server_dict):
try:
ctx = context.elevated()
injected_files = t_server_dict.get('injected_files', None)
self._check_injected_file_quota(ctx, injected_files)
except (t_exceptions.OnsetFileLimitExceeded,
t_exceptions.OnsetFilePathLimitExceeded,
t_exceptions.OnsetFileContentLimitExceeded) as e:
msg = str(e)
LOG.exception(_LE('Quota exceeded %(msg)s'),
{'msg': msg})
pecan.abort(400, _('Quota exceeded %s') % msg)

def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.

Raises a QuotaError if any limit is exceeded.

"""
if injected_files is None:
return

# Check number of files first
try:
QUOTAS.limit_check(context,
injected_files=len(injected_files))
except t_exceptions.OverQuota:
raise t_exceptions.OnsetFileLimitExceeded()

# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))

try:
QUOTAS.limit_check(context,
injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except t_exceptions.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise t_exceptions.OnsetFilePathLimitExceeded()
else:
raise t_exceptions.OnsetFileContentLimitExceeded()

def _process_metadata_quota(self, context, t_server_dict):
try:
ctx = context.elevated()
metadata = t_server_dict.get('metadata', None)
self._check_metadata_properties_quota(ctx, metadata)
except t_exceptions.InvalidMetadata as e1:
LOG.exception(_LE('Invalid metadata %(exception)s'),
{'exception': str(e1)})
pecan.abort(400, _('Invalid metadata'))
except t_exceptions.InvalidMetadataSize as e2:
LOG.exception(_LE('Invalid metadata size %(exception)s'),
{'exception': str(e2)})
pecan.abort(400, _('Invalid metadata size'))
except t_exceptions.MetadataLimitExceeded as e3:
LOG.exception(_LE('Quota exceeded %(exception)s'),
{'exception': str(e3)})
pecan.abort(400, _('Quota exceeded in metadata'))

def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
if not isinstance(metadata, dict):
msg = (_("Metadata type should be dict."))
raise t_exceptions.InvalidMetadata(reason=msg)
num_metadata = len(metadata)
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except t_exceptions.OverQuota as exc:
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise t_exceptions.MetadataLimitExceeded(allowed=quota_metadata)

# Because metadata is processed in the bottom pod, we just do
# parameter validation here to ensure quota management
for k, v in six.iteritems(metadata):
try:
utils.check_string_length(v)
utils.check_string_length(k, min_len=1)
except t_exceptions.InvalidInput as e:
raise t_exceptions.InvalidMetadata(reason=str(e))

if len(k) > MAX_METADATA_KEY_LENGTH:
msg = _("Metadata property key greater than 255 characters")
raise t_exceptions.InvalidMetadataSize(reason=msg)
if len(v) > MAX_METADATA_VALUE_LENGTH:
msg = _("Metadata property value greater than 255 characters")
raise t_exceptions.InvalidMetadataSize(reason=msg)

View File

@ -24,6 +24,8 @@ import neutronclient.common.exceptions as q_exceptions
from oslo_utils import uuidutils
from tricircle.common import context
import tricircle.common.exceptions as t_exceptions
from tricircle.common.i18n import _
from tricircle.common import lock_handle
from tricircle.db import api
from tricircle.db import core
@ -859,6 +861,168 @@ class ServerTest(unittest.TestCase):
ips.append(rule['remote_ip_prefix'])
self.assertEqual(expected_ips, ips)
@patch.object(pecan, 'abort')
def test_process_injected_file_quota(self, mock_abort):
ctx = self.context.elevated()
def _update_default_quota(num1, len1, len2):
self.default_quota = dict(
injected_files=num1, injected_file_path_bytes=len1,
injected_file_content_bytes=len2)
for k, v in self.default_quota.items():
api.quota_class_create(ctx, 'default', k, v)
injected_files = [
{
"path": "/etc/banner.txt",
"contents": "foo foo",
},
{
"path": "/etc/canner.txt",
"contents": "goo goo",
},
]
t_server_dict = {'injected_files': injected_files}
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
_update_default_quota(len(injected_files) - 1,
max_path + 1,
max_content + 1)
self.assertRaises(t_exceptions.OnsetFileLimitExceeded,
self.controller._check_injected_file_quota,
ctx, injected_files)
self.controller._process_injected_file_quota(ctx, t_server_dict)
msg = _('Quota exceeded %s') % \
t_exceptions.OnsetFileLimitExceeded.message
calls = [mock.call(400, msg)]
mock_abort.assert_has_calls(calls)
_update_default_quota(len(injected_files),
max_path + 1,
max_content + 1)
self.controller._check_injected_file_quota(ctx, injected_files)
_update_default_quota(len(injected_files) + 1,
max_path - 1,
max_content + 1)
self.assertRaises(t_exceptions.OnsetFilePathLimitExceeded,
self.controller._check_injected_file_quota,
ctx, injected_files)
self.controller._process_injected_file_quota(ctx, t_server_dict)
msg = _('Quota exceeded %s') % \
t_exceptions.OnsetFilePathLimitExceeded.message
calls = [mock.call(400, msg)]
mock_abort.assert_has_calls(calls)
_update_default_quota(len(injected_files) + 1,
max_path,
max_content + 1)
self.controller._check_injected_file_quota(ctx, injected_files)
_update_default_quota(len(injected_files) + 1,
max_path + 1,
max_content - 1)
self.assertRaises(t_exceptions.OnsetFileContentLimitExceeded,
self.controller._check_injected_file_quota,
ctx, injected_files)
self.controller._process_injected_file_quota(ctx, t_server_dict)
msg = _('Quota exceeded %s') % \
t_exceptions.OnsetFileContentLimitExceeded.message
calls = [mock.call(400, msg)]
mock_abort.assert_has_calls(calls)
_update_default_quota(len(injected_files) + 1,
max_path + 1,
max_content)
self.controller._check_injected_file_quota(ctx, injected_files)
@patch.object(pecan, 'abort')
def test_process_metadata_quota(self, mock_abort):
ctx = self.context.elevated()
def _update_default_quota(num):
self.default_quota = dict(metadata_items=num)
for k, v in self.default_quota.items():
api.quota_class_create(ctx, 'default', k, v)
meta_data_items = {
'A': '1',
'B': '2',
'C': '3',
}
t_server_dict = {'metadata': meta_data_items}
self.controller._check_metadata_properties_quota(ctx)
self.controller._check_metadata_properties_quota(ctx, {})
self.assertRaises(t_exceptions.InvalidMetadata,
self.controller._check_metadata_properties_quota,
ctx, [1, ])
meta_data_items['A'] = None
self.assertRaises(t_exceptions.InvalidMetadata,
self.controller._check_metadata_properties_quota,
ctx, meta_data_items)
self.controller._process_metadata_quota(ctx, t_server_dict)
msg = _('Invalid metadata')
calls = [mock.call(400, msg)]
mock_abort.assert_has_calls(calls)
meta_data_items['A'] = '1'
_update_default_quota(len(meta_data_items))
self.controller._check_metadata_properties_quota(ctx, meta_data_items)
_update_default_quota(len(meta_data_items) + 1)
self.controller._check_metadata_properties_quota(ctx, meta_data_items)
meta_data_items['C'] = '3'
_update_default_quota(len(meta_data_items) - 1)
self.assertRaises(t_exceptions.MetadataLimitExceeded,
self.controller._check_metadata_properties_quota,
ctx, meta_data_items)
self.controller._process_metadata_quota(ctx, t_server_dict)
msg = _('Quota exceeded in metadata')
calls = [mock.call(400, msg)]
mock_abort.assert_has_calls(calls)
_update_default_quota(len(meta_data_items) + 1)
string_exceed_MAX_METADATA_LENGTH = (server.MAX_METADATA_VALUE_LENGTH
+ 1) * '3'
meta_data_items['C'] = string_exceed_MAX_METADATA_LENGTH
self.assertRaises(t_exceptions.InvalidMetadataSize,
self.controller._check_metadata_properties_quota,
ctx, meta_data_items)
self.controller._process_metadata_quota(ctx, t_server_dict)
msg = _('Invalid metadata size')
calls = [mock.call(400, msg)]
mock_abort.assert_has_calls(calls)
meta_data_items['C'] = '3'
meta_data_items[string_exceed_MAX_METADATA_LENGTH] = '4'
self.assertRaises(t_exceptions.InvalidMetadataSize,
self.controller._check_metadata_properties_quota,
ctx, meta_data_items)
self.controller._process_metadata_quota(ctx, t_server_dict)
msg = _('Invalid metadata size')
calls = [mock.call(400, msg)]
mock_abort.assert_has_calls(calls)
def tearDown(self): def tearDown(self):
core.ModelBase.metadata.drop_all(core.get_engine()) core.ModelBase.metadata.drop_all(core.get_engine())
for res in RES_LIST: for res in RES_LIST: