Add resource quota checks for clusters
Resources covered by the checks: CPU, RAM, instances, floating IPs, volumes, volume storage capacity, security groups, and security group rules.

Closes-bug: #1174612
Change-Id: Iecef1acc506a75d041ed5329b102d9891aad6962
This commit is contained in:
parent
4bb6d9d610
commit
120afc77a0
@ -329,3 +329,15 @@ class MalformedRequestBody(SaharaException):
|
||||
def __init__(self, reason):
|
||||
self.message = self.message % reason
|
||||
super(MalformedRequestBody, self).__init__()
|
||||
|
||||
|
||||
class QuotaException(SaharaException):
    """Raised when a request does not fit into the tenant's quotas."""

    code = "QUOTA_ERROR"
    message = _("Quota exceeded for %(resource)s: Requested %(requested)s,"
                " but available %(available)s")

    def __init__(self, resource, requested, available):
        # Interpolate into an instance attribute so the class-level
        # message template stays untouched for other instances.
        self.message = self.message % dict(resource=resource,
                                           requested=requested,
                                           available=available)
        super(QuotaException, self).__init__()
|
||||
|
@ -23,6 +23,7 @@ from sahara import conductor as c
|
||||
from sahara import context
|
||||
from sahara.plugins import base as plugin_base
|
||||
from sahara.plugins import provisioning
|
||||
from sahara.service import quotas
|
||||
from sahara.utils import general as g
|
||||
from sahara.utils.notification import sender
|
||||
from sahara.utils.openstack import nova
|
||||
@ -69,10 +70,11 @@ def scale_cluster(id, data):
|
||||
|
||||
additional = construct_ngs_for_scaling(cluster, additional_node_groups)
|
||||
cluster = conductor.cluster_get(ctx, cluster)
|
||||
_add_ports_for_auto_sg(ctx, cluster, plugin)
|
||||
|
||||
try:
|
||||
cluster = g.change_cluster_status(cluster, "Validating")
|
||||
|
||||
quotas.check_scaling(cluster, to_be_enlarged, additional)
|
||||
plugin.validate_scaling(cluster, to_be_enlarged, additional)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
@ -97,10 +99,12 @@ def create_cluster(values):
|
||||
sender.notify(ctx, cluster.id, cluster.name, "New",
|
||||
"create")
|
||||
plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
|
||||
_add_ports_for_auto_sg(ctx, cluster, plugin)
|
||||
|
||||
# validating cluster
|
||||
try:
|
||||
cluster = g.change_cluster_status(cluster, "Validating")
|
||||
quotas.check_cluster(cluster)
|
||||
plugin.validate(cluster)
|
||||
except Exception as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
@ -112,6 +116,13 @@ def create_cluster(values):
|
||||
return cluster
|
||||
|
||||
|
||||
def _add_ports_for_auto_sg(ctx, cluster, plugin):
|
||||
for ng in cluster.node_groups:
|
||||
if ng.auto_security_group:
|
||||
ports = {'open_ports': plugin.get_open_ports(ng)}
|
||||
conductor.node_group_update(ctx, ng, ports)
|
||||
|
||||
|
||||
def terminate_cluster(id):
|
||||
cluster = g.change_cluster_status(id, "Deleting")
|
||||
|
||||
|
@ -207,8 +207,6 @@ def _prepare_provisioning(cluster_id):
|
||||
update_dict = {}
|
||||
update_dict["image_username"] = INFRA.get_node_group_image_username(
|
||||
nodegroup)
|
||||
if nodegroup.auto_security_group:
|
||||
update_dict["open_ports"] = plugin.get_open_ports(nodegroup)
|
||||
conductor.node_group_update(ctx, nodegroup, update_dict)
|
||||
|
||||
cluster = conductor.cluster_get(ctx, cluster_id)
|
||||
|
174
sahara/service/quotas.py
Normal file
174
sahara/service/quotas.py
Normal file
@ -0,0 +1,174 @@
|
||||
# Copyright (c) 2015 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
|
||||
from sahara import context
|
||||
from sahara import exceptions as ex
|
||||
from sahara.i18n import _
|
||||
from sahara.utils.openstack import cinder as cinder_client
|
||||
from sahara.utils.openstack import neutron as neutron_client
|
||||
from sahara.utils.openstack import nova as nova_client
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def _get_zero_limits():
|
||||
return {
|
||||
'ram': 0,
|
||||
'cpu': 0,
|
||||
'instances': 0,
|
||||
'floatingips': 0,
|
||||
'security_groups': 0,
|
||||
'security_group_rules': 0,
|
||||
'ports': 0,
|
||||
'volumes': 0,
|
||||
'volume_gbs': 0
|
||||
}
|
||||
|
||||
|
||||
def check_cluster(cluster):
    """Raise QuotaException if provisioning *cluster* would exceed quotas."""
    _check_limits(_get_req_cluster_limits(cluster))
|
||||
|
||||
|
||||
def check_scaling(cluster, to_be_enlarged, additional):
    """Raise QuotaException if the requested scaling would exceed quotas."""
    _check_limits(
        _get_req_scaling_limits(cluster, to_be_enlarged, additional))
|
||||
|
||||
|
||||
def _check_limits(req_limits):
    """Compare requested resource amounts against available ones.

    Raises QuotaException, naming the offending resource, when any
    requested amount exceeds what is currently available.
    """
    display_names = [
        ('ram', _("RAM")),
        ('cpu', _("VCPU")),
        ('instances', _("instance")),
        ('floatingips', _("floating ip")),
        ('security_groups', _("security group")),
        ('security_group_rules', _("security group rule")),
        ('ports', _("port")),
        ('volumes', _("volume")),
        ('volume_gbs', _("volume storage")),
    ]

    avail_limits = _get_avail_limits()
    for resource, label in display_names:
        if req_limits[resource] > avail_limits[resource]:
            raise ex.QuotaException(label, req_limits[resource],
                                    avail_limits[resource])
|
||||
|
||||
|
||||
def _get_req_cluster_limits(cluster):
    """Compute the resources needed to provision the whole *cluster*."""
    required = _get_zero_limits()
    for node_group in cluster.node_groups:
        _update_limits_for_ng(required, node_group, node_group.count)
    return required
|
||||
|
||||
|
||||
def _get_req_scaling_limits(cluster, to_be_enlarged, additional):
    """Compute the resource delta implied by a scaling request.

    :param cluster: cluster being scaled
    :param to_be_enlarged: dict mapping existing node group id -> new count
    :param additional: dict mapping new node group id -> count
    :return: limits dict holding the (possibly negative) resource deltas
    """
    ng_id_map = to_be_enlarged.copy()
    ng_id_map.update(additional)
    req_limits = _get_zero_limits()
    for ng in cluster.node_groups:
        # Explicit membership test: a requested count of 0 is a valid
        # scale-down target and must still be accounted for, while node
        # groups absent from the request are left untouched.  A plain
        # truthiness check on .get() would silently skip the former.
        if ng.id in ng_id_map:
            _update_limits_for_ng(req_limits, ng, ng_id_map[ng.id] - ng.count)
    return req_limits
|
||||
|
||||
|
||||
def _update_limits_for_ng(limits, ng, count):
    """Add the resources consumed by *count* instances of node group *ng*.

    *count* may be negative (scaling down), in which case the tracked
    amounts are decreased.  Mutates *limits* in place.

    :param limits: limits dict to update (shape of _get_zero_limits())
    :param ng: node group object
    :param count: number of instances added (negative when removed)
    """
    def sign(x):
        # Direction of the change: +1 for growth, -1 for shrinkage.
        # (PEP 8 E731: a def is preferred over assigning a lambda.)
        return -1 if x < 0 else 1

    nova = nova_client.client()
    limits['instances'] += count
    flavor = nova.flavors.get(ng.flavor_id)
    limits['ram'] += flavor.ram * count
    limits['cpu'] += flavor.vcpus * count
    if ng.floating_ip_pool:
        limits['floatingips'] += count
    if ng.volumes_per_node:
        limits['volumes'] += ng.volumes_per_node * count
        limits['volume_gbs'] += ng.volumes_per_node * ng.volumes_size * count
    if ng.auto_security_group:
        # One auto security group is created (or removed) per node group,
        # regardless of how many instances change.
        limits['security_groups'] += sign(count)
        # NOTE: +3 - all traffic for private network
        if CONF.use_neutron:
            limits['security_group_rules'] += (
                (len(ng.open_ports) + 3) * sign(count))
        else:
            # NOTE(review): with nova-network the rule count is tracked as
            # a per-group maximum rather than a sum — presumably because
            # the nova quota limits rules per security group; confirm.
            limits['security_group_rules'] = max(
                limits['security_group_rules'], len(ng.open_ports) + 3)
    if CONF.use_neutron:
        limits['ports'] += count
|
||||
|
||||
|
||||
def _get_avail_limits():
    """Gather the currently available resource amounts from all services."""
    available = _get_zero_limits()
    for service_limits in (_get_nova_limits(),
                           _get_neutron_limits(),
                           _get_cinder_limits()):
        available.update(service_limits)
    return available
|
||||
|
||||
|
||||
def _get_nova_limits():
    """Return remaining nova resources for the current tenant.

    Floating ip and security group figures are included only with
    nova-network; under neutron they come from _get_neutron_limits().
    """
    nova = nova_client.client()
    absolute = nova.limits.get().to_dict()['absolute']

    def remaining(maximum, used):
        # Available amount = configured maximum minus current usage.
        return absolute[maximum] - absolute[used]

    limits = {
        'ram': remaining('maxTotalRAMSize', 'totalRAMUsed'),
        'cpu': remaining('maxTotalCores', 'totalCoresUsed'),
        'instances': remaining('maxTotalInstances', 'totalInstancesUsed'),
    }
    if CONF.use_neutron:
        return limits
    if CONF.use_floating_ips:
        limits['floatingips'] = remaining('maxTotalFloatingIps',
                                          'totalFloatingIpsUsed')
    limits['security_groups'] = remaining('maxSecurityGroups',
                                          'totalSecurityGroupsUsed')
    # NOTE(review): no usage figure is subtracted here — looks like this
    # quota is reported per security group, not tenant-wide; confirm.
    limits['security_group_rules'] = absolute['maxSecurityGroupRules']
    return limits
|
||||
|
||||
|
||||
def _get_neutron_limits():
    """Return remaining neutron resources for the current tenant.

    Returns an empty dict when neutron is not in use, so callers can
    merge the result unconditionally.
    """
    if not CONF.use_neutron:
        return {}
    neutron = neutron_client.client()
    tenant_id = context.ctx().tenant_id
    quota = neutron.show_quota(tenant_id)['quota']

    limits = {}
    if CONF.use_floating_ips:
        fips_in_use = neutron.list_floatingips(
            tenant_id=tenant_id)['floatingips']
        limits['floatingips'] = quota['floatingip'] - len(fips_in_use)

    sgs_in_use = neutron.list_security_groups(
        tenant_id=tenant_id)['security_groups']
    limits['security_groups'] = quota['security_group'] - len(sgs_in_use)

    rules_in_use = neutron.list_security_group_rules(
        tenant_id=tenant_id)['security_group_rules']
    limits['security_group_rules'] = (
        quota['security_group_rule'] - len(rules_in_use))

    ports_in_use = neutron.list_ports(tenant_id=tenant_id)['ports']
    limits['ports'] = quota['port'] - len(ports_in_use)
    return limits
|
||||
|
||||
|
||||
def _get_cinder_limits():
    """Return remaining cinder volume resources for the current tenant."""
    cinder = cinder_client.client()
    # Flatten the list of absolute limit objects into a name -> value map.
    absolute = dict((limit.name, limit.value)
                    for limit in cinder.limits.get().absolute)
    return {
        'volumes': (absolute['maxTotalVolumes'] -
                    absolute['totalVolumesUsed']),
        'volume_gbs': (absolute['maxTotalVolumeGigabytes'] -
                       absolute['totalGigabytesUsed']),
    }
|
93
sahara/tests/unit/service/test_quotas.py
Normal file
93
sahara/tests/unit/service/test_quotas.py
Normal file
@ -0,0 +1,93 @@
|
||||
# Copyright (c) 2015 Mirantis Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from sahara import exceptions as exc
|
||||
from sahara.service import quotas
|
||||
from sahara.tests.unit import base
|
||||
|
||||
|
||||
class TestQuotas(base.SaharaTestCase):
    """Unit tests for the helpers in sahara.service.quotas."""

    # Every resource key tracked by the quotas module.
    LIST_LIMITS = ['ram', 'cpu', 'instances', 'floatingips',
                   'security_groups', 'security_group_rules', 'ports',
                   'volumes', 'volume_gbs']

    def test_get_zero_limits(self):
        # _get_zero_limits returns all known resources, each set to zero.
        res = quotas._get_zero_limits()
        self.assertEqual(9, len(res))
        for key in self.LIST_LIMITS:
            self.assertEqual(0, res[key])

    @mock.patch('sahara.service.quotas._get_avail_limits')
    def test_check_limits(self, mock_avail_limits):
        # _check_limits passes when requested <= available and raises
        # QuotaException otherwise.
        avail_limits = {}
        req_limits = {}

        for key in self.LIST_LIMITS:
            avail_limits[key] = 2
            req_limits[key] = 1
        mock_avail_limits.return_value = avail_limits
        # Strictly below every limit: no exception.
        self.assertIsNone(quotas._check_limits(req_limits))

        for key in self.LIST_LIMITS:
            req_limits[key] = 2
        # Exactly at the limit: still allowed.
        self.assertIsNone(quotas._check_limits(req_limits))

        for key in self.LIST_LIMITS:
            req_limits[key] = 3
        # Above the limit: QuotaException is raised.
        self.assertRaises(exc.QuotaException, quotas._check_limits, req_limits)

    @mock.patch('sahara.utils.openstack.nova.client')
    def test_update_limits_for_ng(self, nova_mock):
        # _update_limits_for_ng accumulates the resources a node group
        # needs, scaled by the instance count.
        # Fake flavor reporting ram=4 and vcpus=2 per instance.
        flavor_mock = mock.Mock()
        type(flavor_mock).ram = mock.PropertyMock(return_value=4)
        type(flavor_mock).vcpus = mock.PropertyMock(return_value=2)

        flavor_get_mock = mock.Mock()
        flavor_get_mock.get.return_value = flavor_mock

        type(nova_mock.return_value).flavors = mock.PropertyMock(
            return_value=flavor_get_mock)

        # Node group with a floating ip pool, 4 volumes of size 5 per node
        # and an auto security group opening two ports.
        ng = mock.Mock()
        type(ng).flavor_id = mock.PropertyMock(return_value=3)
        type(ng).floating_ip_pool = mock.PropertyMock(return_value='pool')
        type(ng).volumes_per_node = mock.PropertyMock(return_value=4)
        type(ng).volumes_size = mock.PropertyMock(return_value=5)
        type(ng).auto_security_group = mock.PropertyMock(return_value=True)
        type(ng).open_ports = mock.PropertyMock(return_value=[1111, 2222])

        limits = quotas._get_zero_limits()
        self.override_config('use_neutron', True)
        quotas._update_limits_for_ng(limits, ng, 3)

        self.assertEqual(3, limits['instances'])
        self.assertEqual(12, limits['ram'])
        self.assertEqual(6, limits['cpu'])
        self.assertEqual(3, limits['floatingips'])
        self.assertEqual(12, limits['volumes'])
        self.assertEqual(60, limits['volume_gbs'])
        self.assertEqual(1, limits['security_groups'])
        # 2 open ports + 3 extra rules for all private-network traffic.
        self.assertEqual(5, limits['security_group_rules'])
        self.assertEqual(3, limits['ports'])

        type(ng).open_ports = mock.PropertyMock(return_value=[1, 2, 3])
        self.override_config('use_neutron', False)
        quotas._update_limits_for_ng(limits, ng, 3)

        # With nova-network the rule count is a maximum (3 ports + 3 = 6)
        # rather than a sum, and no neutron ports are added.
        self.assertEqual(6, limits['security_group_rules'])
        self.assertEqual(3, limits['ports'])
|
Loading…
Reference in New Issue
Block a user