Audit scoper for storage CDM

This patch adds an audit scoper for the storage CDM (cluster data model).

Change-Id: I0c5b3b652027e1394fd7744d904397ce87ed35a1
Implements: blueprint audit-scoper-for-storage-data-model
aditi 2018-01-10 15:31:19 +05:30
parent 5cbb9aca7e
commit bcc129cf94
6 changed files with 417 additions and 5 deletions


@@ -0,0 +1,5 @@
---
features:
  - |
    Adds an audit scoper for the storage data model. Watcher users can now
    specify an audit scope for the storage CDM in the same manner as the
    compute scope.
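
For illustration, a storage audit scope is a list of rules matching the SCHEMA added
below; the values here are made up and simply mirror the fake_scope_2 test fixture
introduced by this patch:

# Hypothetical audit scope: keep only nodes in zone_0, then additionally
# drop one volume and one storage pool from the model.
storage_scope = [
    {'storage': [
        {'availability_zones': [{'name': 'zone_0'}]},
        {'exclude': [
            {'volumes': [{'uuid': 'VOLUME_1'}]},
            {'storage_pools': [{'name': 'host_0@backend_0#pool_1'}]}
        ]}
    ]}
]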


@@ -34,6 +34,85 @@ class CinderClusterDataModelCollector(base.BaseClusterDataModelCollector):
    The Cinder cluster data model collector creates an in-memory
    representation of the resources exposed by the storage service.
    """
    SCHEMA = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "availability_zones": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "name": {
                                "type": "string"
                            }
                        },
                        "additionalProperties": False
                    }
                },
                "volume_types": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "name": {
                                "type": "string"
                            }
                        },
                        "additionalProperties": False
                    }
                },
                "exclude": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "storage_pools": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "name": {
                                            "type": "string"
                                        }
                                    },
                                    "additionalProperties": False
                                }
                            },
                            "volumes": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "uuid": {
                                            "type": "string"
                                        }
                                    },
                                    "additionalProperties": False
                                }
                            },
                            "projects": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "uuid": {
                                            "type": "string"
                                        }
                                    },
                                    "additionalProperties": False
                                }
                            }
                        },
                        "additionalProperties": False
                    }
                }
            },
            "additionalProperties": False
        }
    }

    def __init__(self, config, osc=None):
        super(CinderClusterDataModelCollector, self).__init__(config, osc)
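
As a quick aside, the SCHEMA above can be exercised on its own with the jsonschema
library; this is only an illustrative sketch (the import path and the sample values
are assumptions, not part of this patch):

# Minimal sketch: validate a candidate storage scope against the collector's
# SCHEMA using the jsonschema library.
import jsonschema

from watcher.decision_engine.model.collector import cinder

sample_scope = [
    {"availability_zones": [{"name": "zone_0"}]},
    {"exclude": [{"volumes": [{"uuid": "VOLUME_1"}]}]},
]

# Raises jsonschema.exceptions.ValidationError if the scope is malformed.
jsonschema.validate(sample_scope, cinder.CinderClusterDataModelCollector.SCHEMA)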


@@ -12,6 +12,8 @@
# limitations under the License.

from oslo_log import log

from watcher.common import cinder_helper
from watcher.common import exception
from watcher.decision_engine.scope import base
@@ -25,19 +27,139 @@ class StorageScope(base.BaseScope):

    def __init__(self, scope, config, osc=None):
        super(StorageScope, self).__init__(scope, config)
        self._osc = osc
        self.wrapper = cinder_helper.CinderHelper(osc=self._osc)

    def _collect_vtype(self, volume_types, allowed_nodes):
        service_list = self.wrapper.get_storage_node_list()
        vt_names = [volume_type['name'] for volume_type in volume_types]
        include_all_nodes = False
        if '*' in vt_names:
            if len(vt_names) == 1:
                include_all_nodes = True
            else:
                raise exception.WildcardCharacterIsUsed(
                    resource="volume_types")
        for service in service_list:
            if include_all_nodes:
                allowed_nodes.append(service.host)
                continue
            backend = service.host.split('@')[1]
            v_types = self.wrapper.get_volume_type_by_backendname(
                backend)
            for volume_type in v_types:
                if volume_type in vt_names:
                    # NOTE(adisky): This can append duplicate hosts, but the
                    # list is converted to a set later.
                    allowed_nodes.append(service.host)

    def _collect_zones(self, availability_zones, allowed_nodes):
        service_list = self.wrapper.get_storage_node_list()
        zone_names = [zone['name'] for zone
                      in availability_zones]
        include_all_nodes = False
        if '*' in zone_names:
            if len(zone_names) == 1:
                include_all_nodes = True
            else:
                raise exception.WildcardCharacterIsUsed(
                    resource="availability zones")
        for service in service_list:
            if service.zone in zone_names or include_all_nodes:
                allowed_nodes.append(service.host)

    def exclude_resources(self, resources, **kwargs):
        pools_to_exclude = kwargs.get('pools')
        volumes_to_exclude = kwargs.get('volumes')
        projects_to_exclude = kwargs.get('projects')
        for resource in resources:
            if 'storage_pools' in resource:
                pools_to_exclude.extend(
                    [storage_pool['name'] for storage_pool
                     in resource['storage_pools']])
            elif 'volumes' in resource:
                volumes_to_exclude.extend(
                    [volume['uuid'] for volume in
                     resource['volumes']])
            elif 'projects' in resource:
                projects_to_exclude.extend(
                    [project['uuid'] for project in
                     resource['projects']])

    def exclude_pools(self, pools_to_exclude, cluster_model):
        for pool_name in pools_to_exclude:
            pool = cluster_model.get_pool_by_pool_name(pool_name)
            volumes = cluster_model.get_pool_volumes(pool)
            for volume in volumes:
                cluster_model.remove_volume(volume)
            cluster_model.remove_pool(pool)

    def exclude_volumes(self, volumes_to_exclude, cluster_model):
        for volume_uuid in volumes_to_exclude:
            volume = cluster_model.get_volume_by_uuid(volume_uuid)
            cluster_model.remove_volume(volume)

    def exclude_projects(self, projects_to_exclude, cluster_model):
        all_volumes = cluster_model.get_all_volumes()
        for volume_uuid in all_volumes:
            volume = all_volumes.get(volume_uuid)
            if volume.project_id in projects_to_exclude:
                cluster_model.remove_volume(volume)

    def remove_nodes_from_model(self, nodes_to_remove, cluster_model):
        for hostname in nodes_to_remove:
            node = cluster_model.get_node_by_name(hostname)
            pools = cluster_model.get_node_pools(node)
            for pool in pools:
                volumes = cluster_model.get_pool_volumes(pool)
                for volume in volumes:
                    cluster_model.remove_volume(volume)
                cluster_model.remove_pool(pool)
            cluster_model.remove_node(node)

    def get_scoped_model(self, cluster_model):
        """Leave only nodes, pools and volumes proposed in the audit scope"""
        if not cluster_model:
            return None

        allowed_nodes = []
        nodes_to_remove = set()
        volumes_to_exclude = []
        projects_to_exclude = []
        pools_to_exclude = []

        model_hosts = list(cluster_model.get_all_storage_nodes().keys())

        storage_scope = []
        for scope in self.scope:
            storage_scope = scope.get('storage')
            if not storage_scope:
                return cluster_model

        for rule in storage_scope:
            if 'volume_types' in rule:
                self._collect_vtype(rule['volume_types'], allowed_nodes)
            elif 'availability_zones' in rule:
                self._collect_zones(rule['availability_zones'],
                                    allowed_nodes)
            elif 'exclude' in rule:
                self.exclude_resources(
                    rule['exclude'], pools=pools_to_exclude,
                    volumes=volumes_to_exclude,
                    projects=projects_to_exclude)

        if allowed_nodes:
            nodes_to_remove = set(model_hosts) - set(allowed_nodes)

        self.remove_nodes_from_model(nodes_to_remove, cluster_model)
        self.exclude_pools(pools_to_exclude, cluster_model)
        self.exclude_volumes(volumes_to_exclude, cluster_model)
        self.exclude_projects(projects_to_exclude, cluster_model)

        return cluster_model
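
For context, here is a short usage sketch showing how the scoper prunes a storage
model; it leans on the fake model and the mocks from the unit tests below and is not
part of the patch itself:

import mock

from watcher.common import cinder_helper
from watcher.decision_engine.scope import storage
from watcher.tests.decision_engine.model import faker_cluster_state

# Fake two-node storage model (host_0@backend_0 and host_1@backend_1).
cluster = faker_cluster_state.FakerStorageModelCollector().generate_scenario_1()

# Illustrative scope: keep only nodes in zone_0.
audit_scope = [{'storage': [{'availability_zones': [{'name': 'zone_0'}]}]}]

# get_storage_node_list would normally call Cinder, so stub it out here.
with mock.patch.object(cinder_helper.CinderHelper,
                       'get_storage_node_list') as m_nodes:
    m_nodes.return_value = [mock.Mock(zone='zone_0', host='host_0@backend_0'),
                            mock.Mock(zone='zone_1', host='host_1@backend_1')]
    scoped = storage.StorageScope(audit_scope, mock.Mock(),
                                  osc=mock.Mock()).get_scoped_model(cluster)

# Only host_0@backend_0 and its pools/volumes remain in the scoped model.
print(list(scoped.get_all_storage_nodes()))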


@@ -207,7 +207,8 @@ class BaseStrategy(loadable.Loadable):
        if self._storage_model is None:
            collector = self.collector_manager.get_cluster_model_collector(
                'storage', osc=self.osc)
            audit_scope_handler = collector.get_audit_scope_handler(
                audit_scope=self.audit.scope)
            self._storage_model = audit_scope_handler.get_scoped_model(
                collector.get_latest_cluster_data_model())


@@ -35,3 +35,14 @@ compute_scope = [{'compute': [{'host_aggregates': [{'id': '*'}]},
                              ]}]
                 }
                ]

fake_scope_2 = [{'storage': [{'availability_zones': [{'name': 'zone_0'}]},
                             {'exclude': [
                                 {'volumes': [
                                     {'uuid': 'VOLUME_1'}]},
                                 {'storage_pools': [
                                     {'name': 'host_0@backend_0#pool_1'}]}
                             ]}]
                 }
                ]
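
The fixture names above follow the Cinder host reporting convention that the scoper
relies on: a storage node is identified as host@backend and a pool as
host@backend#pool. A tiny illustrative sketch of how the code above picks those
names apart:

# Illustrative only: name handling used by _collect_vtype and the pool tests.
pool_name = 'host_0@backend_0#pool_1'   # pool name as reported by Cinder
node_name = pool_name.split('#')[0]     # 'host_0@backend_0' -> storage node
backend = node_name.split('@')[1]       # 'backend_0' -> backend for volume types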


@@ -0,0 +1,194 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2018 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import mock

from watcher.common import cinder_helper
from watcher.common import exception
from watcher.decision_engine.scope import storage
from watcher.tests import base
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.scope import fake_scopes


class TestStorageScope(base.TestCase):

    def setUp(self):
        super(TestStorageScope, self).setUp()
        self.fake_cluster = faker_cluster_state.FakerStorageModelCollector()

    @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list')
    def test_get_scoped_model_with_zones_pools_volumes(self, mock_zone_list):
        cluster = self.fake_cluster.generate_scenario_1()
        audit_scope = fake_scopes.fake_scope_2
        mock_zone_list.return_value = [
            mock.Mock(zone='zone_{0}'.format(i),
                      host='host_{0}@backend_{1}'.format(i, i))
            for i in range(2)]
        model = storage.StorageScope(audit_scope, mock.Mock(),
                                     osc=mock.Mock()).get_scoped_model(cluster)
        expected_edges = [('VOLUME_0', 'host_0@backend_0#pool_0'),
                          ('host_0@backend_0#pool_0', 'host_0@backend_0')]
        self.assertEqual(sorted(expected_edges), sorted(model.edges()))

    @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list')
    def test_get_scoped_model_without_scope(self, mock_zone_list):
        cluster = self.fake_cluster.generate_scenario_1()
        storage.StorageScope([], mock.Mock(),
                             osc=mock.Mock()).get_scoped_model(cluster)
        assert not mock_zone_list.called

    @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list')
    def test_collect_zones(self, mock_zone_list):
        allowed_nodes = []
        az_scope = [{'name': 'zone_1'}]
        mock_zone_list.return_value = [
            mock.Mock(zone='zone_{0}'.format(i),
                      host='host_{0}@backend_{1}'.format(i, i))
            for i in range(2)]
        storage.StorageScope([{'availability_zones': az_scope}],
                             mock.Mock(), osc=mock.Mock())._collect_zones(
            az_scope, allowed_nodes)
        self.assertEqual(['host_1@backend_1'], sorted(allowed_nodes))

        # storage scope with az wildcard
        az_scope = [{'name': '*'}]
        del allowed_nodes[:]
        storage.StorageScope([{'availability_zones': az_scope}],
                             mock.Mock(), osc=mock.Mock())._collect_zones(
            az_scope, allowed_nodes)
        self.assertEqual(['host_0@backend_0', 'host_1@backend_1'],
                         sorted(allowed_nodes))

        # storage scope with az wildcard and other
        az_scope = [{'name': '*'}, {'name': 'zone_0'}]
        del allowed_nodes[:]
        scope_handler = storage.StorageScope(
            [{'availability_zones': az_scope}], mock.Mock(), osc=mock.Mock())
        self.assertRaises(exception.WildcardCharacterIsUsed,
                          scope_handler._collect_zones,
                          az_scope, allowed_nodes)

    @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list')
    @mock.patch.object(cinder_helper.CinderHelper,
                       'get_volume_type_by_backendname')
    def test_collect_vtype(self, mock_vt_list, mock_zone_list):
        allowed_nodes = []
        mock_zone_list.return_value = [
            mock.Mock(zone='zone_{0}'.format(i),
                      host='host_{0}@backend_{1}'.format(i, i))
            for i in range(2)]

        def side_effect(arg):
            if arg == 'backend_0':
                return ['type_0']
            else:
                return ['type_1']

        mock_vt_list.side_effect = side_effect

        vt_scope = [{'name': 'type_1'}]
        storage.StorageScope([{'volume_types': vt_scope}],
                             mock.Mock(), osc=mock.Mock())._collect_vtype(
            vt_scope, allowed_nodes)
        self.assertEqual(['host_1@backend_1'], sorted(allowed_nodes))

        # storage scope with vt wildcard
        vt_scope = [{'name': '*'}]
        del allowed_nodes[:]
        storage.StorageScope([{'volume_types': vt_scope}],
                             mock.Mock(), osc=mock.Mock())._collect_vtype(
            vt_scope, allowed_nodes)
        self.assertEqual(['host_0@backend_0', 'host_1@backend_1'],
                         sorted(allowed_nodes))

        # storage scope with vt wildcard and other
        vt_scope = [{'name': '*'}, {'name': 'type_0'}]
        del allowed_nodes[:]
        scope_handler = storage.StorageScope([{'volume_types': vt_scope}],
                                             mock.Mock(), osc=mock.Mock())
        self.assertRaises(exception.WildcardCharacterIsUsed,
                          scope_handler._collect_vtype,
                          vt_scope, allowed_nodes)

    def test_exclude_resources(self):
        pools_to_exclude = []
        projects_to_exclude = []
        volumes_to_exclude = []
        resources = [{'volumes': [{'uuid': 'VOLUME_1'},
                                  {'uuid': 'VOLUME_2'}]
                      },
                     {'storage_pools': [{'name': 'host_0@backend_0#pool_1'},
                                        {'name': 'host_1@backend_1#pool_1'}]
                      },
                     {'projects': [{'uuid': 'PROJECT_1'},
                                   {'uuid': 'PROJECT_2'},
                                   {'uuid': 'PROJECT_3'}]
                      }
                     ]
        storage.StorageScope(resources, mock.Mock(),
                             osc=mock.Mock()).exclude_resources(
            resources, pools=pools_to_exclude, projects=projects_to_exclude,
            volumes=volumes_to_exclude)
        self.assertEqual(['VOLUME_1', 'VOLUME_2'], volumes_to_exclude)
        self.assertEqual(['PROJECT_1', 'PROJECT_2', 'PROJECT_3'],
                         projects_to_exclude)
        self.assertEqual(['host_0@backend_0#pool_1',
                          'host_1@backend_1#pool_1'], pools_to_exclude)

    def test_exclude_volumes(self):
        cluster = self.fake_cluster.generate_scenario_1()
        exclude = ['VOLUME_0', 'VOLUME_3']
        storage.StorageScope([], mock.Mock(),
                             osc=mock.Mock()).exclude_volumes(exclude, cluster)
        self.assertNotIn(exclude[0], cluster.get_all_volumes().keys())
        self.assertNotIn(exclude[1], cluster.get_all_volumes().keys())

    def test_exclude_pools(self):
        cluster = self.fake_cluster.generate_scenario_1()
        exclude = ['host_0@backend_0#pool_0']
        node_name = (exclude[0].split('#'))[0]
        storage.StorageScope([], mock.Mock(),
                             osc=mock.Mock()).exclude_pools(exclude, cluster)
        node = cluster.get_node_by_name(node_name)
        self.assertNotIn(exclude, cluster.get_node_pools(node))

    def test_exclude_projects(self):
        cluster = self.fake_cluster.generate_scenario_1()
        exclude = ['project_1', 'project_2']
        storage.StorageScope([], mock.Mock(),
                             osc=mock.Mock()).exclude_projects(exclude,
                                                               cluster)
        projects = []
        volumes = cluster.get_all_volumes()
        for volume_id in volumes:
            volume = volumes.get(volume_id)
            projects.append(volume.get('project_id'))
        self.assertNotIn(exclude[0], projects)
        self.assertNotIn(exclude[1], projects)

    def test_remove_nodes_from_model(self):
        cluster = self.fake_cluster.generate_scenario_1()
        nodes_to_remove = ['host_0@backend_0']
        storage.StorageScope([], mock.Mock(),
                             osc=mock.Mock()).remove_nodes_from_model(
            nodes_to_remove, cluster)
        self.assertEqual(['host_1@backend_1'],
                         list(cluster.get_all_storage_nodes()))