Implements resource type CRUD.

This is the boiler plate for resource type CRUD.

Legacy Ceilometer rules are still loaded from entry points

The CRUD interface doesn't allow adding attributes yet

Blueprint resource-type-rest-api
Change-Id: I762e42b1b5f9ed78fdaef51bb601e97468b8cf61
This commit is contained in:
Mehdi Abaakouk 2016-01-19 12:39:21 +01:00
parent 0f2538db6a
commit 420b1c424b
12 changed files with 626 additions and 64 deletions

View File

@ -300,6 +300,28 @@ The same endpoint can be used to append metrics to a resource:
.. _Nova: http://launchpad.net/nova
Resource Types
==============
Gnocchi is able to manage resource types with custom attributes.
To create a new resource type:
{{ scenarios['create-resource-type']['doc'] }}
Then to retrieve its description:
{{ scenarios['get-resource-type']['doc'] }}
All resource types can be listed like this:
{{ scenarios['list-resource-type']['doc'] }}
It can also be deleted if no more resources are associated with it:
{{ scenarios['delete-resource-type']['doc'] }}
Searching for resources
=======================

View File

@ -309,6 +309,30 @@
- name: get-patched-instance
request: GET /v1/resource/instance/{{ scenarios['create-resource-instance']['response'].json['id'] }} HTTP/1.1
- name: create-resource-type
request: |
POST /v1/resource_type HTTP/1.1
Content-Type: application/json
{"name": "my_custom_type"}
- name: create-resource-type-2
request: |
POST /v1/resource_type HTTP/1.1
Content-Type: application/json
{"name": "my_other_type"}
- name: get-resource-type
request: GET /v1/resource_type/my_custom_type HTTP/1.1
- name: list-resource-type
request: GET /v1/resource_type HTTP/1.1
- name: delete-resource-type
request: DELETE /v1/resource_type/my_custom_type HTTP/1.1
- name: search-resource-history
request: |
POST /v1/search/resource/instance?history=true HTTP/1.1

View File

@ -12,6 +12,11 @@
"list resource": "rule:admin_or_creator or rule:resource_owner",
"search resource": "rule:admin_or_creator or rule:resource_owner",
"create resource type": "role:admin",
"delete resource type": "role:admin",
"list resource type": "",
"get resource type": "",
"get archive policy": "",
"list archive policy": "",
"create archive policy": "role:admin",

View File

@ -37,6 +37,11 @@ OPTS = [
_marker = object()
class ResourceType(object):
def __eq__(self, other):
return self.name == other.name
class Resource(object):
def get_metric(self, metric_name):
for m in self.metrics:
@ -124,6 +129,14 @@ class ArchivePolicyInUse(IndexerException):
self.archive_policy = archive_policy
class ResourceTypeInUse(IndexerException):
    """Error raised when a resource type is still being used."""
    def __init__(self, resource_type):
        super(ResourceTypeInUse, self).__init__(
            "Resource type %s is still in use" % resource_type)
        # Keep the offending type name for callers (e.g. REST error handling).
        self.resource_type = resource_type
class NoSuchArchivePolicyRule(IndexerException):
"""Error raised when an archive policy rule does not exist."""
def __init__(self, archive_policy_rule):
@ -158,6 +171,14 @@ class ResourceAlreadyExists(IndexerException):
self.resource = resource
class ResourceTypeAlreadyExists(IndexerException):
    """Error raised when a resource type already exists."""
    def __init__(self, resource_type):
        msg = "Resource type %s already exists" % resource_type
        super(ResourceTypeAlreadyExists, self).__init__(msg)
        # Expose the conflicting type name to callers.
        self.resource_type = resource_type
class ResourceAttributeError(IndexerException, AttributeError):
"""Error raised when an attribute does not exist for a resource type."""
def __init__(self, resource, attribute):
@ -336,3 +357,22 @@ class IndexerDriver(object):
if fnmatch.fnmatch(metric_name or "", rule.metric_pattern):
return self.get_archive_policy(rule.archive_policy_name)
raise NoArchivePolicyRuleMatch(metric_name)
@staticmethod
def create_resource_type(resource_type):
    """Create a new resource type in the indexer.

    :param resource_type: the resource type to create
    """
    raise exceptions.NotImplementedError
@staticmethod
def get_resource_type(name):
    """Get a resource type from the indexer.

    :param name: name of the resource type
    """
    raise exceptions.NotImplementedError
@staticmethod
def list_resource_types(attribute_filter=None,
                        limit=None,
                        marker=None,
                        sorts=None):
    """List the resource types known by the indexer.

    :param attribute_filter: optional filter on type attributes
    :param limit: maximum number of types to return
    :param marker: pagination marker
    :param sorts: sort specification
    """
    raise exceptions.NotImplementedError

View File

@ -0,0 +1,54 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add tablename to resource_type
Revision ID: 0718ed97e5b3
Revises: 828c16f70cce
Create Date: 2016-01-20 08:14:04.893783
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0718ed97e5b3'
down_revision = '828c16f70cce'
branch_labels = None
depends_on = None
def upgrade():
    """Add the 'tablename' column to resource_type and backfill it."""
    # Add the column as nullable first so existing rows can be backfilled.
    op.add_column("resource_type", sa.Column('tablename', sa.String(18),
                                             nullable=True))
    # Lightweight table definition used only for the data migration.
    resource_type = sa.Table(
        'resource_type', sa.MetaData(),
        sa.Column('name', sa.String(255), nullable=False),
        sa.Column('tablename', sa.String(18), nullable=True)
    )
    # "instance_network_interface" exceeds the 18-char tablename limit, so
    # it gets a shortened name. NOTE: inline_literal() handles quoting
    # itself — passing "'instance_net_int'" would store literal quotes.
    op.execute(resource_type.update().where(
        resource_type.c.name == "instance_network_interface"
    ).values({'tablename': op.inline_literal('instance_net_int')}))
    # Every other type uses its own name as tablename: copy the 'name'
    # column value, not the literal string 'name'.
    op.execute(resource_type.update().where(
        resource_type.c.name != "instance_network_interface"
    ).values({'tablename': resource_type.c.name}))
    # Now that every row is populated, enforce NOT NULL and uniqueness.
    op.alter_column("resource_type", "tablename", type_=sa.String(18),
                    nullable=False)
    op.create_unique_constraint("uniq_resource_type0tablename",
                                "resource_type", ["tablename"])

View File

@ -25,6 +25,7 @@ from oslo_db import exception
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import utils as oslo_db_utils
from oslo_log import log
import six
import sqlalchemy
import sqlalchemy_utils
@ -45,24 +46,7 @@ ResourceType = base.ResourceType
_marker = indexer._marker
def get_resource_mappers(ext):
if ext.name == "generic":
resource_ext = ext.plugin
resource_history_ext = ResourceHistory
else:
tablename = getattr(ext.plugin, '__tablename__', ext.name)
resource_ext = type(str(ext.name),
(ext.plugin, base.ResourceExtMixin, Resource),
{"__tablename__": tablename})
resource_history_ext = type(str("%s_history" % ext.name),
(ext.plugin, base.ResourceHistoryExtMixin,
ResourceHistory),
{"__tablename__": (
"%s_history" % tablename)})
return {'resource': resource_ext,
'history': resource_history_ext}
LOG = log.getLogger(__name__)
class PerInstanceFacade(object):
@ -103,11 +87,123 @@ class PerInstanceFacade(object):
self.trans._factory._writer_engine.dispose()
class SQLAlchemyIndexer(indexer.IndexerDriver):
resources = extension.ExtensionManager('gnocchi.indexer.resources')
class ResourceClassMapper(object):
def __init__(self):
    # Legacy resource types are shipped as stevedore extensions.
    self._resources = extension.ExtensionManager(
        'gnocchi.indexer.resources')
    # Cache of tablename -> {'resource': cls, 'history': cls} mappers.
    self._cache = self.load_legacy_mappers()
    # Guards _cache and SQLAlchemy metadata mutations across threads.
    self._lock = threading.RLock()
_RESOURCE_CLASS_MAPPER = {ext.name: get_resource_mappers(ext)
for ext in resources.extensions}
@staticmethod
def _build_class_mappers(resource_type, baseclass=None):
    """Build the ORM classes for a resource type.

    :param resource_type: object carrying the target ``tablename``
    :param baseclass: optional extra base class (used for legacy types)
    :returns: dict with the 'resource' class and its 'history' counterpart
    """
    tablename = resource_type.tablename
    # TODO(sileht): Add columns
    if not baseclass:
        baseclass = type(str("%s_base" % tablename), (object, ), {})
    resource_ext = type(
        str("%s_resource" % tablename),
        (baseclass, base.ResourceExtMixin, base.Resource),
        {"__tablename__": tablename})
    resource_history_ext = type(
        str("%s_history" % tablename),
        (baseclass, base.ResourceHistoryExtMixin, base.ResourceHistory),
        {"__tablename__": ("%s_history" % tablename)})
    return {'resource': resource_ext,
            'history': resource_history_ext}
def is_legacy(self, resource_type_name):
    # Legacy types are the ones declared via the stevedore entry point.
    return resource_type_name in self._resources
def load_legacy_mappers(self):
    """Build the tablename -> mapper dict for entry-point resource types."""
    mappers = {}
    for ext in self._resources.extensions:
        tablename = getattr(ext.plugin, '__tablename__', ext.name)
        if ext.name == "generic":
            # "generic" maps directly onto the base classes.
            mappers[tablename] = {'resource': base.Resource,
                                  'history': base.ResourceHistory}
        else:
            resource_type = base.ResourceType(name=ext.name,
                                              tablename=tablename)
            mappers[tablename] = self._build_class_mappers(resource_type,
                                                           ext.plugin)
    return mappers
def get_legacy_resource_types(self):
    """Return ResourceType objects for all entry-point resource types."""
    return [
        base.ResourceType(
            name=ext.name,
            tablename=getattr(ext.plugin, '__tablename__', ext.name))
        for ext in self._resources.extensions
    ]
def get_classes(self, resource_type):
    """Return (building if needed) the ORM classes for a resource type."""
    # NOTE(sileht): Most of the times we can bypass the lock so do it
    try:
        return self._cache[resource_type.tablename]
    except KeyError:
        pass
    # TODO(sileht): handle the case where the table doesn't exist
    with self._lock:
        # Re-check under the lock: another thread may have built it.
        try:
            return self._cache[resource_type.tablename]
        except KeyError:
            mapper = self._build_class_mappers(resource_type)
            self._cache[resource_type.tablename] = mapper
            return mapper
@oslo_db.api.wrap_db_retry(retry_on_deadlock=True)
def map_and_create_tables(self, resource_type, connection):
    """Map a resource type's classes and create their database tables."""
    with self._lock:
        # NOTE(sileht): map this resource_type to have
        # Base.metadata filled with sa.Table objects
        mappers = self.get_classes(resource_type)
        tables = [Base.metadata.tables[klass.__tablename__]
                  for klass in mappers.values()]
        Base.metadata.create_all(connection, tables=tables)
def unmap_and_delete_tables(self, resource_type, connection):
    """Drop a resource type's tables and purge its mappers from the cache.

    :param resource_type: the type whose tables should be removed
    :param connection: DB connection, or None to only unmap (no DDL)
    """
    with self._lock:
        # NOTE(sileht): map this resource_type to have
        # Base.metadata filled with sa.Table objects
        mappers = self.get_classes(resource_type)
        tables = [Base.metadata.tables[klass.__tablename__]
                  for klass in mappers.values()]
        if connection is not None:
            # NOTE(sileht): Base.metadata.drop_all doesn't
            # issue CASCADE stuffs correctly at least on postgresql
            # We drop foreign keys manually to not lock the destination
            # table for too long during drop table.
            # It's safe to not use a transaction since
            # the resource_type table is already cleaned and committed
            # so this code cannot be triggered anymore for this
            # resource_type
            for table in tables:
                for fk in table.foreign_key_constraints:
                    self._safe_execute(
                        connection,
                        sqlalchemy.schema.DropConstraint(fk))
            for table in tables:
                self._safe_execute(connection,
                                   sqlalchemy.schema.DropTable(table))
        # TODO(sileht): Remove this resource on other workers
        # by using expiration on cache ?
        for table in tables:
            Base.metadata.remove(table)
        del self._cache[resource_type.tablename]
@oslo_db.api.wrap_db_retry(retry_on_deadlock=True)
def _safe_execute(self, connection, works):
    """Execute a statement in its own transaction, retrying on deadlock."""
    # NOTE(sileht): we create a transaction to ensure mysql
    # create locks on other transaction...
    trans = connection.begin()
    connection.execute(works)
    trans.commit()
class SQLAlchemyIndexer(indexer.IndexerDriver):
_RESOURCE_TYPE_MANAGER = ResourceClassMapper()
def __init__(self, conf):
conf.set_override("connection", conf.indexer.url, "database")
@ -147,17 +243,73 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
else:
command.upgrade(cfg, "head")
for resource_type in self._RESOURCE_CLASS_MAPPER:
for rt in self._RESOURCE_TYPE_MANAGER.get_legacy_resource_types():
try:
with self.facade.writer() as session:
session.add(ResourceType(name=resource_type))
session.add(rt)
except exception.DBDuplicateEntry:
pass
def _resource_type_to_class(self, resource_type, purpose="resource"):
if resource_type not in self._RESOURCE_CLASS_MAPPER:
raise indexer.NoSuchResourceType(resource_type)
return self._RESOURCE_CLASS_MAPPER[resource_type][purpose]
def create_resource_type(self, name):
    """Create a resource type and its resource/history tables.

    :param name: name of the new resource type
    :raises indexer.ResourceTypeAlreadyExists: if the name is taken
    :returns: the created ResourceType object
    """
    # NOTE(sileht): mysql have a stupid and small length limitation on the
    # foreign key and index name, so we can't use the resource type name as
    # tablename, the limit is 64. The longest name we have is
    # fk_<tablename>_history_revision_resource_history_revision,
    # so 64 - 46 = 18
    tablename = "rt_%s" % uuid.uuid4().hex[:15]
    resource_type = ResourceType(name=name,
                                 tablename=tablename)
    try:
        with self.facade.writer() as session:
            session.add(resource_type)
    except exception.DBDuplicateEntry:
        raise indexer.ResourceTypeAlreadyExists(name)
    # Create the backing tables outside the row-insert transaction.
    with self.facade.writer_connection() as connection:
        self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type,
                                                          connection)
    return resource_type
def get_resource_type(self, name):
    """Return the resource type named `name`.

    :raises indexer.NoSuchResourceType: if it does not exist
    """
    with self.facade.independent_reader() as session:
        return self._get_resource_type(session, name)
def _get_resource_type(self, session, name):
    # Fetch by primary key within the caller-provided session.
    resource_type = session.query(ResourceType).get(name)
    if not resource_type:
        raise indexer.NoSuchResourceType(name)
    return resource_type
def list_resource_types(self):
    """Return every resource type, sorted by name ascending."""
    with self.facade.independent_reader() as session:
        query = session.query(ResourceType)
        query = query.order_by(ResourceType.name.asc())
        return list(query.all())
def delete_resource_type(self, name):
    """Delete a resource type and drop its tables.

    :raises indexer.ResourceTypeInUse: if resources still reference it
        (or if it is a legacy entry-point type)
    :raises indexer.NoSuchResourceType: if it does not exist
    """
    # FIXME(sileht) this type has special handling
    # until we remove this special thing we reject its deletion
    if self._RESOURCE_TYPE_MANAGER.is_legacy(name):
        raise indexer.ResourceTypeInUse(name)
    try:
        with self.facade.writer() as session:
            resource_type = self._get_resource_type(session, name)
            session.delete(resource_type)
    except exception.DBReferenceError as e:
        # A FK violation from resource/resource_history means the type
        # is still referenced; anything else is unexpected.
        if (e.constraint in [
                'fk_resource_resource_type_name',
                'fk_resource_history_resource_type_name']):
            raise indexer.ResourceTypeInUse(name)
        raise
    # Row deletion committed; now drop the per-type tables.
    with self.facade.writer_connection() as connection:
        self._RESOURCE_TYPE_MANAGER.unmap_and_delete_tables(resource_type,
                                                            connection)
def _resource_type_to_classes(self, session, name):
    """Return the ORM 'resource'/'history' classes for a type name."""
    resource_type = self._get_resource_type(session, name)
    return self._RESOURCE_TYPE_MANAGER.get_classes(resource_type)
def list_archive_policies(self):
with self.facade.independent_reader() as session:
@ -264,13 +416,14 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
user_id=None, project_id=None,
started_at=None, ended_at=None, metrics=None,
**kwargs):
resource_cls = self._resource_type_to_class(resource_type)
if (started_at is not None
and ended_at is not None
and started_at > ended_at):
raise ValueError(
"Start timestamp cannot be after end timestamp")
with self.facade.writer() as session:
resource_cls = self._resource_type_to_classes(
session, resource_type)['resource']
r = resource_cls(
id=id,
type=resource_type,
@ -298,16 +451,17 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
return r
@oslo_db.api.retry_on_deadlock
@oslo_db.api.wrap_db_retry(retry_on_deadlock=True)
def update_resource(self, resource_type,
resource_id, ended_at=_marker, metrics=_marker,
append_metrics=False,
create_revision=True,
**kwargs):
resource_cls = self._resource_type_to_class(resource_type)
resource_history_cls = self._resource_type_to_class(resource_type,
"history")
with self.facade.writer() as session:
classes = self._resource_type_to_classes(session, resource_type)
resource_cls = classes["resource"]
resource_history_cls = classes["history"]
try:
# NOTE(sileht): We use FOR UPDATE that is not galera friendly,
# but they are no other way to cleanly patch a resource and
@ -420,8 +574,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
raise indexer.NoSuchResource(resource_id)
def get_resource(self, resource_type, resource_id, with_metrics=False):
resource_cls = self._resource_type_to_class(resource_type)
with self.facade.independent_reader() as session:
resource_cls = self._resource_type_to_classes(
session, resource_type)['resource']
q = session.query(
resource_cls).filter(
resource_cls.id == resource_id)
@ -429,9 +584,10 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
q = q.options(sqlalchemy.orm.joinedload('metrics'))
return q.first()
def _get_history_result_mapper(self, resource_type):
resource_cls = self._resource_type_to_class(resource_type)
history_cls = self._resource_type_to_class(resource_type, 'history')
def _get_history_result_mapper(self, session, resource_type):
classes = self._resource_type_to_classes(session, resource_type)
resource_cls = classes['resource']
history_cls = classes['history']
resource_cols = {}
history_cols = {}
@ -468,6 +624,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
return Result
@oslo_db.api.wrap_db_retry(retry_on_deadlock=True)
def list_resources(self, resource_type='generic',
attribute_filter=None,
details=False,
@ -477,12 +634,14 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
sorts=None):
sorts = sorts or []
if history:
target_cls = self._get_history_result_mapper(resource_type)
else:
target_cls = self._resource_type_to_class(resource_type)
with self.facade.independent_reader() as session:
if history:
target_cls = self._get_history_result_mapper(
session, resource_type)
else:
target_cls = self._resource_type_to_classes(
session, resource_type)["resource"]
q = session.query(target_cls)
if attribute_filter:
@ -546,12 +705,13 @@ class SQLAlchemyIndexer(indexer.IndexerDriver):
all_resources.extend(resources)
else:
if is_history:
target_cls = self._resource_type_to_class(
type, "history")
target_cls = self._resource_type_to_classes(
session, type)['history']
f = target_cls.revision.in_(
[r.revision for r in resources])
else:
target_cls = self._resource_type_to_class(type)
target_cls = self._resource_type_to_classes(
session, type)["resource"]
f = target_cls.id.in_([r.id for r in resources])
q = session.query(target_cls).filter(f)

View File

@ -199,14 +199,22 @@ class Metric(Base, GnocchiBase, storage.Metric):
__hash__ = storage.Metric.__hash__
class ResourceType(Base, GnocchiBase):
class ResourceType(Base, GnocchiBase, indexer.ResourceType):
__tablename__ = 'resource_type'
__table_args__ = (
sqlalchemy.UniqueConstraint("tablename",
name="uniq_resource_type0tablename"),
COMMON_TABLES_ARGS,
)
name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True,
nullable=False)
tablename = sqlalchemy.Column(sqlalchemy.String(18), nullable=False)
def jsonify(self):
d = dict(self)
del d['tablename']
return d
class ResourceJsonifier(indexer.Resource):
@ -232,7 +240,7 @@ class ResourceMixin(ResourceJsonifier):
sqlalchemy.String(255),
sqlalchemy.ForeignKey('resource_type.name',
ondelete="RESTRICT",
name="fk_%s_type_resource_type_name" %
name="fk_%s_resource_type_name" %
cls.__tablename__),
nullable=False)
@ -315,8 +323,12 @@ class ResourceExtMixin(object):
sqlalchemy.ForeignKey(
'resource.id',
ondelete="CASCADE",
name="fk_%s_id_resource_id" % cls.__tablename__),
primary_key=True)
name="fk_%s_id_resource_id" % cls.__tablename__,
# NOTE(sileht): We use to ensure that postgresql
# does not use AccessExclusiveLock on destination table
use_alter=True),
primary_key=True
)
class ResourceHistoryExtMixin(object):
@ -332,8 +344,12 @@ class ResourceHistoryExtMixin(object):
'resource_history.revision',
ondelete="CASCADE",
name="fk_%s_revision_resource_history_revision"
% cls.__tablename__),
primary_key=True)
% cls.__tablename__,
# NOTE(sileht): We use to ensure that postgresql
# does not use AccessExclusiveLock on destination table
use_alter=True),
primary_key=True
)
class ArchivePolicyRule(Base, GnocchiBase):

View File

@ -806,6 +806,67 @@ def etag_set_headers(obj):
pecan.response.last_modified = obj.lastmodified
class ResourceTypeController(rest.RestController):
    """REST controller for one resource type (/v1/resource_type/<name>)."""

    def __init__(self, name):
        # Name of the resource type this controller instance serves.
        self._name = name

    @pecan.expose('json')
    def get(self):
        """Return the resource type, or 404 if it does not exist."""
        try:
            resource_type = pecan.request.indexer.get_resource_type(self._name)
        except indexer.NoSuchResourceType as e:
            abort(404, e)
        enforce("get resource type", resource_type)
        return resource_type

    @pecan.expose()
    def delete(self):
        """Delete the resource type; 400 if it is still in use."""
        try:
            resource_type = pecan.request.indexer.get_resource_type(self._name)
        except indexer.NoSuchResourceType as e:
            abort(404, e)
        enforce("delete resource type", resource_type)
        try:
            pecan.request.indexer.delete_resource_type(self._name)
        except (indexer.NoSuchResourceType,
                indexer.ResourceTypeInUse) as e:
            abort(400, e)
def ResourceTypeSchema(definition):
    """Validate a resource type creation payload."""
    # FIXME(sileht): Add resource type attributes from the indexer
    schema = voluptuous.Schema({
        "name": six.text_type,
    })
    return schema(definition)
class ResourceTypesController(rest.RestController):
    """REST controller for the resource type collection (/v1/resource_type)."""

    @pecan.expose()
    def _lookup(self, name, *remainder):
        # Route /v1/resource_type/<name> to the per-type controller.
        return ResourceTypeController(name), remainder

    @pecan.expose('json')
    def post(self):
        """Create a resource type; 201 + Location header on success."""
        body = deserialize_and_validate(ResourceTypeSchema)
        enforce("create resource type", body)
        try:
            resource_type = pecan.request.indexer.create_resource_type(**body)
        except indexer.ResourceTypeAlreadyExists as e:
            abort(409, e)
        set_resp_location_hdr("/resource_type/" + resource_type.name)
        pecan.response.status = 201
        return resource_type

    @pecan.expose('json')
    def get_all(self, **kwargs):
        """List all resource types."""
        enforce("list resource type", {})
        try:
            return pecan.request.indexer.list_resource_types()
        except indexer.IndexerException as e:
            abort(400, e)
def ResourceSchema(schema):
base_schema = {
voluptuous.Optional('started_at'): Timestamp,
@ -951,7 +1012,12 @@ RESOURCE_SCHEMA_MANAGER = extension.ExtensionManager(
def schema_for(resource_type):
return RESOURCE_SCHEMA_MANAGER[resource_type].plugin
if resource_type in RESOURCE_SCHEMA_MANAGER:
# TODO(sileht): Remove this legacy resource schema loading
return RESOURCE_SCHEMA_MANAGER[resource_type].plugin
else:
# TODO(sileht): Load schema from indexer
return GenericSchema
def ResourceID(value):
@ -1029,16 +1095,17 @@ class ResourcesByTypeController(rest.RestController):
@pecan.expose('json')
def get_all(self):
return dict(
(ext.name,
pecan.request.application_url + '/resource/' + ext.name)
for ext in RESOURCE_SCHEMA_MANAGER)
(rt.name,
pecan.request.application_url + '/resource/' + rt.name)
for rt in pecan.request.indexer.list_resource_types())
@pecan.expose()
def _lookup(self, resource_type, *remainder):
if resource_type in RESOURCE_SCHEMA_MANAGER:
return ResourcesController(resource_type), remainder
else:
abort(404, indexer.NoSuchResourceType(resource_type))
try:
pecan.request.indexer.get_resource_type(resource_type)
except indexer.NoSuchResourceType as e:
abort(404, e)
return ResourcesController(resource_type), remainder
def _ResourceSearchSchema(v):
@ -1114,10 +1181,11 @@ class SearchResourceTypeController(rest.RestController):
class SearchResourceController(rest.RestController):
@pecan.expose()
def _lookup(self, resource_type, *remainder):
if resource_type in RESOURCE_SCHEMA_MANAGER:
return SearchResourceTypeController(resource_type), remainder
else:
abort(404, indexer.NoSuchResourceType(resource_type))
try:
pecan.request.indexer.get_resource_type(resource_type)
except indexer.NoSuchResourceType as e:
abort(404, e)
return SearchResourceTypeController(resource_type), remainder
def _MetricSearchSchema(v):
@ -1415,6 +1483,7 @@ class V1Controller(object):
"metric": MetricsController(),
"batch": BatchController(),
"resource": ResourcesByTypeController(),
"resource_type": ResourceTypesController(),
"aggregation": AggregationController(),
"capabilities": CapabilityController(),
"status": StatusController(),

View File

@ -54,7 +54,7 @@ tests:
redirects: true
response_json_paths:
$.version: "1.0"
$.links.`len`: 10
$.links.`len`: 11
$.links[0].href: $SCHEME://$NETLOC/v1
$.links[7].href: $SCHEME://$NETLOC/v1/resource

View File

@ -0,0 +1,115 @@
#
# Test the resource type API to achieve coverage of just the
# ResourceTypesController and ResourceTypeController class code.
#
fixtures:
- ConfigFixture
tests:
- name: list resource type
desc: only legacy resource types are present
url: /v1/resource_type
response_json_paths:
$.`len`: 15
- name: post resource type as non-admin
url: /v1/resource_type
method: post
data:
name: my_custom_resource
request_headers:
content-type: application/json
status: 403
- name: post resource type
url: /v1/resource_type
method: post
request_headers:
x-roles: admin
content-type: application/json
data:
name: my_custom_resource
status: 201
response_json_paths:
$.name: my_custom_resource
response_headers:
location: $SCHEME://$NETLOC/v1/resource_type/my_custom_resource
- name: relist resource types
desc: we have a resource type now
url: /v1/resource_type
response_json_paths:
$.`len`: 16
$.[11].name: my_custom_resource
- name: get the custom resource type
url: /v1/resource_type/my_custom_resource
response_json_paths:
$.name: my_custom_resource
- name: delete as non-admin
url: /v1/resource_type/my_custom_resource
method: DELETE
status: 403
- name: post custom resource
url: /v1/resource/my_custom_resource
method: post
request_headers:
x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c
x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea
content-type: application/json
data:
id: d11edfca-4393-4fda-b94d-b05a3a1b3747
status: 201
- name: delete in use resource_type
url: /v1/resource_type/my_custom_resource
method: delete
request_headers:
x-roles: admin
status: 400
response_strings:
- Resource type my_custom_resource is still in use
- name: delete the resource
url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747
request_headers:
x-roles: admin
method: DELETE
status: 204
- name: delete the custom resource type
method: delete
request_headers:
x-roles: admin
url: /v1/resource_type/my_custom_resource
status: 204
- name: delete non-existing custom resource type
method: delete
request_headers:
x-roles: admin
url: /v1/resource_type/my_custom_resource
status: 404
# Can we readd and delete the same resource type again
- name: post resource type again
url: /v1/resource_type
method: post
request_headers:
x-roles: admin
content-type: application/json
data:
name: my_custom_resource
status: 201
- name: delete the custom resource type again
method: delete
request_headers:
x-roles: admin
url: /v1/resource_type/my_custom_resource
status: 204

View File

@ -17,6 +17,7 @@ import abc
import mock
from oslo_db.sqlalchemy import test_migrations
import six
import sqlalchemy
from gnocchi.indexer import sqlalchemy_base
from gnocchi.tests import base
@ -47,3 +48,18 @@ class ModelsMigrationsSync(
# NOTE(jd) Nothing to do here as setUp() in the base class is already
# creating table using upgrade
pass
@staticmethod
def filter_metadata_diff(diff):
    """Strip schema diffs caused by dynamically created resource types."""
    def _is_dynamic(entry):
        # NOTE(sileht): skip resource types created dynamically
        item = entry[1]
        return ((isinstance(item, sqlalchemy.Table)
                 and item.name.startswith("rt_"))
                or (isinstance(item, sqlalchemy.Index)
                    and item.name.startswith("ix_rt_")))
    return [entry for entry in diff if not _is_dynamic(entry)]

View File

@ -972,3 +972,44 @@ class TestIndexerDriver(tests_base.TestCase):
self.index.delete_metric(e1)
metrics = self.index.list_metrics()
self.assertNotIn(e1, [m.id for m in metrics])
def test_resource_type_crud(self):
    """Exercise create/get/list/delete of a resource type via the indexer."""
    # Create
    self.index.create_resource_type("indexer_test")
    self.assertRaises(indexer.ResourceTypeAlreadyExists,
                      self.index.create_resource_type,
                      "indexer_test")
    # Get and List
    rtype = self.index.get_resource_type("indexer_test")
    self.assertEqual("indexer_test", rtype.name)
    rtypes = self.index.list_resource_types()
    for rtype in rtypes:
        if rtype.name == "indexer_test":
            break
    else:
        self.fail("indexer_test not found")
    # Test resource itself
    rid = uuid.uuid4()
    self.index.create_resource("indexer_test", rid,
                               str(uuid.uuid4()),
                               str(uuid.uuid4()))
    r = self.index.get_resource("indexer_test", rid)
    self.assertEqual("indexer_test", r.type)
    # Deletion: rejected while a resource still references the type.
    self.assertRaises(indexer.ResourceTypeInUse,
                      self.index.delete_resource_type,
                      "indexer_test")
    self.index.delete_resource(rid)
    self.index.delete_resource_type("indexer_test")
    # Ensure it's deleted
    self.assertRaises(indexer.NoSuchResourceType,
                      self.index.get_resource_type,
                      "indexer_test")
    self.assertRaises(indexer.NoSuchResourceType,
                      self.index.delete_resource_type,
                      "indexer_test")