Collecting OSWL

DB model created.
JSON schema for API created.
Blueprint URLs moved to the app.
Insert and update for lists of data generalized.
SQL logging enabled for test mode.

Implements: blueprint openstack-workload-statistics
Change-Id: If74e4bea0bc9c6a28ce859d775d1b33f36b547a1
This commit is contained in:
Alexander Kislitsky 2015-01-29 13:29:12 +03:00
parent 429a87a388
commit 5321a4e4b1
14 changed files with 673 additions and 20 deletions

View File

@ -35,11 +35,14 @@ db = flask_sqlalchemy.SQLAlchemy(app)
# Blueprints are imported after the app/db are created to avoid
# circular imports.
from collector.api.resources.action_logs import bp as action_logs_bp
from collector.api.resources.installation_structure import \
    bp as installation_structure_bp
from collector.api.resources.oswl import bp as oswl_stats_bp
from collector.api.resources.ping import bp as ping_bp

# URL prefixes are assigned centrally at registration time (instead of
# inside each blueprint), so the API version lives in a single place.
# NOTE(review): removed stale pre-change registrations that registered
# the same blueprints twice without a prefix.
app.register_blueprint(installation_structure_bp,
                       url_prefix='/api/v1/installation_structure')
app.register_blueprint(action_logs_bp, url_prefix='/api/v1/action_logs')
app.register_blueprint(ping_bp, url_prefix='/api/v1/ping')
app.register_blueprint(oswl_stats_bp, url_prefix='/api/v1/oswl_stats')
# Registering error handlers

View File

@ -27,3 +27,21 @@ ACTION_LOG_STATUSES = make_enum(
'existed',
'failed'
)
# Per-record processing statuses reported back to the client for each
# submitted OSWL (OpenStack workload) record.
OSWL_STATUSES = make_enum(
'added',
'updated',
'failed'
)
# Resource kinds an OSWL record may describe; must stay in sync with the
# 'oswl_resource_type' DB enum created in the migration.
OSWL_RESOURCE_TYPES = make_enum(
'vm',
'tenant',
'volume',
'security_group',
'keystone_user',
'flavor',
'cluster_stats'
)

View File

@ -18,6 +18,9 @@ from flask import jsonify
from functools import wraps
import jsonschema
import math
import six
from sqlalchemy import and_
from sqlalchemy import or_
from collector.api.app import db
@ -88,6 +91,11 @@ def db_transaction(fn):
def split_collection(collection, chunk_size=1000):
    """Splits collection into chunks of at most chunk_size items.

    :param collection: input collection (must support len() and slicing)
    :param chunk_size: maximum number of items in a single chunk
    :return: generator yielding consecutive slices of the collection
    """
    chunks_num = int(math.ceil(float(len(collection)) / chunk_size))
    # range (not xrange) keeps the function working on both py2 and py3
    for i in range(chunks_num):
        start = i * chunk_size
        end = start + chunk_size
        yield collection[start:end]
def build_index(coll, *fields):
    """Builds a lookup dict from a collection.

    Keys are tuples of the values of ``fields`` taken from each item;
    items may be dicts (indexed by key) or objects (indexed by
    attribute). If several items share the same key tuple, the last
    one wins.

    :param coll: collection of dicts or objects
    :param fields: field names used to build the key tuple
    :return: dict mapping key tuples to collection items
    """
    index = {}
    for item in coll:
        if isinstance(item, dict):
            key = tuple(item[f] for f in fields)
        else:
            key = tuple(getattr(item, f) for f in fields)
        index[key] = item
    return index
def get_index(target, *fields):
    """Gets the index key of a single target.

    :param target: dict or object to compute the key for
    :param fields: field names the key is built from
    :return: tuple of the values of ``fields`` taken from target
    """
    if isinstance(target, dict):
        return tuple(target[field_name] for field_name in fields)
    else:
        return tuple(getattr(target, field_name) for field_name in fields)
def get_existed_objects_query(dicts, dict_to_obj_fields_mapping, model_class):
    """Generates an SQL query filtering objects already present in the DB.

    For every input dict an AND clause over the mapped index fields is
    built; the clauses are OR-ed together, so the query matches exactly
    the stored objects corresponding to the input dicts.

    :param dicts: list of dicts
    :param dict_to_obj_fields_mapping: pairs of
        ('dict_field_name', 'obj_field_name') used for matching
    :param model_class: DB model
    :return: SQLAlchemy query over model_class
    """
    dict_index_fields, obj_index_fields = zip(*dict_to_obj_fields_mapping)
    clauses = []
    for d in dicts:
        clause = []
        for dict_field_name, obj_field_name in zip(dict_index_fields,
                                                   obj_index_fields):
            clause.append(
                getattr(model_class, obj_field_name) == d[dict_field_name]
            )
        clauses.append(and_(*clause))
    return db.session.query(model_class).filter(or_(*clauses))
def split_new_dicts_and_updated_objs(dicts, dict_to_obj_fields_mapping,
                                     model_class):
    """Separates new data from updates of already stored objects.

    Existing DB objects get their non-index attributes overwritten from
    the matching input dict; dicts with no stored counterpart are
    returned with their index fields renamed to the DB field names,
    ready for bulk insertion.

    :param dicts: list of dicts for processing
    :param dict_to_obj_fields_mapping: pairs of
        ('dict_field_name', 'obj_field_name') used for matching
    :param model_class: DB model
    :return: (list of dicts for new objects, list of updated objects)
    """
    # The mapping is traversed twice (here and in the query builder), so
    # materialize it in case an iterator (e.g. py3 zip) was passed in.
    dict_to_obj_fields_mapping = list(dict_to_obj_fields_mapping)
    dict_index_fields, obj_index_fields = zip(*dict_to_obj_fields_mapping)
    # Fetching existed objects
    existed_objs = get_existed_objects_query(
        dicts, dict_to_obj_fields_mapping, model_class).all()
    existed_objs_idx = build_index(existed_objs, *obj_index_fields)
    new_dicts = []
    for d in dicts:
        obj_idx = get_index(d, *dict_index_fields)
        if obj_idx in existed_objs_idx:
            # Updating existed object in place
            obj = existed_objs_idx[obj_idx]
            for k, v in six.iteritems(d):
                # Index fields already match by construction and must
                # not be written through: e.g. the dict's 'id' is the
                # external id and would clobber the DB primary key.
                if k not in dict_index_fields:
                    setattr(obj, k, v)
        else:
            # Preparing new object data for saving: rename index fields
            # to their DB counterparts.
            d_copy = d.copy()
            for dict_field, obj_field in zip(dict_index_fields,
                                             obj_index_fields):
                d_copy[obj_field] = d_copy.pop(dict_field)
            new_dicts.append(d_copy)
    return new_dicts, existed_objs
def bulk_insert(dicts, model_class):
    """Inserts a collection of dicts into the model's table at once.

    :param dicts: list of dicts with column values; empty input is a no-op
    :param model_class: DB model whose table is the insert target
    """
    if dicts:
        db.session.execute(model_class.__table__.insert(dicts))

View File

@ -38,3 +38,4 @@ class Testing(Production):
LOG_FILES_COUNT = 5
SQLALCHEMY_DATABASE_URI = \
'postgresql://collector:collector@localhost/collector'
# Echo SQL statements in test mode to simplify debugging of DB queries.
SQLALCHEMY_ECHO = True

View File

@ -0,0 +1,75 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OSWL stats DB structure
Revision ID: 4da2da15ec42
Revises: 558f628a238
Create Date: 2015-01-27 17:41:50.594143
"""
# revision identifiers, used by Alembic.
revision = '4da2da15ec42'
down_revision = '558f628a238'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# Enum type shared by upgrade (column definition) and downgrade
# (explicit type drop): PostgreSQL keeps enum types alive even after
# the table that used them is dropped.
resource_type = sa.Enum('vm', 'tenant', 'volume', 'security_group',
'keystone_user', 'flavor', 'cluster_stats',
name='oswl_resource_type')
def upgrade():
    """Creates the oswl_stats table and its indexes."""
    # NOTE(review): removed a stray trailing comma after create_table()
    # that turned the statement into a useless one-element tuple.
    op.create_table(
        'oswl_stats',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('master_node_uid', sa.Text(), nullable=False),
        sa.Column('external_id', sa.Integer(), nullable=False),
        sa.Column('cluster_id', sa.Integer(), nullable=False),
        sa.Column('created_date', sa.Date(), nullable=False),
        sa.Column('updated_time', sa.Time(), nullable=False),
        sa.Column('resource_type', resource_type, nullable=False),
        sa.Column('resource_data', postgresql.JSON(), nullable=True),
        sa.Column('resource_checksum', sa.Text(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('master_node_uid', 'external_id')
    )
    op.create_index(op.f('ix_oswl_stats_created_date'), 'oswl_stats',
                    ['created_date'], unique=False)
    op.create_index(op.f('ix_oswl_stats_external_id'), 'oswl_stats',
                    ['external_id'], unique=False)
    op.create_index(op.f('ix_oswl_stats_master_node_uid'), 'oswl_stats',
                    ['master_node_uid'], unique=False)
    op.create_index(op.f('ix_oswl_stats_resource_type'), 'oswl_stats',
                    ['resource_type'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drops the oswl_stats table, its indexes and the enum type."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_oswl_stats_resource_type'),
                  table_name='oswl_stats')
    op.drop_index(op.f('ix_oswl_stats_master_node_uid'),
                  table_name='oswl_stats')
    op.drop_index(op.f('ix_oswl_stats_external_id'),
                  table_name='oswl_stats')
    op.drop_index(op.f('ix_oswl_stats_created_date'),
                  table_name='oswl_stats')
    op.drop_table('oswl_stats')
    # drop_table does not remove the PostgreSQL enum type; drop it
    # explicitly so a later upgrade can recreate it cleanly.
    resource_type.drop(bind=op.get_bind(), checkfirst=False)
    ### end Alembic commands ###

View File

@ -1,4 +1,4 @@
# Copyright 2014 Mirantis, Inc.
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -12,7 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.dialects.postgresql import JSON
from collector.api.app import db
from collector.api.common import consts
class ActionLog(db.Model):
@ -38,3 +41,24 @@ class InstallationStructure(db.Model):
structure = db.Column(db.Text)
creation_date = db.Column(db.DateTime)
modification_date = db.Column(db.DateTime)
class OpenStackWorkloadStats(db.Model):
    """OpenStack workload (OSWL) statistics reported by master nodes.

    A record is uniquely identified by the pair
    (master_node_uid, external_id), where external_id is the id the
    record had on the reporting master node.
    """

    __tablename__ = 'oswl_stats'
    __table_args__ = (
        db.UniqueConstraint('master_node_uid', 'external_id'),
    )
    id = db.Column(db.Integer, primary_key=True)
    # UID of the master node that reported the record
    master_node_uid = db.Column(db.Text, nullable=False, index=True)
    # Id of the record on the reporting master node
    external_id = db.Column(db.Integer, nullable=False, index=True)
    cluster_id = db.Column(db.Integer, nullable=False)
    created_date = db.Column(db.Date, nullable=False, index=True)
    updated_time = db.Column(db.Time, nullable=False)
    resource_type = db.Column(
        db.Enum(*consts.OSWL_RESOURCE_TYPES, name='oswl_resource_type'),
        nullable=False,
        index=True
    )
    # JSON with 'added'/'current'/'removed'/'modified' resource info
    # (see the 'oswl' request schema)
    resource_data = db.Column(JSON, nullable=True)
    resource_checksum = db.Column(db.Text, nullable=False)

View File

@ -20,7 +20,7 @@ import six
from sqlalchemy import and_
from sqlalchemy import or_
# URL prefix is assigned centrally in collector.api.app at registration
# time, so the blueprint is created without one.
bp = Blueprint('action_logs', __name__)
from collector.api.app import app
from collector.api.app import db

View File

@ -18,8 +18,7 @@ from flask import json
from flask import request
from flask_jsonschema import validate as validate_request
# URL prefix is assigned centrally in collector.api.app at registration
# time, so the blueprint is created without one.
bp = Blueprint('installation_structure', __name__)
from collector.api.app import app
from collector.api.app import db

View File

@ -0,0 +1,97 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from flask import Blueprint
from flask import request
from flask_jsonschema import validate as validate_request
bp = Blueprint('oswl_stats', __name__)
from collector.api.app import app
from collector.api.app import db
from collector.api.common import consts
from collector.api.common import util
from collector.api.common.util import exec_time
from collector.api.common.util import handle_response
from collector.api.db.model import OpenStackWorkloadStats
@bp.route('/', methods=['POST'])
@validate_request('oswl', 'request')
@handle_response('oswl', 'response')
@exec_time
def post():
    """Handles an OSWL stats POST request.

    Splits the incoming records into chunks and, for every chunk,
    separates new records from updates of already stored ones. Each
    chunk is committed in its own transaction, so a failure affects only
    that chunk; a per-record status ('added', 'updated' or 'failed') is
    reported back to the client.
    """
    app.logger.debug("Handling oswl post request: %s", request.json)
    oswls = request.json['oswl_stats']
    oswls_resp = []
    dict_idx_names = ('master_node_uid', 'id')
    obj_idx_names = ('master_node_uid', 'external_id')
    # Materialized into a list: the mapping is reused for every chunk,
    # and under py3 a bare zip() iterator would be exhausted by the
    # first iteration.
    dict_to_obj_fields_mapping = list(zip(dict_idx_names, obj_idx_names))
    for chunk in util.split_collection(oswls):
        app.logger.debug("Processing oswls chunk of size: %d", len(chunk))
        dicts_new, objs_updated = \
            util.split_new_dicts_and_updated_objs(
                chunk, dict_to_obj_fields_mapping, OpenStackWorkloadStats)
        try:
            app.logger.debug("Bulk insert of oswls started")
            util.bulk_insert(dicts_new, OpenStackWorkloadStats)
            app.logger.debug("Bulk insert of oswls finished")
            db.session.commit()
            oswls_resp.extend(generate_success_response(dicts_new,
                                                        objs_updated))
            app.logger.debug("Oswls chunk of size: %d is processed",
                             len(chunk))
        except Exception:
            app.logger.exception("Oswls chunk of size: %d processing failed",
                                 len(chunk))
            db.session.rollback()
            oswls_resp.extend(generate_error_response(dicts_new,
                                                      objs_updated))
    return 200, {'status': 'ok', 'oswl_stats': oswls_resp}
def generate_success_response(dicts_new, objs_updated):
    """Builds per-record response entries for successfully saved oswls.

    :param dicts_new: dicts of newly inserted records
    :param objs_updated: updated OpenStackWorkloadStats objects
    :return: list of dicts with master_node_uid, external id and status
    """
    oswls_resp = []
    for d in dicts_new:
        oswls_resp.append({
            'master_node_uid': d['master_node_uid'],
            # Responses expose the id the record had on the master node
            'id': d['external_id'],
            'status': consts.OSWL_STATUSES.added
        })
    for o in objs_updated:
        oswls_resp.append({
            'master_node_uid': o.master_node_uid,
            'id': o.external_id,
            'status': consts.OSWL_STATUSES.updated
        })
    return oswls_resp
def generate_error_response(dicts_new, objs_updated):
    """Builds per-record response entries for a failed oswls chunk.

    Both the new and the updated records of the failed (rolled back)
    chunk are reported with the 'failed' status.

    :param dicts_new: dicts of records that were to be inserted
    :param objs_updated: OpenStackWorkloadStats objects that were to be
        updated
    :return: list of dicts with master_node_uid, external id and status
    """
    oswls_resp = []
    for d in dicts_new:
        oswls_resp.append({
            'master_node_uid': d['master_node_uid'],
            'id': d['external_id'],
            'status': consts.OSWL_STATUSES.failed
        })
    for o in objs_updated:
        oswls_resp.append({
            'master_node_uid': o.master_node_uid,
            'id': o.external_id,
            'status': consts.OSWL_STATUSES.failed
        })
    return oswls_resp

View File

@ -16,7 +16,7 @@ from flask import Blueprint
from flask import request
from flask_jsonschema import validate as validate_request
# URL prefix is assigned centrally in collector.api.app at registration
# time, so the blueprint is created without one.
bp = Blueprint('ping', __name__)
from collector.api.app import app
from collector.api.common.util import exec_time

View File

@ -0,0 +1,88 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"request": {
"type": "object",
"properties": {
"oswl_stats": {
"type": "array",
"items": {
"type": "object",
"properties": {
"master_node_uid": {"type": "string"},
"id": {"type": "integer"},
"cluster_id": {"type": "integer"},
"created_date": {
"type": "string",
"format": "date"
},
"updated_time": {
"type": "string",
"format": "time"
},
"resource_type": {"type": "string"},
"resource_checksum": {"type": "string"},
"resource_data": {
"type": "object",
"properties": {
"added": {"type": "object"},
"current": {"type": "array"},
"removed": {"type": "object"},
"modified": {"type": "object"}
},
"required": ["added", "current", "removed", "modified"],
"additionalProperties": false
}
},
"required": ["master_node_uid", "id", "cluster_id",
"created_date", "updated_time", "resource_checksum",
"resource_data"],
"additionalProperties": false
}
}
},
"required": ["oswl_stats"],
"additionalProperties": false
},
"response": {
"oneOf": [
{
"type": "object",
"properties": {
"status": {"enum": ["ok"]},
"message": {"type": "string"},
"exec_time": {"type": "number"},
"oswl_stats": {
"type": "array",
"items": {
"type": "object",
"properties": {
"master_node_uid": {"type": "string"},
"id": {"type": "integer"},
"status": {"enum": ["added", "updated", "failed"]}
},
"required": ["master_node_uid", "id", "status"],
"additionalProperties": false
}
}
},
"required": ["status", "oswl_stats"],
"additionalProperties": false
},
{
"type": "object",
"properties": {
"status": {"enum": ["error"]},
"exec_time": {"type": "number"},
"message": {"type": "string"}
},
"required": ["status", "message"],
"additionalProperties": false
}
]
}
}

View File

@ -78,7 +78,7 @@ class DbTest(BaseTest):
'..', 'api', 'db', 'migrations')
with app.app_context():
try:
flask_migrate.downgrade(directory=directory)
flask_migrate.downgrade(directory=directory, revision="-2")
except CommandError:
# Workaround for the first migration
pass

View File

@ -15,9 +15,17 @@
from collector.test.base import BaseTest
from collector.api.common.util import build_index
from collector.api.common.util import get_index
from collector.api.common.util import split_collection
class Indexed(object):
    """Helper exposing arbitrary keyword arguments as attributes.

    Used in tests to verify indexing of plain objects (as opposed to
    dicts).
    """

    def __init__(self, **kwds):
        self.__dict__.update(kwds)
class TestUtil(BaseTest):
def test_split_collection(self):
@ -39,17 +47,29 @@ class TestUtil(BaseTest):
coll = [
{'id': 1, 'cd': 2, 'msg': 'm'},
{'id': 1, 'cd': 2, 'msg': 'm'},
{'id': 1, 'cd': 3, 'msg': 'm'},
{'id': 2, 'cd': 4, 'msg': 'm'}
Indexed(**{'id': 1, 'cd': 3, 'msg': 'm'}),
Indexed(**{'id': 2, 'cd': 4, 'msg': 'm'})
]
index = build_index(coll, 'id')
self.assertEquals(2, len(index))
self.assertDictEqual(coll[2], index[(1,)])
self.assertDictEqual(coll[3], index[(2,)])
self.assertEqual(2, len(index))
self.assertEqual(coll[2], index[(1,)])
self.assertEqual(coll[3], index[(2,)])
index = build_index(coll, 'id', 'cd')
self.assertEquals(3, len(index))
self.assertEqual(3, len(index))
self.assertDictEqual(coll[1], index[(1, 2)])
self.assertDictEqual(coll[2], index[(1, 3)])
self.assertDictEqual(coll[3], index[(2, 4)])
self.assertEqual(coll[2], index[(1, 3)])
self.assertEqual(coll[3], index[(2, 4)])
def test_get_index(self):
    """Checks index-tuple extraction from objects for various field sets."""
    checks = [
        (Indexed(**{'one': 1, 'two': 2}), ('one', ), (1,)),
        (Indexed(**{'one': 1, 'two': 2}), ('one', 'two'), (1, 2)),
        # Empty field set yields an empty key tuple
        (Indexed(**{'one': 1, 'two': 2}), (), ()),
    ]
    for obj, fields, expected_idx in checks:
        self.assertTupleEqual(
            expected_idx,
            get_index(obj, *fields)
        )

View File

@ -0,0 +1,237 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from flask import json
import random
from collector.test.base import DbTest
from collector.api.app import db
from collector.api.common import consts
from collector.api.common import util
from collector.api.db.model import OpenStackWorkloadStats
class TestOswlStats(DbTest):
def test_not_allowed_methods(self):
resp = self.get('/api/v1/oswl_stats/', None)
self.check_response_error(resp, 405)
resp = self.delete('/api/v1/oswl_stats/')
self.check_response_error(resp, 405)
resp = self.patch('/api/v1/oswl_stats/', None)
self.check_response_error(resp, 405)
resp = self.put('/api/v1/oswl_stats/', None)
self.check_response_error(resp, 405)
def test_validation_error(self):
oswls_sets = [
[{'master_node_uid': 'x', 'cluster_id': None}],
[{'master_node_uid': 'x', 'cluster_id': 1, 'id': None}]
]
for oswls in oswls_sets:
resp = self.post(
'/api/v1/oswl_stats/',
{'oswl_stats': oswls}
)
self.check_response_error(resp, code=400)
def test_validation(self):
oswls_sets = [[
{
'master_node_uid': 'x',
'cluster_id': 1,
'id': 2,
'resource_type': consts.OSWL_RESOURCE_TYPES.vm,
'resource_checksum': 'xx',
'created_date': datetime.utcnow().date().isoformat(),
'updated_time': datetime.utcnow().time().isoformat(),
'resource_data': {
'added': {},
'current': [],
'removed': {},
'modified': {}
}
},
{
'master_node_uid': 'x',
'cluster_id': 1,
'id': 3,
'resource_type': consts.OSWL_RESOURCE_TYPES.vm,
'resource_checksum': 'xx',
'created_date': datetime.utcnow().date().isoformat(),
'updated_time': datetime.utcnow().time().isoformat(),
'resource_data': {
'added': {1: {'time': 343434343}},
'current': [{'id': 'xxx', 'status': 'down'}],
'removed': {},
'modified': {}
}
}
]]
for oswls in oswls_sets:
resp = self.post(
'/api/v1/oswl_stats/',
{'oswl_stats': oswls}
)
self.check_response_ok(resp)
def test_empty_oswl_post(self):
resp = self.post('/api/v1/oswl_stats/', {'oswl_stats': []})
self.check_response_ok(resp)
def generate_dumb_oswls(self, oswls_num):
    """Generates oswls_num synthetic oswl dicts for POST tests.

    :param oswls_num: number of records to generate
    :return: list of oswl dicts matching the 'oswl' request schema
    """
    return [
        {
            'master_node_uid': 'x',
            'cluster_id': i,
            'id': i,
            'created_date': datetime.utcnow().date().isoformat(),
            'updated_time': datetime.utcnow().time().isoformat(),
            'resource_type': random.choice(consts.OSWL_RESOURCE_TYPES),
            'resource_checksum': 'xx',
            'resource_data': {
                'added': {},
                'current': [],
                'removed': {},
                'modified': {}
            }
        }
        # range (not xrange) keeps the helper working on both py2 and py3
        for i in range(oswls_num)]
def test_existed_oswls_filtering(self):
oswls_num = 10
dicts = self.generate_dumb_oswls(oswls_num)
dict_index_fields = ('master_node_uid', 'id')
obj_index_fields = ('master_node_uid', 'external_id')
query = util.get_existed_objects_query(
dicts,
zip(dict_index_fields, obj_index_fields),
OpenStackWorkloadStats
)
# We have no objects for update
self.assertEqual(0, query.count())
save_oswls = 6
for d in dicts[:save_oswls]:
copy_d = d.copy()
copy_d['external_id'] = copy_d.pop('id')
db.session.add(OpenStackWorkloadStats(**copy_d))
db.session.flush()
db.session.commit()
# We have objects for update
query = util.get_existed_objects_query(
dicts,
zip(dict_index_fields, obj_index_fields),
OpenStackWorkloadStats
)
self.assertEqual(save_oswls, query.count())
def test_oswls_bulk_insert(self):
oswls_num = 10
dicts = self.generate_dumb_oswls(oswls_num)
dict_index_fields = ('master_node_uid', 'id')
obj_index_fields = ('master_node_uid', 'external_id')
dicts_new, _ = util.split_new_dicts_and_updated_objs(
dicts,
zip(dict_index_fields, obj_index_fields),
OpenStackWorkloadStats
)
util.bulk_insert(dicts_new, OpenStackWorkloadStats)
db.session.commit()
def test_oswls_empty_bulk_insert(self):
util.bulk_insert([], OpenStackWorkloadStats)
db.session.commit()
def test_oswls_split_new_dicts_and_updated_objs(self):
oswls_num = 10
dicts = self.generate_dumb_oswls(oswls_num)
dict_index_fields = ('master_node_uid', 'id')
obj_index_fields = ('master_node_uid', 'external_id')
dicts_new, objs_updated = util.split_new_dicts_and_updated_objs(
dicts,
zip(dict_index_fields, obj_index_fields),
OpenStackWorkloadStats
)
# We have no objects for update
self.assertListEqual([], objs_updated)
self.assertEqual(oswls_num, len(dicts_new))
# Saving part of oswls
oswls_to_save = 3
util.bulk_insert(dicts_new[:oswls_to_save], OpenStackWorkloadStats)
db.session.commit()
# Adding changes into dicts
new_cs = 'new_{}'.format(dicts[0]['resource_checksum'])
dicts[0]['resource_checksum'] = new_cs
# Checking new dicts and updated objects
dicts_new, objs_updated = util.split_new_dicts_and_updated_objs(
dicts,
zip(dict_index_fields, obj_index_fields),
OpenStackWorkloadStats
)
self.assertEqual(oswls_num - oswls_to_save, len(dicts_new))
self.assertEqual(oswls_to_save, len(objs_updated))
# Checking new checksum value in the updated object
self.assertEqual(new_cs, objs_updated[0].resource_checksum)
def test_post(self):
oswls_num = 20
expected_oswls = self.generate_dumb_oswls(oswls_num)
resp = self.post(
'/api/v1/oswl_stats/',
{'oswl_stats': expected_oswls}
)
self.check_response_ok(resp)
resp_data = json.loads(resp.data)
oswls_actual_num = db.session.query(OpenStackWorkloadStats).count()
self.assertEqual(oswls_num, oswls_actual_num)
self.assertEqual(len(resp_data['oswl_stats']), oswls_actual_num)
for oswl in resp_data['oswl_stats']:
self.assertEqual(consts.OSWL_STATUSES.added, oswl['status'])
def test_post_empty(self):
resp = self.post(
'/api/v1/oswl_stats/',
{'oswl_stats': []}
)
self.check_response_ok(resp)
def test_post_duplication(self):
oswls_num = 30
expected_oswls = self.generate_dumb_oswls(oswls_num)
resp = self.post(
'/api/v1/oswl_stats/',
{'oswl_stats': expected_oswls}
)
self.check_response_ok(resp)
resp_data = json.loads(resp.data)
oswls_actual_num = db.session.query(OpenStackWorkloadStats).count()
self.assertEqual(oswls_num, oswls_actual_num)
self.assertEqual(len(resp_data['oswl_stats']), oswls_actual_num)
# Checking duplication
resp = self.post(
'/api/v1/oswl_stats/',
{'oswl_stats': expected_oswls}
)
self.check_response_ok(resp)