Remove schema from objects

These schemas were duplicated across objects; they are now consolidated in validation/json_schema.

Closes-Bug: #1367284
Change-Id: I8dee54fcbb408f447aeb2a68e05553bf13fd7a7f
This commit is contained in:
Przemyslaw Kaminski 2015-08-18 09:43:20 +02:00 committed by Igor Kalnitsky
parent 06e93da558
commit c946548aa8
13 changed files with 154 additions and 262 deletions

View File

@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun import consts
# Reusable sub-schema: fields that may legitimately be absent are
# serialized as JSON null, hence the ["<type>", "null"] union.
_NULLABLE_STRING = {"type": ["string", "null"]}
_NULLABLE_NUMBER = {"type": ["number", "null"]}

#: JSON schema describing a serialized ActionLog object.
#: ``action_type`` is constrained to the values enumerated in
#: ``consts.ACTION_TYPES``.
schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "ActionLog",
    "description": "Serialized ActionLog object",
    "type": "object",
    "properties": {
        "id": {"type": "number"},
        "actor_id": dict(_NULLABLE_STRING),
        "action_group": {"type": "string"},
        "action_name": {"type": "string"},
        "action_type": {
            "type": "string",
            "enum": list(consts.ACTION_TYPES),
        },
        "start_timestamp": {"type": "string"},
        "end_timestamp": {"type": "string"},
        "additional_info": {"type": "object"},
        "is_sent": {"type": "boolean"},
        "cluster_id": dict(_NULLABLE_NUMBER),
        "task_uuid": dict(_NULLABLE_STRING),
    },
}

View File

@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The settings payload itself is free-form; only its container shape
# (a JSON object) is validated here.
_SETTINGS_PROPERTIES = {
    "settings": {"type": "object"},
}

#: JSON schema describing a serialized MasterNodeSettings object.
schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "title": "MasterNodeSettings",
    "description": "Serialized MasterNodeSettings object",
    "properties": _SETTINGS_PROPERTIES,
}

View File

@ -13,9 +13,9 @@
# under the License.
from nailgun.api.v1.validators.base import BasicValidator
from nailgun.api.v1.validators.json_schema import master_node_settings
from nailgun.logger import logger
from nailgun.objects import MasterNodeSettings
class MasterNodeSettingsValidator(BasicValidator):
@ -30,6 +30,6 @@ class MasterNodeSettingsValidator(BasicValidator):
"It will be ignored",
)
cls.validate_schema(data, schema=MasterNodeSettings.schema)
cls.validate_schema(data, schema=master_node_settings.schema)
return data

View File

@ -15,8 +15,6 @@
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun import consts
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
@ -31,30 +29,6 @@ class ActionLog(NailgunObject):
#: Serializer for ActionLog
serializer = ActionLogSerializer
#: JSON schema for ActionLog
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "ActionLog",
"description": "Serialized ActionLog object",
"type": "object",
"properties": {
"id": {"type": "number"},
"actor_id": {"type": ["string", "null"]},
"action_group": {"type": "string"},
"action_name": {"type": "string"},
"action_type": {
"type": "string",
"enum": list(consts.ACTION_TYPES)
},
"start_timestamp": {"type": "string"},
"end_timestamp": {"type": "string"},
"additional_info": {"type": "object"},
"is_sent": {"type": "boolean"},
"cluster_id": {"type": ["number", "null"]},
"task_uuid": {"type": ["string", "null"]}
}
}
@classmethod
def update(cls, instance, data):
"""Form additional info for further instance update.

View File

@ -52,27 +52,6 @@ class NailgunObject(object):
#: SQLAlchemy model for object
model = None
#: JSON schema for object
schema = {
"properties": {}
}
@classmethod
def check_field(cls, field):
"""Check if field is described in object's JSON schema
:param field: name of the field as string
:returns: None
:raises: errors.InvalidField
"""
if field not in cls.schema["properties"]:
raise errors.InvalidField(
u"Invalid field '{0}' for object '{1}'".format(
field,
cls.__name__
)
)
@classmethod
def get_by_uid(cls, uid, fail_if_not_found=False, lock_for_update=False):
"""Get instance by it's uid (PK in case of SQLAlchemy)
@ -267,7 +246,6 @@ class NailgunCollection(object):
else asc.
:returns: filtered iterable (SQLAlchemy query)
"""
map(cls.single.check_field, kwargs.iterkeys())
if iterable is not None:
use_iterable = iterable
else:
@ -292,7 +270,6 @@ class NailgunCollection(object):
:param iterable: iterable (SQLAlchemy query)
:returns: filtered iterable (SQLAlchemy query)
"""
map(cls.single.check_field, kwargs.iterkeys())
use_iterable = iterable or cls.all()
if cls._is_query(use_iterable):
conditions = []

View File

@ -26,8 +26,6 @@ import sqlalchemy as sa
import yaml
from nailgun.api.v1.validators.json_schema import cluster as cluster_schema
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy import models
@ -127,9 +125,6 @@ class Cluster(NailgunObject):
#: Serializer for Cluster
serializer = ClusterSerializer
#: Cluster JSON schema
schema = cluster_schema.single_schema
@classmethod
def create(cls, data):
"""Create Cluster instance with specified parameters in DB.

View File

@ -29,16 +29,6 @@ class MasterNodeSettings(NailgunObject):
serializer = MasterNodeSettingsSerializer
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "ActionLog",
"description": "Serialized ActionLog object",
"type": "object",
"properties": {
"settings": {"type": "object"}
}
}
@classmethod
def get_one(cls, fail_if_not_found=False, lock_for_update=False):
"""Get one instance from table.

View File

@ -63,48 +63,6 @@ class Node(NailgunObject):
#: Serializer for Node
serializer = NodeSerializer
#: Node JSON schema
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Node",
"description": "Serialized Node object",
"type": "object",
"properties": {
"id": {"type": "number"},
"cluster_id": {"type": "number"},
"name": {"type": "string"},
"status": {
"type": "string",
"enum": list(consts.NODE_STATUSES)
},
"group_id": {"type": "number"},
"meta": {"type": "object"},
"mac": {"type": "string"},
"manufacturer": {"type": "string"},
"platform_name": {"type": "string"},
"kernel_params": {"type": "string"},
"progress": {"type": "number"},
"os_platform": {"type": "string"},
"pending_addition": {"type": "boolean"},
"pending_deletion": {"type": "boolean"},
"error_type": {
"type": "string",
"enum": list(consts.NODE_ERRORS)
},
"error_msg": {"type": "string"},
"online": {"type": "boolean"},
"labels": {
"type": "object",
"additionalProperties": {
"type": ["string", "null"]
}
},
"roles": {"type": "array"},
"pending_roles": {"type": "array"},
"agent_checksum": {"type": "string"}
}
}
@classmethod
def delete(cls, instance):
fire_callback_on_node_delete(instance)

View File

@ -30,19 +30,6 @@ class NodeGroup(NailgunObject):
model = DBNodeGroup
serializer = NodeGroupSerializer
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "NodeGroup",
"description": "Serialized NodeGroup object",
"type": "object",
"properties": {
"id": {"type": "integer"},
"cluster_id": {"type": "integer"},
"name": {"type": "string"},
},
"required": ["cluster_id", "name"],
}
@classmethod
def create(cls, data):
new_group = super(NodeGroup, cls).create(data)

View File

@ -16,7 +16,6 @@
from datetime import datetime
from nailgun import consts
from nailgun.db.sqlalchemy import models
from nailgun.errors import errors
@ -35,30 +34,6 @@ class Notification(NailgunObject):
model = models.Notification
serializer = NotificationSerializer
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Notification",
"description": "Serialized Notification object",
"type": "object",
"properties": {
"id": {"type": "number"},
"cluster_id": {"type": "number"},
"node_id": {"type": "number"},
"task_id": {"type": "number"},
"time": {"type": "string"},
"date": {"type": "string"},
"topic": {
"type": "string",
"enum": list(consts.NOTIFICATION_TOPICS)
},
"message": {"type": "string"},
"status": {
"type": "string",
"enum": list(consts.NOTIFICATION_STATUSES)
}
}
}
@classmethod
def create(cls, data):
"""Creates and returns a notification instance.

View File

@ -42,42 +42,6 @@ class Release(NailgunObject):
#: Serializer for Release
serializer = release_serializer.ReleaseSerializer
#: Release JSON schema
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Release",
"description": "Serialized Release object",
"type": "object",
"required": [
"name",
"operating_system"
],
"properties": {
"id": {"type": "number"},
"name": {"type": "string"},
"version": {"type": "string"},
"can_update_from_versions": {"type": "array"},
"description": {"type": "string"},
"operating_system": {"type": "string"},
"state": {
"type": "string",
"enum": list(consts.RELEASE_STATES)
},
"networks_metadata": {"type": "array"},
"attributes_metadata": {"type": "object"},
"volumes_metadata": {"type": "object"},
"modes_metadata": {"type": "object"},
"roles_metadata": {"type": "object"},
"network_roles_metadata": {"type": "array"},
"wizard_metadata": {"type": "object"},
"roles": {"type": "array"},
"clusters": {"type": "array"},
"is_deployable": {"type": "boolean"},
"vmware_attributes_metadata": {"type": "object"},
"modes": {"type": "array"}
}
}
@classmethod
def create(cls, data):
"""Create Release instance with specified parameters in DB.

View File

@ -39,31 +39,6 @@ class Task(NailgunObject):
model = models.Task
serializer = TaskSerializer
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Task",
"description": "Serialized Task object",
"type": "object",
"properties": {
"id": {"type": "number"},
"cluster_id": {"type": "number"},
"parent_id": {"type": "number"},
"name": {
"type": "string",
"enum": list(consts.TASK_NAMES)
},
"message": {"type": "string"},
"status": {
"type": "string",
"enum": list(consts.TASK_STATUSES)
},
"progress": {"type": "number"},
"weight": {"type": "number"},
"cache": {"type": "object"},
"result": {"type": "object"}
}
}
@classmethod
def create_subtask(cls, instance, name):
if name not in consts.TASK_NAMES:

View File

@ -24,11 +24,15 @@ from itertools import ifilter
import re
import uuid
from sqlalchemy import inspect as sqlalchemy_inspect
import jsonschema
from oslo_serialization import jsonutils
import six
from six.moves import range
from nailgun.api.v1.validators.json_schema import action_log
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import BaseTestCase
from nailgun.utils import reverse
@ -55,7 +59,7 @@ class TestObjects(BaseIntegrationTest):
def test_filter_by(self):
names = cycle('ABCD')
os = cycle(['CentOS', 'Ubuntu'])
os = cycle([consts.RELEASE_OS.centos, consts.RELEASE_OS.ubuntu])
for i in range(12):
self.env.create_release(
name=names.next(),
@ -66,7 +70,7 @@ class TestObjects(BaseIntegrationTest):
query_filtered = objects.ReleaseCollection.filter_by(
objects.ReleaseCollection.all(),
name="A",
operating_system="CentOS"
operating_system=consts.RELEASE_OS.centos
)
self.assertIsInstance(query_filtered, NoCacheQuery)
self.assertEqual(
@ -75,13 +79,13 @@ class TestObjects(BaseIntegrationTest):
)
for r in query_filtered:
self.assertEqual(r.name, "A")
self.assertEqual(r.operating_system, "CentOS")
self.assertEqual(r.operating_system, consts.RELEASE_OS.centos)
# filtering iterable - returns ifilter
iterable_filtered = objects.ReleaseCollection.filter_by(
list(objects.ReleaseCollection.all()),
name="A",
operating_system="CentOS"
operating_system=consts.RELEASE_OS.centos
)
self.assertIsInstance(iterable_filtered, ifilter)
self.assertEqual(
@ -90,7 +94,7 @@ class TestObjects(BaseIntegrationTest):
)
for r in iterable_filtered:
self.assertEqual(r.name, "A")
self.assertEqual(r.operating_system, "CentOS")
self.assertEqual(r.operating_system, consts.RELEASE_OS.centos)
iterable_filtered = objects.ReleaseCollection.filter_by(
list(),
@ -101,7 +105,7 @@ class TestObjects(BaseIntegrationTest):
def test_filter_by_not(self):
names = cycle('ABCDE')
os = cycle(['CentOS', 'Ubuntu'])
os = cycle([consts.RELEASE_OS.centos, consts.RELEASE_OS.ubuntu])
# create releases: we'll have only two releases with both
# name A and operating_system CentOS
@ -115,7 +119,7 @@ class TestObjects(BaseIntegrationTest):
query_filtered = objects.ReleaseCollection.filter_by_not(
objects.ReleaseCollection.all(),
name="A",
operating_system="CentOS"
operating_system=consts.RELEASE_OS.centos
)
self.assertIsInstance(query_filtered, NoCacheQuery)
self.assertEqual(
@ -124,15 +128,16 @@ class TestObjects(BaseIntegrationTest):
)
for r in query_filtered:
if r.name == "A":
self.assertNotEqual(r.operating_system, "CentOS")
elif r.operating_system == "CentOS":
self.assertNotEqual(r.operating_system,
consts.RELEASE_OS.centos)
elif r.operating_system == consts.RELEASE_OS.centos:
self.assertNotEqual(r.name, "A")
# filtering iterable - returns ifilter
iterable_filtered = objects.ReleaseCollection.filter_by_not(
list(objects.ReleaseCollection.all()),
name="A",
operating_system="CentOS"
operating_system=consts.RELEASE_OS.centos
)
self.assertIsInstance(iterable_filtered, ifilter)
self.assertEqual(
@ -141,8 +146,9 @@ class TestObjects(BaseIntegrationTest):
)
for r in iterable_filtered:
if r.name == "A":
self.assertNotEqual(r.operating_system, "CentOS")
elif r.operating_system == "CentOS":
self.assertNotEqual(r.operating_system,
consts.RELEASE_OS.centos)
elif r.operating_system == consts.RELEASE_OS.centos:
self.assertNotEqual(r.name, "A")
@ -274,7 +280,8 @@ class TestNodeObject(BaseIntegrationTest):
'pending_addition': True}]
self.env.create(
cluster_kwargs={
'net_provider': 'neutron'},
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
},
nodes_kwargs=nodes)
cluster = self.env.clusters[0]
@ -322,7 +329,8 @@ class TestNodeObject(BaseIntegrationTest):
'pending_addition': True}]
self.env.create(
cluster_kwargs={
'net_provider': 'neutron'},
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
},
nodes_kwargs=nodes)
cluster = self.env.clusters[0]
@ -375,10 +383,14 @@ class TestNodeObject(BaseIntegrationTest):
"mac",
"meta",
"name",
"agent_checksum"
"agent_checksum",
"uuid",
"timestamp",
"nic_interfaces",
"attributes",
]
fields = set(
objects.Node.schema["properties"].keys()
c.key for c in sqlalchemy_inspect(objects.Node.model).attrs
) - set(exclude_fields)
for f in fields:
@ -416,9 +428,10 @@ class TestNodeObject(BaseIntegrationTest):
self.assertNotEqual(node_db.meta["disks"], data["meta"]["disks"])
# test status handling
for status in ('provisioning', 'error'):
for status in (consts.NODE_STATUSES.provisioning,
consts.NODE_STATUSES.error):
node_db.status = status
data["status"] = "discover"
data["status"] = consts.NODE_STATUSES.discover
objects.Node.update_by_agent(node_db, copy.deepcopy(data))
self.assertEqual(node_db.status, status)
@ -601,66 +614,76 @@ class TestTaskObject(BaseIntegrationTest):
{'roles': ['cinder']}])
def _node_should_be_error_with_type(self, node, error_type):
self.assertEquals(node.status, 'error')
self.assertEquals(node.status, consts.NODE_STATUSES.error)
self.assertEquals(node.error_type, error_type)
self.assertEquals(node.progress, 0)
def _nodes_should_not_be_error(self, nodes):
for node in nodes:
self.assertEquals(node.status, 'discover')
self.assertEquals(node.status, consts.NODE_STATUSES.discover)
@property
def cluster(self):
return self.env.clusters[0]
def test_update_nodes_to_error_if_deployment_task_failed(self):
self.cluster.nodes[0].status = 'deploying'
self.cluster.nodes[0].status = consts.NODE_STATUSES.deploying
self.cluster.nodes[0].progress = 12
task = Task(name='deployment', cluster=self.cluster, status='error')
task = Task(name=consts.TASK_NAMES.deployment,
cluster=self.cluster,
status=consts.TASK_STATUSES.error)
self.db.add(task)
self.db.flush()
objects.Task._update_cluster_data(task)
self.db.flush()
self.assertEquals(self.cluster.status, 'error')
self._node_should_be_error_with_type(self.cluster.nodes[0], 'deploy')
self.assertEquals(self.cluster.status, consts.CLUSTER_STATUSES.error)
self._node_should_be_error_with_type(self.cluster.nodes[0],
consts.NODE_ERRORS.deploy)
self._nodes_should_not_be_error(self.cluster.nodes[1:])
def test_update_cluster_to_error_if_deploy_task_failed(self):
task = Task(name='deploy', cluster=self.cluster, status='error')
task = Task(name=consts.TASK_NAMES.deploy,
cluster=self.cluster,
status=consts.TASK_STATUSES.error)
self.db.add(task)
self.db.flush()
objects.Task._update_cluster_data(task)
self.db.flush()
self.assertEquals(self.cluster.status, 'error')
self.assertEquals(self.cluster.status, consts.CLUSTER_STATUSES.error)
def test_update_nodes_to_error_if_provision_task_failed(self):
self.cluster.nodes[0].status = 'provisioning'
self.cluster.nodes[0].status = consts.NODE_STATUSES.provisioning
self.cluster.nodes[0].progress = 12
task = Task(name='provision', cluster=self.cluster, status='error')
task = Task(name=consts.TASK_NAMES.provision,
cluster=self.cluster,
status=consts.TASK_STATUSES.error)
self.db.add(task)
self.db.flush()
objects.Task._update_cluster_data(task)
self.db.flush()
self.assertEquals(self.cluster.status, 'error')
self.assertEquals(self.cluster.status, consts.CLUSTER_STATUSES.error)
self._node_should_be_error_with_type(self.cluster.nodes[0],
'provision')
consts.NODE_ERRORS.provision)
self._nodes_should_not_be_error(self.cluster.nodes[1:])
def test_update_cluster_to_operational(self):
task = Task(name='deploy', cluster=self.cluster, status='ready')
task = Task(name=consts.TASK_NAMES.deploy,
cluster=self.cluster,
status=consts.TASK_STATUSES.ready)
self.db.add(task)
self.db.flush()
objects.Task._update_cluster_data(task)
self.db.flush()
self.assertEquals(self.cluster.status, 'operational')
self.assertEquals(self.cluster.status,
consts.CLUSTER_STATUSES.operational)
def test_update_vms_conf(self):
kvm_node = self.cluster.nodes[0]
@ -669,7 +692,8 @@ class TestTaskObject(BaseIntegrationTest):
objects.Node.set_vms_conf(kvm_node,
[{'id': 1, 'cluster_id': self.cluster.id}])
task = Task(name=consts.TASK_NAMES.spawn_vms,
cluster=self.cluster, status='ready')
cluster=self.cluster,
status=consts.TASK_STATUSES.ready)
self.db.add(task)
self.db.flush()
@ -680,61 +704,67 @@ class TestTaskObject(BaseIntegrationTest):
if consts.VIRTUAL_NODE_TYPES.virt in node.roles:
self.assertTrue(node.attributes.vms_conf[0].get('created'))
else:
self.assertNotEquals(node.status, 'ready')
self.assertNotEquals(node.status, consts.NODE_STATUSES.ready)
def test_update_if_parent_task_is_ready_all_nodes_should_be_ready(self):
for node in self.cluster.nodes:
node.status = 'ready'
node.status = consts.NODE_STATUSES.ready
node.progress = 100
self.cluster.nodes[0].status = 'deploying'
self.cluster.nodes[0].status = consts.NODE_STATUSES.deploying
self.cluster.nodes[0].progress = 24
task = Task(name='deploy', cluster=self.cluster, status='ready')
task = Task(name=consts.TASK_NAMES.deploy,
cluster=self.cluster,
status=consts.TASK_STATUSES.ready)
self.db.add(task)
self.db.flush()
objects.Task._update_cluster_data(task)
self.db.flush()
self.assertEquals(self.cluster.status, 'operational')
self.assertEquals(self.cluster.status,
consts.CLUSTER_STATUSES.operational)
for node in self.cluster.nodes:
self.assertEquals(node.status, 'ready')
self.assertEquals(node.status, consts.NODE_STATUSES.ready)
self.assertEquals(node.progress, 100)
def test_update_cluster_status_if_task_was_already_in_error_status(self):
for node in self.cluster.nodes:
node.status = 'provisioning'
node.status = consts.NODE_STATUSES.provisioning
node.progress = 12
task = Task(name='provision', cluster=self.cluster, status='error')
task = Task(name=consts.TASK_NAMES.provision,
cluster=self.cluster,
status=consts.TASK_STATUSES.error)
self.db.add(task)
self.db.flush()
data = {'status': 'error', 'progress': 100}
data = {'status': consts.TASK_STATUSES.error, 'progress': 100}
objects.Task.update(task, data)
self.db.flush()
self.assertEquals(self.cluster.status, 'error')
self.assertEquals(task.status, 'error')
self.assertEquals(self.cluster.status, consts.CLUSTER_STATUSES.error)
self.assertEquals(task.status, consts.TASK_STATUSES.error)
for node in self.cluster.nodes:
self.assertEquals(node.status, 'error')
self.assertEquals(node.status, consts.NODE_STATUSES.error)
self.assertEquals(node.progress, 0)
def test_do_not_set_cluster_to_error_if_validation_failed(self):
for task_name in ['check_before_deployment', 'check_networks']:
for task_name in [consts.TASK_NAMES.check_before_deployment,
consts.TASK_NAMES.check_networks]:
supertask = Task(
name='deploy',
name=consts.TASK_NAMES.deploy,
cluster=self.cluster,
status='error')
status=consts.TASK_STATUSES.error)
check_task = Task(
name=task_name,
cluster=self.cluster,
status='error')
status=consts.TASK_STATUSES.error)
supertask.subtasks.append(check_task)
self.db.add(check_task)
@ -743,10 +773,10 @@ class TestTaskObject(BaseIntegrationTest):
objects.Task._update_cluster_data(supertask)
self.db.flush()
self.assertEquals(self.cluster.status, 'new')
self.assertEquals(self.cluster.status, consts.CLUSTER_STATUSES.new)
def test_get_task_by_uuid_returns_task(self):
task = Task(name='deploy')
task = Task(name=consts.TASK_NAMES.deploy)
self.db.add(task)
self.db.flush()
task_by_uuid = objects.Task.get_by_uuid(task.uuid)
@ -759,7 +789,7 @@ class TestTaskObject(BaseIntegrationTest):
fail_if_not_found=True)
def test_task_wrong_status_filtered(self):
task = Task(name='deploy')
task = Task(name=consts.TASK_NAMES.deploy)
self.db.add(task)
self.db.flush()
@ -808,7 +838,7 @@ class TestActionLogObject(BaseIntegrationTest):
instance_to_validate = jsonutils.loads(objects.ActionLog.to_json(al))
self.assertNotRaises(jsonschema.ValidationError, jsonschema.validate,
instance_to_validate, objects.ActionLog.schema)
instance_to_validate, action_log.schema)
def test_validate_json_schema_failure(self):
object_data = {
@ -825,7 +855,7 @@ class TestActionLogObject(BaseIntegrationTest):
instance_to_validate = jsonutils.loads(objects.ActionLog.to_json(al))
self.assertRaises(jsonschema.ValidationError, jsonschema.validate,
instance_to_validate, objects.ActionLog.schema)
instance_to_validate, action_log.schema)
def test_get_by_uuid_method(self):
object_data = {
@ -899,7 +929,7 @@ class TestClusterObject(BaseTestCase):
def _get_network_role_metadata(self, **kwargs):
network_role = {
'id': 'test_network_role',
'default_mapping': 'public',
'default_mapping': consts.NETWORKS.public,
'properties': {
'subnet': True,
'gateway': False,