Remove schema from objects

These schemas were duplicated across objects; consolidate them in validation/json_schema.

Closes-Bug: #1367284
Change-Id: I8dee54fcbb408f447aeb2a68e05553bf13fd7a7f
This commit is contained in:
Przemyslaw Kaminski 2015-08-18 09:43:20 +02:00 committed by Igor Kalnitsky
parent 06e93da558
commit c946548aa8
13 changed files with 154 additions and 262 deletions

View File

@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun import consts
# Properties of a serialized ActionLog record; ``action_type`` is
# constrained to the values declared in nailgun.consts.ACTION_TYPES.
_action_log_properties = {
    "id": {"type": "number"},
    "actor_id": {"type": ["string", "null"]},
    "action_group": {"type": "string"},
    "action_name": {"type": "string"},
    "action_type": {
        "type": "string",
        "enum": list(consts.ACTION_TYPES)
    },
    "start_timestamp": {"type": "string"},
    "end_timestamp": {"type": "string"},
    "additional_info": {"type": "object"},
    "is_sent": {"type": "boolean"},
    "cluster_id": {"type": ["number", "null"]},
    "task_uuid": {"type": ["string", "null"]}
}

#: JSON schema for ActionLog
schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "ActionLog",
    "description": "Serialized ActionLog object",
    "type": "object",
    "properties": _action_log_properties
}

View File

@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#: JSON schema for MasterNodeSettings
schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "title": "MasterNodeSettings",
    "description": "Serialized MasterNodeSettings object",
    # Settings payload is an opaque object; its inner structure is not
    # validated here.
    "properties": {
        "settings": {"type": "object"}
    }
}

View File

@ -13,9 +13,9 @@
# under the License. # under the License.
from nailgun.api.v1.validators.base import BasicValidator from nailgun.api.v1.validators.base import BasicValidator
from nailgun.api.v1.validators.json_schema import master_node_settings
from nailgun.logger import logger from nailgun.logger import logger
from nailgun.objects import MasterNodeSettings
class MasterNodeSettingsValidator(BasicValidator): class MasterNodeSettingsValidator(BasicValidator):
@ -30,6 +30,6 @@ class MasterNodeSettingsValidator(BasicValidator):
"It will be ignored", "It will be ignored",
) )
cls.validate_schema(data, schema=MasterNodeSettings.schema) cls.validate_schema(data, schema=master_node_settings.schema)
return data return data

View File

@ -15,8 +15,6 @@
from nailgun.db import db from nailgun.db import db
from nailgun.db.sqlalchemy import models from nailgun.db.sqlalchemy import models
from nailgun import consts
from nailgun.objects import NailgunCollection from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject from nailgun.objects import NailgunObject
@ -31,30 +29,6 @@ class ActionLog(NailgunObject):
#: Serializer for ActionLog #: Serializer for ActionLog
serializer = ActionLogSerializer serializer = ActionLogSerializer
#: JSON schema for ActionLog
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "ActionLog",
"description": "Serialized ActionLog object",
"type": "object",
"properties": {
"id": {"type": "number"},
"actor_id": {"type": ["string", "null"]},
"action_group": {"type": "string"},
"action_name": {"type": "string"},
"action_type": {
"type": "string",
"enum": list(consts.ACTION_TYPES)
},
"start_timestamp": {"type": "string"},
"end_timestamp": {"type": "string"},
"additional_info": {"type": "object"},
"is_sent": {"type": "boolean"},
"cluster_id": {"type": ["number", "null"]},
"task_uuid": {"type": ["string", "null"]}
}
}
@classmethod @classmethod
def update(cls, instance, data): def update(cls, instance, data):
"""Form additional info for further instance update. """Form additional info for further instance update.

View File

@ -52,27 +52,6 @@ class NailgunObject(object):
#: SQLAlchemy model for object #: SQLAlchemy model for object
model = None model = None
#: JSON schema for object
schema = {
"properties": {}
}
@classmethod
def check_field(cls, field):
"""Check if field is described in object's JSON schema
:param field: name of the field as string
:returns: None
:raises: errors.InvalidField
"""
if field not in cls.schema["properties"]:
raise errors.InvalidField(
u"Invalid field '{0}' for object '{1}'".format(
field,
cls.__name__
)
)
@classmethod @classmethod
def get_by_uid(cls, uid, fail_if_not_found=False, lock_for_update=False): def get_by_uid(cls, uid, fail_if_not_found=False, lock_for_update=False):
"""Get instance by it's uid (PK in case of SQLAlchemy) """Get instance by it's uid (PK in case of SQLAlchemy)
@ -267,7 +246,6 @@ class NailgunCollection(object):
else asc. else asc.
:returns: filtered iterable (SQLAlchemy query) :returns: filtered iterable (SQLAlchemy query)
""" """
map(cls.single.check_field, kwargs.iterkeys())
if iterable is not None: if iterable is not None:
use_iterable = iterable use_iterable = iterable
else: else:
@ -292,7 +270,6 @@ class NailgunCollection(object):
:param iterable: iterable (SQLAlchemy query) :param iterable: iterable (SQLAlchemy query)
:returns: filtered iterable (SQLAlchemy query) :returns: filtered iterable (SQLAlchemy query)
""" """
map(cls.single.check_field, kwargs.iterkeys())
use_iterable = iterable or cls.all() use_iterable = iterable or cls.all()
if cls._is_query(use_iterable): if cls._is_query(use_iterable):
conditions = [] conditions = []

View File

@ -26,8 +26,6 @@ import sqlalchemy as sa
import yaml import yaml
from nailgun.api.v1.validators.json_schema import cluster as cluster_schema
from nailgun import consts from nailgun import consts
from nailgun.db import db from nailgun.db import db
from nailgun.db.sqlalchemy import models from nailgun.db.sqlalchemy import models
@ -127,9 +125,6 @@ class Cluster(NailgunObject):
#: Serializer for Cluster #: Serializer for Cluster
serializer = ClusterSerializer serializer = ClusterSerializer
#: Cluster JSON schema
schema = cluster_schema.single_schema
@classmethod @classmethod
def create(cls, data): def create(cls, data):
"""Create Cluster instance with specified parameters in DB. """Create Cluster instance with specified parameters in DB.

View File

@ -29,16 +29,6 @@ class MasterNodeSettings(NailgunObject):
serializer = MasterNodeSettingsSerializer serializer = MasterNodeSettingsSerializer
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "ActionLog",
"description": "Serialized ActionLog object",
"type": "object",
"properties": {
"settings": {"type": "object"}
}
}
@classmethod @classmethod
def get_one(cls, fail_if_not_found=False, lock_for_update=False): def get_one(cls, fail_if_not_found=False, lock_for_update=False):
"""Get one instance from table. """Get one instance from table.

View File

@ -63,48 +63,6 @@ class Node(NailgunObject):
#: Serializer for Node #: Serializer for Node
serializer = NodeSerializer serializer = NodeSerializer
#: Node JSON schema
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Node",
"description": "Serialized Node object",
"type": "object",
"properties": {
"id": {"type": "number"},
"cluster_id": {"type": "number"},
"name": {"type": "string"},
"status": {
"type": "string",
"enum": list(consts.NODE_STATUSES)
},
"group_id": {"type": "number"},
"meta": {"type": "object"},
"mac": {"type": "string"},
"manufacturer": {"type": "string"},
"platform_name": {"type": "string"},
"kernel_params": {"type": "string"},
"progress": {"type": "number"},
"os_platform": {"type": "string"},
"pending_addition": {"type": "boolean"},
"pending_deletion": {"type": "boolean"},
"error_type": {
"type": "string",
"enum": list(consts.NODE_ERRORS)
},
"error_msg": {"type": "string"},
"online": {"type": "boolean"},
"labels": {
"type": "object",
"additionalProperties": {
"type": ["string", "null"]
}
},
"roles": {"type": "array"},
"pending_roles": {"type": "array"},
"agent_checksum": {"type": "string"}
}
}
@classmethod @classmethod
def delete(cls, instance): def delete(cls, instance):
fire_callback_on_node_delete(instance) fire_callback_on_node_delete(instance)

View File

@ -30,19 +30,6 @@ class NodeGroup(NailgunObject):
model = DBNodeGroup model = DBNodeGroup
serializer = NodeGroupSerializer serializer = NodeGroupSerializer
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "NodeGroup",
"description": "Serialized NodeGroup object",
"type": "object",
"properties": {
"id": {"type": "integer"},
"cluster_id": {"type": "integer"},
"name": {"type": "string"},
},
"required": ["cluster_id", "name"],
}
@classmethod @classmethod
def create(cls, data): def create(cls, data):
new_group = super(NodeGroup, cls).create(data) new_group = super(NodeGroup, cls).create(data)

View File

@ -16,7 +16,6 @@
from datetime import datetime from datetime import datetime
from nailgun import consts
from nailgun.db.sqlalchemy import models from nailgun.db.sqlalchemy import models
from nailgun.errors import errors from nailgun.errors import errors
@ -35,30 +34,6 @@ class Notification(NailgunObject):
model = models.Notification model = models.Notification
serializer = NotificationSerializer serializer = NotificationSerializer
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Notification",
"description": "Serialized Notification object",
"type": "object",
"properties": {
"id": {"type": "number"},
"cluster_id": {"type": "number"},
"node_id": {"type": "number"},
"task_id": {"type": "number"},
"time": {"type": "string"},
"date": {"type": "string"},
"topic": {
"type": "string",
"enum": list(consts.NOTIFICATION_TOPICS)
},
"message": {"type": "string"},
"status": {
"type": "string",
"enum": list(consts.NOTIFICATION_STATUSES)
}
}
}
@classmethod @classmethod
def create(cls, data): def create(cls, data):
"""Creates and returns a notification instance. """Creates and returns a notification instance.

View File

@ -42,42 +42,6 @@ class Release(NailgunObject):
#: Serializer for Release #: Serializer for Release
serializer = release_serializer.ReleaseSerializer serializer = release_serializer.ReleaseSerializer
#: Release JSON schema
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Release",
"description": "Serialized Release object",
"type": "object",
"required": [
"name",
"operating_system"
],
"properties": {
"id": {"type": "number"},
"name": {"type": "string"},
"version": {"type": "string"},
"can_update_from_versions": {"type": "array"},
"description": {"type": "string"},
"operating_system": {"type": "string"},
"state": {
"type": "string",
"enum": list(consts.RELEASE_STATES)
},
"networks_metadata": {"type": "array"},
"attributes_metadata": {"type": "object"},
"volumes_metadata": {"type": "object"},
"modes_metadata": {"type": "object"},
"roles_metadata": {"type": "object"},
"network_roles_metadata": {"type": "array"},
"wizard_metadata": {"type": "object"},
"roles": {"type": "array"},
"clusters": {"type": "array"},
"is_deployable": {"type": "boolean"},
"vmware_attributes_metadata": {"type": "object"},
"modes": {"type": "array"}
}
}
@classmethod @classmethod
def create(cls, data): def create(cls, data):
"""Create Release instance with specified parameters in DB. """Create Release instance with specified parameters in DB.

View File

@ -39,31 +39,6 @@ class Task(NailgunObject):
model = models.Task model = models.Task
serializer = TaskSerializer serializer = TaskSerializer
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Task",
"description": "Serialized Task object",
"type": "object",
"properties": {
"id": {"type": "number"},
"cluster_id": {"type": "number"},
"parent_id": {"type": "number"},
"name": {
"type": "string",
"enum": list(consts.TASK_NAMES)
},
"message": {"type": "string"},
"status": {
"type": "string",
"enum": list(consts.TASK_STATUSES)
},
"progress": {"type": "number"},
"weight": {"type": "number"},
"cache": {"type": "object"},
"result": {"type": "object"}
}
}
@classmethod @classmethod
def create_subtask(cls, instance, name): def create_subtask(cls, instance, name):
if name not in consts.TASK_NAMES: if name not in consts.TASK_NAMES:

View File

@ -24,11 +24,15 @@ from itertools import ifilter
import re import re
import uuid import uuid
from sqlalchemy import inspect as sqlalchemy_inspect
import jsonschema import jsonschema
from oslo_serialization import jsonutils from oslo_serialization import jsonutils
import six import six
from six.moves import range from six.moves import range
from nailgun.api.v1.validators.json_schema import action_log
from nailgun.test.base import BaseIntegrationTest from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import BaseTestCase from nailgun.test.base import BaseTestCase
from nailgun.utils import reverse from nailgun.utils import reverse
@ -55,7 +59,7 @@ class TestObjects(BaseIntegrationTest):
def test_filter_by(self): def test_filter_by(self):
names = cycle('ABCD') names = cycle('ABCD')
os = cycle(['CentOS', 'Ubuntu']) os = cycle([consts.RELEASE_OS.centos, consts.RELEASE_OS.ubuntu])
for i in range(12): for i in range(12):
self.env.create_release( self.env.create_release(
name=names.next(), name=names.next(),
@ -66,7 +70,7 @@ class TestObjects(BaseIntegrationTest):
query_filtered = objects.ReleaseCollection.filter_by( query_filtered = objects.ReleaseCollection.filter_by(
objects.ReleaseCollection.all(), objects.ReleaseCollection.all(),
name="A", name="A",
operating_system="CentOS" operating_system=consts.RELEASE_OS.centos
) )
self.assertIsInstance(query_filtered, NoCacheQuery) self.assertIsInstance(query_filtered, NoCacheQuery)
self.assertEqual( self.assertEqual(
@ -75,13 +79,13 @@ class TestObjects(BaseIntegrationTest):
) )
for r in query_filtered: for r in query_filtered:
self.assertEqual(r.name, "A") self.assertEqual(r.name, "A")
self.assertEqual(r.operating_system, "CentOS") self.assertEqual(r.operating_system, consts.RELEASE_OS.centos)
# filtering iterable - returns ifilter # filtering iterable - returns ifilter
iterable_filtered = objects.ReleaseCollection.filter_by( iterable_filtered = objects.ReleaseCollection.filter_by(
list(objects.ReleaseCollection.all()), list(objects.ReleaseCollection.all()),
name="A", name="A",
operating_system="CentOS" operating_system=consts.RELEASE_OS.centos
) )
self.assertIsInstance(iterable_filtered, ifilter) self.assertIsInstance(iterable_filtered, ifilter)
self.assertEqual( self.assertEqual(
@ -90,7 +94,7 @@ class TestObjects(BaseIntegrationTest):
) )
for r in iterable_filtered: for r in iterable_filtered:
self.assertEqual(r.name, "A") self.assertEqual(r.name, "A")
self.assertEqual(r.operating_system, "CentOS") self.assertEqual(r.operating_system, consts.RELEASE_OS.centos)
iterable_filtered = objects.ReleaseCollection.filter_by( iterable_filtered = objects.ReleaseCollection.filter_by(
list(), list(),
@ -101,7 +105,7 @@ class TestObjects(BaseIntegrationTest):
def test_filter_by_not(self): def test_filter_by_not(self):
names = cycle('ABCDE') names = cycle('ABCDE')
os = cycle(['CentOS', 'Ubuntu']) os = cycle([consts.RELEASE_OS.centos, consts.RELEASE_OS.ubuntu])
# create releases: we'll have only two releases with both # create releases: we'll have only two releases with both
# name A and operating_system CentOS # name A and operating_system CentOS
@ -115,7 +119,7 @@ class TestObjects(BaseIntegrationTest):
query_filtered = objects.ReleaseCollection.filter_by_not( query_filtered = objects.ReleaseCollection.filter_by_not(
objects.ReleaseCollection.all(), objects.ReleaseCollection.all(),
name="A", name="A",
operating_system="CentOS" operating_system=consts.RELEASE_OS.centos
) )
self.assertIsInstance(query_filtered, NoCacheQuery) self.assertIsInstance(query_filtered, NoCacheQuery)
self.assertEqual( self.assertEqual(
@ -124,15 +128,16 @@ class TestObjects(BaseIntegrationTest):
) )
for r in query_filtered: for r in query_filtered:
if r.name == "A": if r.name == "A":
self.assertNotEqual(r.operating_system, "CentOS") self.assertNotEqual(r.operating_system,
elif r.operating_system == "CentOS": consts.RELEASE_OS.centos)
elif r.operating_system == consts.RELEASE_OS.centos:
self.assertNotEqual(r.name, "A") self.assertNotEqual(r.name, "A")
# filtering iterable - returns ifilter # filtering iterable - returns ifilter
iterable_filtered = objects.ReleaseCollection.filter_by_not( iterable_filtered = objects.ReleaseCollection.filter_by_not(
list(objects.ReleaseCollection.all()), list(objects.ReleaseCollection.all()),
name="A", name="A",
operating_system="CentOS" operating_system=consts.RELEASE_OS.centos
) )
self.assertIsInstance(iterable_filtered, ifilter) self.assertIsInstance(iterable_filtered, ifilter)
self.assertEqual( self.assertEqual(
@ -141,8 +146,9 @@ class TestObjects(BaseIntegrationTest):
) )
for r in iterable_filtered: for r in iterable_filtered:
if r.name == "A": if r.name == "A":
self.assertNotEqual(r.operating_system, "CentOS") self.assertNotEqual(r.operating_system,
elif r.operating_system == "CentOS": consts.RELEASE_OS.centos)
elif r.operating_system == consts.RELEASE_OS.centos:
self.assertNotEqual(r.name, "A") self.assertNotEqual(r.name, "A")
@ -274,7 +280,8 @@ class TestNodeObject(BaseIntegrationTest):
'pending_addition': True}] 'pending_addition': True}]
self.env.create( self.env.create(
cluster_kwargs={ cluster_kwargs={
'net_provider': 'neutron'}, 'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
},
nodes_kwargs=nodes) nodes_kwargs=nodes)
cluster = self.env.clusters[0] cluster = self.env.clusters[0]
@ -322,7 +329,8 @@ class TestNodeObject(BaseIntegrationTest):
'pending_addition': True}] 'pending_addition': True}]
self.env.create( self.env.create(
cluster_kwargs={ cluster_kwargs={
'net_provider': 'neutron'}, 'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
},
nodes_kwargs=nodes) nodes_kwargs=nodes)
cluster = self.env.clusters[0] cluster = self.env.clusters[0]
@ -375,10 +383,14 @@ class TestNodeObject(BaseIntegrationTest):
"mac", "mac",
"meta", "meta",
"name", "name",
"agent_checksum" "agent_checksum",
"uuid",
"timestamp",
"nic_interfaces",
"attributes",
] ]
fields = set( fields = set(
objects.Node.schema["properties"].keys() c.key for c in sqlalchemy_inspect(objects.Node.model).attrs
) - set(exclude_fields) ) - set(exclude_fields)
for f in fields: for f in fields:
@ -416,9 +428,10 @@ class TestNodeObject(BaseIntegrationTest):
self.assertNotEqual(node_db.meta["disks"], data["meta"]["disks"]) self.assertNotEqual(node_db.meta["disks"], data["meta"]["disks"])
# test status handling # test status handling
for status in ('provisioning', 'error'): for status in (consts.NODE_STATUSES.provisioning,
consts.NODE_STATUSES.error):
node_db.status = status node_db.status = status
data["status"] = "discover" data["status"] = consts.NODE_STATUSES.discover
objects.Node.update_by_agent(node_db, copy.deepcopy(data)) objects.Node.update_by_agent(node_db, copy.deepcopy(data))
self.assertEqual(node_db.status, status) self.assertEqual(node_db.status, status)
@ -601,66 +614,76 @@ class TestTaskObject(BaseIntegrationTest):
{'roles': ['cinder']}]) {'roles': ['cinder']}])
def _node_should_be_error_with_type(self, node, error_type): def _node_should_be_error_with_type(self, node, error_type):
self.assertEquals(node.status, 'error') self.assertEquals(node.status, consts.NODE_STATUSES.error)
self.assertEquals(node.error_type, error_type) self.assertEquals(node.error_type, error_type)
self.assertEquals(node.progress, 0) self.assertEquals(node.progress, 0)
def _nodes_should_not_be_error(self, nodes): def _nodes_should_not_be_error(self, nodes):
for node in nodes: for node in nodes:
self.assertEquals(node.status, 'discover') self.assertEquals(node.status, consts.NODE_STATUSES.discover)
@property @property
def cluster(self): def cluster(self):
return self.env.clusters[0] return self.env.clusters[0]
def test_update_nodes_to_error_if_deployment_task_failed(self): def test_update_nodes_to_error_if_deployment_task_failed(self):
self.cluster.nodes[0].status = 'deploying' self.cluster.nodes[0].status = consts.NODE_STATUSES.deploying
self.cluster.nodes[0].progress = 12 self.cluster.nodes[0].progress = 12
task = Task(name='deployment', cluster=self.cluster, status='error') task = Task(name=consts.TASK_NAMES.deployment,
cluster=self.cluster,
status=consts.TASK_STATUSES.error)
self.db.add(task) self.db.add(task)
self.db.flush() self.db.flush()
objects.Task._update_cluster_data(task) objects.Task._update_cluster_data(task)
self.db.flush() self.db.flush()
self.assertEquals(self.cluster.status, 'error') self.assertEquals(self.cluster.status, consts.CLUSTER_STATUSES.error)
self._node_should_be_error_with_type(self.cluster.nodes[0], 'deploy') self._node_should_be_error_with_type(self.cluster.nodes[0],
consts.NODE_ERRORS.deploy)
self._nodes_should_not_be_error(self.cluster.nodes[1:]) self._nodes_should_not_be_error(self.cluster.nodes[1:])
def test_update_cluster_to_error_if_deploy_task_failed(self): def test_update_cluster_to_error_if_deploy_task_failed(self):
task = Task(name='deploy', cluster=self.cluster, status='error') task = Task(name=consts.TASK_NAMES.deploy,
cluster=self.cluster,
status=consts.TASK_STATUSES.error)
self.db.add(task) self.db.add(task)
self.db.flush() self.db.flush()
objects.Task._update_cluster_data(task) objects.Task._update_cluster_data(task)
self.db.flush() self.db.flush()
self.assertEquals(self.cluster.status, 'error') self.assertEquals(self.cluster.status, consts.CLUSTER_STATUSES.error)
def test_update_nodes_to_error_if_provision_task_failed(self): def test_update_nodes_to_error_if_provision_task_failed(self):
self.cluster.nodes[0].status = 'provisioning' self.cluster.nodes[0].status = consts.NODE_STATUSES.provisioning
self.cluster.nodes[0].progress = 12 self.cluster.nodes[0].progress = 12
task = Task(name='provision', cluster=self.cluster, status='error') task = Task(name=consts.TASK_NAMES.provision,
cluster=self.cluster,
status=consts.TASK_STATUSES.error)
self.db.add(task) self.db.add(task)
self.db.flush() self.db.flush()
objects.Task._update_cluster_data(task) objects.Task._update_cluster_data(task)
self.db.flush() self.db.flush()
self.assertEquals(self.cluster.status, 'error') self.assertEquals(self.cluster.status, consts.CLUSTER_STATUSES.error)
self._node_should_be_error_with_type(self.cluster.nodes[0], self._node_should_be_error_with_type(self.cluster.nodes[0],
'provision') consts.NODE_ERRORS.provision)
self._nodes_should_not_be_error(self.cluster.nodes[1:]) self._nodes_should_not_be_error(self.cluster.nodes[1:])
def test_update_cluster_to_operational(self): def test_update_cluster_to_operational(self):
task = Task(name='deploy', cluster=self.cluster, status='ready') task = Task(name=consts.TASK_NAMES.deploy,
cluster=self.cluster,
status=consts.TASK_STATUSES.ready)
self.db.add(task) self.db.add(task)
self.db.flush() self.db.flush()
objects.Task._update_cluster_data(task) objects.Task._update_cluster_data(task)
self.db.flush() self.db.flush()
self.assertEquals(self.cluster.status, 'operational') self.assertEquals(self.cluster.status,
consts.CLUSTER_STATUSES.operational)
def test_update_vms_conf(self): def test_update_vms_conf(self):
kvm_node = self.cluster.nodes[0] kvm_node = self.cluster.nodes[0]
@ -669,7 +692,8 @@ class TestTaskObject(BaseIntegrationTest):
objects.Node.set_vms_conf(kvm_node, objects.Node.set_vms_conf(kvm_node,
[{'id': 1, 'cluster_id': self.cluster.id}]) [{'id': 1, 'cluster_id': self.cluster.id}])
task = Task(name=consts.TASK_NAMES.spawn_vms, task = Task(name=consts.TASK_NAMES.spawn_vms,
cluster=self.cluster, status='ready') cluster=self.cluster,
status=consts.TASK_STATUSES.ready)
self.db.add(task) self.db.add(task)
self.db.flush() self.db.flush()
@ -680,61 +704,67 @@ class TestTaskObject(BaseIntegrationTest):
if consts.VIRTUAL_NODE_TYPES.virt in node.roles: if consts.VIRTUAL_NODE_TYPES.virt in node.roles:
self.assertTrue(node.attributes.vms_conf[0].get('created')) self.assertTrue(node.attributes.vms_conf[0].get('created'))
else: else:
self.assertNotEquals(node.status, 'ready') self.assertNotEquals(node.status, consts.NODE_STATUSES.ready)
def test_update_if_parent_task_is_ready_all_nodes_should_be_ready(self): def test_update_if_parent_task_is_ready_all_nodes_should_be_ready(self):
for node in self.cluster.nodes: for node in self.cluster.nodes:
node.status = 'ready' node.status = consts.NODE_STATUSES.ready
node.progress = 100 node.progress = 100
self.cluster.nodes[0].status = 'deploying' self.cluster.nodes[0].status = consts.NODE_STATUSES.deploying
self.cluster.nodes[0].progress = 24 self.cluster.nodes[0].progress = 24
task = Task(name='deploy', cluster=self.cluster, status='ready') task = Task(name=consts.TASK_NAMES.deploy,
cluster=self.cluster,
status=consts.TASK_STATUSES.ready)
self.db.add(task) self.db.add(task)
self.db.flush() self.db.flush()
objects.Task._update_cluster_data(task) objects.Task._update_cluster_data(task)
self.db.flush() self.db.flush()
self.assertEquals(self.cluster.status, 'operational') self.assertEquals(self.cluster.status,
consts.CLUSTER_STATUSES.operational)
for node in self.cluster.nodes: for node in self.cluster.nodes:
self.assertEquals(node.status, 'ready') self.assertEquals(node.status, consts.NODE_STATUSES.ready)
self.assertEquals(node.progress, 100) self.assertEquals(node.progress, 100)
def test_update_cluster_status_if_task_was_already_in_error_status(self): def test_update_cluster_status_if_task_was_already_in_error_status(self):
for node in self.cluster.nodes: for node in self.cluster.nodes:
node.status = 'provisioning' node.status = consts.NODE_STATUSES.provisioning
node.progress = 12 node.progress = 12
task = Task(name='provision', cluster=self.cluster, status='error') task = Task(name=consts.TASK_NAMES.provision,
cluster=self.cluster,
status=consts.TASK_STATUSES.error)
self.db.add(task) self.db.add(task)
self.db.flush() self.db.flush()
data = {'status': 'error', 'progress': 100} data = {'status': consts.TASK_STATUSES.error, 'progress': 100}
objects.Task.update(task, data) objects.Task.update(task, data)
self.db.flush() self.db.flush()
self.assertEquals(self.cluster.status, 'error') self.assertEquals(self.cluster.status, consts.CLUSTER_STATUSES.error)
self.assertEquals(task.status, 'error') self.assertEquals(task.status, consts.TASK_STATUSES.error)
for node in self.cluster.nodes: for node in self.cluster.nodes:
self.assertEquals(node.status, 'error') self.assertEquals(node.status, consts.NODE_STATUSES.error)
self.assertEquals(node.progress, 0) self.assertEquals(node.progress, 0)
def test_do_not_set_cluster_to_error_if_validation_failed(self): def test_do_not_set_cluster_to_error_if_validation_failed(self):
for task_name in ['check_before_deployment', 'check_networks']: for task_name in [consts.TASK_NAMES.check_before_deployment,
consts.TASK_NAMES.check_networks]:
supertask = Task( supertask = Task(
name='deploy', name=consts.TASK_NAMES.deploy,
cluster=self.cluster, cluster=self.cluster,
status='error') status=consts.TASK_STATUSES.error)
check_task = Task( check_task = Task(
name=task_name, name=task_name,
cluster=self.cluster, cluster=self.cluster,
status='error') status=consts.TASK_STATUSES.error)
supertask.subtasks.append(check_task) supertask.subtasks.append(check_task)
self.db.add(check_task) self.db.add(check_task)
@ -743,10 +773,10 @@ class TestTaskObject(BaseIntegrationTest):
objects.Task._update_cluster_data(supertask) objects.Task._update_cluster_data(supertask)
self.db.flush() self.db.flush()
self.assertEquals(self.cluster.status, 'new') self.assertEquals(self.cluster.status, consts.CLUSTER_STATUSES.new)
def test_get_task_by_uuid_returns_task(self): def test_get_task_by_uuid_returns_task(self):
task = Task(name='deploy') task = Task(name=consts.TASK_NAMES.deploy)
self.db.add(task) self.db.add(task)
self.db.flush() self.db.flush()
task_by_uuid = objects.Task.get_by_uuid(task.uuid) task_by_uuid = objects.Task.get_by_uuid(task.uuid)
@ -759,7 +789,7 @@ class TestTaskObject(BaseIntegrationTest):
fail_if_not_found=True) fail_if_not_found=True)
def test_task_wrong_status_filtered(self): def test_task_wrong_status_filtered(self):
task = Task(name='deploy') task = Task(name=consts.TASK_NAMES.deploy)
self.db.add(task) self.db.add(task)
self.db.flush() self.db.flush()
@ -808,7 +838,7 @@ class TestActionLogObject(BaseIntegrationTest):
instance_to_validate = jsonutils.loads(objects.ActionLog.to_json(al)) instance_to_validate = jsonutils.loads(objects.ActionLog.to_json(al))
self.assertNotRaises(jsonschema.ValidationError, jsonschema.validate, self.assertNotRaises(jsonschema.ValidationError, jsonschema.validate,
instance_to_validate, objects.ActionLog.schema) instance_to_validate, action_log.schema)
def test_validate_json_schema_failure(self): def test_validate_json_schema_failure(self):
object_data = { object_data = {
@ -825,7 +855,7 @@ class TestActionLogObject(BaseIntegrationTest):
instance_to_validate = jsonutils.loads(objects.ActionLog.to_json(al)) instance_to_validate = jsonutils.loads(objects.ActionLog.to_json(al))
self.assertRaises(jsonschema.ValidationError, jsonschema.validate, self.assertRaises(jsonschema.ValidationError, jsonschema.validate,
instance_to_validate, objects.ActionLog.schema) instance_to_validate, action_log.schema)
def test_get_by_uuid_method(self): def test_get_by_uuid_method(self):
object_data = { object_data = {
@ -899,7 +929,7 @@ class TestClusterObject(BaseTestCase):
def _get_network_role_metadata(self, **kwargs): def _get_network_role_metadata(self, **kwargs):
network_role = { network_role = {
'id': 'test_network_role', 'id': 'test_network_role',
'default_mapping': 'public', 'default_mapping': consts.NETWORKS.public,
'properties': { 'properties': {
'subnet': True, 'subnet': True,
'gateway': False, 'gateway': False,