Updating database models to fix timestamp errors

* Updated DB models to use the sqlalchemy models TimestampMixin
  instead of our own version, as our version had a bug where the
  created_at timestamp was the same for all created clusters.
* In the sqlalchemy models TimestampMixin, updated_at is an
  optional field; therefore, changes to the table definitions were
  required.
* Updated API to reflect optional fields (e.g. updated_at)
* Added a test to verify that created_at, updated_at and deleted_at
  times are in the correct order for the corresponding changes
  (e.g. create, update and delete).

closes-bug: 1428896
Change-Id: I0047a197e48cbc017d7f750da797967108da423c
This commit is contained in:
dagnello
2015-03-25 14:59:10 -07:00
committed by Abitha Palaniappan
parent 6564f5ecb2
commit d5cb1c6276
7 changed files with 89 additions and 41 deletions

View File

@@ -74,7 +74,10 @@ class Cluster(base.APIBase):
# only add fields we expose in the api
if hasattr(self, k):
self.fields.append(k)
setattr(self, k, kwargs.get(k, wtypes.Unset))
if kwargs.get(k) is None:
setattr(self, k, wtypes.Unset)
else:
setattr(self, k, kwargs.get(k))
id = wtypes.text
"UUID of cluster"

View File

@@ -45,7 +45,7 @@ def upgrade():
sa.Column('volume_size', sa.Integer(), nullable=True),
sa.Column('deleted', sa.Boolean(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
@@ -57,7 +57,7 @@ def upgrade():
sa.Column('status', sa.String(length=50), nullable=False),
sa.Column('deleted', sa.Boolean(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['cluster_id'], ['clusters.id'], ),
sa.PrimaryKeyConstraint('id')

View File

@@ -87,24 +87,10 @@ def model_query(context, model, *args, **kwargs):
return query
def capture_timestamp(record_values):
"""Captures required timestamp in for provided record dictionary.
This helper function should be used when a cluster/node record is being
updated or deleted. In either case it will update the appropriate
timestamp based on the value of the status key. If the status key/value
pair is not provided, the 'updated_at' timestamp is added/updated.
:param record_values: dictionary with record values for update or save
:return: record_values dict with appropriate timestamp captured.
"""
if ('status' in record_values) and (
record_values['status'] == models.Status.DELETED):
def soft_delete(record_values):
"""Mark this object as deleted."""
record_values['deleted'] = True
record_values['deleted_at'] = timeutils.utcnow()
else:
record_values['updated_at'] = timeutils.utcnow()
return record_values
class Connection(api.Connection):
@@ -149,7 +135,10 @@ class Connection(api.Connection):
cluster_query = (model_query(context, models.Cluster)
.filter_by(id=cluster_id))
cluster_values = capture_timestamp(cluster_values)
# if status is set to deleted, soft delete this cluster record
if ('status' in cluster_values) and (
cluster_values['status'] == models.Status.DELETED):
soft_delete(cluster_values)
cluster_query.update(cluster_values)
@@ -181,7 +170,10 @@ class Connection(api.Connection):
def update_node(self, context, node_values, node_id):
node_query = (model_query(context, models.Node).filter_by(id=node_id))
node_values = capture_timestamp(node_values)
# if status is set to deleted, soft delete this node record
if ('status' in node_values) and (
node_values['status'] == models.Status.DELETED):
soft_delete(node_values)
node_query.update(node_values)
@@ -220,8 +212,6 @@ class Connection(api.Connection):
def update_cluster_deleting(self, context, cluster_id):
values = {'status': models.Status.DELETING}
values = capture_timestamp(values)
cluster_query = (model_query(context, models.Cluster)
.filter_by(id=cluster_id))

View File

@@ -18,7 +18,6 @@ from cue.db.sqlalchemy import types
import uuid
from oslo.db.sqlalchemy import models
from oslo.utils import timeutils
import sqlalchemy as sa
from sqlalchemy.ext import declarative
@@ -48,13 +47,9 @@ class ProjectMixin(object):
"""Project mixin, add to subclasses that have a project."""
project_id = sa.Column(sa.String(36))
class TimeMixin(object):
created_at = sa.Column('created_at', sa.DateTime(),
default=timeutils.utcnow(), nullable=False)
updated_at = sa.Column('updated_at', sa.DateTime(),
default=timeutils.utcnow(), nullable=False)
deleted_at = sa.Column('deleted_at', sa.DateTime(),
nullable=True)
BASE = declarative.declarative_base(cls=CueBase)
class SoftDeleteMixin(object):
deleted_at = sa.Column(sa.DateTime)
deleted = sa.Column(sa.Boolean, default=0)

View File

@@ -17,6 +17,7 @@
from cue.db.sqlalchemy import base
from cue.db.sqlalchemy import types
from oslo.db.sqlalchemy import models
import sqlalchemy as sa
@@ -40,7 +41,8 @@ class Endpoint(base.BASE, base.IdMixin):
sa.Index("endpoints_nodes_id_idx", "node_id", unique=False)
class Node(base.BASE, base.IdMixin, base.TimeMixin):
class Node(base.BASE, base.IdMixin, models.TimestampMixin,
base.SoftDeleteMixin):
__tablename__ = 'nodes'
cluster_id = sa.Column(
@@ -49,12 +51,12 @@ class Node(base.BASE, base.IdMixin, base.TimeMixin):
flavor = sa.Column(sa.String(36), nullable=False)
instance_id = sa.Column(sa.String(36), nullable=True)
status = sa.Column(sa.String(50), nullable=False)
deleted = sa.Column(sa.Boolean(), default=False, nullable=False)
sa.Index("nodes_id_idx", "id", unique=True)
sa.Index("nodes_cluster_id_idx", "cluster_id", unique=False)
class Cluster(base.BASE, base.IdMixin, base.TimeMixin):
class Cluster(base.BASE, base.IdMixin, models.TimestampMixin,
base.SoftDeleteMixin):
__tablename__ = 'clusters'
project_id = sa.Column(sa.String(36), nullable=False)
@@ -64,5 +66,4 @@ class Cluster(base.BASE, base.IdMixin, base.TimeMixin):
flavor = sa.Column(sa.String(50), nullable=False)
size = sa.Column(sa.Integer(), default=1, nullable=False)
volume_size = sa.Column(sa.Integer(), nullable=True)
deleted = sa.Column(sa.Boolean(), default=False, nullable=False)
sa.Index("clusters_cluster_id_idx", "cluster_id", unique=True)

View File

@@ -67,9 +67,10 @@ class ClusterValidationMixin(object):
self.assertEqual(unicode(cluster_ref.created_at.isoformat()),
cluster_cmp["created_at"],
"Invalid cluster created_at value")
self.assertEqual(unicode(cluster_ref.updated_at.isoformat()),
cluster_cmp["updated_at"],
"Invalid cluster updated_at value")
if cluster_ref.updated_at:
self.assertEqual(unicode(cluster_ref.updated_at.isoformat()),
cluster_cmp["updated_at"],
"Invalid cluster updated_at value")
def validate_endpoint_values(self, endpoints_ref, endpoints_cmp):
self.assertEqual(len(endpoints_ref), len(endpoints_cmp),

View File

@@ -14,6 +14,8 @@
"""
Tests for the API /clusters/ controller methods.
"""
from cue.db.sqlalchemy import api as db_api
from cue.db.sqlalchemy import models
from cue import objects
from cue.tests import api
@@ -60,6 +62,7 @@ class TestListClusters(api.FunctionalTest,
# verify number of clusters received
self.assertEqual(len(data), num_of_clusters,
"Invalid number of clusters returned")
for i in range(num_of_clusters):
# verify cluster
self.validate_cluster_values(clusters[i], data[i])
@@ -192,6 +195,61 @@ class TestCreateCluster(api.FunctionalTest,
data.namespace["faultstring"],
'Invalid faultstring received.')
def test_create_two_clusters_verify_time_stamps(self):
"""test time stamps times at creation and delete."""
api_cluster_1 = test_utils.create_api_test_cluster()
api_cluster_2 = test_utils.create_api_test_cluster()
# Create two clusters
data_1 = self.post_json('/clusters', params=api_cluster_1.as_dict(),
headers=self.auth_headers, status=202)
data_2 = self.post_json('/clusters', params=api_cluster_2.as_dict(),
headers=self.auth_headers, status=202)
# retrieve cluster objects
cluster_1 = objects.Cluster.get_cluster_by_id(self.context,
data_1.json["id"])
cluster_2 = objects.Cluster.get_cluster_by_id(self.context,
data_2.json["id"])
# verify second cluster was created after first by created_at time
self.assertEqual(True, cluster_2.created_at > cluster_1.created_at,
"Second cluster was not created after first")
cluster_1_created_at = cluster_1.created_at
# issue delete request cluster for cluster_1
self.delete('/clusters/' + data_1.json["id"],
headers=self.auth_headers)
# retrieve cluster_1
cluster_1 = objects.Cluster.get_cluster_by_id(self.context,
data_1.json["id"])
# verify updated_at time is after created_at
self.assertEqual(True, cluster_1.updated_at > cluster_1.created_at,
"Cluster updated at time is invalid")
# verify created_at time did not change
self.assertEqual(cluster_1_created_at, cluster_1.created_at,
"Cluster created_at time has changed")
# delete cluster_1
cluster = objects.Cluster(deleted=True, status=models.Status.DELETED)
cluster.update(self.context, data_1.json["id"])
# retrieve deleted (soft) cluster
cluster_query = db_api.model_query(self.context, models.Cluster,
read_deleted=True).filter_by(
id=data_1.json["id"])
cluster_1 = cluster_query.one()
# verify deleted_at time is after created_at
self.assertEqual(True, cluster_1.deleted_at > cluster_1.created_at,
"Cluster deleted_at time is invalid")
# verify updated_at time is after deleted_at
self.assertEqual(True, cluster_1.updated_at > cluster_1.deleted_at,
"Cluster deleted_at time is invalid")
def test_create_invalid_volume_size(self):
"""test with invalid volume_size parameter."""