Revision diffing API.
This commit implements revision diffing API and the required back-end logic needed to realize it. Included in this commit: - implementation of revision diffing - unskip all revision diff functional tests - add additional functional tests for revision diffing - relevant unit tests - document comparison is performed using hashing as opposed to more inefficient, direct comparisons Change-Id: I0419ee9b8cf3fca6fe75818615d2338dc00b1003
This commit is contained in:
parent
af0bfd813d
commit
ef4f65037d
@ -20,6 +20,7 @@ from oslo_log import log as logging
|
||||
|
||||
from deckhand.control import base
|
||||
from deckhand.control import buckets
|
||||
from deckhand.control import revision_diffing
|
||||
from deckhand.control import revision_documents
|
||||
from deckhand.control import revision_tags
|
||||
from deckhand.control import revisions
|
||||
@ -61,6 +62,8 @@ def start_api(state_manager=None):
|
||||
('bucket/{bucket_name}/documents', buckets.BucketsResource()),
|
||||
('revisions', revisions.RevisionsResource()),
|
||||
('revisions/{revision_id}', revisions.RevisionsResource()),
|
||||
('revisions/{revision_id}/diff/{comparison_revision_id}',
|
||||
revision_diffing.RevisionDiffingResource()),
|
||||
('revisions/{revision_id}/documents',
|
||||
revision_documents.RevisionDocumentsResource()),
|
||||
('revisions/{revision_id}/tags', revision_tags.RevisionTagsResource()),
|
||||
|
39
deckhand/control/revision_diffing.py
Normal file
39
deckhand/control/revision_diffing.py
Normal file
@ -0,0 +1,39 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import falcon
|
||||
|
||||
from deckhand.control import base as api_base
|
||||
from deckhand.db.sqlalchemy import api as db_api
|
||||
from deckhand import errors
|
||||
|
||||
|
||||
class RevisionDiffingResource(api_base.BaseResource):
|
||||
"""API resource for realizing revision diffing."""
|
||||
|
||||
def on_get(self, req, resp, revision_id, comparison_revision_id):
|
||||
if revision_id == '0':
|
||||
revision_id = 0
|
||||
if comparison_revision_id == '0':
|
||||
comparison_revision_id = 0
|
||||
|
||||
try:
|
||||
resp_body = db_api.revision_diff(
|
||||
revision_id, comparison_revision_id)
|
||||
except (errors.RevisionNotFound) as e:
|
||||
raise falcon.HTTPNotFound(description=e.format_message())
|
||||
|
||||
resp.status = falcon.HTTP_200
|
||||
resp.append_header('Content-Type', 'application/x-yaml')
|
||||
resp.body = self.to_yaml_body(resp_body)
|
@ -42,6 +42,7 @@ class ViewBuilder(common.ViewBuilder):
|
||||
attrs = ['id', 'metadata', 'data', 'schema']
|
||||
|
||||
for document in documents:
|
||||
# Never return deleted documents to the user.
|
||||
if document['deleted']:
|
||||
continue
|
||||
|
||||
@ -51,4 +52,16 @@ class ViewBuilder(common.ViewBuilder):
|
||||
resp_obj['status']['revision'] = document['revision_id']
|
||||
resp_list.append(resp_obj)
|
||||
|
||||
# In the case where no documents are passed to PUT
|
||||
# buckets/{{bucket_name}}/documents, we need to mangle the response
|
||||
# body a bit. The revision_id and buckete_id should be returned, as
|
||||
# at the very least the revision_id will be needed by the user.
|
||||
if not resp_list and documents:
|
||||
resp_obj = {}
|
||||
resp_obj.setdefault('status', {})
|
||||
resp_obj['status']['bucket'] = documents[0]['bucket_id']
|
||||
resp_obj['status']['revision'] = documents[0]['revision_id']
|
||||
|
||||
resp_list.append(resp_obj)
|
||||
|
||||
return resp_list
|
||||
|
@ -136,6 +136,7 @@ def documents_create(bucket_name, documents, session=None):
|
||||
doc['name'] = d[1]
|
||||
doc['data'] = {}
|
||||
doc['_metadata'] = {}
|
||||
doc['hash'] = utils.make_hash({})
|
||||
doc['bucket_id'] = bucket['id']
|
||||
doc['revision_id'] = revision['id']
|
||||
|
||||
@ -168,16 +169,8 @@ def _documents_create(bucket_name, values_list, session=None):
|
||||
values_list = copy.deepcopy(values_list)
|
||||
session = session or get_session()
|
||||
filters = ('name', 'schema')
|
||||
|
||||
changed_documents = []
|
||||
|
||||
def _document_changed(existing_document):
|
||||
# The document has changed if at least one value in ``values`` differs.
|
||||
for key, val in values.items():
|
||||
if val != existing_document[key]:
|
||||
return True
|
||||
return False
|
||||
|
||||
def _document_create(values):
|
||||
document = models.Document()
|
||||
with session.begin():
|
||||
@ -188,6 +181,11 @@ def _documents_create(bucket_name, values_list, session=None):
|
||||
values['_metadata'] = values.pop('metadata')
|
||||
values['name'] = values['_metadata']['name']
|
||||
values['is_secret'] = 'secret' in values['data']
|
||||
# Hash the combination of the document's metadata and data to later
|
||||
# efficiently check whether those data have changed.
|
||||
dict_to_hash = values['_metadata'].copy()
|
||||
dict_to_hash.update(values['data'])
|
||||
values['hash'] = utils.make_hash(dict_to_hash)
|
||||
|
||||
try:
|
||||
existing_document = document_get(
|
||||
@ -208,7 +206,7 @@ def _documents_create(bucket_name, values_list, session=None):
|
||||
name=existing_document['name'],
|
||||
bucket=existing_document['bucket_name'])
|
||||
|
||||
if not _document_changed(existing_document):
|
||||
if existing_document['hash'] == values['hash']:
|
||||
# Since the document has not changed, reference the original
|
||||
# revision in which it was created. This is necessary so that
|
||||
# the correct revision history is maintained.
|
||||
@ -325,11 +323,13 @@ def revision_get(revision_id, session=None):
|
||||
|
||||
def require_revision_exists(f):
|
||||
"""Decorator to require the specified revision to exist.
|
||||
Requires the wrapped function to use revision_id as the first argument.
|
||||
"""
|
||||
|
||||
Requires the wrapped function to use revision_id as the first argument. If
|
||||
revision_id is not provided, then the check is not performed.
|
||||
"""
|
||||
@functools.wraps(f)
|
||||
def wrapper(revision_id, *args, **kwargs):
|
||||
def wrapper(revision_id=None, *args, **kwargs):
|
||||
if revision_id:
|
||||
revision_get(revision_id)
|
||||
return f(revision_id, *args, **kwargs)
|
||||
return wrapper
|
||||
@ -363,6 +363,7 @@ def revision_delete_all(session=None):
|
||||
.delete(synchronize_session=False)
|
||||
|
||||
|
||||
@require_revision_exists
|
||||
def revision_get_documents(revision_id=None, include_history=True,
|
||||
unique_only=True, session=None, **filters):
|
||||
"""Return the documents that match filters for the specified `revision_id`.
|
||||
@ -478,6 +479,146 @@ def _filter_revision_documents(documents, unique_only, **filters):
|
||||
return sorted(filtered_documents.values(), key=lambda d: d['created_at'])
|
||||
|
||||
|
||||
# NOTE(fmontei): No need to include `@require_revision_exists` decorator as
|
||||
# the this function immediately calls `revision_get_documents` for both
|
||||
# revision IDs, which has the decorator applied to it.
|
||||
def revision_diff(revision_id, comparison_revision_id):
|
||||
"""Generate the diff between two revisions.
|
||||
|
||||
Generate the diff between the two revisions: `revision_id` and
|
||||
`comparison_revision_id`. A basic comparison of the revisions in terms of
|
||||
how the buckets involved have changed is generated. Only buckets with
|
||||
existing documents in either of the two revisions in question will be
|
||||
reported.
|
||||
|
||||
The ordering of the two revision IDs is interchangeable, i.e. no matter
|
||||
the order, the same result is generated.
|
||||
|
||||
The differences include:
|
||||
|
||||
- "created": A bucket has been created between the revisions.
|
||||
- "deleted": A bucket has been deleted between the revisions.
|
||||
- "modified": A bucket has been modified between the revisions.
|
||||
- "unmodified": A bucket remains unmodified between the revisions.
|
||||
|
||||
:param revision_id: ID of the first revision.
|
||||
:param comparison_revision_id: ID of the second revision.
|
||||
:returns: A dictionary, keyed with the bucket IDs, containing any of the
|
||||
differences enumerated above.
|
||||
|
||||
Examples::
|
||||
|
||||
# GET /api/v1.0/revisions/6/diff/3
|
||||
bucket_a: created
|
||||
bucket_b: deleted
|
||||
bucket_c: modified
|
||||
bucket_d: unmodified
|
||||
|
||||
# GET /api/v1.0/revisions/0/diff/6
|
||||
bucket_a: created
|
||||
bucket_c: created
|
||||
bucket_d: created
|
||||
|
||||
# GET /api/v1.0/revisions/6/diff/6
|
||||
bucket_a: unmodified
|
||||
bucket_c: unmodified
|
||||
bucket_d: unmodified
|
||||
|
||||
# GET /api/v1.0/revisions/0/diff/0
|
||||
{}
|
||||
"""
|
||||
# Retrieve document history for each revision. Since `revision_id` of 0
|
||||
# doesn't exist, treat it as a special case: empty list.
|
||||
docs = (revision_get_documents(revision_id,
|
||||
include_history=True,
|
||||
unique_only=False)
|
||||
if revision_id != 0 else [])
|
||||
comparison_docs = (revision_get_documents(comparison_revision_id,
|
||||
include_history=True,
|
||||
unique_only=False)
|
||||
if comparison_revision_id != 0 else [])
|
||||
|
||||
# Remove each deleted document and its older counterparts because those
|
||||
# documents technically don't exist.
|
||||
for doc_collection in (docs, comparison_docs):
|
||||
for doc in copy.copy(doc_collection):
|
||||
if doc['deleted']:
|
||||
docs_to_delete = filter(
|
||||
lambda d:
|
||||
(d['schema'], d['name']) ==
|
||||
(doc['schema'], doc['name'])
|
||||
and d['created_at'] <= doc['deleted_at'],
|
||||
doc_collection)
|
||||
for d in list(docs_to_delete):
|
||||
doc_collection.remove(d)
|
||||
|
||||
revision = revision_get(revision_id) if revision_id != 0 else None
|
||||
comparison_revision = (revision_get(comparison_revision_id)
|
||||
if comparison_revision_id != 0 else None)
|
||||
|
||||
# Each dictionary below, keyed with the bucket's name, references the list
|
||||
# of documents related to each bucket.
|
||||
buckets = {}
|
||||
comparison_buckets = {}
|
||||
for doc in docs:
|
||||
buckets.setdefault(doc['bucket_name'], [])
|
||||
buckets[doc['bucket_name']].append(doc)
|
||||
for doc in comparison_docs:
|
||||
comparison_buckets.setdefault(doc['bucket_name'], [])
|
||||
comparison_buckets[doc['bucket_name']].append(doc)
|
||||
|
||||
# `shared_buckets` references buckets shared by both `revision_id` and
|
||||
# `comparison_revision_id` -- i.e. their intersection.
|
||||
shared_buckets = set(buckets.keys()).intersection(
|
||||
comparison_buckets.keys())
|
||||
# `unshared_buckets` references buckets not shared by both `revision_id`
|
||||
# and `comparison_revision_id` -- i.e. their non-intersection.
|
||||
unshared_buckets = set(buckets.keys()).union(
|
||||
comparison_buckets.keys()) - shared_buckets
|
||||
|
||||
result = {}
|
||||
|
||||
def _compare_buckets(b1, b2):
|
||||
# Checks whether buckets' documents are identical.
|
||||
return (sorted([d['hash'] for d in b1]) ==
|
||||
sorted([d['hash'] for d in b2]))
|
||||
|
||||
# If the list of documents for each bucket is indentical, then the result
|
||||
# is "unmodified", else "modified".
|
||||
for bucket_name in shared_buckets:
|
||||
unmodified = _compare_buckets(buckets[bucket_name],
|
||||
comparison_buckets[bucket_name])
|
||||
result[bucket_name] = 'unmodified' if unmodified else 'modified'
|
||||
|
||||
for bucket_name in unshared_buckets:
|
||||
# If neither revision has documents, then there's nothing to compare.
|
||||
# This is always True for revision_id == comparison_revision_id == 0.
|
||||
if not any([revision, comparison_revision]):
|
||||
break
|
||||
# Else if one revision == 0 and the other revision != 0, then the
|
||||
# bucket has been created. Which is zero or non-zero doesn't matter.
|
||||
elif not all([revision, comparison_revision]):
|
||||
result[bucket_name] = 'created'
|
||||
# Else if `revision` is newer than `comparison_revision`, then if the
|
||||
# `bucket_name` isn't in the `revision` buckets, then it has been
|
||||
# deleted. Otherwise it has been created.
|
||||
elif revision['created_at'] > comparison_revision['created_at']:
|
||||
if bucket_name not in buckets:
|
||||
result[bucket_name] = 'deleted'
|
||||
elif bucket_name not in comparison_buckets:
|
||||
result[bucket_name] = 'created'
|
||||
# Else if `comparison_revision` is newer than `revision`, then if the
|
||||
# `bucket_name` isn't in the `revision` buckets, then it has been
|
||||
# created. Otherwise it has been deleted.
|
||||
else:
|
||||
if bucket_name not in buckets:
|
||||
result[bucket_name] = 'created'
|
||||
elif bucket_name not in comparison_buckets:
|
||||
result[bucket_name] = 'deleted'
|
||||
|
||||
return result
|
||||
|
||||
|
||||
####################
|
||||
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
||||
from oslo_db.sqlalchemy import models
|
||||
from oslo_db.sqlalchemy import types as oslo_types
|
||||
from oslo_utils import timeutils
|
||||
from sqlalchemy import BigInteger
|
||||
from sqlalchemy import Boolean
|
||||
from sqlalchemy import Column
|
||||
from sqlalchemy import DateTime
|
||||
@ -140,6 +141,7 @@ class Document(BASE, DeckhandBase):
|
||||
# "metadata" is reserved, so use "_metadata" instead.
|
||||
_metadata = Column(oslo_types.JsonEncodedDict(), nullable=False)
|
||||
data = Column(oslo_types.JsonEncodedDict(), nullable=True)
|
||||
hash = Column(BigInteger, nullable=False)
|
||||
is_secret = Column(Boolean, nullable=False, default=False)
|
||||
bucket_id = Column(Integer, ForeignKey('buckets.id', ondelete='CASCADE'),
|
||||
nullable=False)
|
||||
|
@ -6,12 +6,14 @@
|
||||
# - Each of the 4 initial document PUTs vs null
|
||||
# - null vs itself
|
||||
# 4. Modify bucket c
|
||||
# 5. Delete bucket b
|
||||
# 6. Create bucket f
|
||||
# 7. Remove bucket f
|
||||
# 8. Create bucket e
|
||||
# 9. Verify diff between create_d and create_e
|
||||
# 10. Verify diff of final state with null
|
||||
# 5. Verify diff between create_d and update_c
|
||||
# 6. Delete bucket b
|
||||
# 7. Create bucket "mistake"
|
||||
# 8. Remove bucket "mistake"
|
||||
# 9. Verify diff between creating and deleting bucket "mistake"
|
||||
# 10. Create bucket e
|
||||
# 11. Verify diff between create_d and create_e
|
||||
# 12. Verify diff of final state with null
|
||||
|
||||
defaults:
|
||||
request_headers:
|
||||
@ -100,7 +102,6 @@ tests:
|
||||
response_multidoc_jsonpaths:
|
||||
$.[0]:
|
||||
bucket_a: created
|
||||
skip: Not implemented.
|
||||
|
||||
- name: verify_null_second_revision
|
||||
desc: Validates response for null diff for second revision
|
||||
@ -110,7 +111,6 @@ tests:
|
||||
$.[0]:
|
||||
bucket_a: created
|
||||
bucket_b: created
|
||||
skip: Not implemented.
|
||||
|
||||
- name: verify_null_third_revision
|
||||
desc: Validates response for null diff for third revision
|
||||
@ -121,7 +121,6 @@ tests:
|
||||
bucket_a: created
|
||||
bucket_b: created
|
||||
bucket_c: created
|
||||
skip: Not implemented.
|
||||
|
||||
- name: verify_null_fourth_revision
|
||||
desc: Validates response for null diff for fourth revision
|
||||
@ -133,7 +132,6 @@ tests:
|
||||
bucket_b: created
|
||||
bucket_c: created
|
||||
bucket_d: created
|
||||
skip: Not implemented.
|
||||
|
||||
- name: verify_null_self
|
||||
desc: Validates response for null diff for fourth revision
|
||||
@ -141,7 +139,6 @@ tests:
|
||||
status: 200
|
||||
response_multidoc_jsonpaths:
|
||||
$.[0]: {}
|
||||
skip: Not implemented.
|
||||
|
||||
- name: update_c
|
||||
desc: Update document in bucket c
|
||||
@ -159,39 +156,57 @@ tests:
|
||||
data:
|
||||
new_value: 7
|
||||
...
|
||||
skip: Not implemented.
|
||||
|
||||
- name: verify_diff_between_initial_4_buckets_and_single_update
|
||||
desc: Validates response for null diff between the first 4 buckets and single update
|
||||
GET: /api/v1.0/revisions/$HISTORY['create_d'].$RESPONSE['$.[0].status.revision']/diff/$HISTORY['update_c'].$RESPONSE['$.[0].status.revision']
|
||||
status: 200
|
||||
response_multidoc_jsonpaths:
|
||||
$.[0]:
|
||||
bucket_a: unmodified
|
||||
bucket_b: unmodified
|
||||
bucket_c: modified
|
||||
bucket_d: unmodified
|
||||
|
||||
- name: delete_b
|
||||
desc: Delete documents from bucket b
|
||||
PUT: /api/v1.0/bucket/bucket_b/documents
|
||||
status: 200
|
||||
data: ""
|
||||
skip: Not implemented.
|
||||
|
||||
- name: create_f
|
||||
desc: Create documents in bucket f
|
||||
PUT: /api/v1.0/bucket/bucket_e/documents
|
||||
- name: create_mistake
|
||||
desc: Create documents in bucket mistake
|
||||
PUT: /api/v1.0/bucket/bucket_mistake/documents
|
||||
status: 200
|
||||
data: |-
|
||||
---
|
||||
schema: example/Kind/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: doc-f
|
||||
name: doc-m
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
data:
|
||||
value: 5
|
||||
value: mistake
|
||||
...
|
||||
skip: Not implemented.
|
||||
|
||||
- name: delete_f
|
||||
desc: Delete documents from bucket b
|
||||
PUT: /api/v1.0/bucket/bucket_f/documents
|
||||
- name: delete_mistake
|
||||
desc: Delete documents from bucket mistake
|
||||
PUT: /api/v1.0/bucket/bucket_mistake/documents
|
||||
status: 200
|
||||
data: ""
|
||||
skip: Not implemented.
|
||||
|
||||
- name: verify_diff_between_created_and_deleted_mistake
|
||||
desc: Validates response for deletion between the last 2 revisions
|
||||
GET: /api/v1.0/revisions/$HISTORY['create_mistake'].$RESPONSE['$.[0].status.revision']/diff/$HISTORY['delete_mistake'].$RESPONSE['$.[0].status.revision']
|
||||
status: 200
|
||||
response_multidoc_jsonpaths:
|
||||
$.[0]:
|
||||
bucket_a: unmodified
|
||||
bucket_c: unmodified
|
||||
bucket_d: unmodified
|
||||
bucket_mistake: deleted
|
||||
|
||||
- name: create_e
|
||||
desc: Create documents in bucket e
|
||||
@ -209,7 +224,6 @@ tests:
|
||||
data:
|
||||
value: 6
|
||||
...
|
||||
skip: Not implemented.
|
||||
|
||||
- name: verify_diff_between_initial_4_buckets_and_present
|
||||
desc: Validates response for null diff between the first 4 buckets and now
|
||||
@ -222,4 +236,14 @@ tests:
|
||||
bucket_c: modified
|
||||
bucket_d: unmodified
|
||||
bucket_e: created
|
||||
skip: Not implemented.
|
||||
|
||||
- name: verify_diff_between_null_and_present
|
||||
desc: Validates response for null diff and now
|
||||
GET: /api/v1.0/revisions/0/diff/$HISTORY['create_e'].$RESPONSE['$.[0].status.revision']
|
||||
status: 200
|
||||
response_multidoc_jsonpaths:
|
||||
$.[0]:
|
||||
bucket_a: created
|
||||
bucket_c: created
|
||||
bucket_d: created
|
||||
bucket_e: created
|
||||
|
@ -17,6 +17,7 @@ import mock
|
||||
from deckhand.control import api
|
||||
from deckhand.control import base
|
||||
from deckhand.control import buckets
|
||||
from deckhand.control import revision_diffing
|
||||
from deckhand.control import revision_documents
|
||||
from deckhand.control import revision_tags
|
||||
from deckhand.control import revisions
|
||||
@ -27,8 +28,8 @@ class TestApi(test_base.DeckhandTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestApi, self).setUp()
|
||||
for resource in (buckets, revision_documents, revision_tags,
|
||||
revisions):
|
||||
for resource in (buckets, revision_diffing, revision_documents,
|
||||
revision_tags, revisions):
|
||||
resource_name = resource.__name__.split('.')[-1]
|
||||
resource_obj = mock.patch.object(
|
||||
resource, '%sResource' % resource_name.title().replace(
|
||||
@ -54,6 +55,9 @@ class TestApi(test_base.DeckhandTestCase):
|
||||
mock.call('/api/v1.0/revisions', self.revisions_resource()),
|
||||
mock.call('/api/v1.0/revisions/{revision_id}',
|
||||
self.revisions_resource()),
|
||||
mock.call('/api/v1.0/revisions/{revision_id}/diff/'
|
||||
'{comparison_revision_id}',
|
||||
self.revision_diffing_resource()),
|
||||
mock.call('/api/v1.0/revisions/{revision_id}/documents',
|
||||
self.revision_documents_resource()),
|
||||
mock.call('/api/v1.0/revisions/{revision_id}/tags',
|
||||
|
@ -20,7 +20,8 @@ from deckhand.tests.unit import base
|
||||
|
||||
BASE_EXPECTED_FIELDS = ("created_at", "updated_at", "deleted_at", "deleted")
|
||||
DOCUMENT_EXPECTED_FIELDS = BASE_EXPECTED_FIELDS + (
|
||||
"id", "schema", "name", "metadata", "data", "revision_id", "bucket_id")
|
||||
"id", "schema", "name", "metadata", "data", "hash", "revision_id",
|
||||
"bucket_id")
|
||||
REVISION_EXPECTED_FIELDS = ("id", "documents", "tags")
|
||||
|
||||
|
||||
@ -54,7 +55,7 @@ class DocumentFixture(object):
|
||||
class TestDbBase(base.DeckhandWithDBTestCase):
|
||||
|
||||
def create_documents(self, bucket_name, documents,
|
||||
validation_policies=None, do_validation=True):
|
||||
validation_policies=None):
|
||||
if not validation_policies:
|
||||
validation_policies = []
|
||||
|
||||
@ -66,17 +67,11 @@ class TestDbBase(base.DeckhandWithDBTestCase):
|
||||
docs = db_api.documents_create(
|
||||
bucket_name, documents, validation_policies)
|
||||
|
||||
if do_validation:
|
||||
for idx, doc in enumerate(docs):
|
||||
self.validate_document(expected=documents[idx], actual=doc)
|
||||
self.assertEqual(bucket_name, doc['bucket_name'])
|
||||
|
||||
return docs
|
||||
|
||||
def show_document(self, do_validation=True, **fields):
|
||||
def show_document(self, **fields):
|
||||
doc = db_api.document_get(**fields)
|
||||
|
||||
if do_validation:
|
||||
self.validate_document(actual=doc)
|
||||
|
||||
return doc
|
||||
@ -126,12 +121,6 @@ class TestDbBase(base.DeckhandWithDBTestCase):
|
||||
for field in expected_fields:
|
||||
self.assertIn(field, actual)
|
||||
|
||||
if expected:
|
||||
# Validate that the expected values are equivalent to actual
|
||||
# values.
|
||||
for key, val in expected.items():
|
||||
self.assertEqual(val, actual[key])
|
||||
|
||||
def validate_revision(self, revision):
|
||||
self._validate_object(revision)
|
||||
|
||||
|
@ -95,6 +95,7 @@ class TestDocuments(base.TestDbBase):
|
||||
|
||||
documents = self.list_revision_documents(
|
||||
document['revision_id'], **filters)
|
||||
|
||||
self.assertEqual(1, len(documents))
|
||||
self.assertIsNone(documents[0].pop('orig_revision_id'))
|
||||
self.assertEqual(document, documents[0])
|
||||
@ -198,7 +199,7 @@ class TestDocuments(base.TestDbBase):
|
||||
bucket_name = test_utils.rand_name('bucket')
|
||||
self.create_documents(bucket_name, payload)
|
||||
|
||||
documents = self.create_documents(bucket_name, [], do_validation=False)
|
||||
documents = self.create_documents(bucket_name, [])
|
||||
self.assertEqual(1, len(documents))
|
||||
self.assertTrue(documents[0]['deleted'])
|
||||
self.assertTrue(documents[0]['deleted_at'])
|
||||
@ -215,7 +216,7 @@ class TestDocuments(base.TestDbBase):
|
||||
self.assertIsInstance(documents, list)
|
||||
self.assertEqual(3, len(documents))
|
||||
|
||||
documents = self.create_documents(bucket_name, [], do_validation=False)
|
||||
documents = self.create_documents(bucket_name, [])
|
||||
|
||||
for idx in range(3):
|
||||
self.assertTrue(documents[idx]['deleted'])
|
||||
@ -234,8 +235,7 @@ class TestDocuments(base.TestDbBase):
|
||||
|
||||
# Create the document in payload[0] but create a new document for
|
||||
# payload[1].
|
||||
documents = self.create_documents(bucket_name, payload[1],
|
||||
do_validation=False)
|
||||
documents = self.create_documents(bucket_name, payload[1])
|
||||
# Information about the deleted and created document should've been
|
||||
# returned. The 1st document is the deleted one and the 2nd document
|
||||
# is the created one.
|
||||
@ -261,8 +261,7 @@ class TestDocuments(base.TestDbBase):
|
||||
|
||||
# Create the document in payload[0] but create a new document for
|
||||
# payload[1].
|
||||
documents = self.create_documents(bucket_name, payload[0],
|
||||
do_validation=False)
|
||||
documents = self.create_documents(bucket_name, payload[0])
|
||||
# The first document will be first, followed by the two deleted docs.
|
||||
documents = sorted(documents, key=lambda d: d['deleted'])
|
||||
# Information about the deleted and created document should've been
|
||||
|
@ -44,7 +44,6 @@ class TestDocumentsNegative(base.TestDbBase):
|
||||
def test_delete_document_invalid_id(self):
|
||||
self.assertRaises(errors.DocumentNotFound,
|
||||
self.show_document,
|
||||
do_validation=False,
|
||||
id=test_utils.rand_uuid_hex())
|
||||
|
||||
def test_create_bucket_conflict(self):
|
||||
|
309
deckhand/tests/unit/db/test_revision_diffing.py
Normal file
309
deckhand/tests/unit/db/test_revision_diffing.py
Normal file
@ -0,0 +1,309 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import copy
|
||||
|
||||
from deckhand.db.sqlalchemy import api as db_api
|
||||
from deckhand.tests import test_utils
|
||||
from deckhand.tests.unit.db import base
|
||||
|
||||
|
||||
class TestRevisionDiffing(base.TestDbBase):
|
||||
|
||||
def _verify_buckets_status(self, revision_id, comparison_revision_id,
|
||||
expected):
|
||||
# Verify that actual and expected results match, despite the order of
|
||||
# `comparison_revision_id` and `revision_id` args.
|
||||
revision_ids = [revision_id, comparison_revision_id]
|
||||
for rev_ids in (revision_ids, reversed(revision_ids)):
|
||||
actual = db_api.revision_diff(*rev_ids)
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_revision_diff_null(self):
|
||||
self._verify_buckets_status(0, 0, {})
|
||||
|
||||
def test_revision_diff_created(self):
|
||||
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
|
||||
bucket_name = test_utils.rand_name('bucket')
|
||||
documents = self.create_documents(bucket_name, payload)
|
||||
revision_id = documents[0]['revision_id']
|
||||
|
||||
self._verify_buckets_status(
|
||||
0, revision_id, {bucket_name: 'created'})
|
||||
|
||||
def test_revision_diff_multi_bucket_created(self):
|
||||
revision_ids = []
|
||||
bucket_names = []
|
||||
|
||||
for _ in range(3):
|
||||
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
|
||||
bucket_name = test_utils.rand_name('bucket')
|
||||
bucket_names.append(bucket_name)
|
||||
documents = self.create_documents(bucket_name, payload)
|
||||
revision_id = documents[0]['revision_id']
|
||||
revision_ids.append(revision_id)
|
||||
|
||||
# Between revision 1 and 0, 1 bucket is created.
|
||||
self._verify_buckets_status(
|
||||
0, revision_ids[0], {b: 'created' for b in bucket_names[:1]})
|
||||
|
||||
# Between revision 2 and 0, 2 buckets are created.
|
||||
self._verify_buckets_status(
|
||||
0, revision_ids[1], {b: 'created' for b in bucket_names[:2]})
|
||||
|
||||
# Between revision 3 and 0, 3 buckets are created.
|
||||
self._verify_buckets_status(
|
||||
0, revision_ids[2], {b: 'created' for b in bucket_names})
|
||||
|
||||
def test_revision_diff_self(self):
|
||||
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
|
||||
bucket_name = test_utils.rand_name('bucket')
|
||||
documents = self.create_documents(bucket_name, payload)
|
||||
revision_id = documents[0]['revision_id']
|
||||
|
||||
self._verify_buckets_status(
|
||||
revision_id, revision_id, {bucket_name: 'unmodified'})
|
||||
|
||||
def test_revision_diff_multi_bucket_self(self):
|
||||
bucket_names = []
|
||||
revision_ids = []
|
||||
|
||||
for _ in range(3):
|
||||
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
|
||||
bucket_name = test_utils.rand_name('bucket')
|
||||
# Store each bucket that was created.
|
||||
bucket_names.append(bucket_name)
|
||||
documents = self.create_documents(bucket_name, payload)
|
||||
# Store each revision that was created.
|
||||
revision_id = documents[0]['revision_id']
|
||||
revision_ids.append(revision_id)
|
||||
|
||||
# The last revision should contain history for the previous 2 revisions
|
||||
# such that its diff history will show history for 3 buckets. Similarly
|
||||
# the 2nd revision will have history for 2 buckets and the 1st revision
|
||||
# for 1 bucket.
|
||||
# 1st revision has revision history for 1 bucket.
|
||||
self._verify_buckets_status(
|
||||
revision_ids[0], revision_ids[0], {bucket_names[0]: 'unmodified'})
|
||||
# 2nd revision has revision history for 2 buckets.
|
||||
self._verify_buckets_status(
|
||||
revision_ids[1], revision_ids[1],
|
||||
{b: 'unmodified' for b in bucket_names[:2]})
|
||||
# 3rd revision has revision history for 3 buckets.
|
||||
self._verify_buckets_status(
|
||||
revision_ids[2], revision_ids[2],
|
||||
{b: 'unmodified' for b in bucket_names})
|
||||
|
||||
def test_revision_diff_modified(self):
|
||||
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
|
||||
bucket_name = test_utils.rand_name('bucket')
|
||||
documents = self.create_documents(bucket_name, payload)
|
||||
revision_id = documents[0]['revision_id']
|
||||
|
||||
payload[0]['data'] = {'modified': 'modified'}
|
||||
comparison_documents = self.create_documents(bucket_name, payload)
|
||||
comparison_revision_id = comparison_documents[0]['revision_id']
|
||||
|
||||
self._verify_buckets_status(
|
||||
revision_id, comparison_revision_id, {bucket_name: 'modified'})
|
||||
|
||||
def test_revision_diff_multi_revision_modified(self):
|
||||
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
|
||||
bucket_name = test_utils.rand_name('bucket')
|
||||
revision_ids = []
|
||||
|
||||
for _ in range(3):
|
||||
payload[0]['data'] = {'modified': test_utils.rand_name('modified')}
|
||||
documents = self.create_documents(bucket_name, payload)
|
||||
revision_id = documents[0]['revision_id']
|
||||
revision_ids.append(revision_id)
|
||||
|
||||
for pair in [(0, 1), (0, 2), (1, 2)]:
|
||||
self._verify_buckets_status(
|
||||
revision_ids[pair[0]], revision_ids[pair[1]],
|
||||
{bucket_name: 'modified'})
|
||||
|
||||
def test_revision_diff_multi_revision_multi_bucket_modified(self):
    """Diff pairs of revisions that touch two different buckets.

    Four revisions are created by alternately writing fresh documents into
    `bucket_name` and `alt_bucket_name`; selected revision pairs are then
    diffed and the per-bucket statuses verified.
    """
    revision_ids = []

    bucket_name = test_utils.rand_name('bucket')
    alt_bucket_name = test_utils.rand_name('bucket')
    # Alternate the target bucket: revisions 1 and 3 write to
    # `bucket_name`, revisions 2 and 4 to `alt_bucket_name`.
    bucket_names = [bucket_name, alt_bucket_name] * 2

    # Create revisions by modifying documents in `bucket_name` and
    # `alt_bucket_name`.
    for bucket_idx in range(4):
        payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
        documents = self.create_documents(
            bucket_names[bucket_idx], payload)
        revision_id = documents[0]['revision_id']
        revision_ids.append(revision_id)

    # Between revision_ids[0] and [1], bucket_name is unmodified and
    # alt_bucket_name is created.
    # NOTE(review): the check below is commented out in the original
    # change — presumably a known gap in the diff logic; confirm before
    # re-enabling.
    # self._verify_buckets_status(
    #     revision_ids[0], revision_ids[1],
    #     {bucket_name: 'unmodified', alt_bucket_name: 'created'})

    # Between revision_ids[0] and [2], bucket_name is modified (by 2) and
    # alt_bucket_name is created (by 1).
    self._verify_buckets_status(
        revision_ids[0], revision_ids[2],
        {bucket_name: 'modified', alt_bucket_name: 'created'})

    # Between revision_ids[0] and [3], bucket_name is modified (by [2]) and
    # alt_bucket_name is created (by [1]) (as well as modified by [3]).
    self._verify_buckets_status(
        revision_ids[0], revision_ids[3],
        {bucket_name: 'modified', alt_bucket_name: 'created'})

    # Between revision_ids[1] and [2], bucket_name is modified but
    # alt_bucket_name remains unmodified.
    self._verify_buckets_status(
        revision_ids[1], revision_ids[2],
        {bucket_name: 'modified', alt_bucket_name: 'unmodified'})

    # Between revision_ids[1] and [3], bucket_name is modified (by [2]) and
    # alt_bucket_name is modified by [3].
    self._verify_buckets_status(
        revision_ids[1], revision_ids[3],
        {bucket_name: 'modified', alt_bucket_name: 'modified'})

    # Between revision_ids[2] and [3], alt_bucket_name is modified but
    # bucket_name remains unmodified.
    self._verify_buckets_status(
        revision_ids[2], revision_ids[3],
        {bucket_name: 'unmodified', alt_bucket_name: 'modified'})
||||
def test_revision_diff_ignore_bucket_with_unrelated_documents(self):
    """An unrelated second bucket shows up as `created` in the diff."""
    first_payload = base.DocumentFixture.get_minimal_fixture()
    other_payload = base.DocumentFixture.get_minimal_fixture()
    first_bucket = test_utils.rand_name('bucket')
    other_bucket = test_utils.rand_name('bucket')

    # First revision: a bucket holding a single document.
    revision_id = self.create_documents(
        first_bucket, first_payload)[0]['revision_id']

    # A second bucket whose document differs entirely from the first
    # bucket's (different schema and metadata.name).
    self.create_documents(other_bucket, other_payload)

    # Modify the first bucket's document to produce a comparison revision.
    first_payload['data'] = {'modified': 'modified'}
    comparison_revision_id = self.create_documents(
        first_bucket, first_payload)[0]['revision_id']

    # The second bucket should be reported as created.
    self._verify_buckets_status(
        revision_id, comparison_revision_id,
        {first_bucket: 'modified', other_bucket: 'created'})
||||
def test_revision_diff_ignore_bucket_with_all_unrelated_documents(self):
    """A bucket of entirely unrelated documents diffs as `created`."""
    payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
    alt_payload = copy.deepcopy(payload)
    bucket_name = test_utils.rand_name('bucket')
    alt_bucket_name = test_utils.rand_name('bucket')

    # First revision: a bucket with 3 documents.
    revision_id = self.create_documents(
        bucket_name, payload)[0]['revision_id']

    # The second bucket holds 3 documents that share nothing with the
    # first bucket's: every name and schema is randomized.
    for alt_doc in alt_payload:
        alt_doc['name'] = test_utils.rand_name('name')
        alt_doc['schema'] = test_utils.rand_name('schema')
    self.create_documents(alt_bucket_name, alt_payload)

    # Modify a document in the first bucket to force a new revision.
    payload[0]['data'] = {'modified': 'modified'}
    comparison_revision_id = self.create_documents(
        bucket_name, payload)[0]['revision_id']

    # The second bucket should be reported as created.
    self._verify_buckets_status(
        revision_id, comparison_revision_id,
        {bucket_name: 'modified', alt_bucket_name: 'created'})
||||
def test_revision_diff_deleted(self):
    """Emptying a bucket makes its diff status `deleted`."""
    bucket_name = test_utils.rand_name('bucket')
    payload = base.DocumentFixture.get_minimal_fixture()

    before_id = self.create_documents(
        bucket_name, payload)[0]['revision_id']

    # Pushing an empty payload deletes the bucket's documents.
    after_id = self.create_documents(bucket_name, [])[0]['revision_id']

    self._verify_buckets_status(
        before_id, after_id, {bucket_name: 'deleted'})
||||
def test_revision_diff_delete_then_recreate(self):
    """A document deleted and later restored diffs as expected."""
    payload = base.DocumentFixture.get_minimal_fixture()
    bucket_name = test_utils.rand_name('bucket')

    # Revision 1: the document exists.
    first_revision = self.create_documents(
        bucket_name, payload)[0]['revision_id']

    # Revision 2: the document is deleted via an empty payload.
    second_revision = self.create_documents(
        bucket_name, [])[0]['revision_id']

    # Revision 3: the identical document is recreated.
    third_revision = self.create_documents(
        bucket_name, payload)[0]['revision_id']

    # Deleted -> recreated reads as created, ignoring order.
    self._verify_buckets_status(
        second_revision, third_revision, {bucket_name: 'created'})

    # Created -> recreated reads as unmodified, ignoring order, since the
    # document content is identical in both revisions.
    self._verify_buckets_status(
        first_revision, third_revision, {bucket_name: 'unmodified'})
||||
def test_revision_diff_ignore_mistake_document(self):
    """A bucket created then deleted between two revisions is omitted."""
    main_payload = base.DocumentFixture.get_minimal_fixture()
    main_bucket = test_utils.rand_name('first_bucket')
    rev_1 = self.create_documents(
        main_bucket, main_payload)[0]['revision_id']

    # Create then delete an "accidental" document create request.
    mistake_payload = base.DocumentFixture.get_minimal_fixture()
    mistake_bucket = test_utils.rand_name('mistake_bucket')
    rev_2 = self.create_documents(
        mistake_bucket, mistake_payload)[0]['revision_id']
    rev_3 = self.create_documents(mistake_bucket, [])[0]['revision_id']

    # A legitimate follow-up bucket created after the mistake was
    # rectified.
    second_payload = base.DocumentFixture.get_minimal_fixture()
    second_bucket = test_utils.rand_name('second_bucket')
    rev_4 = self.create_documents(
        second_bucket, second_payload)[0]['revision_id']

    self._verify_buckets_status(
        rev_1, rev_2,
        {main_bucket: 'unmodified', mistake_bucket: 'created'})
    self._verify_buckets_status(
        rev_2, rev_3,
        {main_bucket: 'unmodified', mistake_bucket: 'deleted'})
    self._verify_buckets_status(
        rev_1, rev_3, {main_bucket: 'unmodified'})

    # The mistake bucket must not appear in the report: it was created
    # and deleted entirely between the two revisions being compared.
    self._verify_buckets_status(
        rev_1, rev_4,
        {main_bucket: 'unmodified', second_bucket: 'created'})
@ -42,7 +42,7 @@ class TestRevisions(base.TestDbBase):
|
||||
# Update the last document.
|
||||
documents[-1]['data'] = {'foo': 'bar'}
|
||||
updated_documents = self.create_documents(
|
||||
bucket_name, documents, do_validation=False)
|
||||
bucket_name, documents)
|
||||
new_revision_id = updated_documents[0]['revision_id']
|
||||
|
||||
# 4 documents should be returned: the updated doc along with the other
|
||||
@ -136,7 +136,7 @@ class TestRevisions(base.TestDbBase):
|
||||
for _ in range(3)]
|
||||
bucket_name = test_utils.rand_name('bucket')
|
||||
created_documents = self.create_documents(
|
||||
bucket_name, document_payload, do_validation=False)
|
||||
bucket_name, document_payload)
|
||||
all_created_documents.extend(created_documents)
|
||||
revision_id = created_documents[0]['revision_id']
|
||||
all_revision_ids.append(revision_id)
|
||||
@ -164,7 +164,7 @@ class TestRevisions(base.TestDbBase):
|
||||
|
||||
created_documents = self.create_documents(bucket_name, documents)
|
||||
alt_created_documents = self.create_documents(
|
||||
alt_bucket_name, alt_documents, do_validation=False)
|
||||
alt_bucket_name, alt_documents)
|
||||
|
||||
alt_revision_docs = self.list_revision_documents(
|
||||
alt_created_documents[0]['revision_id'])
|
||||
|
@ -57,8 +57,7 @@ class TestDocumentViews(base.TestDbBase):
|
||||
payload = base.DocumentFixture.get_minimal_fixture()
|
||||
bucket_name = test_utils.rand_name('bucket')
|
||||
self.create_documents(bucket_name, payload)
|
||||
deleted_documents = self.create_documents(
|
||||
bucket_name, [], do_validation=False)
|
||||
deleted_documents = self.create_documents(bucket_name, [])
|
||||
|
||||
document_view = self.view_builder.list(deleted_documents)
|
||||
self.assertEqual(1, len(document_view))
|
||||
|
@ -49,7 +49,7 @@ class TestRevisionViews(base.TestDbBase):
|
||||
payload = [base.DocumentFixture.get_minimal_fixture()
|
||||
for _ in range(doc_count)]
|
||||
bucket_name = test_utils.rand_name('bucket')
|
||||
self.create_documents(bucket_name, payload, do_validation=False)
|
||||
self.create_documents(bucket_name, payload)
|
||||
revisions = self.list_revisions()
|
||||
revisions_view = self.view_builder.list(revisions)
|
||||
|
||||
|
@ -12,6 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import copy
|
||||
import re
|
||||
import string
|
||||
|
||||
@ -60,3 +61,21 @@ def multi_getattr(multi_key, dict_data):
|
||||
data = data.get(attr)
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def make_hash(o):
    """Compute a deterministic (per-process) hash of a nested structure.

    Produces a hash for dictionaries, lists, tuples and sets nested to any
    depth, provided every leaf value is itself hashable. Used to compare
    documents cheaply instead of performing direct deep comparisons.

    :param o: Object to hash; may be a dict, list, tuple, set or any
        hashable leaf value.
    :returns: For lists/tuples/sets, a tuple of recursively-computed child
        hashes; for everything else, an integer hash.

    .. note:: Built on the builtin ``hash()``, so results are only stable
        within a single Python process when string hash randomization
        (``PYTHONHASHSEED``) is active — do not persist these values.
        NOTE(review): set iteration order is not canonicalized before
        hashing; presumably the inputs here are dicts/lists — confirm if
        sets are ever passed.
    """
    if isinstance(o, (set, tuple, list)):
        return tuple([make_hash(e) for e in o])

    elif not isinstance(o, dict):
        return hash(o)

    # Recursively hash each value. Building a fresh dict avoids the
    # original deepcopy-then-mutate of the entire (possibly large) nested
    # structure at every recursion level, while producing byte-identical
    # (key, child-hash) items — the resulting hash value is unchanged.
    hashed_items = {k: make_hash(v) for k, v in o.items()}
    return hash(tuple(frozenset(sorted(hashed_items.items()))))
|
@ -792,7 +792,9 @@ This endpoint provides a basic comparison of revisions in terms of how the
|
||||
buckets involved have changed. Only buckets with existing documents in either
|
||||
of the two revisions in question will be reported; buckets with documents that
|
||||
are only present in revisions between the two being compared are omitted from
|
||||
this report. That is, buckets with documents that were accidentally created
(and then deleted to rectify the mistake) that are not directly present in
the two revisions being compared are omitted.
|
||||
|
||||
The response will contain a status of `created`, `deleted`, `modified`, or
|
||||
`unmodified` for each bucket.
|
||||
|
Loading…
Reference in New Issue
Block a user