Merge "Document staging api"
This commit is contained in:
commit
90b292f209
1
AUTHORS
1
AUTHORS
|
@ -2,6 +2,7 @@ Alan Meadows <alan.meadows@gmail.com>
|
|||
Anthony Lin <anthony.jclin@gmail.com>
|
||||
Bryan Strassner <bryan.strassner@gmail.com>
|
||||
Felipe Monteiro <felipe.monteiro@att.com>
|
||||
Hassan Kaous <hkyq8@mst.edu>
|
||||
Mark Burnett <mark.m.burnett@gmail.com>
|
||||
One-Fine-Day <vd789v@att.com>
|
||||
Pete Birley <pete@port.direct>
|
||||
|
|
|
@ -61,7 +61,7 @@ def upgrade():
|
|||
'action_command_audit',
|
||||
# ID (ULID) of the audit
|
||||
sa.Column('id', types.String(26), primary_key=True),
|
||||
# The ID of action this audit record
|
||||
# The ID of the action for this audit record
|
||||
sa.Column('action_id', types.String(26), nullable=False),
|
||||
# The text indicating command invoked
|
||||
sa.Column('command', sa.Text, nullable=False),
|
||||
|
@ -73,6 +73,25 @@ def upgrade():
|
|||
server_default=func.now()),
|
||||
)
|
||||
|
||||
op.create_table(
|
||||
'api_locks',
|
||||
# ID (ULID) of the lock
|
||||
sa.Column('id', types.String(26), primary_key=True),
|
||||
# The category/type of the lock
|
||||
sa.Column('lock_type', types.String(20), nullable=False),
|
||||
# Timestamp of when the lock was acquired
|
||||
sa.Column('datetime',
|
||||
types.TIMESTAMP(timezone=True),
|
||||
server_default=func.now(),
|
||||
nullable=False),
|
||||
# Expires
|
||||
sa.Column('expires', types.Integer, nullable=False, default=60),
|
||||
# A marker if the lock is released
|
||||
sa.Column('released', types.Boolean, nullable=False, default=False),
|
||||
sa.Column('user', types.String(64), nullable=False),
|
||||
sa.Column('reference_id', types.String(36), nullable=False),
|
||||
)
|
||||
|
||||
|
||||
def downgrade():
|
||||
"""
|
||||
|
@ -81,3 +100,4 @@ def downgrade():
|
|||
op.drop_table('actions')
|
||||
op.drop_table('preflight_validation_failures')
|
||||
op.drop_table('action_command_audit')
|
||||
op.drop_table('api_locks')
|
||||
|
|
20
docs/API.md
20
docs/API.md
|
@ -90,6 +90,9 @@ indicates that the specified collection should be deleted when the Shipyard
|
|||
Buffer is committed. If a POST to the commitconfigdocs is in progress, this
|
||||
POST should be rejected with a 409 error.
|
||||
|
||||
Important:
|
||||
The expected input type for this request is 'Content-Type: application/x-yaml'
|
||||
|
||||
##### Query Parameters
|
||||
* bufferMode=append|replace|**rejectOnContents**
|
||||
Indicates how the existing Shipyard Buffer should be handled. By default,
|
||||
|
@ -121,6 +124,9 @@ Returns the source documents for a collection of documents
|
|||
* version=committed|**buffer**
|
||||
Return the documents for the version specified - buffer by default.
|
||||
|
||||
Important:
|
||||
The output type for this request is 'Content-Type: application/x-yaml'
|
||||
|
||||
##### Responses
|
||||
* 200 OK
|
||||
If documents can be retrieved.
|
||||
|
@ -141,6 +147,10 @@ consider collections in any way.
|
|||
|
||||
#### GET /v1.0/renderedconfigdocs
|
||||
Returns the full set of configdocs in their rendered form.
|
||||
|
||||
Important:
|
||||
The output type for this request is 'Content-Type: application/x-yaml'
|
||||
|
||||
##### Query Parameters
|
||||
* version=committed|**buffer**
|
||||
Return the documents for the version specified - buffer by default.
|
||||
|
@ -180,7 +190,7 @@ a 400 response. With force=true, allows for the commit to succeed (with a 200
|
|||
response) even if there are validation failures from downstream components. The
|
||||
aggregate response of validation failures will be returned in this case, but
|
||||
the invalid documents will still be moved from the Shipyard Buffer to the
|
||||
Committed Documents.
|
||||
Committed Documents.
|
||||
|
||||
##### Responses
|
||||
* 200 OK
|
||||
|
@ -188,7 +198,7 @@ If the validations are successful. Returns an "empty" structure as as response
|
|||
indicating no errors. A 200 may also be returned if there are validation
|
||||
failures, but the force=true query parameter was specified. In this case, the
|
||||
response will contain the list of validations.
|
||||
* 400 Bad Request
|
||||
* 400 Bad Request
|
||||
If the validations fail. Returns a populated response structure containing
|
||||
the aggregation of the failed validations.
|
||||
* 409 Conflict
|
||||
|
@ -290,13 +300,13 @@ airflow.
|
|||
##### Responses
|
||||
* 201 Created
|
||||
If the action is created successfully, and all preconditions to run the DAG
|
||||
are successful. The response body is the action entity created.
|
||||
are successful. The response body is the action entity created.
|
||||
* 400 Bad Request
|
||||
If the action name doesn't exist, or the input entity is otherwise malformed.
|
||||
If the action name doesn't exist, or the input entity is otherwise malformed.
|
||||
* 409 Conflict
|
||||
For any failed pre-run validations. The response body is the action entity
|
||||
created, with the failed validations. The DAG will not begin execution in this
|
||||
case.
|
||||
case.
|
||||
|
||||
##### Example
|
||||
```
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
# Create a workflow action
|
||||
# POST /api/v1.0/actions
|
||||
#"workflow_orchestrator:create_actions": "rule:admin_required"
|
||||
#"workflow_orchestrator:create_action": "rule:admin_required"
|
||||
|
||||
# Retreive an action by its id
|
||||
# GET /api/v1.0/actions/{action_id}
|
||||
|
@ -25,3 +25,20 @@
|
|||
# POST /api/v1.0/actions/{action_id}/control/{control_verb}
|
||||
#"workflow_orchestrator:invoke_action_control": "rule:admin_required"
|
||||
|
||||
# Ingest configuration documents for the site design
|
||||
# POST /api/v1.0/configdocs/{collection_id}
|
||||
#"workflow_orchestrator:create_configdocs": "rule:admin_required"
|
||||
|
||||
# Retrieve a collection of configuration documents
|
||||
# GET /api/v1.0/configdocs/{collection_id}
|
||||
#"workflow_orchestrator:get_configdocs": "rule:admin_required"
|
||||
|
||||
# Move documents from the Shipyard buffer to the committed documents
|
||||
# POST /api/v1.0/commitconfigdocs
|
||||
#"workflow_orchestrator:commit_configdocs": "rule:admin_required"
|
||||
|
||||
# Retrieve the configuration documents rendered by Deckhand into a
|
||||
# complete design
|
||||
# GET /api/v1.0/renderedconfigdocs
|
||||
#"workflow_orchestrator:get_renderedconfigdocs": "rule:admin_required"
|
||||
|
||||
|
|
|
@ -7,11 +7,10 @@
|
|||
# From shipyard_airflow
|
||||
#
|
||||
|
||||
# FQDN for the armada service (string value)
|
||||
#host = armada-int.ucp
|
||||
|
||||
# Port for the armada service (integer value)
|
||||
#port = 8000
|
||||
# The service type for the service playing the role of Armada. The specified
|
||||
# type is used to perform the service lookup in the Keystone service catalog.
|
||||
# (string value)
|
||||
#service_type = armada
|
||||
|
||||
|
||||
[base]
|
||||
|
@ -42,11 +41,10 @@
|
|||
# From shipyard_airflow
|
||||
#
|
||||
|
||||
# FQDN for the deckhand service (string value)
|
||||
#host = deckhand-int.ucp
|
||||
|
||||
# Port for the deckhand service (integer value)
|
||||
#port = 80
|
||||
# The service type for the service playing the role of Deckhand. The specified
|
||||
# type is used to perform the service lookup in the Keystone service catalog.
|
||||
# (string value)
|
||||
#service_type = deckhand
|
||||
|
||||
|
||||
[drydock]
|
||||
|
@ -55,20 +53,10 @@
|
|||
# From shipyard_airflow
|
||||
#
|
||||
|
||||
# FQDN for the drydock service (string value)
|
||||
#host = drydock-int.ucp
|
||||
|
||||
# Port for the drydock service (integer value)
|
||||
#port = 9000
|
||||
|
||||
# TEMPORARY: password for drydock (string value)
|
||||
#token = bigboss
|
||||
|
||||
# TEMPORARY: location of drydock yaml file (string value)
|
||||
#site_yaml = /usr/local/airflow/plugins/drydock.yaml
|
||||
|
||||
# TEMPORARY: location of promenade yaml file (string value)
|
||||
#prom_yaml = /usr/local/airflow/plugins/promenade.yaml
|
||||
# The service type for the service playing the role of Drydock. The specified
|
||||
# type is used to perform the service lookup in the Keystone service catalog.
|
||||
# (string value)
|
||||
#service_type = physicalprovisioner
|
||||
|
||||
|
||||
[healthcheck]
|
||||
|
@ -84,34 +72,6 @@
|
|||
#endpoint = /api/v1.0/health
|
||||
|
||||
|
||||
[keystone]
|
||||
|
||||
#
|
||||
# From shipyard_airflow
|
||||
#
|
||||
|
||||
# The url for OpenStack Authentication (string value)
|
||||
#OS_AUTH_URL = http://keystone-api.ucp:80/v3
|
||||
|
||||
# OpenStack project name (string value)
|
||||
#OS_PROJECT_NAME = service
|
||||
|
||||
# The OpenStack user domain name (string value)
|
||||
#OS_USER_DOMAIN_NAME = Default
|
||||
|
||||
# The OpenStack username (string value)
|
||||
#OS_USERNAME = shipyard
|
||||
|
||||
# THe OpenStack password for the shipyard svc acct (string value)
|
||||
#OS_PASSWORD = password
|
||||
|
||||
# The OpenStack user domain name (string value)
|
||||
#OS_REGION_NAME = Regionone
|
||||
|
||||
# The OpenStack identity api version (integer value)
|
||||
#OS_IDENTITY_API_VERSION = 3
|
||||
|
||||
|
||||
[keystone_authtoken]
|
||||
|
||||
#
|
||||
|
@ -303,8 +263,7 @@
|
|||
# From shipyard_airflow
|
||||
#
|
||||
|
||||
# FQDN for the shipyard service (string value)
|
||||
#host = shipyard-int.ucp
|
||||
|
||||
# Port for the shipyard service (integer value)
|
||||
#port = 9000
|
||||
# The service type for the service playing the role of Shipyard. The specified
|
||||
# type is used to perform the service lookup in the Keystone service catalog.
|
||||
# (string value)
|
||||
#service_type = shipyard
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
""" Module providing the oslo_config based configuration for Shipyard
|
||||
"""
|
||||
import logging
|
||||
|
||||
import keystoneauth1.loading as ks_loading
|
||||
|
@ -75,14 +77,13 @@ SECTIONS = [
|
|||
title='Shipyard connection info',
|
||||
options=[
|
||||
cfg.StrOpt(
|
||||
'host',
|
||||
default='shipyard-int.ucp',
|
||||
help='FQDN for the shipyard service'
|
||||
),
|
||||
cfg.IntOpt(
|
||||
'port',
|
||||
default=9000,
|
||||
help='Port for the shipyard service'
|
||||
'service_type',
|
||||
default='shipyard',
|
||||
help=(
|
||||
'The service type for the service playing the role '
|
||||
'of Shipyard. The specified type is used to perform '
|
||||
'the service lookup in the Keystone service catalog. '
|
||||
)
|
||||
),
|
||||
]
|
||||
),
|
||||
|
@ -91,14 +92,13 @@ SECTIONS = [
|
|||
title='Deckhand connection info',
|
||||
options=[
|
||||
cfg.StrOpt(
|
||||
'host',
|
||||
default='deckhand-int.ucp',
|
||||
help='FQDN for the deckhand service'
|
||||
),
|
||||
cfg.IntOpt(
|
||||
'port',
|
||||
default=80,
|
||||
help='Port for the deckhand service'
|
||||
'service_type',
|
||||
default='deckhand',
|
||||
help=(
|
||||
'The service type for the service playing the role '
|
||||
'of Deckhand. The specified type is used to perform '
|
||||
'the service lookup in the Keystone service catalog. '
|
||||
)
|
||||
),
|
||||
]
|
||||
),
|
||||
|
@ -107,14 +107,13 @@ SECTIONS = [
|
|||
title='Armada connection info',
|
||||
options=[
|
||||
cfg.StrOpt(
|
||||
'host',
|
||||
default='armada-int.ucp',
|
||||
help='FQDN for the armada service'
|
||||
),
|
||||
cfg.IntOpt(
|
||||
'port',
|
||||
default=8000,
|
||||
help='Port for the armada service'
|
||||
'service_type',
|
||||
default='armada',
|
||||
help=(
|
||||
'The service type for the service playing the role '
|
||||
'of Armada. The specified type is used to perform '
|
||||
'the service lookup in the Keystone service catalog. '
|
||||
)
|
||||
),
|
||||
]
|
||||
),
|
||||
|
@ -123,32 +122,13 @@ SECTIONS = [
|
|||
title='Drydock connection info',
|
||||
options=[
|
||||
cfg.StrOpt(
|
||||
'host',
|
||||
default='drydock-int.ucp',
|
||||
help='FQDN for the drydock service'
|
||||
),
|
||||
cfg.IntOpt(
|
||||
'port',
|
||||
default=9000,
|
||||
help='Port for the drydock service'
|
||||
),
|
||||
# TODO(Bryan Strassner) Remove this when integrated
|
||||
cfg.StrOpt(
|
||||
'token',
|
||||
default='bigboss',
|
||||
help='TEMPORARY: password for drydock'
|
||||
),
|
||||
# TODO(Bryan Strassner) Remove this when integrated
|
||||
cfg.StrOpt(
|
||||
'site_yaml',
|
||||
default='/usr/local/airflow/plugins/drydock.yaml',
|
||||
help='TEMPORARY: location of drydock yaml file'
|
||||
),
|
||||
# TODO(Bryan Strassner) Remove this when integrated
|
||||
cfg.StrOpt(
|
||||
'prom_yaml',
|
||||
default='/usr/local/airflow/plugins/promenade.yaml',
|
||||
help='TEMPORARY: location of promenade yaml file'
|
||||
'service_type',
|
||||
default='physicalprovisioner',
|
||||
help=(
|
||||
'The service type for the service playing the role '
|
||||
'of Drydock. The specified type is used to perform '
|
||||
'the service lookup in the Keystone service catalog. '
|
||||
)
|
||||
),
|
||||
]
|
||||
),
|
||||
|
@ -168,56 +148,11 @@ SECTIONS = [
|
|||
),
|
||||
]
|
||||
),
|
||||
# TODO (Bryan Strassner) This section is in use by the operators we send
|
||||
# to the airflow pod(s). Needs to be refactored out
|
||||
# when those operators are updated.
|
||||
ConfigSection(
|
||||
name='keystone',
|
||||
title='Keystone connection and credential information',
|
||||
options=[
|
||||
cfg.StrOpt(
|
||||
'OS_AUTH_URL',
|
||||
default='http://keystone-api.ucp:80/v3',
|
||||
help='The url for OpenStack Authentication'
|
||||
),
|
||||
cfg.StrOpt(
|
||||
'OS_PROJECT_NAME',
|
||||
default='service',
|
||||
help='OpenStack project name'
|
||||
),
|
||||
cfg.StrOpt(
|
||||
'OS_USER_DOMAIN_NAME',
|
||||
default='Default',
|
||||
help='The OpenStack user domain name'
|
||||
),
|
||||
cfg.StrOpt(
|
||||
'OS_USERNAME',
|
||||
default='shipyard',
|
||||
help='The OpenStack username'
|
||||
),
|
||||
cfg.StrOpt(
|
||||
'OS_PASSWORD',
|
||||
default='password',
|
||||
help='THe OpenStack password for the shipyard svc acct'
|
||||
),
|
||||
cfg.StrOpt(
|
||||
'OS_REGION_NAME',
|
||||
default='Regionone',
|
||||
help='The OpenStack user domain name'
|
||||
),
|
||||
cfg.IntOpt(
|
||||
'OS_IDENTITY_API_VERSION',
|
||||
default=3,
|
||||
help='The OpenStack identity api version'
|
||||
),
|
||||
]
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
def register_opts(conf):
|
||||
"""
|
||||
Registers all the sections in this module.
|
||||
""" Registers all the sections in this module.
|
||||
"""
|
||||
for section in SECTIONS:
|
||||
conf.register_group(
|
||||
|
@ -226,9 +161,6 @@ def register_opts(conf):
|
|||
help=section.help))
|
||||
conf.register_opts(section.options, group=section.name)
|
||||
|
||||
# TODO (Bryan Strassner) is there a better, more general way to do this,
|
||||
# or is password enough? Probably need some guidance
|
||||
# from someone with more experience in this space.
|
||||
conf.register_opts(
|
||||
ks_loading.get_auth_plugin_conf_options('password'),
|
||||
group='keystone_authtoken'
|
||||
|
@ -236,12 +168,16 @@ def register_opts(conf):
|
|||
|
||||
|
||||
def list_opts():
|
||||
""" List the options identified by this configuration
|
||||
"""
|
||||
return {
|
||||
section.name: section.options for section in SECTIONS
|
||||
}
|
||||
|
||||
|
||||
def parse_args(args=None, usage=None, default_config_files=None):
|
||||
""" Triggers the parsing of the arguments/configs
|
||||
"""
|
||||
CONF(args=args,
|
||||
project='shipyard',
|
||||
usage=usage,
|
||||
|
|
|
@ -78,7 +78,6 @@ class ActionsResource(BaseResource):
|
|||
# respond with the action and location for checking status
|
||||
resp.status = falcon.HTTP_201
|
||||
resp.body = self.to_json(action)
|
||||
# TODO (Bryan Strassner) figure out the right way to do this:
|
||||
resp.location = '/api/v1.0/actions/{}'.format(action['id'])
|
||||
|
||||
def create_action(self, action, context):
|
||||
|
@ -245,16 +244,17 @@ class ActionsResource(BaseResource):
|
|||
dag_id, self.to_json(conf_value)))
|
||||
|
||||
try:
|
||||
resp = requests.get(req_url, timeout=15)
|
||||
resp = requests.get(req_url, timeout=(5, 15))
|
||||
self.info(context,
|
||||
'Response code from Airflow trigger_dag: %s' %
|
||||
resp.status_code)
|
||||
# any 4xx/5xx will be HTTPError, which are RequestException
|
||||
resp.raise_for_status()
|
||||
response = resp.json()
|
||||
self.info(context,
|
||||
'Response from Airflow trigger_dag: %s' %
|
||||
response)
|
||||
except (RequestException) as rex:
|
||||
except RequestException as rex:
|
||||
self.error(context, "Request to airflow failed: %s" % rex.args)
|
||||
raise ApiError(
|
||||
title='Unable to complete request to Airflow',
|
||||
|
@ -267,24 +267,10 @@ class ActionsResource(BaseResource):
|
|||
}],
|
||||
retry=True, )
|
||||
|
||||
# Returns error response if API call returns
|
||||
# response code other than 200
|
||||
if response["http_response_code"] != 200:
|
||||
raise ApiError(
|
||||
title='Unable to invoke workflow',
|
||||
description=(
|
||||
'Airflow URL not found by Shipyard.',
|
||||
'Shipyard configuration is missing web_server value'),
|
||||
status=falcon.HTTP_503,
|
||||
error_list=[{
|
||||
'message': response['output']
|
||||
}],
|
||||
retry=True, )
|
||||
else:
|
||||
dag_time = self._exhume_date(dag_id,
|
||||
response['output']['stdout'])
|
||||
dag_execution_date = dag_time.strftime('%Y-%m-%dT%H:%M:%S')
|
||||
return dag_execution_date
|
||||
dag_time = self._exhume_date(dag_id,
|
||||
response['output']['stdout'])
|
||||
dag_execution_date = dag_time.strftime('%Y-%m-%dT%H:%M:%S')
|
||||
return dag_execution_date
|
||||
|
||||
def _exhume_date(self, dag_id, log_string):
|
||||
# we are unable to use the response time because that
|
||||
|
|
|
@ -22,10 +22,14 @@ from shipyard_airflow.control.actions_steps_id_api import ActionsStepsResource
|
|||
from shipyard_airflow.control.actions_validations_id_api import \
|
||||
ActionsValidationsResource
|
||||
from shipyard_airflow.control.base import BaseResource, ShipyardRequest
|
||||
from shipyard_airflow.control.configdocs_api import (CommitConfigDocsResource,
|
||||
ConfigDocsResource)
|
||||
from shipyard_airflow.control.health import HealthResource
|
||||
from shipyard_airflow.control.middleware import (AuthMiddleware,
|
||||
ContextMiddleware,
|
||||
LoggingMiddleware)
|
||||
from shipyard_airflow.control.rendered_configdocs_api import \
|
||||
RenderedConfigDocsResource
|
||||
from shipyard_airflow.errors import (AppError, default_error_serializer,
|
||||
default_exception_handler)
|
||||
|
||||
|
@ -55,6 +59,9 @@ def start_api():
|
|||
ActionsStepsResource()),
|
||||
('/actions/{action_id}/validations/{validation_id}',
|
||||
ActionsValidationsResource()),
|
||||
('/configdocs/{collection_id}', ConfigDocsResource()),
|
||||
('/commitconfigdocs', CommitConfigDocsResource()),
|
||||
('/renderedconfigdocs', RenderedConfigDocsResource()),
|
||||
]
|
||||
|
||||
# Set up the 1.0 routes
|
||||
|
|
|
@ -0,0 +1,217 @@
|
|||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
api_lock provides for a rudimentary lock mechanism using
|
||||
the database to sync across multiple shipyard instances.
|
||||
Also provided is the api_lock decorator to allow for resources
|
||||
to easily declare the lock they should be able to acquire before
|
||||
executing.
|
||||
"""
|
||||
from enum import Enum
|
||||
import logging
|
||||
from functools import wraps
|
||||
|
||||
import falcon
|
||||
import ulid
|
||||
|
||||
from shipyard_airflow.db.db import SHIPYARD_DB
|
||||
from shipyard_airflow.errors import ApiError
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def api_lock(api_lock_type):
|
||||
"""
|
||||
Decorator to handle allowing a resource method to institute a lock
|
||||
based on the specified lock type.
|
||||
These locks are intended for use around methods such as on_post
|
||||
and on_get, etc...
|
||||
"""
|
||||
def lock_decorator(func):
|
||||
@wraps(func)
|
||||
def func_wrapper(self, req, resp, *args, **kwargs):
|
||||
lock = ApiLock(api_lock_type,
|
||||
req.context.external_marker,
|
||||
req.context.user)
|
||||
try:
|
||||
lock.acquire()
|
||||
return func(self, req, resp, *args, **kwargs)
|
||||
except ApiLockAcquireError:
|
||||
raise ApiError(
|
||||
title='Blocked by another process',
|
||||
description=(
|
||||
'Another process is currently blocking this request '
|
||||
'with a lock for {}. Lock expires in not more '
|
||||
'than {} seconds'.format(
|
||||
lock.lock_type_name,
|
||||
lock.expires
|
||||
)
|
||||
),
|
||||
status=falcon.HTTP_409,
|
||||
retry=False,
|
||||
)
|
||||
finally:
|
||||
lock.release()
|
||||
return func_wrapper
|
||||
return lock_decorator
|
||||
|
||||
|
||||
class ApiLockType(Enum):
|
||||
"""
|
||||
ApiLockType defines the kinds of locks that can be set up using
|
||||
this locking mechanism.
|
||||
"""
|
||||
CONFIGDOCS_UPDATE = {'name': 'configdocs_update', 'expires': 60}
|
||||
|
||||
|
||||
class ApiLock(object):
|
||||
"""
|
||||
Api Lock provides for a simple locking mechanism for shipyard's
|
||||
API classes. The lock provided is intended to block conflicting
|
||||
activity across containers by using the backing database
|
||||
to calculate if a lock is currently held.
|
||||
|
||||
The mechanism is as follows:
|
||||
1) Attempt to write a lock record to the database such that:
|
||||
there is no lock in the database of the same kind already that
|
||||
is not either released or expired
|
||||
1a) If the insert fails, the lock is not acquired.
|
||||
2) Query the database for the latest lock of the type provided.
|
||||
If the lock's id matches the ID of the process trying to
|
||||
acquire the lock, the process should succeed.
|
||||
If the lock ID doesn't match the record returned, the
|
||||
process attempting to acquire a lock is blocked/failed.
|
||||
(Note that the intended use is not to queue requests, but
|
||||
rather fail them if something else holds the lock)
|
||||
3) Upon completion of the activity, the lock holder will update
|
||||
the lock record to indicate that it is released.
|
||||
|
||||
The database query used for insert will only insert if there
|
||||
is not already an active lock record for the given type.
|
||||
If the insert inserts zero rows, this indicates that the lock
|
||||
is not acquired, and does not require a subsequent query.
|
||||
|
||||
The subsequent query is always used when the lock record insert
|
||||
has been succesful, to handle race conditions. The select query
|
||||
orders by both date and id, Whereby the randomness of the id
|
||||
provides for a tiebreaker.
|
||||
|
||||
All locks expire based on their lock type, and default to
|
||||
60 seconds
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
api_lock_type,
|
||||
reference_id,
|
||||
user,
|
||||
lock_db=SHIPYARD_DB):
|
||||
"""
|
||||
Set up the Api Lock, using the input ApiLockType.
|
||||
Generates a ULID to represent this lock
|
||||
:param api_lock_type: the ApiLockType for this lock
|
||||
:param reference_id: the calling process' id provided for
|
||||
purposes of correlation
|
||||
:param user: the calling process' user for purposes of
|
||||
tracking
|
||||
"""
|
||||
if (not isinstance(api_lock_type, ApiLockType) or
|
||||
api_lock_type.value.get('name') is None):
|
||||
raise ApiLockSetupError(
|
||||
message='ApiLock requires a valid ApiLockType'
|
||||
)
|
||||
self.lock_id = ulid.ulid()
|
||||
self.lock_type_name = api_lock_type.value.get('name')
|
||||
self.expires = api_lock_type.value.get('expires', 60)
|
||||
self.reference_id = reference_id
|
||||
self.user = user
|
||||
self.lock_db = lock_db
|
||||
|
||||
def acquire(self):
|
||||
"""
|
||||
Acquires a lock
|
||||
Responds with an ApiLockAcquireError if the lock is not
|
||||
acquired
|
||||
"""
|
||||
LOG.info('Acquiring lock type: %s. Lock id: %s.',
|
||||
self.lock_type_name,
|
||||
self.lock_id)
|
||||
|
||||
holds_lock = False
|
||||
insert_worked = self.lock_db.insert_api_lock(
|
||||
lock_id=self.lock_id,
|
||||
lock_type=self.lock_type_name,
|
||||
expires=self.expires,
|
||||
user=self.user,
|
||||
reference_id=self.reference_id
|
||||
)
|
||||
LOG.info('Insert lock %s %s',
|
||||
self.lock_id,
|
||||
'succeeded' if insert_worked else 'failed')
|
||||
|
||||
if insert_worked:
|
||||
lock_retrieved = self.lock_db.get_api_lock(
|
||||
lock_type=self.lock_type_name
|
||||
)
|
||||
holds_lock = lock_retrieved == self.lock_id
|
||||
LOG.info(
|
||||
'Lock %s is currently held. This lock is %s. Match=%s',
|
||||
lock_retrieved,
|
||||
self.lock_id,
|
||||
holds_lock
|
||||
)
|
||||
|
||||
if not holds_lock:
|
||||
LOG.info('Api Lock not acquired')
|
||||
raise ApiLockAcquireError()
|
||||
|
||||
def release(self):
|
||||
"""
|
||||
Release the lock
|
||||
"""
|
||||
try:
|
||||
self.lock_db.release_api_lock(self.lock_id)
|
||||
except Exception as error:
|
||||
# catching Exception because this is a non-fatal case
|
||||
# and has no expected action to be taken.
|
||||
LOG.error('Exception raised during release of api lock: %s. '
|
||||
'Unreleased lock for %s will expire in not more than '
|
||||
'%s seconds. Exception: %s',
|
||||
self.lock_id,
|
||||
self.lock_type_name,
|
||||
self.expires,
|
||||
str(error))
|
||||
|
||||
|
||||
class ApiLockError(Exception):
|
||||
"""
|
||||
Base exception for all api lock exceptions
|
||||
"""
|
||||
|
||||
def __init__(self, message=None):
|
||||
self.message = message
|
||||
super().__init__()
|
||||
|
||||
|
||||
class ApiLockSetupError(ApiLockError):
|
||||
"""
|
||||
Specifies that there was a problem during setup of the lock
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class ApiLockAcquireError(ApiLockError):
|
||||
"""
|
||||
Signals to the calling process that this lock was not acquired
|
||||
"""
|
||||
pass
|
|
@ -24,6 +24,10 @@ from shipyard_airflow.errors import InvalidFormatError
|
|||
|
||||
|
||||
class BaseResource(object):
|
||||
"""
|
||||
The base resource for Shipyard entities/api handlers. This class
|
||||
provides some reusable functionality.
|
||||
"""
|
||||
def __init__(self):
|
||||
self.logger = logging.getLogger('shipyard.control')
|
||||
|
||||
|
@ -46,9 +50,7 @@ class BaseResource(object):
|
|||
validation
|
||||
"""
|
||||
has_input = False
|
||||
if ((req.content_length is not None or req.content_length != 0) and
|
||||
(req.content_type is not None and
|
||||
req.content_type.lower() == 'application/json')):
|
||||
if req.content_length > 0 and 'application/json' in req.content_type:
|
||||
raw_body = req.stream.read(req.content_length or 0)
|
||||
if raw_body is not None:
|
||||
has_input = True
|
||||
|
|
|
@ -0,0 +1,178 @@
|
|||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Resources representing the configdocs API for shipyard
|
||||
"""
|
||||
import falcon
|
||||
from oslo_config import cfg
|
||||
|
||||
from shipyard_airflow import policy
|
||||
from shipyard_airflow.control import configdocs_helper
|
||||
from shipyard_airflow.control.api_lock import (api_lock, ApiLockType)
|
||||
from shipyard_airflow.control.base import BaseResource
|
||||
from shipyard_airflow.control.configdocs_helper import (BufferMode,
|
||||
ConfigdocsHelper)
|
||||
from shipyard_airflow.errors import ApiError
|
||||
|
||||
CONF = cfg.CONF
|
||||
VERSION_VALUES = ['buffer', 'committed']
|
||||
|
||||
|
||||
class ConfigDocsResource(BaseResource):
|
||||
"""
|
||||
Configdocs handles the creation and retrieval of configuration
|
||||
documents into Shipyard.
|
||||
"""
|
||||
|
||||
@policy.ApiEnforcer('workflow_orchestrator:create_configdocs')
|
||||
@api_lock(ApiLockType.CONFIGDOCS_UPDATE)
|
||||
def on_post(self, req, resp, collection_id):
|
||||
"""
|
||||
Ingests a collection of documents
|
||||
"""
|
||||
document_data = req.stream.read(req.content_length or 0)
|
||||
helper = ConfigdocsHelper(req.context.external_marker)
|
||||
validations = self.post_collection(
|
||||
helper=helper,
|
||||
collection_id=collection_id,
|
||||
document_data=document_data,
|
||||
buffer_mode_param=req.params.get('buffermode')
|
||||
)
|
||||
resp.location = '/api/v1.0/configdocs/{}'.format(collection_id)
|
||||
resp.body = self.to_json(validations)
|
||||
resp.status = falcon.HTTP_201
|
||||
|
||||
@policy.ApiEnforcer('workflow_orchestrator:get_configdocs')
|
||||
def on_get(self, req, resp, collection_id):
|
||||
"""
|
||||
Returns a collection of documents
|
||||
"""
|
||||
version = (req.params.get('version') or 'buffer')
|
||||
self._validate_version_parameter(version)
|
||||
helper = ConfigdocsHelper(req.context.external_marker)
|
||||
# Not reformatting to JSON or YAML since just passing through
|
||||
resp.body = self.get_collection(
|
||||
helper=helper,
|
||||
collection_id=collection_id,
|
||||
version=version
|
||||
)
|
||||
resp.append_header('Content-Type', 'application/x-yaml')
|
||||
resp.status = falcon.HTTP_200
|
||||
|
||||
def _validate_version_parameter(self, version):
|
||||
# performs validation of version parameter
|
||||
if version.lower() not in VERSION_VALUES:
|
||||
raise ApiError(
|
||||
title='Invalid version query parameter specified',
|
||||
description=(
|
||||
'version must be {}'.format(', '.join(VERSION_VALUES))
|
||||
),
|
||||
status=falcon.HTTP_400,
|
||||
retry=False,
|
||||
)
|
||||
|
||||
def get_collection(self,
|
||||
helper,
|
||||
collection_id,
|
||||
version='buffer'):
|
||||
"""
|
||||
Attempts to retrieve the specified collection of documents
|
||||
either from the buffer or committed version, as specified
|
||||
"""
|
||||
return helper.get_collection_docs(version, collection_id)
|
||||
|
||||
def post_collection(self,
|
||||
helper,
|
||||
collection_id,
|
||||
document_data,
|
||||
buffer_mode_param=None):
|
||||
"""
|
||||
Ingest the collection after checking preconditions
|
||||
"""
|
||||
if buffer_mode_param is None:
|
||||
buffer_mode = BufferMode.REJECTONCONTENTS
|
||||
else:
|
||||
buffer_mode = ConfigdocsHelper.get_buffer_mode(buffer_mode_param)
|
||||
|
||||
if helper.is_buffer_valid_for_bucket(collection_id,
|
||||
buffer_mode):
|
||||
buffer_revision = helper.add_collection(
|
||||
collection_id,
|
||||
document_data
|
||||
)
|
||||
return helper.get_deckhand_validation_status(
|
||||
buffer_revision
|
||||
)
|
||||
else:
|
||||
raise ApiError(
|
||||
title='Invalid collection specified for buffer',
|
||||
description='Buffermode : {}'.format(buffer_mode.value),
|
||||
status=falcon.HTTP_409,
|
||||
error_list=[{
|
||||
'message': ('Buffer is either not empty or the '
|
||||
'collection already exists in buffer. '
|
||||
'Setting a different buffermode may '
|
||||
'provide the desired functionality')
|
||||
}],
|
||||
retry=False,
|
||||
)
|
||||
|
||||
|
||||
class CommitConfigDocsResource(BaseResource):
    """
    Commits the buffered configdocs, if the validations pass (or are
    overridden (force = true))
    Returns the list of validations.
    """

    unable_to_commmit = 'Unable to commit configuration documents'

    @policy.ApiEnforcer('workflow_orchestrator:commit_configdocs')
    @api_lock(ApiLockType.CONFIGDOCS_UPDATE)
    def on_post(self, req, resp):
        """
        Commit the buffered configuration documents.

        Gathers validations for the buffer and commits it when they
        pass, or when ?force=true is supplied. The response body is
        the validation status.
        """
        # force query parameter is False unless explicitly true
        force = req.get_param_as_bool(name='force') or False
        helper = ConfigdocsHelper(req.context.external_marker)
        validations = self.commit_configdocs(helper, force)
        resp.body = self.to_json(validations)
        resp.status = validations.get('code', falcon.HTTP_200)

    def commit_configdocs(self, helper, force):
        """
        Attempts to commit the configdocs

        :param helper: the ConfigdocsHelper used to talk to Deckhand
        :param force: when True, commit even if validations failed
        :returns: the validation status dict; on a forced commit of an
            invalid buffer the code is overridden to 200 and the
            message marked 'FORCED SUCCESS'
        :raises ApiError: 409 when the buffer is empty
        """
        if helper.is_buffer_empty():
            raise ApiError(
                title=CommitConfigDocsResource.unable_to_commmit,
                description='There are no documents in the buffer to commit',
                status=falcon.HTTP_409,
                retry=True
            )
        validations = helper.get_validations_for_buffer()
        if force or validations.get('status') == 'Valid':
            helper.tag_buffer(configdocs_helper.COMMITTED)
        if force and validations.get('status') == 'Invalid':
            # override the status in the response
            validations['code'] = falcon.HTTP_200
            if validations.get('message'):
                validations['message'] = (
                    validations['message'] + ' FORCED SUCCESS'
                )
            else:
                validations['message'] = 'FORCED SUCCESS'
        return validations
|
|
@ -0,0 +1,609 @@
|
|||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Configdocs helper primarily masquerades as a layer in front of
|
||||
Deckhand, providing a representation of a buffer and a committed
|
||||
bucket for Shipyard
|
||||
"""
|
||||
import enum
|
||||
import json
|
||||
import logging
|
||||
import threading
|
||||
|
||||
import falcon
|
||||
from oslo_config import cfg
|
||||
import requests
|
||||
|
||||
from shipyard_airflow.control.deckhand_client import (
|
||||
DeckhandClient,
|
||||
DeckhandPaths,
|
||||
DeckhandRejectedInputError,
|
||||
DeckhandResponseError,
|
||||
DocumentExistsElsewhereError,
|
||||
NoRevisionsExistError
|
||||
)
|
||||
from shipyard_airflow.control.service_endpoints import (
|
||||
Endpoints,
|
||||
get_endpoint,
|
||||
get_token
|
||||
)
|
||||
from shipyard_airflow.errors import ApiError, AppError
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# keys for the revision dict, and consistency of strings for committed
|
||||
# and buffer.
|
||||
COMMITTED = 'committed'
|
||||
BUFFER = 'buffer'
|
||||
LATEST = 'latest'
|
||||
REVISION_COUNT = 'count'
|
||||
|
||||
# string for rollback_commit consistency
|
||||
ROLLBACK_COMMIT = 'rollback_commit'
|
||||
|
||||
|
||||
class BufferMode(enum.Enum):
    """
    Enumeration of the valid values for BufferMode
    """
    # Reject the incoming collection if the buffer has any contents
    REJECTONCONTENTS = 'rejectoncontents'
    # Allow the write only if the collection is not already in the buffer
    APPEND = 'append'
    # Roll the buffer back to the committed revision before writing
    REPLACE = 'replace'
|
||||
|
||||
|
||||
class ConfigdocsHelper(object):
|
||||
"""
|
||||
ConfigdocsHelper provides a layer to represent the buffer and committed
|
||||
versions of design documents.
|
||||
A new configdocs_helper is intended to be used for each invocation of the
|
||||
service.
|
||||
"""
|
||||
|
||||
def __init__(self, context_marker):
    """
    Sets up this Configdocs helper with the supplied
    context marker

    :param context_marker: identifier used to correlate requests
        across services; passed through to the Deckhand client.
    """
    self.deckhand = DeckhandClient(context_marker)
    self.context_marker = context_marker
    # The revision_dict indicates the revisions that are
    # associated with the buffered and committed doc sets. There
    # is a risk of this being out of sync if there is high volume
    # of commits and adds of docs going on in parallel, but not
    # really much more than if this data is used in subsequent
    # statements following a call within a method.
    # Lazily populated and cached by _get_revision_dict().
    self.revision_dict = None
|
||||
|
||||
@staticmethod
def get_buffer_mode(buffermode_string):
    """
    Resolve a string to a BufferMode value.

    Falsy input defaults to REJECTONCONTENTS; an unrecognized
    value yields None.
    """
    if not buffermode_string:
        return BufferMode.REJECTONCONTENTS
    try:
        return BufferMode(buffermode_string.lower())
    except ValueError:
        return None

def is_buffer_empty(self):
    """ Check if the buffer is empty. """
    buffer_revision = self._get_buffer_revision()
    return buffer_revision is None
|
||||
|
||||
def is_collection_in_buffer(self, collection_id):
    """
    Returns if the collection is represented in the buffer

    :param collection_id: the collection (Deckhand bucket) name
    :returns: True when the diff between the committed revision
        (or 0) and the buffer shows this collection as changed
    :raises AppError: 500 when Deckhand responds unexpectedly
    """
    if self.is_buffer_empty():
        return False

    # If there is no committed revision, then it's 0.
    # new revision is ok because we just checked for buffer emptiness
    old_revision_id = self._get_committed_rev_id()
    if old_revision_id is None:
        old_revision_id = 0
    try:
        diff = self.deckhand.get_diff(
            old_revision_id=old_revision_id,
            new_revision_id=self._get_buffer_rev_id()
        )
        # the collection is in the buffer if it's not unmodified
        return diff.get(collection_id, 'unmodified') != 'unmodified'
    except DeckhandResponseError as drex:
        raise AppError(
            title='Unable to retrieve revisions',
            description=(
                'Deckhand has responded unexpectedly: {}:{}'.format(
                    drex.status_code,
                    drex.response_message
                )
            ),
            status=falcon.HTTP_500,
            retry=False,
        )

def is_buffer_valid_for_bucket(self, collection_id, buffermode):
    """
    Indicates if the buffer as it currently is, may be written to
    for the specified collection, based on the buffermode.

    :param collection_id: the collection (bucket) to be written
    :param buffermode: a BufferMode value
    :returns: True when the write may proceed; False when rejected.
        NOTE(review): returns None (falsy) for an unrecognized
        buffermode — confirm callers treat that as a rejection.
        REPLACE mode mutates Deckhand state (rollback or reset) as
        a side effect of this check.
    """
    # can always write if buffer is empty.
    if self.is_buffer_empty():
        return True
    # from here down, the buffer is NOT empty.
    # determine steps by the buffermode
    if buffermode == BufferMode.REJECTONCONTENTS:
        return False
    elif buffermode == BufferMode.APPEND:
        return not self.is_collection_in_buffer(collection_id)
    # replace the buffer with last commit.
    elif buffermode == BufferMode.REPLACE:
        committed_rev_id = None
        committed_rev = self._get_committed_revision()
        if committed_rev:
            committed_rev_id = committed_rev['id']
        if committed_rev_id is None:
            # TODO (bryan-strassner) use rollback to 0 if/when that
            #     is implemented in deckhand.
            #     reset_to_empty has side effect of deleting history
            #     from deckhand although it is only the corner case
            #     where no commit has ever been done.
            self.deckhand.reset_to_empty()
        else:
            self.deckhand.rollback(committed_rev_id)
        return True
|
||||
|
||||
def _get_revision_dict(self):
    """
    Returns a dictionary with values representing the revisions in
    Deckhand that Shipyard cares about - committed, buffer,
    and latest, as well as a count of revisions
    Committed and buffer are revisions associated with the
    shipyard tags. If either of those are not present in deckhand,
    returns None for the value.
    Latest holds the revision information for the newest revision.

    :returns: dict keyed by COMMITTED, BUFFER, LATEST,
        REVISION_COUNT
    :raises AppError: 500 when Deckhand responds unexpectedly
    """
    # return the cached instance version of the revision dict.
    if self.revision_dict is not None:
        return self.revision_dict
    # or generate one for the cache
    committed_revision = None
    buffer_revision = None
    latest_revision = None
    revision_count = 0
    try:
        revisions = self.deckhand.get_revision_list()
        revision_count = len(revisions)
        if revisions:
            latest_revision = revisions[-1]
            # walk from newest to oldest looking for the most
            # recent committed (or rollback-committed) revision
            for revision in reversed(revisions):
                tags = revision.get('tags', [])
                if COMMITTED in tags or ROLLBACK_COMMIT in tags:
                    committed_revision = revision
                    break
                else:
                    # there are buffer revisions, only grab it on
                    # the first pass through
                    # if the first revision is committed, or if there
                    # are no revisions, buffer revision stays None
                    if buffer_revision is None:
                        buffer_revision = revision
    except NoRevisionsExistError:
        # the values of None/None/None/0 are fine
        pass
    except DeckhandResponseError as drex:
        raise AppError(
            title='Unable to retrieve revisions',
            description=(
                'Deckhand has responded unexpectedly: {}:{}'.format(
                    drex.status_code,
                    drex.response_message
                )
            ),
            status=falcon.HTTP_500,
            retry=False,
        )
    self.revision_dict = {
        COMMITTED: committed_revision,
        BUFFER: buffer_revision,
        LATEST: latest_revision,
        REVISION_COUNT: revision_count
    }
    return self.revision_dict
|
||||
|
||||
def _get_buffer_revision(self):
    """Return the buffer revision dict, or None."""
    return self._get_revision_dict().get(BUFFER)

def _get_buffer_rev_id(self):
    """Return the buffer revision id, or None."""
    revision = self._get_revision_dict().get(BUFFER)
    if not revision:
        return None
    return revision['id']

def _get_latest_revision(self):
    """Return the newest revision dict, or None."""
    return self._get_revision_dict().get(LATEST)

def _get_latest_rev_id(self):
    """Return the newest revision id, or None."""
    revision = self._get_revision_dict().get(LATEST)
    if not revision:
        return None
    return revision['id']

def _get_committed_revision(self):
    """Return the committed revision dict, or None."""
    return self._get_revision_dict().get(COMMITTED)

def _get_committed_rev_id(self):
    """Return the committed revision id, or None."""
    revision = self._get_revision_dict().get(COMMITTED)
    if not revision:
        return None
    return revision['id']
|
||||
|
||||
def get_collection_docs(self, version, collection_id):
    """
    Return the requested collection of documents.

    Only the committed version is special-cased; any other value
    (the default is the buffer) is served from the buffer. The
    version value is assumed to have been validated already.
    """
    LOG.info('Retrieving collection %s from %s', collection_id, version)
    if version != COMMITTED:
        return self._get_doc_from_buffer(collection_id)
    return self._get_committed_docs(collection_id)

def _get_doc_from_buffer(self, collection_id):
    """
    Return the collection from the buffer, or raise 404.

    The buffer must actually represent (modify) the collection; a
    collection merely carried through unchanged does not count.
    """
    if not self.is_collection_in_buffer(collection_id):
        raise ApiError(
            title='No documents to retrieve',
            description=('The Shipyard buffer is empty or does not contain '
                         'this collection'),
            status=falcon.HTTP_404,
            retry=False,
        )
    # the prior check guarantees a buffer revision exists
    return self.deckhand.get_docs_from_revision(
        revision_id=self._get_buffer_rev_id(),
        bucket_id=collection_id
    )

def _get_committed_docs(self, collection_id):
    """
    Return the collection from the committed revision, or raise 404.
    """
    committed_rev_id = self._get_committed_rev_id()
    # falsy id means nothing has ever been committed
    if not committed_rev_id:
        raise ApiError(
            title='No documents to retrieve',
            description='There is no committed version of this collection',
            status=falcon.HTTP_404,
            retry=False,
        )
    return self.deckhand.get_docs_from_revision(
        revision_id=committed_rev_id,
        bucket_id=collection_id
    )
|
||||
|
||||
def get_rendered_configdocs(self, version=BUFFER):
    """
    Returns the rendered configuration documents for the specified
    revision (by name BUFFER, COMMITTED)

    :param version: BUFFER (default) or COMMITTED
    :raises ApiError: 404 when the named revision does not exist
    NOTE(review): any other version value silently returns None —
    confirm callers pre-validate the version.
    """
    revision_dict = self._get_revision_dict()
    if version in (BUFFER, COMMITTED):
        if revision_dict.get(version):
            revision_id = revision_dict.get(version).get('id')
            return self.deckhand.get_rendered_docs_from_revision(
                revision_id=revision_id
            )
        else:
            raise ApiError(
                title='This revision does not exist',
                description='{} version does not exist'.format(version),
                status=falcon.HTTP_404,
                retry=False,
            )
|
||||
|
||||
def get_validations_for_buffer(self):
    """
    Convenience method to do validations for buffer version.

    :returns: the validation status dict for the buffer revision
    :raises AppError: 500 when no buffer revision id can be found
    """
    buffer_rev_id = self._get_buffer_rev_id()
    if buffer_rev_id:
        return self.get_validations_for_revision(buffer_rev_id)
    raise AppError(
        title='Unable to start validation of buffer',
        # fixed: implicit concatenation previously produced
        # "fromDeckhand" (missing space)
        description=('Buffer revision id could not be determined from '
                     'Deckhand'),
        status=falcon.HTTP_500,
        retry=False,
    )
|
||||
|
||||
@staticmethod
def _get_design_reference(revision_id):
    """Build the design reference JSON string for a revision.

    Used by other UCP components to locate the rendered documents.
    """
    # Constructs the design reference as json for use by other components
    design_reference = {
        "rel": "design",
        # NOTE(review): RENDERED_REVISION_DOCS already ends in
        # '/rendered-documents', so this href repeats that suffix —
        # confirm consumers expect the doubled path segment.
        "href": "deckhand+{}/rendered-documents".format(
            DeckhandPaths.RENDERED_REVISION_DOCS.value.format(revision_id)
        ),
        "type": "application/x-yaml"
    }
    return json.dumps(design_reference)

@staticmethod
def _get_validation_endpoints():
    """Return the list of component validation endpoints."""
    # returns the list of validation endpoint supported
    val_ep = '{}/validatedesign'
    return [
        {'name': 'Drydock',
         'url': val_ep.format(get_endpoint(Endpoints.DRYDOCK))},
        {'name': 'Armada',
         'url': val_ep.format(get_endpoint(Endpoints.ARMADA))},
    ]

@staticmethod
def _get_validation_threads(validation_endpoints,
                            revision_id,
                            context_marker):
    """Build (unstarted) validation threads for each endpoint.

    Each entry carries the thread plus mutable 'response' and
    'exception' holder dicts the thread writes its outcome into.
    """
    # create a list of validation threads from the endpoints
    validation_threads = []
    for endpoint in validation_endpoints:
        # create a holder for things we need back from the threads
        response = {'response': None}
        exception = {'exception': None}
        validation_threads.append(
            {
                'thread': threading.Thread(
                    target=ConfigdocsHelper._get_validations_for_component,
                    args=(
                        endpoint['url'],
                        ConfigdocsHelper._get_design_reference(
                            revision_id
                        ),
                        response,
                        exception,
                        context_marker
                    )
                ),
                'name': endpoint['name'],
                'url': endpoint['url'],
                'response': response,
                'exception': exception
            }
        )
    return validation_threads
|
||||
|
||||
@staticmethod
def _get_validations_for_component(url,
                                   design_reference,
                                   response,
                                   exception,
                                   context_marker):
    """POST the design reference to a component's validation endpoint.

    Runs in a worker thread: the outcome is communicated via the
    supplied *response* and *exception* holder dicts rather than a
    return value.
    """
    # Invoke the POST for validation
    try:
        headers = {
            'X-Context-Marker': context_marker,
            'X-Auth-Token': get_token(),
            'content-type': 'application/x-yaml'
        }

        http_resp = requests.post(url,
                                  headers=headers,
                                  data=design_reference,
                                  timeout=(5, 30))
        http_resp.raise_for_status()
        # fixed: requests.Response has no .decode(); .text is the
        # body decoded per the response encoding
        raw_response = http_resp.text
        response_dict = json.loads(raw_response)
        response['response'] = response_dict
    except Exception as ex:
        # catch anything exceptional as a failure to validate
        LOG.error(ex)
        exception['exception'] = ex
|
||||
|
||||
def get_validations_for_revision(self, revision_id):
    """
    Use the endpoints for each of the UCP components to validate the
    version indicated. Uses:
    https://github.com/att-comdev/ucp-integration/blob/master/docs/api-conventions.md#post-v10validatedesign
    format.

    :param revision_id: the Deckhand revision to validate
    :returns: a status dict aggregating all validation messages
    :raises AppError: 500 when a component fails to return a usable
        response
    """
    resp_msgs = []

    validation_threads = ConfigdocsHelper._get_validation_threads(
        ConfigdocsHelper._get_validation_endpoints(),
        revision_id,
        self.context_marker
    )
    # trigger each validation in parallel
    for validation_thread in validation_threads:
        if validation_thread.get('thread'):
            validation_thread.get('thread').start()
    # wait for all validations to return
    for validation_thread in validation_threads:
        validation_thread.get('thread').join()

    # check on the response, extract the validations
    error_count = 0
    for validation_thread in validation_threads:
        val_response = validation_thread.get('response')['response']
        if (not val_response or
                validation_thread.get('exception')['exception']):
            # exception was raised, or no body was returned.
            raise AppError(
                title='Unable to properly validate configdocs',
                description=(
                    'Invocation of validation by {} has failed'.format(
                        validation_thread.get('name')
                    )
                ),
                status=falcon.HTTP_500,
                retry=False,
            )
        # fixed: a second 'if not val_response' raise here was
        # unreachable (the condition above already raises for an
        # empty response) and has been removed.
        # invalid status needs collection of messages
        # valid status means that it passed. No messages to collect
        msg_list = val_response.get('details').get('messageList')
        for msg in msg_list:
            is_error = bool(msg.get('error'))
            if is_error:
                error_count = error_count + 1
            resp_msgs.append(
                {
                    'name': validation_thread.get('name'),
                    'message': msg.get('message'),
                    'error': is_error
                }
            )
    # Deckhand does it differently. Incorporate those validation
    # failures
    dh_validations = self._get_deckhand_validations(revision_id)
    error_count += len(dh_validations)
    resp_msgs.extend(dh_validations)
    # return the formatted status response
    return ConfigdocsHelper._format_validations_to_status(
        resp_msgs,
        error_count
    )
|
||||
|
||||
def get_deckhand_validation_status(self, revision_id):
    """
    Return the status dict built from Deckhand's validation results
    for the given revision.
    """
    validations = self._get_deckhand_validations(revision_id)
    return ConfigdocsHelper._format_validations_to_status(
        validations,
        len(validations)
    )

def _get_deckhand_validations(self, revision_id):
    """
    Collect any validation error messages Deckhand has on hand for
    this revision.
    """
    messages = []
    deckhand_val = self.deckhand.get_all_revision_validations(
        revision_id
    )
    for dh_result in deckhand_val.get('results') or []:
        for error in dh_result.get('errors') or []:
            messages.append(
                {
                    'name': dh_result.get('name'),
                    'message': error.get('message'),
                    'error': True
                }
            )
    return messages

@staticmethod
def _format_validations_to_status(val_msgs, error_count):
    """
    Format a list of validation messages and an error count into
    the standard status response dict.
    """
    if error_count > 0:
        status = 'Invalid'
        message = 'Validations failed'
        code = falcon.HTTP_400
    else:
        status = 'Valid'
        message = 'Validations succeeded'
        code = falcon.HTTP_200

    return {
        "kind": "Status",
        "apiVersion": "v1",
        "metadata": {},
        "status": status,
        "message": message,
        "reason": "Validation",
        "details": {
            "errorCount": error_count,
            "messageList": val_msgs,
        },
        "code": code
    }
|
||||
|
||||
def tag_buffer(self, tag):
    """
    Convenience method to tag the buffer version.

    :param tag: the tag string to apply to the buffer revision
    :raises AppError: 500 when no buffer revision id can be found
    """
    buffer_rev_id = self._get_buffer_rev_id()
    if buffer_rev_id is None:
        raise AppError(
            title='Unable to tag buffer as {}'.format(tag),
            # fixed: implicit concatenation previously produced
            # "fromDeckhand" (missing space)
            description=('Buffer revision id could not be determined from '
                         'Deckhand'),
            status=falcon.HTTP_500,
            retry=False,
        )
    self.tag_revision(buffer_rev_id, tag)
|
||||
|
||||
def tag_revision(self, revision_id, tag):
    """
    Tags the specified revision with the specified tag

    :param revision_id: the Deckhand revision to tag
    :param tag: the tag string to apply (e.g. 'committed')
    """
    self.deckhand.tag_revision(revision_id=revision_id, tag=tag)
|
||||
|
||||
def add_collection(self, collection_id, document_string):
    """
    Triggers a call to Deckhand to add a collection(bucket)
    Documents are assumed to be a string input, not a
    collection.
    Returns the id of the buffer version.

    :param collection_id: the bucket name to write
    :param document_string: the document payload as a string
    :raises ApiError: 409 when a document already exists in another
        bucket; also raised when Deckhand rejects the input
    """
    try:
        self.deckhand.put_bucket(collection_id, document_string)
    except DocumentExistsElsewhereError as deee:
        LOG.info('Deckhand has rejected this input because an included '
                 'document exists in another bucket already')
        raise ApiError(
            title='Documents may not exist in more than one collection',
            description=deee.response_message,
            status=falcon.HTTP_409
        )
    except DeckhandRejectedInputError as drie:
        LOG.info('Deckhand has rejected this input because: %s',
                 drie.response_message)
        raise ApiError(
            # NOTE(review): no explicit status here — relies on
            # ApiError's default; confirm a 400 is intended.
            title="Document(s) invalid syntax or otherwise unsuitable",
            description=drie.response_message,
        )
    # reset the revision dict so it regenerates.
    self.revision_dict = None
    return self._get_buffer_rev_id()
|
|
@ -0,0 +1,517 @@
|
|||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Encapsulates a deckhand API client
|
||||
"""
|
||||
import enum
|
||||
import logging
|
||||
|
||||
from oslo_config import cfg
|
||||
import requests
|
||||
from requests.exceptions import RequestException
|
||||
import yaml
|
||||
|
||||
from shipyard_airflow.control.service_endpoints import (Endpoints,
|
||||
get_endpoint,
|
||||
get_token)
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DeckhandPaths(enum.Enum):
    """
    Enumeration of the paths to deckhand

    Values are path patterns relative to the Deckhand service
    endpoint; '{}' slots are filled via str.format (revision ids,
    bucket names, tag/validation names, entry ids).
    """
    BUCKET_DOCS = '/bucket/{}/documents'
    RENDERED_REVISION_DOCS = '/revisions/{}/rendered-documents'
    REVISION = '/revisions/{}'
    REVISION_DIFF = '/revisions/{}/diff/{}'
    REVISION_DOCS = '/revisions/{}/documents'
    REVISION_LIST = '/revisions'
    REVISION_TAG_LIST = '/revisions/{}/tags'
    REVISION_TAG = '/revisions/{}/tags/{}'
    REVISION_VALIDATION_LIST = '/revisions/{}/validations'
    REVISION_VALIDATION = '/revisions/{}/validations/{}'
    REVISION_VALIDATION_ENTRY = (
        '/revisions/{}/validations/{}/entries/{}'
    )
    ROLLBACK = '/rollback/{}'
|
||||
|
||||
|
||||
class DeckhandClient(object):
|
||||
"""
|
||||
A rudimentary client for deckhand in lieu of a provided client
|
||||
"""
|
||||
def __init__(self, context_marker):
    """
    Sets up this Deckhand client with the supplied context marker

    :param context_marker: correlation identifier carried on
        requests to Deckhand
    """
    self.context_marker = context_marker

# Class-level cache of the resolved Deckhand service URL;
# populated lazily by get_path().
_deckhand_svc_url = None
|
||||
|
||||
@staticmethod
def get_path(path):
    """
    Return the full url pattern string for a DeckhandPaths value.

    The Deckhand service endpoint is looked up once and cached on
    the class.
    """
    if not DeckhandClient._deckhand_svc_url:
        DeckhandClient._deckhand_svc_url = get_endpoint(Endpoints.DECKHAND)
    return DeckhandClient._deckhand_svc_url + path.value

def get_latest_revision(self):
    """
    Retrieve the newest revision object.

    :raises NoRevisionsExistError: when Deckhand has no revisions
    """
    revisions = self.get_revision_list()
    if not revisions:
        raise NoRevisionsExistError()
    return revisions[-1]
|
||||
|
||||
def get_revision_list(self):
    """
    Return the list of revision dictionary objects.
    """
    url = DeckhandClient.get_path(DeckhandPaths.REVISION_LIST)
    response = self._get_request(url)
    self._handle_bad_response(response)
    return yaml.load(response.text).get('results', [])

def get_revision_count(self):
    """
    Return the count of revisions in deckhand.
    """
    url = DeckhandClient.get_path(DeckhandPaths.REVISION_LIST)
    response = self._get_request(url)
    self._handle_bad_response(response)
    return yaml.load(response.text)['count']

def get_latest_rev_id(self):
    """
    Return the latest revision's id, or 0 when there are none.
    """
    try:
        latest = self.get_latest_revision()
    except NoRevisionsExistError:
        return 0
    return latest.get('id', 0)
|
||||
|
||||
def put_bucket(self, bucket_name, documents):
    """
    Issues a put against deckhand to store a bucket

    :param bucket_name: the Deckhand bucket to write
    :param documents: the document payload for the bucket
    :raises DeckhandRejectedInputError: on a 400 from Deckhand
    :raises DocumentExistsElsewhereError: on a 409 from Deckhand
    """
    url = DeckhandClient.get_path(
        DeckhandPaths.BUCKET_DOCS
    ).format(bucket_name)

    response = self._put_request(url, document_data=documents)
    if response.status_code == 400:
        # bad input
        raise DeckhandRejectedInputError(
            # TODO (bryan-strassner) The response from DH is json and
            #     should probably be picked apart into a message here
            #     instead of being escaped json when it's done
            response_message=response.text,
            status_code=response.status_code
        )
    if response.status_code == 409:
        # conflicting bucket
        raise DocumentExistsElsewhereError(
            # TODO (bryan-strassner) The response from DH is json and
            #     should probably be picked apart into a message here
            #     instead of being escaped json when it's done
            response_message=response.text
        )
    self._handle_bad_response(response)
|
||||
|
||||
def tag_revision(self, revision_id, tag):
    """
    Adds the supplied tag to the specified revision

    :returns: the parsed (yaml) response body from Deckhand
    """
    url = DeckhandClient.get_path(
        DeckhandPaths.REVISION_TAG
    ).format(revision_id, tag)

    # NOTE(review): unlike the other methods in this client, errors
    # here surface via raise_for_status() (requests.HTTPError)
    # rather than _handle_bad_response() — confirm intentional.
    response = self._post_request(url)
    response.raise_for_status()
    return yaml.load(response.text)
|
||||
|
||||
def rollback(self, target_revision_id):
    """
    Ask Deckhand to create a new revision whose documents/buckets
    exactly match the state of the given prior revision.
    """
    url = DeckhandClient.get_path(
        DeckhandPaths.ROLLBACK
    ).format(target_revision_id)
    self._handle_bad_response(self._post_request(url))

def reset_to_empty(self):
    """
    Warning, this will prompt deckhand to delete everything. gone.
    """
    url = DeckhandClient.get_path(DeckhandPaths.REVISION_LIST)
    self._handle_bad_response(self._delete_request(url))

def get_diff(self, old_revision_id, new_revision_id):
    """
    Return the bucket-based difference between two revisions.
    """
    url = DeckhandClient.get_path(
        DeckhandPaths.REVISION_DIFF
    ).format(old_revision_id, new_revision_id)

    response = self._get_request(url)
    self._handle_bad_response(response)
    return yaml.load(response.text)
|
||||
|
||||
def get_docs_from_revision(self, revision_id, bucket_id=None):
    """
    Retrieves the collection of docs from the revision specified
    for the bucket_id specified
    :returns: a string representing the response.text from Deckhand
    """
    url = DeckhandClient.get_path(
        DeckhandPaths.REVISION_DOCS
    ).format(revision_id)

    # restrict the response to one bucket when requested
    params = {'status.bucket': bucket_id} if bucket_id is not None else None
    response = self._get_request(url, params=params)
    self._handle_bad_response(response)
    return response.text

def get_rendered_docs_from_revision(self, revision_id, bucket_id=None):
    """
    Returns the full set of rendered documents for a revision
    """
    url = DeckhandClient.get_path(
        DeckhandPaths.RENDERED_REVISION_DOCS
    ).format(revision_id)

    params = {'status.bucket': bucket_id} if bucket_id is not None else None
    response = self._get_request(url, params=params)
    self._handle_bad_response(response)
    return response.text
|
||||
|
||||
@staticmethod
|
||||
def _build_validation_base(dh_validation):
|
||||
# creates the base structure for validation response
|
||||
return {
|
||||
'count': dh_validation.get('count'),
|
||||
'results': []
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def _build_validation_entry(dh_val_entry):
|
||||
# creates a validation entry by stripping off the URL
|
||||
# that the end user can't use anyway.
|
||||
dh_val_entry.pop('url', None)
|
||||
return dh_val_entry
|
||||
|
||||
    def get_all_revision_validations(self, revision_id):
        """
        Collects and returns the yamls of the validations for a
        revision

        Walks the Deckhand validation hierarchy three levels deep: the
        base validation list, each named validation subset, and each
        entry within a subset, accumulating the stripped entries into a
        single response structure.

        :param revision_id: the revision whose validations to collect
        :returns: dict with 'count' and 'results' keys when Deckhand
            returned a validation payload; empty dict otherwise
        """
        val_resp = {}
        response = self._get_base_validation_resp(revision_id)
        # if the base call is no good, stop.
        self._handle_bad_response(response)
        # NOTE(review): yaml.load without an explicit Loader is unsafe
        # on untrusted input — consider yaml.safe_load. TODO confirm the
        # project's PyYAML usage policy before changing.
        all_validation_resp = yaml.load(response.text)
        if all_validation_resp:
            # seed the aggregate with the count and an empty results list
            val_resp = DeckhandClient._build_validation_base(
                all_validation_resp
            )
            validations = all_validation_resp.get('results')
            for validation_subset in validations:
                subset_name = validation_subset.get('name')
                subset_response = self._get_subset_validation_response(
                    revision_id,
                    subset_name
                )
                # don't fail hard on a single subset not replying
                # TODO (bryan-strassner) maybe this should fail hard?
                #     what if they all fail?
                if subset_response.status_code >= 400:
                    LOG.error(
                        'Failed to retrieve %s validations for revision %s',
                        subset_name, revision_id
                    )
                val_subset = yaml.load(subset_response.text)
                entries = val_subset.get('results')
                for entry in entries:
                    entry_id = entry.get('id')
                    e_resp = self._get_entry_validation_response(revision_id,
                                                                 subset_name,
                                                                 entry_id)
                    if e_resp.status_code >= 400:
                        # don't fail hard on a single entry not working
                        # TODO (bryan-strassner) maybe this should fail hard?
                        #     what if they all fail?
                        LOG.error(
                            'Failed to retrieve entry %s for category %s '
                            'for revision %s',
                            entry_id, subset_name, revision_id
                        )
                    # NOTE(review): even after a >=400 entry response the
                    # body is still parsed and appended below — confirm
                    # this best-effort behavior is intended.
                    entry = yaml.load(e_resp.text)
                    val_resp.get('results').append(
                        DeckhandClient._build_validation_entry(entry)
                    )
        return val_resp
|
||||
|
||||
def _get_base_validation_resp(self, revision_id):
|
||||
# wraps getting the base validation response
|
||||
url = DeckhandClient.get_path(
|
||||
DeckhandPaths.REVISION_VALIDATION_LIST
|
||||
).format(revision_id)
|
||||
|
||||
return self._get_request(url)
|
||||
|
||||
def _get_subset_validation_response(self, revision_id, subset_name):
|
||||
# wraps getting the subset level of detail of validations
|
||||
subset_url = DeckhandClient.get_path(
|
||||
DeckhandPaths.REVISION_VALIDATION
|
||||
).format(revision_id, subset_name)
|
||||
|
||||
return self._get_request(subset_url)
|
||||
|
||||
def _get_entry_validation_response(self,
|
||||
revision_id,
|
||||
subset_name,
|
||||
entry_id):
|
||||
# wraps getting the entry level detail of validation
|
||||
e_url = DeckhandClient.get_path(
|
||||
DeckhandPaths.REVISION_VALIDATION_ENTRY
|
||||
).format(revision_id, subset_name, entry_id)
|
||||
|
||||
e_resp = self._get_request(e_url)
|
||||
|
||||
@staticmethod
|
||||
def _handle_bad_response(response, threshold=400):
|
||||
# common handler for bad responses from invoking Deckhand
|
||||
# rasises a DeckhandResponseError if the response status
|
||||
# is >= threshold
|
||||
if response.status_code >= threshold:
|
||||
LOG.error(
|
||||
('An undesired response was returned by Deckhand. '
|
||||
'Status: %s above threshold %s. Response text: %s'),
|
||||
response.status_code,
|
||||
threshold,
|
||||
response.text
|
||||
)
|
||||
raise DeckhandResponseError(
|
||||
status_code=response.status_code,
|
||||
response_message=response.text
|
||||
)
|
||||
|
||||
# the _get, _put, _post functions below automatically supply
|
||||
# the following headers:
|
||||
# content-type: application/x-yaml
|
||||
# X-Context-Marker: {the context marker}
|
||||
# X-Auth-Token: {a current auth token}
|
||||
|
||||
@staticmethod
|
||||
def _log_request(method, url, params=None):
|
||||
# logs the details of a request being made
|
||||
LOG.info('Invoking %s %s', method, url)
|
||||
param_str = ''
|
||||
if params:
|
||||
param_str = ', '.join(
|
||||
"{}={}".format(key, val) for (key, val) in params.items()
|
||||
)
|
||||
LOG.info('Including parameters: %s', param_str)
|
||||
|
||||
def _put_request(self, url, document_data=None, params=None):
|
||||
# invokes a PUT against the specified URL with the
|
||||
# supplied document_data body
|
||||
try:
|
||||
headers = {
|
||||
'X-Context-Marker': self.context_marker,
|
||||
'X-Auth-Token': get_token()
|
||||
}
|
||||
if document_data is not None:
|
||||
headers['content-type'] = 'application/x-yaml'
|
||||
|
||||
DeckhandClient._log_request('PUT', url, params)
|
||||
response = requests.put(url,
|
||||
params=params,
|
||||
headers=headers,
|
||||
data=document_data,
|
||||
timeout=(5, 30))
|
||||
return response
|
||||
except RequestException as rex:
|
||||
LOG.error(rex)
|
||||
raise DeckhandAccessError(
|
||||
response_message=(
|
||||
'Unable to Invoke deckhand: {}'.format(str(rex)),
|
||||
)
|
||||
)
|
||||
|
||||
def _get_request(self, url, params=None):
|
||||
# invokes a GET against the specified URL
|
||||
try:
|
||||
headers = {
|
||||
'content-type': 'application/x-yaml',
|
||||
'X-Context-Marker': self.context_marker,
|
||||
'X-Auth-Token': get_token()
|
||||
}
|
||||
|
||||
DeckhandClient._log_request('GET', url, params)
|
||||
response = requests.get(url,
|
||||
params=params,
|
||||
headers=headers,
|
||||
timeout=(5, 30))
|
||||
return response
|
||||
except RequestException as rex:
|
||||
LOG.error(rex)
|
||||
raise DeckhandAccessError(
|
||||
response_message=(
|
||||
'Unable to Invoke deckhand: {}'.format(str(rex)),
|
||||
)
|
||||
)
|
||||
|
||||
def _post_request(self, url, document_data=None, params=None):
|
||||
# invokes a POST against the specified URL with the
|
||||
# supplied document_data body
|
||||
try:
|
||||
headers = {
|
||||
'X-Context-Marker': self.context_marker,
|
||||
'X-Auth-Token': get_token()
|
||||
}
|
||||
if document_data is not None:
|
||||
headers['content-type'] = 'application/x-yaml'
|
||||
|
||||
DeckhandClient._log_request('POST', url, params)
|
||||
response = requests.post(url,
|
||||
params=params,
|
||||
headers=headers,
|
||||
data=document_data,
|
||||
timeout=(5, 30))
|
||||
return response
|
||||
except RequestException as rex:
|
||||
LOG.error(rex)
|
||||
raise DeckhandAccessError(
|
||||
response_message=(
|
||||
'Unable to Invoke deckhand: {}'.format(str(rex)),
|
||||
)
|
||||
)
|
||||
|
||||
def _delete_request(self, url, params=None):
|
||||
# invokes a DELETE against the specified URL
|
||||
try:
|
||||
headers = {
|
||||
'X-Context-Marker': self.context_marker,
|
||||
'X-Auth-Token': get_token()
|
||||
}
|
||||
|
||||
DeckhandClient._log_request('DELETE', url, params)
|
||||
response = requests.delete(url,
|
||||
params=params,
|
||||
headers=headers,
|
||||
timeout=(5, 30))
|
||||
return response
|
||||
except RequestException as rex:
|
||||
LOG.error(rex)
|
||||
raise DeckhandAccessError(
|
||||
response_message=(
|
||||
'Unable to Invoke deckhand: {}'.format(str(rex)),
|
||||
)
|
||||
)
|
||||
|
||||
#
|
||||
# Deckhand stateful messages wrapped as exceptions
|
||||
#
|
||||
|
||||
|
||||
class DeckhandStatefulError(Exception):
    """
    Base exception for errors that indicate some stateful-based
    condition in deckhand. Not intended for use directly
    """

    def __init__(self, response_message=None):
        """
        :param response_message: optional human-readable detail about
            the stateful condition
        """
        super().__init__()
        # carried alongside the exception for API error translation
        self.response_message = response_message
|
||||
|
||||
|
||||
class NoRevisionsExistError(DeckhandStatefulError):
    """
    Indicates that no revisions exist when trying to retrieve the latest
    revision (Deckhand is empty)
    """
    # no extra state beyond the DeckhandStatefulError base
    pass
|
||||
|
||||
|
||||
class DocumentExistsElsewhereError(DeckhandStatefulError):
    """
    Indicates that a document being added is active in
    a bucket that doesn't match the bucket currently
    being added to deckhand.
    """
    # no extra state beyond the DeckhandStatefulError base
    pass
|
||||
|
||||
#
|
||||
# Deckhand processing failures reported by Deckhand
|
||||
#
|
||||
|
||||
|
||||
class DeckhandResponseError(Exception):
    """
    Indicates that a response was returned from
    Deckhand that was not expected
    """

    def __init__(self, status_code, response_message=None):
        """
        :param status_code: the HTTP status Deckhand responded with
        :param response_message: the body text of the bad response
        """
        super().__init__()
        # both fields are read by callers translating this to an
        # API-level error
        self.status_code = status_code
        self.response_message = response_message
|
||||
|
||||
|
||||
class DeckhandRejectedInputError(DeckhandResponseError):
    """
    Indicates that Deckhand has rejected input for some reason
    This is usually accompanied by a 400 status code from Deckhand
    """
    # no extra state beyond the DeckhandResponseError base
    pass
|
||||
|
||||
#
|
||||
# Errors accessing Deckhand
|
||||
#
|
||||
|
||||
|
||||
class DeckhandAccessError(Exception):
    """
    Used to indicate that accessing Deckhand has failed.
    This is not the same as a bad response from Deckhand:
    See DeckhandResponseError
    """

    def __init__(self, response_message=None):
        """
        :param response_message: optional text describing the access
            failure
        """
        super().__init__()
        # detail about why Deckhand could not be reached
        self.response_message = response_message
|
|
@ -0,0 +1,67 @@
|
|||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Module for resources representing the renderedconfigdocs API
|
||||
"""
|
||||
|
||||
import falcon
|
||||
from oslo_config import cfg
|
||||
|
||||
from shipyard_airflow import policy
|
||||
from shipyard_airflow.control.base import BaseResource
|
||||
from shipyard_airflow.control.configdocs_helper import ConfigdocsHelper
|
||||
from shipyard_airflow.errors import ApiError
|
||||
|
||||
CONF = cfg.CONF
|
||||
VERSION_VALUES = ['buffer', 'committed']
|
||||
|
||||
|
||||
class RenderedConfigDocsResource(BaseResource):
    """
    RenderedConfigDocsResource represents the retrieval of configdocs
    in a complete or rendered state.
    """

    @policy.ApiEnforcer('workflow_orchestrator:get_renderedconfigdocs')
    def on_get(self, req, resp):
        """
        Returns the whole set of rendered documents
        """
        # absent or empty query parameter defaults to the buffer version
        version = req.params.get('version') or 'buffer'
        self._validate_version_parameter(version)
        helper = ConfigdocsHelper(req.context.external_marker)
        resp.body = self.get_rendered_configdocs(
            helper=helper,
            version=version
        )
        resp.append_header('Content-Type', 'application/x-yaml')
        resp.status = falcon.HTTP_200

    def _validate_version_parameter(self, version):
        """Raise an ApiError when version is not an accepted value."""
        if version.lower() in VERSION_VALUES:
            return
        raise ApiError(
            title='Invalid version query parameter specified',
            description=(
                'version must be {}'.format(', '.join(VERSION_VALUES))
            ),
            status=falcon.HTTP_400,
            retry=False,
        )

    def get_rendered_configdocs(self, helper, version='buffer'):
        """
        Get and return the rendered configdocs from the helper/Deckhand
        """
        return helper.get_rendered_configdocs(version)
|
|
@ -0,0 +1,126 @@
|
|||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Module to encapsulate thet endpoint lookup for the services used
|
||||
by Shipyard
|
||||
"""
|
||||
|
||||
import enum
|
||||
import logging
|
||||
|
||||
import falcon
|
||||
from keystoneauth1 import session
|
||||
from keystoneauth1.identity import v3
|
||||
from keystoneauth1.exceptions.auth import AuthorizationFailure
|
||||
from keystoneauth1.exceptions.catalog import EndpointNotFound
|
||||
from oslo_config import cfg
|
||||
from shipyard_airflow.errors import AppError
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Endpoints(enum.Enum):
    """
    Enumeration of the endpoints that are served by this endpoint manager

    The value of each member is the configuration section name used by
    _get_service_type to resolve the service type.
    """
    # Shipyard's own API
    SHIPYARD = 'shipyard'
    # bare-metal provisioning service
    DRYDOCK = 'drydock'
    # chart/deployment service
    ARMADA = 'armada'
    # document storage service
    DECKHAND = 'deckhand'
|
||||
|
||||
|
||||
def _get_service_type(endpoint):
    """
    Because these values should not be used until after initialization,
    they cannot be directly associated with the enum. This method takes
    the enum value and retrieves the values when accessed the first time.
    :param Endpoints endpoint: The endpoint to look up
    :returns: The service type value for the named endpoint
    :rtype: str
    :raises AppError: if not provided a valid Endpoints enumeration value
    """
    if isinstance(endpoint, Endpoints):
        endpoint_values = {
            Endpoints.SHIPYARD: CONF.shipyard.service_type,
            Endpoints.DRYDOCK: CONF.drydock.service_type,
            Endpoints.ARMADA: CONF.armada.service_type,
            Endpoints.DECKHAND: CONF.deckhand.service_type,
        }
        return endpoint_values.get(endpoint)
    # Bug fix: this branch is reached exactly when endpoint is NOT an
    # Endpoints member, so endpoint.name could raise AttributeError
    # while building the error. Format the raw value instead.
    raise AppError(
        title='Endpoint is not known',
        description=(
            'Shipyard is trying to reach an unknown endpoint: {}'.format(
                endpoint
            )
        ),
        status=falcon.HTTP_500,
        retry=False
    )
|
||||
|
||||
|
||||
def get_endpoint(endpoint):
    """
    Wraps calls to keystone for lookup of an endpoint by service type
    :param Endpoints endpoint: The endpoint to look up
    :returns: The url string of the endpoint
    :rtype: str
    :raises AppError: if the endpoint cannot be resolved
    """
    service_type = _get_service_type(endpoint)
    try:
        return _get_ks_session().get_endpoint(
            interface='internal',
            service_type=service_type)
    except EndpointNotFound:
        # Bug fix: the log previously said "public interface" although
        # the lookup requests the internal interface.
        LOG.error('Could not find an internal interface for %s',
                  endpoint.name)
        raise AppError(
            title='Can not access service endpoint',
            description=(
                'Keystone catalog has no internal endpoint for service type: '
                '{}'.format(service_type)
            ),
            status=falcon.HTTP_500,
            retry=False)
|
||||
|
||||
|
||||
def get_token():
    """
    Returns the simple token string for a token acquired from keystone
    """
    auth_headers = _get_ks_session().get_auth_headers()
    return auth_headers.get('X-Auth-Token')
|
||||
|
||||
|
||||
def _get_ks_session():
    """
    Establish and return a keystone session.

    Credentials are pulled from the keystone_authtoken configuration
    section.
    :returns: a keystoneauth1 Session
    :raises AppError: if keystone rejects the authorization request
    """
    keystone_auth = {}
    for attr in ('auth_url', 'password', 'project_domain_name',
                 'project_name', 'username', 'user_domain_name'):
        keystone_auth[attr] = CONF.get('keystone_authtoken').get(attr)
    try:
        auth = v3.Password(**keystone_auth)
        return session.Session(auth=auth)
    except AuthorizationFailure as aferr:
        LOG.error('Could not authorize against keystone: %s',
                  str(aferr))
        raise AppError(
            title='Could not authorize Shipyard against Keystone',
            description=(
                # Bug fix: typo "reqjected" in the user-facing message.
                'Keystone has rejected the authorization request by Shipyard'
            ),
            status=falcon.HTTP_500,
            retry=False
        )
|
|
@ -11,6 +11,10 @@
|
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
module for reused or baseclass portions of DB access
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
import sqlalchemy
|
||||
|
@ -98,19 +102,19 @@ class DbAccess:
|
|||
"""
|
||||
Performs a parameterized insert
|
||||
"""
|
||||
self.perform_change_dml(query, **kwargs)
|
||||
return self.perform_change_dml(query, **kwargs)
|
||||
|
||||
def perform_update(self, query, **kwargs):
|
||||
"""
|
||||
Performs a parameterized update
|
||||
"""
|
||||
self.perform_change_dml(query, **kwargs)
|
||||
return self.perform_change_dml(query, **kwargs)
|
||||
|
||||
def perform_delete(self, query, **kwargs):
|
||||
"""
|
||||
Performs a parameterized delete
|
||||
"""
|
||||
self.perform_change_dml(query, **kwargs)
|
||||
return self.perform_change_dml(query, **kwargs)
|
||||
|
||||
def perform_change_dml(self, query, **kwargs):
|
||||
"""
|
||||
|
@ -119,4 +123,4 @@ class DbAccess:
|
|||
LOG.debug('Query: %s', query)
|
||||
if query is not None:
|
||||
with self.get_engine().connect() as connection:
|
||||
connection.execute(query, **kwargs)
|
||||
return connection.execute(query, **kwargs)
|
||||
|
|
|
@ -148,6 +148,63 @@ class ShipyardDbAccess(DbAccess):
|
|||
:user )
|
||||
''')
|
||||
|
||||
# Insert a lock record if there's not a conflicting one
|
||||
INSERT_API_LOCK = sqlalchemy.sql.text('''
|
||||
INSERT INTO
|
||||
api_locks (
|
||||
"id",
|
||||
"lock_type",
|
||||
"datetime",
|
||||
"expires",
|
||||
"released",
|
||||
"user",
|
||||
"reference_id"
|
||||
)
|
||||
SELECT
|
||||
:id,
|
||||
:lock_type,
|
||||
CURRENT_TIMESTAMP,
|
||||
:expires,
|
||||
'false',
|
||||
:user,
|
||||
:reference_id
|
||||
WHERE
|
||||
NOT EXISTS (SELECT
|
||||
"id"
|
||||
FROM
|
||||
api_locks
|
||||
WHERE
|
||||
"lock_type" = :lock_type
|
||||
AND "released" = 'false'
|
||||
AND "datetime" + (interval '1 second' * "expires") > now())
|
||||
''')
|
||||
|
||||
# Find the latest active lock for the type
|
||||
SELECT_LATEST_LOCK_BY_TYPE = sqlalchemy.sql.text('''
|
||||
SELECT
|
||||
"id"
|
||||
FROM
|
||||
api_locks
|
||||
WHERE
|
||||
"lock_type" = :lock_type
|
||||
AND "datetime" + (interval '1 second' * "expires") > now()
|
||||
AND "released" = 'false'
|
||||
ORDER BY
|
||||
"datetime" DESC,
|
||||
"id" DESC
|
||||
LIMIT 1
|
||||
''')
|
||||
|
||||
# Releasea a lock
|
||||
UPDATE_LOCK_RELEASE = sqlalchemy.sql.text('''
|
||||
UPDATE
|
||||
api_locks
|
||||
SET
|
||||
"released" = 'true'
|
||||
WHERE
|
||||
"id" = :id
|
||||
''')
|
||||
|
||||
def __init__(self):
|
||||
DbAccess.__init__(self)
|
||||
|
||||
|
@ -155,6 +212,7 @@ class ShipyardDbAccess(DbAccess):
|
|||
"""
|
||||
Returns the connection string for this db connection
|
||||
"""
|
||||
|
||||
return CONF.base.postgresql_db
|
||||
|
||||
def update_db(self):
|
||||
|
@ -192,9 +250,8 @@ class ShipyardDbAccess(DbAccess):
|
|||
ShipyardDbAccess.SELECT_ACTION_BY_ID, action_id=action_id)
|
||||
if actions_array:
|
||||
return actions_array[0]
|
||||
else:
|
||||
# Not found
|
||||
return None
|
||||
# Not found
|
||||
return None
|
||||
|
||||
def get_preflight_validation_fails(self):
|
||||
"""
|
||||
|
@ -211,8 +268,8 @@ class ShipyardDbAccess(DbAccess):
|
|||
validation_id=validation_id)
|
||||
if validation_array:
|
||||
return validation_array[0]
|
||||
else:
|
||||
return None
|
||||
# No validations
|
||||
return None
|
||||
|
||||
def get_validation_by_action_id(self, action_id):
|
||||
"""
|
||||
|
@ -253,3 +310,41 @@ class ShipyardDbAccess(DbAccess):
|
|||
action_id=ac_audit['action_id'],
|
||||
command=ac_audit['command'],
|
||||
user=ac_audit['user'])
|
||||
|
||||
    def insert_api_lock(self,
                        lock_id,
                        lock_type,
                        expires,
                        user,
                        reference_id):
        """
        Inserts an api lock

        :param lock_id: the (ULID) id to assign to the new lock
        :param lock_type: the category of lock being acquired
        :param expires: lifetime of the lock in seconds
        :param user: the user requesting the lock
        :param reference_id: id of the entity the lock is held for
        :returns: True when exactly one row was inserted (the lock was
            acquired); False when a conflicting active lock prevented
            the insert
        """
        result = self.perform_insert(ShipyardDbAccess.INSERT_API_LOCK,
                                     id=lock_id,
                                     lock_type=lock_type,
                                     expires=expires,
                                     user=user,
                                     reference_id=reference_id)
        # the INSERT ... WHERE NOT EXISTS statement inserts zero rows
        # when an unexpired, unreleased lock of the same type exists
        return result.rowcount == 1
|
||||
|
||||
    def get_api_lock(self, lock_type):
        """
        Retrieves the id of the newest current lock for the given
        type.

        :param lock_type: the category of lock to look up
        :returns: the lock id string, or None when no active, unexpired
            lock of that type exists
        """
        result_dict = self.get_as_dict_array(
            ShipyardDbAccess.SELECT_LATEST_LOCK_BY_TYPE,
            lock_type=lock_type
        )
        if result_dict:
            # query is LIMIT 1 ordered newest-first
            return result_dict[0].get('id')
        # No lock found
        return None
|
||||
|
||||
    def release_api_lock(self, lock_id):
        """
        Marks the lock specified by the id as released.

        :param lock_id: the id of the lock to release
        """
        self.perform_update(ShipyardDbAccess.UPDATE_LOCK_RELEASE,
                            id=lock_id)
|
||||
|
|
|
@ -98,6 +98,44 @@ class ShipyardPolicy(object):
|
|||
'method': 'POST'
|
||||
}]
|
||||
),
|
||||
policy.DocumentedRuleDefault(
|
||||
'workflow_orchestrator:create_configdocs',
|
||||
RULE_ADMIN_REQUIRED,
|
||||
'Ingest configuration documents for the site design',
|
||||
[{
|
||||
'path': '/api/v1.0/configdocs/{collection_id}',
|
||||
'method': 'POST'
|
||||
}]
|
||||
),
|
||||
policy.DocumentedRuleDefault(
|
||||
'workflow_orchestrator:get_configdocs',
|
||||
RULE_ADMIN_REQUIRED,
|
||||
'Retrieve a collection of configuration documents',
|
||||
[{
|
||||
'path': '/api/v1.0/configdocs/{collection_id}',
|
||||
'method': 'GET'
|
||||
}]
|
||||
),
|
||||
policy.DocumentedRuleDefault(
|
||||
'workflow_orchestrator:commit_configdocs',
|
||||
RULE_ADMIN_REQUIRED,
|
||||
('Move documents from the Shipyard buffer to the committed '
|
||||
'documents'),
|
||||
[{
|
||||
'path': '/api/v1.0/commitconfigdocs',
|
||||
'method': 'POST'
|
||||
}]
|
||||
),
|
||||
policy.DocumentedRuleDefault(
|
||||
'workflow_orchestrator:get_renderedconfigdocs',
|
||||
RULE_ADMIN_REQUIRED,
|
||||
('Retrieve the configuration documents rendered by Deckhand into '
|
||||
'a complete design'),
|
||||
[{
|
||||
'path': '/api/v1.0/renderedconfigdocs',
|
||||
'method': 'GET'
|
||||
}]
|
||||
),
|
||||
]
|
||||
|
||||
# Regions Policy
|
||||
|
|
|
@ -0,0 +1,22 @@
|
|||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
A fake response object for testing
|
||||
"""
|
||||
|
||||
|
||||
class FakeResponse():
    """Minimal stand-in for a requests response object in tests."""

    def __init__(self, status_code, text):
        """
        :param status_code: the HTTP status to report
        :param text: the body text to report
        """
        self.status_code = status_code
        self.text = text
|
|
@ -0,0 +1,146 @@
|
|||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
""" Tests for the configdocs_api
|
||||
"""
|
||||
from mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from shipyard_airflow.control.configdocs_helper import ConfigdocsHelper
|
||||
from shipyard_airflow.control.configdocs_api import (CommitConfigDocsResource,
|
||||
ConfigDocsResource)
|
||||
from shipyard_airflow.errors import ApiError
|
||||
|
||||
|
||||
def test__validate_version_parameter():
    """
    test of the version parameter validation

    Invalid values must raise ApiError; the two accepted values must
    pass without raising.
    """
    cdr = ConfigDocsResource()
    with pytest.raises(ApiError):
        cdr._validate_version_parameter('asdfjkl')

    for version in ('buffer', 'committed'):
        try:
            cdr._validate_version_parameter(version)
        except ApiError:
            # pytest.fail reports a clear reason instead of the former
            # bare "assert False"
            pytest.fail('{} should be a valid version'.format(version))
|
||||
|
||||
|
||||
def test_get_collection():
    """Verify get_collection delegates to the helper with defaults."""
    with patch.object(ConfigdocsHelper, 'get_collection_docs') as mock_method:
        resource = ConfigDocsResource()
        helper = ConfigdocsHelper('')
        resource.get_collection(helper, 'apples')

    # the buffer version is the default passed through to the helper
    mock_method.assert_called_once_with('buffer', 'apples')
|
||||
|
||||
|
||||
def test_post_collection():
    """
    Tests the post collection method of the ConfigdocsResource

    Covers the success path (buffer valid for the bucket, the helper's
    add_collection is invoked) and the failure path (invalid buffer
    raises ApiError).
    """
    helper = None
    collection_id = 'trees'
    document_data = 'lots of info'
    with patch.object(ConfigdocsHelper, 'add_collection') as mock_method:
        cdr = ConfigDocsResource()
        helper = ConfigdocsHelper('')
        # stub buffer/validation checks so the add path is taken
        helper.is_buffer_valid_for_bucket = lambda a, b: True
        helper.get_deckhand_validation_status = (
            lambda a: ConfigdocsHelper._format_validations_to_status([], 0)
        )
        cdr.post_collection(helper=helper,
                            collection_id=collection_id,
                            document_data=document_data)

    mock_method.assert_called_once_with(collection_id, document_data)

    with pytest.raises(ApiError):
        cdr = ConfigDocsResource()
        helper = ConfigdocsHelper('')
        # not valid for bucket
        helper.is_buffer_valid_for_bucket = lambda a, b: False
        helper.get_deckhand_validation_status = (
            lambda a: ConfigdocsHelper._format_validations_to_status([], 0)
        )
        cdr.post_collection(helper=helper,
                            collection_id=collection_id,
                            document_data=document_data)
|
||||
|
||||
|
||||
def test_commit_configdocs():
    """
    Tests the CommitConfigDocsResource method commit_configdocs

    Covers a valid buffer (tag_buffer invoked with 'committed') and an
    invalid buffer (validation failure details are passed through).
    """
    ccdr = CommitConfigDocsResource()
    commit_resp = None
    with patch.object(ConfigdocsHelper, 'tag_buffer') as mock_method:
        helper = ConfigdocsHelper('')
        helper.is_buffer_empty = lambda: False
        helper.get_validations_for_buffer = lambda: {'status': 'Valid'}
        commit_resp = ccdr.commit_configdocs(helper, False)

    mock_method.assert_called_once_with('committed')
    assert commit_resp['status'] == 'Valid'

    commit_resp = None
    with patch.object(ConfigdocsHelper, 'tag_buffer') as mock_method:
        helper = ConfigdocsHelper('')
        helper.is_buffer_empty = lambda: False
        # invalid validations: the response mirrors the failure details
        helper.get_validations_for_buffer = (
            lambda: {
                'status': 'Invalid',
                'code': '400 Bad Request',
                'message': 'this is a mock response'
            }
        )
        commit_resp = ccdr.commit_configdocs(helper, False)
    assert '400' in commit_resp['code']
    assert commit_resp['message'] is not None
    assert commit_resp['status'] == 'Invalid'
|
||||
|
||||
|
||||
def test_commit_configdocs_force():
    """
    Tests the CommitConfigDocsResource method commit_configdocs

    Forcing a commit of an invalid buffer still tags it and reports a
    FORCED message.
    """
    ccdr = CommitConfigDocsResource()
    commit_resp = None
    with patch.object(ConfigdocsHelper, 'tag_buffer') as mock_method:
        helper = ConfigdocsHelper('')
        helper.is_buffer_empty = lambda: False
        helper.get_validations_for_buffer = lambda: {'status': 'Invalid'}
        commit_resp = ccdr.commit_configdocs(helper, True)

    mock_method.assert_called_once_with('committed')
    # removed leftover debug print(commit_resp)
    assert '200' in commit_resp['code']
    assert 'FORCED' in commit_resp['message']
    assert commit_resp['status'] == 'Invalid'
|
||||
|
||||
|
||||
def test_commit_configdocs_buffer_err():
    """
    Tests the CommitConfigDocsResource method commit_configdocs

    Committing an empty buffer must raise ApiError.
    """
    ccdr = CommitConfigDocsResource()

    with pytest.raises(ApiError):
        helper = ConfigdocsHelper('')
        helper.get_validations_for_buffer = lambda: {'status': 'Valid'}
        helper.is_buffer_empty = lambda: True
        ccdr.commit_configdocs(helper, False)
|
|
@ -0,0 +1,643 @@
|
|||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import json
|
||||
from unittest.mock import patch
|
||||
import yaml
|
||||
|
||||
import pytest
|
||||
|
||||
from .fake_response import FakeResponse
|
||||
from shipyard_airflow.control import configdocs_helper
|
||||
from shipyard_airflow.control.configdocs_helper import (BufferMode,
|
||||
ConfigdocsHelper)
|
||||
from shipyard_airflow.control.deckhand_client import (DeckhandClient,
|
||||
DeckhandResponseError,
|
||||
NoRevisionsExistError)
|
||||
from shipyard_airflow.errors import ApiError, AppError
|
||||
|
||||
REV_BUFFER_DICT = {
|
||||
'committed': {'id': 3,
|
||||
'url': 'url3',
|
||||
'createdAt': '2017-07-14T21:23Z',
|
||||
'buckets': ['mop', 'slop'],
|
||||
'tags': ['committed'],
|
||||
'validationPolicies': {}},
|
||||
'buffer': {'id': 5,
|
||||
'url': 'url5',
|
||||
'createdAt': '2017-07-16T21:23Z',
|
||||
'buckets': ['mop', 'chum'],
|
||||
'tags': ['deckhand_sez_hi'],
|
||||
'validationPolicies': {}},
|
||||
'latest': {'id': 5,
|
||||
'url': 'url5',
|
||||
'createdAt': '2017-07-16T21:23Z',
|
||||
'buckets': ['mop', 'chum'],
|
||||
'tags': ['deckhand_sez_hi'],
|
||||
'validationPolicies': {}},
|
||||
'revision_count': 5
|
||||
}
|
||||
|
||||
DIFF_BUFFER_DICT = {
|
||||
'mop': 'unmodified',
|
||||
'chum': 'created',
|
||||
'slop': 'deleted'
|
||||
}
|
||||
|
||||
REV_BUFF_EMPTY_DICT = {
|
||||
'committed': {'id': 3,
|
||||
'url': 'url3',
|
||||
'createdAt': '2017-07-14T21:23Z',
|
||||
'buckets': ['mop'],
|
||||
'tags': ['committed'],
|
||||
'validationPolicies': {}},
|
||||
'buffer': None,
|
||||
'latest': {'id': 3,
|
||||
'url': 'url3',
|
||||
'createdAt': '2017-07-14T21:23Z',
|
||||
'buckets': ['mop'],
|
||||
'tags': ['committed'],
|
||||
'validationPolicies': {}},
|
||||
'revision_count': 3
|
||||
}
|
||||
|
||||
DIFF_BUFF_EMPTY_DICT = {
|
||||
'mop': 'unmodified'
|
||||
}
|
||||
|
||||
REV_NO_COMMIT_DICT = {
|
||||
'committed': None,
|
||||
'buffer': {'id': 3,
|
||||
'url': 'url3',
|
||||
'createdAt': '2017-07-14T21:23Z',
|
||||
'buckets': ['mop'],
|
||||
'tags': [],
|
||||
'validationPolicies': {}},
|
||||
'latest': {'id': 3,
|
||||
'url': 'url3',
|
||||
'createdAt': '2017-07-14T21:23Z',
|
||||
'buckets': ['mop'],
|
||||
'tags': [],
|
||||
'validationPolicies': {}},
|
||||
'revision_count': 3
|
||||
}
|
||||
|
||||
DIFF_NO_COMMIT_DICT = {
|
||||
'mop': 'created'
|
||||
}
|
||||
|
||||
REV_EMPTY_DICT = {
|
||||
'committed': None,
|
||||
'buffer': None,
|
||||
'latest': None,
|
||||
'revision_count': 0
|
||||
}
|
||||
|
||||
DIFF_EMPTY_DICT = {}
|
||||
|
||||
REV_COMMIT_AND_BUFFER_DICT = {
|
||||
'committed': {'id': 1,
|
||||
'url': 'url3',
|
||||
'createdAt': '2017-07-14T21:23Z',
|
||||
'buckets': ['mop'],
|
||||
'tags': ['committed'],
|
||||
'validationPolicies': {}},
|
||||
'buffer': {'id': 3,
|
||||
'url': 'url3',
|
||||
'createdAt': '2017-07-14T21:23Z',
|
||||
'buckets': ['mop'],
|
||||
'tags': [],
|
||||
'validationPolicies': {}},
|
||||
'latest': {'id': 3,
|
||||
'url': 'url3',
|
||||
'createdAt': '2017-07-14T21:23Z',
|
||||
'buckets': ['mop'],
|
||||
'tags': [],
|
||||
'validationPolicies': {}},
|
||||
'revision_count': 3
|
||||
}
|
||||
|
||||
DIFF_COMMIT_AND_BUFFER_DICT = {
|
||||
'mop': 'modified'
|
||||
}
|
||||
|
||||
|
||||
def test_construct_configdocs_helper():
    """Verify the context marker given to the constructor is propagated.

    The helper must both keep the marker itself and hand it down to the
    wrapped deckhand client.
    """
    context_marker = 'marker'
    cd_helper = ConfigdocsHelper(context_marker)
    assert cd_helper.context_marker == context_marker
    assert cd_helper.deckhand.context_marker == context_marker
|
||||
|
||||
|
||||
def test_get_buffer_mode():
    """Check string-to-BufferMode resolution, including edge cases."""
    # Missing/empty values default to the safest mode.
    for none_like in ('', None):
        assert (ConfigdocsHelper.get_buffer_mode(none_like) ==
                BufferMode.REJECTONCONTENTS)

    # Recognized mode names map to their enum members.
    valid_modes = {
        'rejectoncontents': BufferMode.REJECTONCONTENTS,
        'append': BufferMode.APPEND,
        'replace': BufferMode.REPLACE,
    }
    for mode_name, expected_mode in valid_modes.items():
        assert ConfigdocsHelper.get_buffer_mode(mode_name) == expected_mode

    # Lookup is case insensitive.
    assert (ConfigdocsHelper.get_buffer_mode('ReJEcTOnConTenTs') ==
            BufferMode.REJECTONCONTENTS)

    # Unrecognized values yield None.
    assert ConfigdocsHelper.get_buffer_mode('hippopotomus') is None
|
||||
|
||||
|
||||
def test_is_buffer_emtpy():
    """Exercise is_buffer_empty against several stubbed revision states."""
    # NOTE: function name preserves the historical misspelling ("emtpy").
    cd_helper = ConfigdocsHelper('')

    # Pairs of (revision dict to stub in, whether the buffer is empty).
    cases = [
        (REV_BUFFER_DICT, False),
        (REV_BUFF_EMPTY_DICT, True),
        (REV_NO_COMMIT_DICT, False),
        (REV_EMPTY_DICT, True),
    ]
    for rev_dict, expect_empty in cases:
        cd_helper._get_revision_dict = lambda rd=rev_dict: rd
        if expect_empty:
            assert cd_helper.is_buffer_empty()
        else:
            assert not cd_helper.is_buffer_empty()
|
||||
|
||||
|
||||
def test_is_collection_in_buffer():
    """Verify buffer membership is derived from the deckhand diff.

    A collection counts as "in the buffer" when the diff reports it as
    anything other than unmodified.
    """
    cd_helper = ConfigdocsHelper('')
    cd_helper._get_revision_dict = lambda: REV_BUFFER_DICT
    cd_helper.deckhand.get_diff = (
        lambda old_revision_id, new_revision_id: DIFF_BUFFER_DICT
    )
    # 'mop' is unmodified in the diff -> not in buffer.
    assert not cd_helper.is_collection_in_buffer('mop')
    # 'slop' and 'chum' were changed (deleted/created) -> in buffer.
    assert cd_helper.is_collection_in_buffer('slop')
    assert cd_helper.is_collection_in_buffer('chum')

    def _raise_dre():
        raise DeckhandResponseError(
            status_code=9000,
            response_message='This is bogus'
        )

    # Deckhand failures must propagate out of the membership check.
    cd_helper._get_revision_dict = _raise_dre
    with pytest.raises(DeckhandResponseError):
        cd_helper.is_collection_in_buffer('does not matter')
|
||||
|
||||
|
||||
def test_is_buffer_valid_for_bucket():
    """
    def is_buffer_valid_for_bucket(self, collection_id, buffermode)

    Exercises the buffer-mode validation rules (append, rejectoncontents,
    replace) against several stubbed deckhand revision/diff states. The
    ordering of the assertions matters: the patched rollback/reset_to_empty
    stubs mutate the helper's state mid-test.
    """

    def set_revision_dict(helper, revision_dict, diff_dict):
        # Re-stub the helper so subsequent calls see the given state.
        helper._get_revision_dict = lambda: revision_dict
        helper.deckhand.get_diff = lambda: diff_dict

    helper = ConfigdocsHelper('')
    helper._get_revision_dict = lambda: REV_BUFFER_DICT
    helper.deckhand.get_diff = (
        lambda old_revision_id, new_revision_id: DIFF_BUFFER_DICT
    )
    # patch rollback so a replace-mode call flips the stubbed state to
    # "buffer empty", as a real rollback would.
    helper.deckhand.rollback = lambda target_revision_id: (
        set_revision_dict(helper, REV_BUFF_EMPTY_DICT, DIFF_BUFF_EMPTY_DICT)
    )
    # patch the deckhand client method to set to empty if reset_to_empty
    # is invoked.
    helper.deckhand.reset_to_empty = lambda: (
        set_revision_dict(helper, REV_EMPTY_DICT, DIFF_EMPTY_DICT)
    )

    # can add 'mop' on append, it was unmodified in buffer
    # can add 'water' on append, it was not anywhere before
    # can't add 'slop' on append, it was deleted (modified in buffer)
    # can't add 'chum' on append, it is already in buffer
    # can't add anything on rejectoncontents
    # can add anything on replace
    assert helper.is_buffer_valid_for_bucket('mop', BufferMode.APPEND)
    assert helper.is_buffer_valid_for_bucket('water', BufferMode.APPEND)
    assert not helper.is_buffer_valid_for_bucket('slop', BufferMode.APPEND)
    assert not helper.is_buffer_valid_for_bucket('chum', BufferMode.APPEND)
    assert not helper.is_buffer_valid_for_bucket('new',
                                                 BufferMode.REJECTONCONTENTS)

    # because of the patched methods above, replace mode will set the
    # buffer/diff to values as if it were rolled back.
    assert helper.is_buffer_valid_for_bucket('mop', BufferMode.REPLACE)
    # should be able to replace mode even if no buffer contents.
    assert helper.is_buffer_valid_for_bucket('mop', BufferMode.REPLACE)
    # in rolled back state as per last two commands, should be ok
    # to use rejectoncontents to add.
    assert helper.is_buffer_valid_for_bucket('mop',
                                             BufferMode.REJECTONCONTENTS)

    # set up as if there is no committed revision yet
    helper._get_revision_dict = lambda: REV_NO_COMMIT_DICT
    helper.deckhand.get_diff = (
        lambda old_revision_id, new_revision_id: DIFF_NO_COMMIT_DICT
    )

    assert helper.is_buffer_valid_for_bucket('slop', BufferMode.APPEND)
    assert helper.is_buffer_valid_for_bucket('chum', BufferMode.APPEND)
    # buffer is not empty, reject on contents.
    assert not helper.is_buffer_valid_for_bucket('new',
                                                 BufferMode.REJECTONCONTENTS)

    # This should rollback to "nothing" using reset to empty.
    assert helper.is_buffer_valid_for_bucket('mop', BufferMode.REPLACE)
    assert helper.is_buffer_empty()

    # set up as if there is nothing in deckhand.
    helper._get_revision_dict = lambda: REV_EMPTY_DICT
    helper.deckhand.get_diff = (
        lambda old_revision_id, new_revision_id: DIFF_EMPTY_DICT
    )
    # should be able to add in any mode.
    assert helper.is_buffer_valid_for_bucket('slop', BufferMode.APPEND)
    assert helper.is_buffer_valid_for_bucket('chum', BufferMode.APPEND)
    assert helper.is_buffer_valid_for_bucket('new',
                                             BufferMode.REJECTONCONTENTS)
    assert helper.is_buffer_valid_for_bucket('mop', BufferMode.REPLACE)
|
||||
|
||||
|
||||
def test__get_revision_dict_no_commit():
    """
    Tests the processing of the revision dict response from deckhand
    with a buffer version, but no committed revision.
    """
    helper = ConfigdocsHelper('')
    # safe_load: this is inert fixture data, and yaml.load without an
    # explicit Loader is deprecated and unsafe (arbitrary object
    # construction).
    helper.deckhand.get_revision_list = lambda: yaml.safe_load("""
---
- id: 1
  url: https://deckhand/api/v1.0/revisions/1
  createdAt: 2017-07-14T21:23Z
  buckets: [mop]
  tags: [a, b, c]
  validationPolicies:
    site-deploy-validation:
      status: failed
- id: 2
  url: https://deckhand/api/v1.0/revisions/2
  createdAt: 2017-07-16T01:15Z
  buckets: [flop, mop]
  tags: [b]
  validationPolicies:
    site-deploy-validation:
      status: succeeded
...
""")
    rev_dict = helper._get_revision_dict()
    committed = rev_dict.get(configdocs_helper.COMMITTED)
    buffer = rev_dict.get(configdocs_helper.BUFFER)
    latest = rev_dict.get(configdocs_helper.LATEST)
    count = rev_dict.get(configdocs_helper.REVISION_COUNT)
    # No revision carries the 'committed' tag, so revision 2 is both the
    # buffer and the latest.
    assert committed is None
    assert buffer.get('id') == 2
    assert latest.get('id') == 2
    assert count == 2
|
||||
|
||||
|
||||
def test__get_revision_dict_empty():
    """Check revision dict processing when deckhand returns no revisions."""
    cd_helper = ConfigdocsHelper('')
    cd_helper.deckhand.get_revision_list = lambda: []
    rev_dict = cd_helper._get_revision_dict()
    # Every slot is empty and the count is zero.
    assert rev_dict.get(configdocs_helper.COMMITTED) is None
    assert rev_dict.get(configdocs_helper.BUFFER) is None
    assert rev_dict.get(configdocs_helper.LATEST) is None
    assert rev_dict.get(configdocs_helper.REVISION_COUNT) == 0
|
||||
|
||||
|
||||
def test__get_revision_dict_commit_no_buff():
    """
    Tests the processing of the revision dict response from deckhand
    with a committed and no buffer revision.
    """
    helper = ConfigdocsHelper('')
    # safe_load: inert fixture data; yaml.load without an explicit Loader
    # is deprecated and unsafe.
    helper.deckhand.get_revision_list = lambda: yaml.safe_load("""
---
- id: 1
  url: https://deckhand/api/v1.0/revisions/1
  createdAt: 2017-07-14T21:23Z
  buckets: [mop]
  tags: [a, b, c]
  validationPolicies:
    site-deploy-validation:
      status: failed
- id: 2
  url: https://deckhand/api/v1.0/revisions/2
  createdAt: 2017-07-16T01:15Z
  buckets: [flop, mop]
  tags: [b, committed]
  validationPolicies:
    site-deploy-validation:
      status: succeeded
...
""")
    rev_dict = helper._get_revision_dict()
    committed = rev_dict.get(configdocs_helper.COMMITTED)
    buffer = rev_dict.get(configdocs_helper.BUFFER)
    latest = rev_dict.get(configdocs_helper.LATEST)
    count = rev_dict.get(configdocs_helper.REVISION_COUNT)
    # Revision 2 is tagged 'committed' and is the newest, so there is
    # nothing in the buffer.
    assert committed.get('id') == 2
    assert buffer is None
    assert latest.get('id') == 2
    assert count == 2
|
||||
|
||||
|
||||
def test__get_revision_dict_commit_and_buff():
    """
    Tests the processing of the revision dict response from deckhand
    with a committed and a buffer revision.
    """
    helper = ConfigdocsHelper('')
    # safe_load: inert fixture data; yaml.load without an explicit Loader
    # is deprecated and unsafe.
    helper.deckhand.get_revision_list = lambda: yaml.safe_load("""
---
- id: 1
  url: https://deckhand/api/v1.0/revisions/1
  createdAt: 2017-07-14T21:23Z
  buckets: [mop]
  tags: [a, b, committed]
  validationPolicies:
    site-deploy-validation:
      status: failed
- id: 2
  url: https://deckhand/api/v1.0/revisions/2
  createdAt: 2017-07-16T01:15Z
  buckets: [flop, mop]
  tags: [b]
  validationPolicies:
    site-deploy-validation:
      status: succeeded
...
""")
    rev_dict = helper._get_revision_dict()
    committed = rev_dict.get(configdocs_helper.COMMITTED)
    buffer = rev_dict.get(configdocs_helper.BUFFER)
    latest = rev_dict.get(configdocs_helper.LATEST)
    count = rev_dict.get(configdocs_helper.REVISION_COUNT)
    # Revision 1 carries the 'committed' tag; the newer revision 2 is the
    # buffer and the latest.
    assert committed.get('id') == 1
    assert buffer.get('id') == 2
    assert latest.get('id') == 2
    assert count == 2
|
||||
|
||||
|
||||
def test__get_revision_dict_errs():
    """Check revision dict behavior when the deckhand client raises.

    A DeckhandResponseError surfaces to the caller as an AppError, while
    NoRevisionsExistError is treated as an empty revision history.
    """
    def _raise_dre():
        raise DeckhandResponseError(
            status_code=9000,
            response_message='This is bogus'
        )

    def _raise_nree():
        raise NoRevisionsExistError()

    cd_helper = ConfigdocsHelper('')

    # Deckhand response errors are wrapped and re-raised.
    cd_helper.deckhand.get_revision_list = _raise_dre
    with pytest.raises(AppError):
        cd_helper._get_revision_dict()

    # "No revisions" is not an error: everything comes back empty.
    cd_helper.deckhand.get_revision_list = _raise_nree
    rev_dict = cd_helper._get_revision_dict()
    assert rev_dict.get(configdocs_helper.COMMITTED) is None
    assert rev_dict.get(configdocs_helper.BUFFER) is None
    assert rev_dict.get(configdocs_helper.LATEST) is None
    assert rev_dict.get(configdocs_helper.REVISION_COUNT) == 0
|
||||
|
||||
|
||||
def test_get_collection_docs():
    """Retrieve collection documents for buffer and committed versions.

    With no revisions at all both lookups raise ApiError; once committed
    and buffer revisions exist, the yaml payload is returned.
    """
    cd_helper = ConfigdocsHelper('')
    cd_helper.deckhand.get_docs_from_revision = (
        lambda revision_id, bucket_id: "{'yaml': 'yaml'}"
    )
    cd_helper._get_revision_dict = lambda: REV_EMPTY_DICT
    cd_helper.deckhand.get_diff = (
        lambda old_revision_id, new_revision_id: DIFF_EMPTY_DICT
    )

    # Nothing in deckhand: both versions are errors.
    for version in (configdocs_helper.BUFFER, configdocs_helper.COMMITTED):
        with pytest.raises(ApiError):
            cd_helper.get_collection_docs(version, 'mop')

    cd_helper._get_revision_dict = lambda: REV_COMMIT_AND_BUFFER_DICT
    cd_helper.deckhand.get_diff = (
        lambda old_revision_id, new_revision_id: DIFF_COMMIT_AND_BUFFER_DICT
    )
    # Both versions now resolve to the stubbed 16-character yaml payload.
    for version in (configdocs_helper.BUFFER, configdocs_helper.COMMITTED):
        yaml_str = cd_helper.get_collection_docs(version, 'mop')
        print(yaml_str)
        assert len(yaml_str) == 16
|
||||
|
||||
|
||||
def _fake_get_validation_endpoints():
|
||||
val_ep = '{}/validatedesign'
|
||||
return [
|
||||
{'name': 'Drydock', 'url': val_ep.format('drydock')},
|
||||
{'name': 'Armada', 'url': val_ep.format('armada')},
|
||||
]
|
||||
|
||||
|
||||
def _fake_get_validations_for_component(url,
|
||||
design_reference,
|
||||
response,
|
||||
exception,
|
||||
context_marker):
|
||||
"""
|
||||
Responds with a status response
|
||||
"""
|
||||
response['response'] = json.loads(("""
|
||||
{
|
||||
"kind": "Status",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {},
|
||||
"status": "Failure",
|
||||
"message": "%s",
|
||||
"reason": "appropriate reason phrase",
|
||||
"details": {
|
||||
"errorCount": 2,
|
||||
"messageList": [
|
||||
{ "message" : "broke it 1", "error": true},
|
||||
{ "message" : "speeling error", "error": true}
|
||||
]
|
||||
},
|
||||
"code": 400
|
||||
}
|
||||
|
||||
""") % url)
|
||||
|
||||
|
||||
def test_get_validations_for_revision():
    """
    Tests the functionality of the get_validations_for_revision method.

    Class-level attributes are swapped for fakes and restored in the
    ``finally`` block so other tests are unaffected.
    """
    helper = ConfigdocsHelper('')
    # Hold the real class attributes so they can be restored afterwards.
    hold_ve = helper.__class__._get_validation_endpoints
    hold_vfc = helper.__class__._get_validations_for_component
    helper.__class__._get_validation_endpoints = (
        _fake_get_validation_endpoints
    )
    helper.__class__._get_validations_for_component = (
        _fake_get_validations_for_component
    )
    # No deckhand-side validations for this test.
    helper._get_deckhand_validations = lambda revision_id: []
    try:
        val_status = helper.get_validations_for_revision(3)
        err_count = val_status['details']['errorCount']
        err_list_count = len(val_status['details']['messageList'])
        # The rolled-up count must agree with the message list length.
        assert err_count == err_list_count
        # Two fake endpoints, each contributing two error messages.
        assert val_status['details']['errorCount'] == 4
    finally:
        helper.__class__._get_validation_endpoints = hold_ve
        helper.__class__._get_validations_for_component = hold_vfc
|
||||
|
||||
|
||||
# Canned deckhand validation responses consumed by
# test__get_deckhand_validations below.

# Base validation list: two named validation sets, one failing.
FK_VAL_BASE_RESP = FakeResponse(status_code=200, text="""
---
count: 2
next: null
prev: null
results:
  - name: deckhand-schema-validation
    url: https://deckhand/a/url/too/long/for/pep8
    status: success
  - name: promenade-site-validation
    url: https://deckhand/a/url/too/long/for/pep8
    status: failure
...
""")

# Subset listing for one validation set: a single failing entry (id 0).
FK_VAL_SUBSET_RESP = FakeResponse(status_code=200, text="""
---
count: 1
next: null
prev: null
results:
  - id: 0
    url: https://deckhand/a/url/too/long/for/pep8
    status: failure
...
""")


# Detail for a single validation entry, including its error documents.
FK_VAL_ENTRY_RESP = FakeResponse(status_code=200, text="""
---
name: promenade-site-validation
url: https://deckhand/a/url/too/long/for/pep8
status: failure
createdAt: 2017-07-16T02:03Z
expiresAfter: null
expiresAt: null
errors:
  - documents:
      - schema: promenade/Node/v1
        name: node-document-name
      - schema: promenade/Masters/v1
        name: kubernetes-masters
    message: Node has master role, but not included in cluster masters list.
...
""")
|
||||
|
||||
|
||||
def test__get_deckhand_validations():
    """Tests the processing of a validation response from deckhand."""
    cd_helper = ConfigdocsHelper('')
    dh_client = cd_helper.deckhand
    dh_client._get_base_validation_resp = (
        lambda revision_id: FK_VAL_BASE_RESP
    )
    dh_client._get_subset_validation_response = (
        lambda revision_id, subset_name: FK_VAL_SUBSET_RESP
    )
    dh_client._get_entry_validation_response = (
        lambda revision_id, subset_name, entry_id: FK_VAL_ENTRY_RESP
    )
    # One failing subset with one entry carrying two error documents
    # rolls up into two validation messages.
    assert len(cd_helper._get_deckhand_validations(5)) == 2
|
||||
|
||||
|
||||
def test_tag_buffer():
    """Ensure tag_buffer applies the tag to the buffer revision."""
    with patch.object(ConfigdocsHelper, 'tag_revision') as tag_rev_mock:
        cd_helper = ConfigdocsHelper('')
        cd_helper._get_revision_dict = lambda: REV_BUFFER_DICT
        cd_helper.tag_buffer('artful')

    # REV_BUFFER_DICT's buffer revision id is 5.
    tag_rev_mock.assert_called_once_with(5, 'artful')
|
||||
|
||||
|
||||
def test_add_collection():
    """Verify adding a collection delegates to deckhand's put_bucket.

    Primarily error handling: the buffer revision id is returned.
    """
    with patch.object(DeckhandClient, 'put_bucket') as put_bucket_mock:
        cd_helper = ConfigdocsHelper('')
        cd_helper._get_revision_dict = lambda: REV_BUFFER_DICT
        assert cd_helper.add_collection('mop', 'yaml:yaml') == 5

    put_bucket_mock.assert_called_once_with('mop', 'yaml:yaml')
|
|
@ -0,0 +1,52 @@
|
|||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from shipyard_airflow.control.configdocs_helper import ConfigdocsHelper
|
||||
from shipyard_airflow.control.rendered_configdocs_api import (
|
||||
RenderedConfigDocsResource
|
||||
)
|
||||
from shipyard_airflow.errors import ApiError
|
||||
|
||||
|
||||
def test__validate_version_parameter():
    """
    Test of the version parameter validation.

    Invalid values raise ApiError; valid values pass without raising.
    """
    rcdr = RenderedConfigDocsResource()
    with pytest.raises(ApiError):
        rcdr._validate_version_parameter('asdfjkl')

    # Valid versions must not raise. Calling them unguarded replaces the
    # original bare `except: assert False`, which swallowed every
    # exception (including KeyboardInterrupt) and hid the real failure.
    rcdr._validate_version_parameter('buffer')
    rcdr._validate_version_parameter('committed')
|
||||
|
||||
|
||||
def test_get_rendered_configdocs():
    """Tests the RenderedConfigDocsResource method get_rendered_configdocs."""
    rcdr = RenderedConfigDocsResource()

    with patch.object(
            ConfigdocsHelper, 'get_rendered_configdocs') as render_mock:
        cd_helper = ConfigdocsHelper('')
        rcdr.get_rendered_configdocs(cd_helper, version='buffer')

    # The resource passes the requested version straight to the helper.
    render_mock.assert_called_once_with('buffer')
|
Loading…
Reference in New Issue