Merge "Adding support for error details on cluster creation"

This commit is contained in:
Jenkins 2015-11-23 19:05:24 +00:00 committed by Gerrit Code Review
commit bbcfbc0139
11 changed files with 122 additions and 4 deletions

View File

@ -118,6 +118,9 @@ class Cluster(base.APIBase):
authentication = wtypes.wsattr(AuthenticationCredential)
"Authentication for accessing message brokers"
error_detail = wsme.wsattr(wtypes.text, mandatory=False)
"Error detail(s) associated with cluster"
def get_complete_cluster(context, cluster_id):
"""Helper to retrieve the api-compatible full structure of a cluster."""

View File

@ -0,0 +1,41 @@
# -*- encoding: utf-8 -*-
#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add error_detail column in clusters
Revision ID: 17c428e0479e
Revises: 244aa473e595
Create Date: 2015-11-11 12:01:10.769280
"""
# revision identifiers, used by Alembic.
revision = '17c428e0479e'
down_revision = '244aa473e595'
from alembic import op
from oslo_config import cfg
import sqlalchemy as sa
def upgrade():
    """Add the nullable ``error_detail`` text column to ``clusters``."""
    error_detail = sa.Column('error_detail', sa.Text)
    op.add_column('clusters', error_detail)
def downgrade():
    """Drop the ``error_detail`` column added by this revision.

    SQLite does not support ``ALTER TABLE ... DROP COLUMN``, so the drop
    is skipped for SQLite connections instead of failing the downgrade.
    """
    db_connection = cfg.CONF.database.connection
    # Guard on the dialect prefix rather than the exact in-memory URL:
    # the original check ("sqlite://") let file-backed SQLite URLs
    # (sqlite:///path.db) through, where drop_column would then fail.
    if not db_connection.startswith("sqlite"):  # pragma: nocover
        op.drop_column('clusters', 'error_detail')

View File

@ -73,6 +73,7 @@ class Cluster(base.BASE, base.IdMixin, models.TimestampMixin,
flavor = sa.Column(sa.String(50), nullable=False)
size = sa.Column(sa.Integer(), default=1, nullable=False)
volume_size = sa.Column(sa.Integer(), nullable=True)
error_detail = sa.Column(sa.Text(), nullable=True)
sa.Index("clusters_cluster_id_idx", "cluster_id", unique=True)

View File

@ -38,6 +38,7 @@ class Cluster(base.CueObject):
'created_at': obj_utils.datetime_or_str_or_none,
'updated_at': obj_utils.datetime_or_str_or_none,
'deleted_at': obj_utils.datetime_or_str_or_none,
'error_detail': obj_utils.str_or_none,
}
@staticmethod

View File

@ -74,6 +74,7 @@ def create_cluster(cluster_id, node_ids, user_network_id,
inject={'proto': 'http'}),
os_common.CheckFor(
name="check cluster status",
details="waiting for RabbitMQ clustered status",
rebind={'check_var': "clustering_status"},
check_value='OK',
retry_delay_seconds=10),

View File

@ -153,6 +153,7 @@ def create_cluster_node(cluster_id, node_number, node_id, graph_flow,
provides="vm_status_%d" % node_number),
os_common.CheckFor(
name="check vm status %s" % node_name,
details="waiting for ACTIVE VM status",
rebind={'check_var': "vm_status_%d" % node_number},
check_value='ACTIVE',
retry_delay_seconds=10),

View File

@ -68,5 +68,11 @@ class UpdateClusterRecord(task.Task):
else:
cluster_values['status'] = models.Status.ERROR
# Extract exception information
if 'flow_failures' in kwargs:
cluster_values['error_detail'] = '\n'.join(
[str(value) for value in
kwargs['flow_failures'].values()])
cluster = objects.Cluster(**cluster_values)
cluster.update(request_context, cluster_id)

View File

@ -53,6 +53,8 @@ class MigrationFunctionalTests(base.BaseTestCase):
('3917e931a55a', ['clusters', 'endpoints', 'nodes', 'broker',
'broker_metadata']),
('244aa473e595', ['clusters', 'endpoints', 'nodes', 'broker',
'broker_metadata']),
('17c428e0479e', ['clusters', 'endpoints', 'nodes', 'broker',
'broker_metadata'])
])

View File

@ -46,6 +46,7 @@ class ModelsTests(base.FunctionalTestCase):
"created_at": timeutils.utcnow(),
"updated_at": timeutils.utcnow(),
"deleted_at": timeutils.utcnow(),
"error_detail": "My cluster's error(s) detail",
}
cluster = models.Cluster()
@ -75,6 +76,9 @@ class ModelsTests(base.FunctionalTestCase):
"Invalid updated_at value")
self.assertEqual(cluster_values["deleted_at"], cluster.deleted_at,
"Invalid deleted_at value")
self.assertEqual(cluster_values["error_detail"],
cluster.error_detail,
"Invalid error_detail value")
db_session = sql_api.get_session()
cluster.save(db_session)
@ -107,6 +111,9 @@ class ModelsTests(base.FunctionalTestCase):
"Invalid updated_at value")
self.assertEqual(cluster_values["deleted_at"], cluster_db.deleted_at,
"Invalid deleted_at value")
self.assertEqual(cluster_values["error_detail"],
cluster_db.error_detail,
"Invalid error_detail value")
def test_create_node_model(self):
"""Verifies a new cluster record is created in DB."""

View File

@ -93,7 +93,7 @@ class CreateClusterTests(base.FunctionalTestCase):
cluster_values = {
"project_id": self.context.tenant_id,
"name": "RabbitCluster",
"network_id": str(uuid.uuid4()),
"network_id": self.valid_network['id'],
"flavor": "1",
"size": 3,
}
@ -165,7 +165,7 @@ class CreateClusterTests(base.FunctionalTestCase):
cluster_values = {
"project_id": self.context.tenant_id,
"name": "RabbitCluster",
"network_id": str(uuid.uuid4()),
"network_id": self.valid_network['id'],
"flavor": "1",
"size": 10,
}
@ -191,6 +191,55 @@ class CreateClusterTests(base.FunctionalTestCase):
self.assertEqual(vm_list, self.nova_client.servers.list())
self.assertEqual(port_list, self.neutron_client.list_ports())
def test_create_cluster_invalid_user_network(self):
    """Verify node failures surface in the cluster's error_detail.

    Runs the create-cluster flow against a random, non-existent user
    network id and checks that the flow fails with a WrappedFailure
    whose per-node causes all match the recorded error_detail.
    """
    bad_network_id = str(uuid.uuid4())
    expected_size = 3

    flow_store = {
        'image': self.valid_image.id,
        'flavor': self.valid_flavor.id,
        "port": self.port,
        "context": self.context.to_dict(),
        "erlang_cookie": str(uuid.uuid4()),
        "default_rabbit_user": 'rabbit',
        "default_rabbit_pass": str(uuid.uuid4()),
    }
    cluster_values = {
        "project_id": self.context.tenant_id,
        "name": "RabbitCluster",
        "network_id": bad_network_id,
        "flavor": "1",
        "size": expected_size,
    }

    new_cluster = objects.Cluster(**cluster_values)
    new_cluster.create(self.context)

    nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                 new_cluster.id)
    node_ids = [node.id for node in nodes]

    flow = create_cluster(new_cluster.id,
                          node_ids,
                          bad_network_id,
                          self.management_network['id'])

    try:
        engines.run(flow, store=flow_store)
    except taskflow_exc.WrappedFailure as wrapped:
        cluster_ref = objects.Cluster.get_cluster_by_id(self.context,
                                                        new_cluster.id)
        # One cause per node; every cause text was persisted verbatim.
        self.assertEqual(expected_size, len(wrapped._causes))
        for cause in wrapped._causes:
            self.assertEqual(cluster_ref.error_detail, str(cause))
    else:
        self.fail("Expected taskflow_exc.WrappedFailure exception.")
def tearDown(self):
for vm_id in self.new_vm_list:
self.nova_client.servers.delete(vm_id)

View File

@ -24,11 +24,13 @@ class CheckFor(task.Task):
retry_delay_seconds=None,
retry_delay_ms=None,
name=None,
details=None,
**kwargs):
super(CheckFor, self).__init__(name=name, **kwargs)
self.check_value = check_value
self.sleep_time = 0
self.details = details
if retry_delay_seconds:
self.sleep_time = retry_delay_seconds
@ -39,8 +41,12 @@ class CheckFor(task.Task):
if check_var == self.check_value:
return self.check_value
else:
raise AssertionError("expected %s, got %s" %
(self.check_value, check_var))
error_string = "expected %s, got %s" % (self.check_value,
check_var)
if self.details is not None:
error_string += ", message: %s" % self.details
raise AssertionError(error_string)
def revert(self, check_var, *args, **kwargs):
if self.sleep_time != 0: