Fix cluster update behavior

This patch contains several fixes to the cluster-update behavior:

- It makes the CLUSTER_UPDATE action invoke the cluster.do_update()
  method.
- It ensures that cluster.status is correctly set when the action is
  executed.
- It removes the extra calls to cluster.store() during the update
  action.
- It allows the cluster.set_status() call to accept keyword arguments
  when doing an update (see the sketch after this list). [This opens
  the door to optimizations in other places where set_status() is
  called.]
- It ensures that the cluster's cached data is refreshed, i.e. the
  profile object.
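
Below is a minimal, self-contained sketch of the pattern introduced here.
It is not Senlin code: FakeProfile, FakeCluster and their attributes are
hypothetical stand-ins. It illustrates set_status() treating extra keyword
arguments as attribute updates and refreshing the cached profile when
profile_id changes; the real method additionally persists the collected
values via db_api.cluster_update(), as shown in the Cluster.set_status()
hunk below.

# Hypothetical stand-ins; only the keyword-argument handling mirrors
# the behavior added by this patch.
class FakeProfile(object):
    @classmethod
    def load(cls, context, profile_id):
        # Assumed loader; real code would fetch the profile from the DB.
        p = cls()
        p.id = profile_id
        return p


class FakeCluster(object):
    ACTIVE = 'ACTIVE'

    def __init__(self, profile_id):
        self.profile_id = profile_id
        self.status = 'INIT'
        self.status_reason = ''
        self.rt = {'profile': FakeProfile.load(None, profile_id)}

    def set_status(self, context, status, reason=None, **kwargs):
        values = {'status': status}
        self.status = status
        if reason:
            self.status_reason = reason
            values['status_reason'] = reason
        # Extra keyword arguments become attribute updates (bullet 4).
        for k, v in kwargs.items():
            if hasattr(self, k):
                setattr(self, k, v)
                values[k] = v
        # Refresh the cached profile if the profile id changed (bullet 5).
        if 'profile_id' in values:
            self.rt['profile'] = FakeProfile.load(context, self.profile_id)
        return values


cluster = FakeCluster('OLD_PROFILE')
values = cluster.set_status(None, FakeCluster.ACTIVE,
                            'Cluster update completed.',
                            profile_id='NEW_PROFILE')
assert cluster.profile_id == 'NEW_PROFILE'
assert cluster.rt['profile'].id == 'NEW_PROFILE'
print(values)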

Change-Id: Ib5ec075b29f51f813c99337359c80d5108b9a25d
tengqm 2015-09-27 05:06:42 -04:00
parent 9c8904d9c8
commit 0c069369a0
4 changed files with 58 additions and 21 deletions


@@ -178,18 +178,24 @@ class ClusterAction(base.Action):
         return result, reason
 
     def do_update(self):
+        res = self.cluster.do_update(self.context)
+        if not res:
+            reason = _('Cluster update failed.')
+            self.cluster.set_status(self.context, self.cluster.ERROR, reason)
+            return self.RES_ERROR, reason
+
         profile_id = self.inputs.get('new_profile_id')
         for node in self.cluster.nodes:
             kwargs = {
-                'user': self.context.user,
-                'project': self.context.project,
-                'domain': self.context.domain,
                 'name': 'node_update_%s' % node.id[:8],
                 'cause': base.CAUSE_DERIVED,
                 'inputs': {
                     'new_profile_id': profile_id,
-                }
+                },
+                'user': self.context.user,
+                'project': self.context.project,
+                'domain': self.context.domain,
             }
             action = base.Action(node.id, 'NODE_UPDATE', **kwargs)
             action.store(self.context)
@@ -199,19 +205,17 @@ class ClusterAction(base.Action):
             dispatcher.start_action(action_id=action.id)
 
         # Wait for nodes to complete update
         result = self.RES_OK
-        reason = _('Cluster update completed.')
         if len(self.cluster.nodes) > 0:
             result, new_reason = self._wait_for_dependents()
             if result != self.RES_OK:
                 self.cluster.set_status(self.context, self.cluster.WARNING,
                                         new_reason)
                 return result, new_reason
 
-        # TODO(anyone): this seems an overhead
-        self.cluster.profile_id = profile_id
-        self.cluster.store(self.context)
-        self.cluster.set_status(self.context, self.cluster.ACTIVE, reason)
+        reason = _('Cluster update completed.')
+        self.cluster.set_status(self.context, self.cluster.ACTIVE, reason,
+                                profile_id=profile_id)
         return self.RES_OK, reason
 
     def _delete_nodes(self, nodes):


@@ -236,8 +236,16 @@ class Cluster(object):
         }
         return info
 
-    def set_status(self, context, status, reason=None):
-        '''Set status of the cluster.'''
+    def set_status(self, context, status, reason=None, **kwargs):
+        """Set status of the cluster.
+
+        :param context: A DB session for accessing the backend database.
+        :param status: A string providing the new status of the cluster.
+        :param reason: A string containing the reason for the status change.
+                       It can be omitted when invoking this method.
+        :param dict kwargs: Other optional attributes to be updated.
+        :returns: Nothing.
+        """
         values = {}
         now = timeutils.utcnow()
@@ -257,6 +265,16 @@ class Cluster(object):
             self.status_reason = reason
             values['status_reason'] = reason
 
+        for k, v in kwargs.items():
+            if hasattr(self, k):
+                setattr(self, k, v)
+                values[k] = v
+
+        # There is a possibility that the profile id is changed
+        if 'profile_id' in values:
+            self.rt['profile'] = profile_base.Profile.load(context,
+                                                           self.profile_id)
+
         db_api.cluster_update(context, self.id, values)
         # TODO(anyone): generate event record
         return


@@ -418,10 +418,9 @@ class ClusterActionTest(base.SenlinTestCase):
         self.assertEqual(1, n_action_1.set_status.call_count)
         self.assertEqual(1, n_action_2.set_status.call_count)
         self.assertEqual(2, mock_start.call_count)
-        self.assertEqual('FAKE_PROFILE', cluster.profile_id)
-        cluster.store.assert_called_once_with(action.context)
         cluster.set_status.assert_called_once_with(
-            action.context, 'ACTIVE', 'Cluster update completed.')
+            action.context, 'ACTIVE', 'Cluster update completed.',
+            profile_id='FAKE_PROFILE')
 
     def test_do_update_empty_cluster(self, mock_load):
         cluster = mock.Mock()
@@ -437,11 +436,9 @@ class ClusterActionTest(base.SenlinTestCase):
         self.assertEqual(action.RES_OK, res_code)
         self.assertEqual('Cluster update completed.', res_msg)
-        self.assertEqual('FAKE_PROFILE', cluster.profile_id)
-        cluster.store.assert_called_once_with(action.context)
         cluster.set_status.assert_called_once_with(
-            action.context, 'ACTIVE', 'Cluster update completed.')
+            action.context, 'ACTIVE', 'Cluster update completed.',
+            profile_id='FAKE_PROFILE')
 
     @mock.patch.object(db_api, 'action_add_dependency')
     @mock.patch.object(dispatcher, 'start_action')


@@ -272,11 +272,13 @@ class TestCluster(base.SenlinTestCase):
         self.assertIsNotNone(cluster.created_time)
         self.assertIsNone(cluster.updated_time)
 
-        cluster.set_status(self.context, cluster.ACTIVE, 'Update succeeded')
+        cluster.set_status(self.context, cluster.ACTIVE, 'Update succeeded',
+                           data={'key': 'value'})
         self.assertEqual(cluster.ACTIVE, cluster.status)
         self.assertEqual('Update succeeded', cluster.status_reason)
         self.assertIsNotNone(cluster.created_time)
         self.assertIsNotNone(cluster.updated_time)
+        self.assertEqual({'key': 'value'}, cluster.data)
 
         # set status without a reason
         reason = cluster.status_reason
@@ -292,6 +294,22 @@ class TestCluster(base.SenlinTestCase):
         self.assertIsNotNone(cluster.updated_time)
         self.assertIsNotNone(cluster.deleted_time)
 
+    def test_cluster_set_status_with_new_profile(self):
+        cluster = clusterm.Cluster('test-cluster', 0, self.profile.id,
+                                   project=self.context.project)
+        cluster.store(self.context)
+        cluster.status = cluster.UPDATING
+
+        self._create_profile('NEW_PROFILE')
+
+        cluster.set_status(self.context, cluster.ACTIVE, 'Update succeeded',
+                           profile_id='NEW_PROFILE')
+        self.assertEqual(cluster.ACTIVE, cluster.status)
+        self.assertEqual('Update succeeded', cluster.status_reason)
+        self.assertIsNotNone(cluster.updated_time)
+        self.assertEqual('NEW_PROFILE', cluster.profile_id)
+        self.assertEqual('NEW_PROFILE', cluster.rt['profile'].id)
+
     def test_cluster_do_create_wrong_status(self):
         cluster = clusterm.Cluster('test-cluster', 0, self.profile.id,
                                    project=self.context.project)