A Python library for code common to TripleO CLI and TripleO UI.
You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

158 lines
6.4KB

  1. # Copyright 2016 Red Hat, Inc.
  2. # All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License"); you may
  5. # not use this file except in compliance with the License. You may obtain
  6. # a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  12. # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  13. # License for the specific language governing permissions and limitations
  14. # under the License.
  15. import collections
  16. import logging
  17. from mistral_lib import actions
  18. from tripleo_common.actions import parameters as parameters_actions
  19. from tripleo_common.actions import templates
  20. from tripleo_common import constants
  21. from tripleo_common import update
  22. LOG = logging.getLogger(__name__)
  23. def get_group_resources_after_delete(groupname, res_to_delete, resources):
  24. group = next(res for res in resources if
  25. res.resource_name == groupname and
  26. res.resource_type == constants.RESOURCE_GROUP_TYPE)
  27. members = []
  28. for res in resources:
  29. stack_name, stack_id = next(
  30. x['href'] for x in res.links if
  31. x['rel'] == 'stack').rsplit('/', 2)[1:]
  32. # desired new count of nodes after delete operation should be
  33. # count of all existing nodes in ResourceGroup which are not
  34. # in set of nodes being deleted. Also nodes in any delete state
  35. # from a previous failed update operation are not included in
  36. # overall count (if such nodes exist)
  37. if (stack_id == group.physical_resource_id and
  38. res not in res_to_delete and
  39. not res.resource_status.startswith('DELETE')):
  40. members.append(res)
  41. return members
  42. class ScaleDownAction(templates.ProcessTemplatesAction):
  43. """Deletes overcloud nodes
  44. Before calling this method, ensure you have updated the plan
  45. with any templates or environment files as needed.
  46. """
  47. def __init__(self, timeout, nodes=[],
  48. container=constants.DEFAULT_CONTAINER_NAME):
  49. self.nodes = nodes
  50. self.timeout_mins = timeout
  51. super(ScaleDownAction, self).__init__(container)
  52. def _update_stack(self, parameters={},
  53. timeout_mins=constants.STACK_TIMEOUT_DEFAULT,
  54. context=None):
  55. # TODO(rbrady): migrate _update_stack to it's own action and update
  56. # the workflow for scale down
  57. # update the plan parameters with the scaled down parameters
  58. update_params_action = parameters_actions.UpdateParametersAction(
  59. parameters, self.container)
  60. updated_plan = update_params_action.run(context)
  61. if isinstance(updated_plan, actions.Result):
  62. return updated_plan
  63. processed_data = super(ScaleDownAction, self).run(context)
  64. if isinstance(processed_data, actions.Result):
  65. return processed_data
  66. update.add_breakpoints_cleanup_into_env(processed_data['environment'])
  67. fields = processed_data.copy()
  68. fields['timeout_mins'] = timeout_mins
  69. fields['existing'] = True
  70. # As we do a PATCH update when deleting nodes, parameters set for a
  71. # stack before upgrade to newton (ex. ComputeRemovalPolicies),
  72. # would still take precedence over the ones set in parameter_defaults
  73. # after upgrade. Clear these parameters for backward compatibility.
  74. fields['clear_parameters'] = list(parameters.keys())
  75. LOG.debug('stack update params: %s', fields)
  76. self.get_orchestration_client(context).stacks.update(self.container,
  77. **fields)
  78. def _get_removal_params_from_heat(self, resources_by_role, resources):
  79. stack_params = {}
  80. for role, role_resources in resources_by_role.items():
  81. param_name = "{0}Count".format(role)
  82. # get real count of nodes for each role. *Count stack parameters
  83. # can not be used because stack parameters return parameters
  84. # passed by user no matter if previous update operation succeeded
  85. # or not
  86. group_members = get_group_resources_after_delete(
  87. role, role_resources, resources)
  88. stack_params[param_name] = str(len(group_members))
  89. # add instance resource names into removal_policies
  90. # so heat knows which instances should be removed
  91. removal_param = "{0}RemovalPolicies".format(role)
  92. stack_params[removal_param] = [{
  93. 'resource_list': [r.resource_name for r in role_resources]
  94. }]
  95. # force reset the removal_policies_mode to 'append'
  96. # as 'update' can lead to deletion of unintended nodes.
  97. removal_mode = "{0}RemovalPoliciesMode".format(role)
  98. stack_params[removal_mode] = 'append'
  99. return stack_params
  100. def run(self, context):
  101. heatclient = self.get_orchestration_client(context)
  102. resources = heatclient.resources.list(self.container, nested_depth=5)
  103. resources_by_role = collections.defaultdict(list)
  104. instance_list = list(self.nodes)
  105. for res in resources:
  106. try:
  107. instance_list.remove(res.physical_resource_id)
  108. except ValueError:
  109. continue
  110. stack_name, stack_id = next(
  111. x['href'] for x in res.links if
  112. x['rel'] == 'stack').rsplit('/', 2)[1:]
  113. # get resource to remove from resource group (it's parent resource
  114. # of nova server)
  115. role_resource = next(x for x in resources if
  116. x.physical_resource_id == stack_id)
  117. # get the role name which is parent resource name in Heat
  118. role = role_resource.parent_resource
  119. resources_by_role[role].append(role_resource)
  120. resources_by_role = dict(resources_by_role)
  121. if instance_list:
  122. raise ValueError(
  123. "Couldn't find following instances in stack %s: %s" %
  124. (self.container, ','.join(instance_list)))
  125. # decrease count for each role (or resource group) and set removal
  126. # policy for each resource group
  127. stack_params = self._get_removal_params_from_heat(
  128. resources_by_role, resources)
  129. self._update_stack(parameters=stack_params, context=context)