TripleO Ansible project repository. Contains playbooks for use with TripleO OpenStack deployments.

# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

DOCUMENTATION = """
---
module: tripleo_all_nodes_data
author:
  - James Slagle (@slagle) <jslagle@redhat.com>
version_added: '2.8'
short_description: Renders the all_nodes data for TripleO as group_vars
notes: []
description:
  - This module renders the all_nodes data for TripleO as group_vars which are
    then available on overcloud nodes.
options:
  forks:
    description:
      - The number of forks to spawn in parallel to compute the data for each
        service. Defaults to the forks set for ansible.
    required: False
"""

EXAMPLES = """
- name: Render all_nodes data
  tripleo_all_nodes_data:
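
# Assumed additional usage of the optional "forks" option documented above;
# 8 is an arbitrary example value, not a recommended default.
- name: Render all_nodes data with a fork limit
  tripleo_all_nodes_data:
    forks: 8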
"""

import json
from multiprocessing import Manager, Process
import os
import traceback

from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.plugins.filter import ipaddr
from ansible.utils.display import Display

DISPLAY = Display()


class ActionModule(ActionBase):
    """Renders the all_nodes data for TripleO as group_vars"""
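
    # Computes the per-service group_vars keys (<service>_enabled,
    # <service>_node_ips, <service>_node_names, ...) for a single service and
    # stores them in the shared all_nodes dict.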
    def compute_service(self, service, all_nodes):
        DISPLAY.vv("Processing {}".format(service))

        # <service>_enabled: true
        all_nodes[service + '_enabled'] = True

        # <service>_node_ips: <list of ips>
        DISPLAY.vv(" Computing data for {}_node_ips".format(service))
        service_network = self.service_net_map.get(
            service + '_network', 'ctlplane')
        service_hosts = self.groups.get(service, [])
        service_node_ips = list(
            map(lambda host: self.h_vars[host][service_network + '_ip'],
                service_hosts))
        for extra_node_ip in self.all_nodes_extra_map_data.get(
                service + '_node_ips', []):
            if extra_node_ip not in service_node_ips:
                service_node_ips.append(extra_node_ip)
        all_nodes[service + '_node_ips'] = service_node_ips

        if self.nova_additional_cell:
            # <service>_cell_node_names: <list of hostnames>
            v = service_network + '_hostname'
            service_cell_node_names = \
                list(map(lambda host: self.h_vars[host][v],
                         service_hosts))
            all_nodes[service + '_cell_node_names'] = \
                service_cell_node_names
        else:
            # <service>_node_names: <list of hostnames>
            DISPLAY.vv(" Computing data for {}_node_names".format(service))
            v = service_network + '_hostname'
            service_node_names = \
                list(map(lambda host: self.h_vars[host][v],
                         service_hosts))
            for extra_node_name in self.all_nodes_extra_map_data.get(
                    service + '_node_names', []):
                if extra_node_name not in service_node_names:
                    service_node_names.append(extra_node_name)
            all_nodes[service + '_node_names'] = service_node_names

        # <service>_short_node_names: <list of hostnames>
        DISPLAY.vv(" Computing data for {}_short_node_names".format(service))
        service_short_node_names = \
            list(map(lambda host: self.h_vars[host]['inventory_hostname'],
                     service_hosts))
        for extra_short_node_name in self.all_nodes_extra_map_data.get(
                service + '_short_node_names', []):
            if extra_short_node_name not in service_short_node_names:
                service_short_node_names.append(extra_short_node_name)
        all_nodes[service + '_short_node_names'] = \
            service_short_node_names

        # <service>_short_bootstrap_node_name: hostname
        DISPLAY.vv(" Computing data for {}_short_bootstrap_node_name".format(service))
        if self.all_nodes_extra_map_data.get(
                service + '_short_bootstrap_node_name', None):
            v = service + '_short_bootstrap_node_name'
            service_hosts += self.all_nodes_extra_map_data[v]
        service_hosts.sort()
        if service_hosts:
            all_nodes[service + '_short_bootstrap_node_name'] = \
                service_hosts[0]

        # <service>_bootstrap_node_ip: ip
        DISPLAY.vv(" Computing data for {}_bootstrap_node_ip".format(service))
        if self.all_nodes_extra_map_data.get(
                service + '_bootstrap_node_ip', None):
            v = service + '_bootstrap_node_ip'
            # Build a new list here: list.append() returns None, which would
            # otherwise leave service_bootstrap_node_ips unset.
            service_bootstrap_node_ips = \
                service_node_ips + [self.all_nodes_extra_map_data[v]]
        else:
            service_bootstrap_node_ips = service_node_ips
        if service_bootstrap_node_ips:
            all_nodes[service + '_bootstrap_node_ip'] = \
                service_bootstrap_node_ips[0]
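
    # Runs compute_service() for every enabled service, at most `forks`
    # services at a time, each in its own process writing into the shared
    # Manager() dict.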
    def process_services(self, enabled_services, all_nodes, forks):
        # This breaks up the enabled_services list into smaller lists with
        # length equal to the number of forks.
        enabled_services_length = len(enabled_services)
        for i in range(0, enabled_services_length, forks):
            # It would be nice to be able to use multiprocessing.Pool here,
            # however, that resulted in many pickle errors.
            # For each smaller list, spawn a process to compute each service in
            # that chunk.
            end = i + forks
            if end > enabled_services_length:
                end = enabled_services_length
            processes = [Process(target=self.compute_service,
                                 args=(enabled_services[x], all_nodes))
                         for x in range(i, end)]
            [p.start() for p in processes]
            [p.join() for p in processes]
            [p.terminate() for p in processes]
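
    # Gathers the inventory groups, host vars and TripleO task vars, computes
    # the per-service data in parallel, then adds VIPs, service_net_map
    # entries and top-level deployment variables to all_nodes.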
    def compute_all_nodes(self, all_nodes, task_vars):
        DISPLAY.vv("Starting compute and render for all_nodes data")

        # Internal Ansible objects for inventory and variables
        inventory = self._task.get_variable_manager()._inventory
        self.groups = inventory.get_groups_dict()
        # host_vars
        self.h_vars = self._task.get_variable_manager().get_vars()['hostvars']

        # Needed tripleo variables for convenience
        self.service_net_map = task_vars['service_net_map']
        self.nova_additional_cell = task_vars['nova_additional_cell']
        self.all_nodes_extra_map_data = task_vars['all_nodes_extra_map_data']
        net_vip_map = task_vars['net_vip_map']
        enabled_services = task_vars['enabled_services']
        primary_role_name = task_vars['primary_role_name']

        enabled_services += self.all_nodes_extra_map_data.get(
            'enabled_services', [])
        # make enabled_services unique and sorted
        enabled_services = list(set(enabled_services))
        enabled_services.sort()
        all_nodes['enabled_services'] = enabled_services

        forks = self._task.args.get('forks', task_vars['ansible_forks'])
        DISPLAY.vv("forks set to {}".format(forks))
        self.process_services(enabled_services, all_nodes, forks)

        # <service>: service_network
        DISPLAY.vv("Computing data for service_net_map")
        for key, value in self.service_net_map.items():
            all_nodes[key] = value

        # all values from all_nodes_extra_map_data when nova_additional_cell
        if self.nova_additional_cell:
            for key, value in self.all_nodes_extra_map_data.items():
                all_nodes[key] = value

        # redis_vip: ip
        DISPLAY.vv("Computing data for redis_vip")
        if 'redis' in enabled_services or self.nova_additional_cell:
            if 'redis_vip' in self.all_nodes_extra_map_data:
                all_nodes['redis_vip'] = \
                    self.all_nodes_extra_map_data['redis_vip']
            elif 'redis' in net_vip_map:
                all_nodes['redis_vip'] = net_vip_map['redis']

        # ovn_dbs_vip: ip
        DISPLAY.vv("Computing data for ovn_dbs_vip")
        if 'ovn_dbs' in enabled_services or self.nova_additional_cell:
            if 'ovn_dbs_vip' in self.all_nodes_extra_map_data:
                all_nodes['ovn_dbs_vip'] = \
                    self.all_nodes_extra_map_data['ovn_dbs_vip']
            elif 'ovn_dbs' in net_vip_map:
                all_nodes['ovn_dbs_vip'] = net_vip_map['ovn_dbs']

        DISPLAY.vv("Computing data for top level vars")
        all_nodes['deploy_identifier'] = task_vars['deploy_identifier']
        all_nodes['stack_action'] = task_vars['stack_action']
        all_nodes['stack_update_type'] = task_vars['stack_update_type']
        all_nodes['container_cli'] = task_vars['container_cli']

        # controller_node_<ips/names>
        # note that these are supposed to be strings, not lists
        DISPLAY.vv("Computing data for controller node ips/names")
        primary_hosts = self.groups.get(primary_role_name, [])
        all_nodes['controller_node_ips'] = \
            ','.join(list(map(lambda host: self.h_vars[host]['ctlplane_ip'],
                              primary_hosts)))
        all_nodes['controller_node_names'] = \
            ','.join(list(map(lambda host: self.h_vars[host]['inventory_hostname'],
                              primary_hosts)))

        DISPLAY.vv("Done")
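
    # Entry point of the action plugin. The rendered data is written to
    # <playbook_dir>/group_vars/overcloud.json so that Ansible picks it up as
    # group_vars on subsequent plays against the overcloud nodes.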
    def run(self, tmp=None, task_vars=None):
        """Renders the all_nodes data for TripleO as group_vars"""
        manager = Manager()
        all_nodes = manager.dict()

        try:
            self.compute_all_nodes(all_nodes, task_vars)
            all_nodes = dict(all_nodes)
            all_nodes_path = os.path.join(task_vars['playbook_dir'],
                                          'group_vars', 'overcloud.json')
            with open(all_nodes_path, 'w') as f:
                DISPLAY.vv("Rendering all_nodes to {}".format(all_nodes_path))
                json.dump(all_nodes, f, sort_keys=True, indent=4)
        except Exception as e:
            DISPLAY.error(traceback.format_exc())
            raise AnsibleError(str(e))
        finally:
            manager.shutdown()
            # multiprocessing can hang the plugin exit if there are still
            # references to the Manager() object. Even though we have called
            # .shutdown(), clean up all_nodes just to be safe.
            all_nodes = None

        DISPLAY.vv("returning")
        return dict(all_nodes=all_nodes)