Sahara aims to give users a simple means of provisioning a Hadoop cluster by specifying a handful of parameters, such as the Hadoop version, cluster topology, node hardware details, and a few others.
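
Concretely, a create call against the v2 endpoint implemented below boils down to a JSON document naming those parameters. The following sketch is purely illustrative: the endpoint URL, token, and all field values are hypothetical placeholders; the only behaviors taken directly from the handler code are that `plugin_version` must be present (it is mapped back to `hadoop_version` internally) and that an optional `count` requests several identical clusters.

# Illustrative v2 cluster-create request. SAHARA_URL, TOKEN, and every
# payload value are made-up placeholders, not real identifiers.
import requests

SAHARA_URL = "http://controller:8386/v2"   # assumed service endpoint
TOKEN = "..."                              # a valid Keystone token

payload = {
    "name": "demo-cluster",
    "plugin_name": "vanilla",
    "plugin_version": "2.7.1",       # APIv2 field name; see clusters_create()
    "cluster_template_id": "...",    # hypothetical template UUID
    "default_image_id": "...",       # hypothetical image UUID
    # "count": 3,                    # optional: create several identical clusters
}

resp = requests.post("%s/clusters" % SAHARA_URL,
                     json=payload,
                     headers={"X-Auth-Token": TOKEN})
print(resp.status_code, resp.json())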

clusters.py

# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six

from sahara.api import acl
from sahara.service.api.v2 import clusters as api
from sahara.service import validation as v
from sahara.service.validations import clusters as v_c
from sahara.service.validations import clusters_scaling as v_c_s
from sahara.service.validations import clusters_schema as v_c_schema
import sahara.utils.api as u


rest = u.RestV2('clusters', __name__)


@rest.get('/clusters')
@acl.enforce("data-processing:clusters:get_all")
@v.check_exists(api.get_cluster, 'marker')
@v.validate(None, v.validate_pagination_limit)
def clusters_list():
    result = api.get_clusters(**u.get_request_args().to_dict())
    for c in result:
        u._replace_hadoop_version_plugin_version(c)
        u._replace_tenant_id_project_id(c)
    return u.render(res=result, name='clusters')


@rest.post('/clusters')
@acl.enforce("data-processing:clusters:create")
@v.validate(v_c_schema.CLUSTER_SCHEMA_V2,
            v_c.check_one_or_multiple_clusters_create)
def clusters_create(data):
    # renaming hadoop_version -> plugin_version
    # this can be removed once APIv1 is deprecated
    data['hadoop_version'] = data['plugin_version']
    del data['plugin_version']
    if data.get('count', None) is not None:
        # an optional 'count' asks for several identical clusters in one call
        result = api.create_multiple_clusters(data)
        for c in result['clusters']:
            u._replace_hadoop_version_plugin_version(c['cluster'])
            u._replace_tenant_id_project_id(c['cluster'])
        return u.render(result)
    else:
        result = api.create_cluster(data).to_wrapped_dict()
        u._replace_hadoop_version_plugin_version(result['cluster'])
        u._replace_tenant_id_project_id(result['cluster'])
        return u.render(result)


@rest.put('/clusters/<cluster_id>')
@acl.enforce("data-processing:clusters:scale")
@v.check_exists(api.get_cluster, 'cluster_id')
@v.validate(v_c_schema.CLUSTER_SCALING_SCHEMA_V2, v_c_s.check_cluster_scaling)
def clusters_scale(cluster_id, data):
    result = u.to_wrapped_dict_no_render(
        api.scale_cluster, cluster_id, data)
    u._replace_hadoop_version_plugin_version(result['cluster'])
    u._replace_tenant_id_project_id(result['cluster'])
    return u.render(result)


@rest.get('/clusters/<cluster_id>')
@acl.enforce("data-processing:clusters:get")
@v.check_exists(api.get_cluster, 'cluster_id')
def clusters_get(cluster_id):
    data = u.get_request_args()
    show_events = six.text_type(
        data.get('show_progress', 'false')).lower() == 'true'
    result = u.to_wrapped_dict_no_render(
        api.get_cluster, cluster_id, show_events)
    u._replace_hadoop_version_plugin_version(result['cluster'])
    u._replace_tenant_id_project_id(result['cluster'])
    return u.render(result)


@rest.patch('/clusters/<cluster_id>')
@acl.enforce("data-processing:clusters:modify")
@v.check_exists(api.get_cluster, 'cluster_id')
@v.validate(v_c_schema.CLUSTER_UPDATE_SCHEMA, v_c.check_cluster_update)
def clusters_update(cluster_id, data):
    result = u.to_wrapped_dict_no_render(
        api.update_cluster, cluster_id, data)
    u._replace_hadoop_version_plugin_version(result['cluster'])
    u._replace_tenant_id_project_id(result['cluster'])
    return u.render(result)


@rest.delete('/clusters/<cluster_id>')
@acl.enforce("data-processing:clusters:delete")
@v.check_exists(api.get_cluster, 'cluster_id')
@v.validate(v_c_schema.CLUSTER_DELETE_SCHEMA_V2, v_c.check_cluster_delete)
def clusters_delete(cluster_id):
    data = u.request_data()
    force = data.get('force', False)
    # a forced delete reports the Heat stack name so callers can watch it
    stack_name = api.get_cluster(cluster_id).get(
        'extra', {}).get(
        'heat_stack_name', None)
    api.terminate_cluster(cluster_id, force=force)
    if force:
        return u.render({"stack_name": stack_name}, status=200)
    else:
        return u.render(res=None, status=204)
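
To make the delete semantics above concrete, here is a hedged client-side sketch (endpoint URL, token, and cluster id are again placeholders): sending {"force": true} in the request body yields a 200 response carrying the Heat stack name, while a plain delete yields an empty 204.

# Hypothetical client-side view of clusters_delete(). SAHARA_URL, TOKEN,
# and cluster_id are placeholders.
import requests

SAHARA_URL = "http://controller:8386/v2"
TOKEN = "..."
cluster_id = "..."

resp = requests.delete("%s/clusters/%s" % (SAHARA_URL, cluster_id),
                       json={"force": True},
                       headers={"X-Auth-Token": TOKEN})

if resp.status_code == 200:
    # forced delete: body contains the Heat stack name being torn down
    print("forced delete, stack:", resp.json()["stack_name"])
elif resp.status_code == 204:
    # normal delete: no body, termination has been initiated
    print("cluster deletion started")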