
Prepare Sahara core for plugin split

As part of the effort to make Sahara friendlier for users and
operators, we are splitting the plugins out of Sahara core.

The main goal of this change is to simplify the installation,
maintenance and upgrade of plugins. With the plugins outside the
main Sahara code base, operators will be able to install only the
subset of plugins they need, and to upgrade to newer plugin versions
without waiting for a new OpenStack release. It also makes it easier
for new contributors to develop and maintain their own plugins.
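
For example, once the split is complete an operator should be able to
install Sahara core together with only the plugins that are actually
needed, and upgrade a single plugin on its own schedule. A minimal
sketch, assuming the plugin packages are published under the same
names as the new repositories:

    # install Sahara core plus only the plugins you need
    # (package names are assumed to mirror the repository names)
    pip install sahara
    pip install sahara-plugin-spark sahara-plugin-vanilla

    # later: upgrade one plugin without touching Sahara core
    pip install --upgrade sahara-plugin-spark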

Sahara Spec: https://specs.openstack.org/openstack/sahara-specs/specs/rocky/plugins-outside-sahara-core.html

Change-Id: I7ed0945fd82e37daaf6b29f947d3dba06db9d158
Telles Nobrega committed 7 months ago
commit 92686f5a10
100 changed files with 143 additions and 27493 deletions
  1. .gitignore (+1, -0)
  2. .zuul.yaml (+44, -6)
  3. devstack/plugin.sh (+62, -0)
  4. devstack/settings (+19, -0)
  5. releasenotes/notes/plugins-split-from-sahara-core-9ffc5e5d06c9239c.yaml (+5, -0)
  6. sahara/plugins/ambari/__init__.py (+0, -0)
  7. sahara/plugins/ambari/client.py (+0, -363)
  8. sahara/plugins/ambari/common.py (+0, -155)
  9. sahara/plugins/ambari/configs.py (+0, -334)
  10. sahara/plugins/ambari/deploy.py (+0, -719)
  11. sahara/plugins/ambari/edp_engine.py (+0, -130)
  12. sahara/plugins/ambari/ha_helper.py (+0, -252)
  13. sahara/plugins/ambari/health.py (+0, -149)
  14. sahara/plugins/ambari/plugin.py (+0, -298)
  15. sahara/plugins/ambari/requests_helper.py (+0, -145)
  16. sahara/plugins/ambari/resources/configs-2.3.json (+0, -1276)
  17. sahara/plugins/ambari/resources/configs-2.4.json (+0, -1331)
  18. sahara/plugins/ambari/resources/configs-2.5.json (+0, -2008)
  19. sahara/plugins/ambari/resources/configs-2.6.json (+0, -2008)
  20. sahara/plugins/ambari/resources/generate_config.py (+0, -80)
  21. sahara/plugins/ambari/resources/images/centos/disable_ambari (+0, -8)
  22. sahara/plugins/ambari/resources/images/centos/disable_certificate_check (+0, -12)
  23. sahara/plugins/ambari/resources/images/centos/disable_firewall (+0, -20)
  24. sahara/plugins/ambari/resources/images/centos/disable_selinux (+0, -12)
  25. sahara/plugins/ambari/resources/images/centos/setup_java_home (+0, -31)
  26. sahara/plugins/ambari/resources/images/centos/unlimited_security_artifacts (+0, -11)
  27. sahara/plugins/ambari/resources/images/centos/wget_repo (+0, -9)
  28. sahara/plugins/ambari/resources/images/common/add_jar (+0, -31)
  29. sahara/plugins/ambari/resources/images/common/fix_tls_ambari_agent (+0, -17)
  30. sahara/plugins/ambari/resources/images/common/mysql_connector_java_link (+0, -14)
  31. sahara/plugins/ambari/resources/images/common/oracle_java (+0, -41)
  32. sahara/plugins/ambari/resources/images/image.yaml (+0, -111)
  33. sahara/plugins/ambari/resources/images/ubuntu/wget_repo (+0, -10)
  34. sahara/plugins/ambari/validation.py (+0, -226)
  35. sahara/plugins/castellan_utils.py (+12, -6)
  36. sahara/plugins/cdh/__init__.py (+0, -0)
  37. sahara/plugins/cdh/abstractversionhandler.py (+0, -234)
  38. sahara/plugins/cdh/client/__init__.py (+0, -0)
  39. sahara/plugins/cdh/client/api_client.py (+0, -145)
  40. sahara/plugins/cdh/client/clusters.py (+0, -240)
  41. sahara/plugins/cdh/client/cms.py (+0, -84)
  42. sahara/plugins/cdh/client/hosts.py (+0, -90)
  43. sahara/plugins/cdh/client/http_client.py (+0, -143)
  44. sahara/plugins/cdh/client/resource.py (+0, -166)
  45. sahara/plugins/cdh/client/role_config_groups.py (+0, -108)
  46. sahara/plugins/cdh/client/roles.py (+0, -187)
  47. sahara/plugins/cdh/client/services.py (+0, -528)
  48. sahara/plugins/cdh/client/types.py (+0, -684)
  49. sahara/plugins/cdh/client/users.py (+0, -62)
  50. sahara/plugins/cdh/cloudera_utils.py (+0, -837)
  51. sahara/plugins/cdh/commands.py (+0, -116)
  52. sahara/plugins/cdh/config_helper.py (+0, -307)
  53. sahara/plugins/cdh/confighints_helper.py (+0, -46)
  54. sahara/plugins/cdh/db_helper.py (+0, -120)
  55. sahara/plugins/cdh/db_resources/create_hive_db.sql (+0, -4)
  56. sahara/plugins/cdh/db_resources/create_sentry_db.sql (+0, -4)
  57. sahara/plugins/cdh/deploy.py (+0, -124)
  58. sahara/plugins/cdh/edp_engine.py (+0, -103)
  59. sahara/plugins/cdh/exceptions.py (+0, -78)
  60. sahara/plugins/cdh/health.py (+0, -145)
  61. sahara/plugins/cdh/plugin.py (+0, -125)
  62. sahara/plugins/cdh/plugin_utils.py (+0, -471)
  63. sahara/plugins/cdh/v5_11_0/__init__.py (+0, -0)
  64. sahara/plugins/cdh/v5_11_0/cloudera_utils.py (+0, -28)
  65. sahara/plugins/cdh/v5_11_0/config_helper.py (+0, -101)
  66. sahara/plugins/cdh/v5_11_0/deploy.py (+0, -168)
  67. sahara/plugins/cdh/v5_11_0/edp_engine.py (+0, -47)
  68. sahara/plugins/cdh/v5_11_0/images.py (+0, -43)
  69. sahara/plugins/cdh/v5_11_0/plugin_utils.py (+0, -23)
  70. sahara/plugins/cdh/v5_11_0/resources/cdh_config.py (+0, -63)
  71. sahara/plugins/cdh/v5_11_0/resources/cdh_config.sh (+0, -14)
  72. sahara/plugins/cdh/v5_11_0/resources/flume-agent.json (+0, -440)
  73. sahara/plugins/cdh/v5_11_0/resources/flume-service.json (+0, -164)
  74. sahara/plugins/cdh/v5_11_0/resources/hbase-gateway.json (+0, -122)
  75. sahara/plugins/cdh/v5_11_0/resources/hbase-hbaserestserver.json (+0, -452)
  76. sahara/plugins/cdh/v5_11_0/resources/hbase-hbasethriftserver.json (+0, -458)
  77. sahara/plugins/cdh/v5_11_0/resources/hbase-master.json (+0, -518)
  78. sahara/plugins/cdh/v5_11_0/resources/hbase-regionserver.json (+0, -878)
  79. sahara/plugins/cdh/v5_11_0/resources/hbase-service.json (+0, -668)
  80. sahara/plugins/cdh/v5_11_0/resources/hdfs-balancer.json (+0, -128)
  81. sahara/plugins/cdh/v5_11_0/resources/hdfs-datanode.json (+0, -656)
  82. sahara/plugins/cdh/v5_11_0/resources/hdfs-failovercontroller.json (+0, -344)
  83. sahara/plugins/cdh/v5_11_0/resources/hdfs-gateway.json (+0, -116)
  84. sahara/plugins/cdh/v5_11_0/resources/hdfs-httpfs.json (+0, -452)
  85. sahara/plugins/cdh/v5_11_0/resources/hdfs-journalnode.json (+0, -458)
  86. sahara/plugins/cdh/v5_11_0/resources/hdfs-namenode.json (+0, -872)
  87. sahara/plugins/cdh/v5_11_0/resources/hdfs-nfsgateway.json (+0, -410)
  88. sahara/plugins/cdh/v5_11_0/resources/hdfs-secondarynamenode.json (+0, -458)
  89. sahara/plugins/cdh/v5_11_0/resources/hdfs-service.json (+0, -1340)
  90. sahara/plugins/cdh/v5_11_0/resources/hive-gateway.json (+0, -98)
  91. sahara/plugins/cdh/v5_11_0/resources/hive-hivemetastore.json (+0, -434)
  92. sahara/plugins/cdh/v5_11_0/resources/hive-hiveserver2.json (+0, -848)
  93. sahara/plugins/cdh/v5_11_0/resources/hive-metastore-sentry-safety.xml (+0, -15)
  94. sahara/plugins/cdh/v5_11_0/resources/hive-server2-sentry-safety.xml (+0, -12)
  95. sahara/plugins/cdh/v5_11_0/resources/hive-service.json (+0, -656)
  96. sahara/plugins/cdh/v5_11_0/resources/hive-site.xml (+0, -61)
  97. sahara/plugins/cdh/v5_11_0/resources/hive-webhcat.json (+0, -344)
  98. sahara/plugins/cdh/v5_11_0/resources/hue-hue_load_balancer.json (+0, -248)
  99. sahara/plugins/cdh/v5_11_0/resources/hue-hue_server.json (+0, -392)
  100. sahara/plugins/cdh/v5_11_0/resources/hue-kt_renewer.json (+0, -0)

.gitignore (+1, -0)

@@ -2,6 +2,7 @@
 *.egg[s]
 *.log
 *.py[co]
+*.un~
 .coverage
 .testrepository
 .tox

.zuul.yaml (+44, -6)

@@ -12,17 +12,48 @@
       jobs:
         - openstack-tox-pylint:
             voting: false
-        - sahara-tests-scenario
-        - sahara-tests-tempest
+        - sahara-tests-scenario:
+            required-projects:
+              - openstack/sahara-plugin-ambari
+              - openstack/sahara-plugin-cdh
+              - openstack/sahara-plugin-mapr
+              - openstack/sahara-plugin-spark
+              - openstack/sahara-plugin-storm
+              - openstack/sahara-plugin-vanilla
+        - sahara-tests-tempest:
+            required-projects:
+              - openstack/sahara-plugin-ambari
+              - openstack/sahara-plugin-cdh
+              - openstack/sahara-plugin-mapr
+              - openstack/sahara-plugin-spark
+              - openstack/sahara-plugin-storm
+              - openstack/sahara-plugin-vanilla
         - openstack-tox-cover:
             voting: false
-        - sahara-grenade
+        - sahara-grenade:
+            voting: false
     gate:
       queue: sahara
       jobs:
-        - sahara-tests-scenario
-        - sahara-tests-tempest
-        - sahara-grenade
+        - sahara-tests-scenario:
+            required-projects:
+              - openstack/sahara-plugin-ambari
+              - openstack/sahara-plugin-cdh
+              - openstack/sahara-plugin-mapr
+              - openstack/sahara-plugin-spark
+              - openstack/sahara-plugin-storm
+              - openstack/sahara-plugin-vanilla
+        - sahara-tests-tempest:
+            required-projects:
+              - openstack/sahara-plugin-ambari
+              - openstack/sahara-plugin-cdh
+              - openstack/sahara-plugin-mapr
+              - openstack/sahara-plugin-spark
+              - openstack/sahara-plugin-storm
+              - openstack/sahara-plugin-vanilla
+        - sahara-grenade:
+            voting: false
+
     experimental:
       jobs:
         - sahara-buildimages-ambari
@@ -61,6 +92,13 @@
     nodeset: centos-7
     run: playbooks/buildimages/run.yaml
     timeout: 7200
+    required-projects:
+      - openstack/sahara-plugin-ambari
+      - openstack/sahara-plugin-cdh
+      - openstack/sahara-plugin-mapr
+      - openstack/sahara-plugin-spark
+      - openstack/sahara-plugin-storm
+      - openstack/sahara-plugin-vanilla
 
 - job:
     name: sahara-buildimages-ambari

devstack/plugin.sh (+62, -0)

@@ -206,6 +206,42 @@ function install_sahara {
     fi
 }
 
+# install_ambari() - Collect source and prepare
+function install_ambari {
+    git_clone $AMBARI_PLUGIN_REPO $AMBARI_PLUGIN_DIR $AMBARI_PLUGIN_BRANCH
+    setup_develop $AMBARI_PLUGIN_DIR
+}
+
+# install_cdh() - Collect source and prepare
+function install_cdh {
+    git_clone $CDH_PLUGIN_REPO $CDH_PLUGIN_DIR $CDH_PLUGIN_BRANCH
+    setup_develop $CDH_PLUGIN_DIR
+}
+
+# install_mapr() - Collect source and prepare
+function install_mapr {
+    git_clone $MAPR_PLUGIN_REPO $MAPR_PLUGIN_DIR $MAPR_PLUGIN_BRANCH
+    setup_develop $MAPR_PLUGIN_DIR
+}
+
+# install_spark() - Collect source and prepare
+function install_spark {
+    git_clone $SPARK_PLUGIN_REPO $SPARK_PLUGIN_DIR $SPARK_PLUGIN_BRANCH
+    setup_develop $SPARK_PLUGIN_DIR
+}
+
+# install_storm() - Collect source and prepare
+function install_storm {
+    git_clone $STORM_PLUGIN_REPO $STORM_PLUGIN_DIR $STORM_PLUGIN_BRANCH
+    setup_develop $STORM_PLUGIN_DIR
+}
+
+# install_vanilla() - Collect source and prepare
+function install_vanilla {
+    git_clone $VANILLA_PLUGIN_REPO $VANILLA_PLUGIN_DIR $VANILLA_PLUGIN_BRANCH
+    setup_develop $VANILLA_PLUGIN_DIR
+}
+
 # install_python_saharaclient() - Collect source and prepare
 function install_python_saharaclient {
     if use_library_from_git "python-saharaclient"; then
@@ -284,11 +320,37 @@ function is_sahara_enabled {
     fi
 }
 
+function is_plugin_enabled {
+    if [ "${SAHARA_ENABLED_PLUGINS/$1}" = "$SAHARA_ENABLED_PLUGINS" ] ; then
+        return 1
+    else
+        return 0
+    fi
+}
+
 # Dispatcher for Sahara plugin
 if is_service_enabled sahara; then
     if [[ "$1" == "stack" && "$2" == "install" ]]; then
         echo_summary "Installing sahara"
         install_sahara
+        if is_plugin_enabled ambari; then
+            install_ambari
+        fi
+        if is_plugin_enabled cdh; then
+            install_cdh
+        fi
+        if is_plugin_enabled mapr; then
+            install_mapr
+        fi
+        if is_plugin_enabled spark; then
+            install_spark
+        fi
+        if is_plugin_enabled storm; then
+            install_storm
+        fi
+        if is_plugin_enabled vanilla; then
+            install_vanilla
+        fi
         install_python_saharaclient
         cleanup_sahara
     elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
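
The new is_plugin_enabled helper above relies on bash substring removal:
"${SAHARA_ENABLED_PLUGINS/$1}" deletes the plugin name from the list, and
if the result is unchanged the plugin is considered disabled. A minimal
local.conf sketch (assuming SAHARA_ENABLED_PLUGINS gets a sensible default
elsewhere in the devstack settings, which this diff does not show) could
enable only a subset of plugins:

    [[local|localrc]]
    enable_plugin sahara https://git.openstack.org/openstack/sahara

    # only these two plugins are cloned and installed by the dispatcher
    # above; matching is a plain substring check, so any separator works
    SAHARA_ENABLED_PLUGINS=spark,vanilla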

devstack/settings (+19, -0)

@@ -6,10 +6,29 @@
 # Set up default directories
 SAHARACLIENT_DIR=$DEST/python-saharaclient
 SAHARA_DIR=$DEST/sahara
+AMBARI_PLUGIN_DIR=$DEST/sahara-plugin-ambari
+CDH_PLUGIN_DIR=$DEST/sahara-plugin-cdh
+MAPR_PLUGIN_DIR=$DEST/sahara-plugin-mapr
+SPARK_PLUGIN_DIR=$DEST/sahara-plugin-spark
+STORM_PLUGIN_DIR=$DEST/sahara-plugin-storm
+VANILLA_PLUGIN_DIR=$DEST/sahara-plugin-vanilla
 
 SAHARACLIENT_REPO=${SAHARACLIENT_REPO:-\
 ${GIT_BASE}/openstack/python-saharaclient.git}
 SAHARACLIENT_BRANCH=${SAHARACLIENT_BRANCH:-master}
+AMBARI_PLUGIN_REPO=${AMBARI_PLUGIN_REPO:-http://git.openstack.org/cgit/openstack/sahara-plugin-ambari/}
+AMBARI_PLUGIN_BRANCH=${AMBARI_PLUGIN_BRANCH:-master}
+CDH_PLUGIN_REPO=${CDH_PLUGIN_REPO:-http://git.openstack.org/cgit/openstack/sahara-plugin-cdh/}
+CDH_PLUGIN_BRANCH=${CDH_PLUGIN_BRANCH:-master}
+MAPR_PLUGIN_REPO=${MAPR_PLUGIN_REPO:-http://git.openstack.org/cgit/openstack/sahara-plugin-mapr/}
+MAPR_PLUGIN_BRANCH=${MAPR_PLUGIN_BRANCH:-master}
+SPARK_PLUGIN_REPO=${SPARK_PLUGIN_REPO:-http://git.openstack.org/cgit/openstack/sahara-plugin-spark/}
+SPARK_PLUGIN_BRANCH=${SPARK_PLUGIN_BRANCH:-master}
+STORM_PLUGIN_REPO=${STORM_PLUGIN_REPO:-http://git.openstack.org/cgit/openstack/sahara-plugin-storm/}
+STORM_PLUGIN_BRANCH=${STORM_PLUGIN_BRANCH:-master}
+VANILLA_PLUGIN_REPO=${VANILLA_PLUGIN_REPO:-http://git.openstack.org/cgit/openstack/sahara-plugin-vanilla/}
+VANILLA_PLUGIN_BRANCH=${VANILLA_PLUGIN_BRANCH:-master}
+
 
 SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara}
 SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf
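
Because each plugin's repository and branch are exposed as overridable
variables, a deployment can build a single plugin from a different source
without modifying the devstack plugin itself. A sketch reusing the
variables defined above (the branch name here is only an example):

    # in local.conf: take the Vanilla plugin from a stable branch
    VANILLA_PLUGIN_REPO=${GIT_BASE}/openstack/sahara-plugin-vanilla
    VANILLA_PLUGIN_BRANCH=stable/rocky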

releasenotes/notes/plugins-split-from-sahara-core-9ffc5e5d06c9239c.yaml (+5, -0)

@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    In an effort to improve Sahara's usability and maintainability we are
+    splitting the plugins from Sahara core into their own repositories.

sahara/plugins/ambari/__init__.py (+0, -0)


sahara/plugins/ambari/client.py (+0, -363)

@@ -1,363 +0,0 @@
1
-# Copyright (c) 2015 Mirantis Inc.
2
-#
3
-# Licensed under the Apache License, Version 2.0 (the "License");
4
-# you may not use this file except in compliance with the License.
5
-# You may obtain a copy of the License at
6
-#
7
-#    http://www.apache.org/licenses/LICENSE-2.0
8
-#
9
-# Unless required by applicable law or agreed to in writing, software
10
-# distributed under the License is distributed on an "AS IS" BASIS,
11
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
-# implied.
13
-# See the License for the specific language governing permissions and
14
-# limitations under the License.
15
-
16
-
17
-from oslo_log import log as logging
18
-from oslo_serialization import jsonutils
19
-from requests import auth
20
-
21
-from sahara import context
22
-from sahara.i18n import _
23
-from sahara.plugins.ambari import requests_helper as r_helper
24
-from sahara.plugins import exceptions as p_exc
25
-
26
-
27
-LOG = logging.getLogger(__name__)
28
-
29
-
30
-class AmbariNotFound(Exception):
31
-    pass
32
-
33
-
34
-class AmbariClient(object):
35
-    def __init__(self, instance, port="8080", **kwargs):
36
-        kwargs.setdefault("username", "admin")
37
-        kwargs.setdefault("password", "admin")
38
-
39
-        self._port = port
40
-        self._base_url = "http://{host}:{port}/api/v1".format(
41
-            host=instance.management_ip, port=port)
42
-        self._instance = instance
43
-        self._http_client = instance.remote().get_http_client(port)
44
-        self._headers = {"X-Requested-By": "sahara"}
45
-        self._auth = auth.HTTPBasicAuth(kwargs["username"], kwargs["password"])
46
-        self._default_client_args = {"verify": False, "auth": self._auth,
47
-                                     "headers": self._headers}
48
-
49
-    def __enter__(self):
50
-        return self
51
-
52
-    def __exit__(self, type, value, traceback):
53
-        self.close()
54
-
55
-    def close(self):
56
-        self._instance.remote().close_http_session(self._port)
57
-
58
-    def get(self, *args, **kwargs):
59
-        kwargs.update(self._default_client_args)
60
-        return self._http_client.get(*args, **kwargs)
61
-
62
-    def post(self, *args, **kwargs):
63
-        kwargs.update(self._default_client_args)
64
-        return self._http_client.post(*args, **kwargs)
65
-
66
-    def put(self, *args, **kwargs):
67
-        kwargs.update(self._default_client_args)
68
-        return self._http_client.put(*args, **kwargs)
69
-
70
-    def delete(self, *args, **kwargs):
71
-        kwargs.update(self._default_client_args)
72
-        return self._http_client.delete(*args, **kwargs)
73
-
74
-    def get_alerts_data(self, cluster):
75
-        url = self._base_url + "/clusters/%s/alerts?fields=*" % cluster.name
76
-        resp = self.get(url)
77
-        data = self.check_response(resp)
78
-        return data.get('items', [])
79
-
80
-    @staticmethod
81
-    def check_response(resp, handle_not_found=False):
82
-        if handle_not_found and resp.status_code == 404:
83
-            raise AmbariNotFound()
84
-        resp.raise_for_status()
85
-        if resp.text:
86
-            return jsonutils.loads(resp.text)
87
-
88
-    @staticmethod
89
-    def req_id(response):
90
-        if not response.text:
91
-            raise p_exc.HadoopProvisionError("Cannot find request id. "
92
-                                             "No response body")
93
-        body = jsonutils.loads(response.text)
94
-        if "Requests" not in body or "id" not in body["Requests"]:
95
-            raise p_exc.HadoopProvisionError("Cannot find request id. "
96
-                                             "Unexpected response format")
97
-        return body["Requests"]["id"]
98
-
99
-    def import_credential(self, cl_name, alias, data):
100
-        url = self._base_url + "/clusters/%s/credentials/%s" % (cl_name, alias)
101
-        resp = self.post(url, data=jsonutils.dumps(data))
102
-        self.check_response(resp)
103
-
104
-    def get_credential(self, cl_name, alias):
105
-        url = self._base_url + "/clusters/%s/credentials/%s" % (cl_name, alias)
106
-        resp = self.get(url)
107
-        self.check_response(resp, handle_not_found=True)
108
-
109
-    def regenerate_keytabs(self, cl_name):
110
-        url = (self._base_url +
111
-               "/clusters/%s?regenerate_keytabs=missing" % cl_name)
112
-        data = jsonutils.dumps({"Clusters": {"security_type": "KERBEROS"}})
113
-        resp = self.put(url, data=data)
114
-        self.check_response(resp)
115
-        return self.req_id(resp)
116
-
117
-    def get_registered_hosts(self):
118
-        url = self._base_url + "/hosts"
119
-        resp = self.get(url)
120
-        data = self.check_response(resp)
121
-        return data.get("items", [])
122
-
123
-    def get_host_info(self, host):
124
-        url = self._base_url + "/hosts/%s" % host
125
-        resp = self.get(url)
126
-        data = self.check_response(resp)
127
-        return data.get("Hosts", {})
128
-
129
-    def update_user_password(self, user, old_password, new_password):
130
-        url = self._base_url + "/users/%s" % user
131
-        data = jsonutils.dumps({
132
-            "Users": {
133
-                "old_password": old_password,
134
-                "password": new_password
135
-            }
136
-        })
137
-        resp = self.put(url, data=data)
138
-        self.check_response(resp)
139
-
140
-    def create_blueprint(self, name, data):
141
-        url = self._base_url + "/blueprints/%s" % name
142
-        resp = self.post(url, data=jsonutils.dumps(data))
143
-        return self.check_response(resp)
144
-
145
-    def create_cluster(self, name, data):
146
-        url = self._base_url + "/clusters/%s" % name
147
-        resp = self.post(url, data=jsonutils.dumps(data))
148
-        return self.check_response(resp).get("Requests")
149
-
150
-    def add_host_to_cluster(self, instance):
151
-        cluster_name = instance.cluster.name
152
-        hostname = instance.fqdn()
153
-        url = self._base_url + "/clusters/{cluster}/hosts/{hostname}".format(
154
-            cluster=cluster_name, hostname=hostname)
155
-        resp = self.post(url)
156
-        self.check_response(resp)
157
-
158
-    def get_config_groups(self, cluster):
159
-        url = self._base_url + "/clusters/%s/config_groups" % cluster.name
160
-        resp = self.get(url)
161
-        return self.check_response(resp)
162
-
163
-    def get_detailed_config_group(self, cluster, cfg_id):
164
-        url = self._base_url + "/clusters/%s/config_groups/%s" % (
165
-            cluster.name, cfg_id)
166
-        resp = self.get(url)
167
-        return self.check_response(resp)
168
-
169
-    def remove_config_group(self, cluster, cfg_id):
170
-        url = self._base_url + "/clusters/%s/config_groups/%s" % (
171
-            cluster.name, cfg_id)
172
-        resp = self.delete(url)
173
-        return self.check_response(resp)
174
-
175
-    def create_config_group(self, cluster, data):
176
-        url = self._base_url + "/clusters/%s/config_groups" % cluster.name
177
-        resp = self.post(url, data=jsonutils.dumps(data))
178
-        return self.check_response(resp)
179
-
180
-    def add_service_to_host(self, inst, service):
181
-        url = "{pref}/clusters/{cluster}/hosts/{host}/host_components/{proc}"
182
-        url = url.format(pref=self._base_url, cluster=inst.cluster.name,
183
-                         host=inst.fqdn(), proc=service)
184
-        self.check_response(self.post(url))
185
-
186
-    def start_service_on_host(self, inst, service, final_state):
187
-        url = "{pref}/clusters/{cluster}/hosts/{host}/host_components/{proc}"
188
-        url = url.format(
189
-            pref=self._base_url, cluster=inst.cluster.name, host=inst.fqdn(),
190
-            proc=service)
191
-        data = {
192
-            'HostRoles': {
193
-                'state': final_state
194
-            },
195
-            'RequestInfo': {
196
-                'context': "Starting service {service}, moving to state "
197
-                           "{state}".format(service=service, state=final_state)
198
-            }
199
-        }
200
-        resp = self.put(url, data=jsonutils.dumps(data))
201
-        self.check_response(resp)
202
-        # return req_id to check health of request
203
-        return self.req_id(resp)
204
-
205
-    def decommission_nodemanagers(self, cluster_name, instances):
206
-        url = self._base_url + "/clusters/%s/requests" % cluster_name
207
-        data = r_helper.build_nodemanager_decommission_request(cluster_name,
208
-                                                               instances)
209
-        resp = self.post(url, data=jsonutils.dumps(data))
210
-        self.wait_ambari_request(self.req_id(resp), cluster_name)
211
-
212
-    def decommission_datanodes(self, cluster_name, instances):
213
-        url = self._base_url + "/clusters/%s/requests" % cluster_name
214
-        data = r_helper.build_datanode_decommission_request(cluster_name,
215
-                                                            instances)
216
-        resp = self.post(url, data=jsonutils.dumps(data))
217
-        self.wait_ambari_request(self.req_id(resp), cluster_name)
218
-
219
-    def remove_process_from_host(self, cluster_name, instance, process):
220
-        url = self._base_url + "/clusters/%s/hosts/%s/host_components/%s" % (
221
-            cluster_name, instance.fqdn(), process)
222
-        resp = self.delete(url)
223
-
224
-        return self.check_response(resp)
225
-
226
-    def stop_process_on_host(self, cluster_name, instance, process):
227
-        url = self._base_url + "/clusters/%s/hosts/%s/host_components/%s" % (
228
-            cluster_name, instance.fqdn(), process)
229
-        check_installed_resp = self.check_response(self.get(url))
230
-
231
-        if check_installed_resp["HostRoles"]["state"] != "INSTALLED":
232
-            data = {"HostRoles": {"state": "INSTALLED"},
233
-                    "RequestInfo": {"context": "Stopping %s" % process}}
234
-            resp = self.put(url, data=jsonutils.dumps(data))
235
-
236
-            self.wait_ambari_request(self.req_id(resp), cluster_name)
237
-
238
-    def restart_namenode(self, cluster_name, instance):
239
-        url = self._base_url + "/clusters/%s/requests" % cluster_name
240
-        data = r_helper.build_namenode_restart_request(cluster_name, instance)
241
-        resp = self.post(url, data=jsonutils.dumps(data))
242
-        self.wait_ambari_request(self.req_id(resp), cluster_name)
243
-
244
-    def restart_resourcemanager(self, cluster_name, instance):
245
-        url = self._base_url + "/clusters/%s/requests" % cluster_name
246
-        data = r_helper.build_resourcemanager_restart_request(cluster_name,
247
-                                                              instance)
248
-        resp = self.post(url, data=jsonutils.dumps(data))
249
-        self.wait_ambari_request(self.req_id(resp), cluster_name)
250
-
251
-    def restart_service(self, cluster_name, service_name):
252
-        url = self._base_url + "/clusters/{}/services/{}".format(
253
-            cluster_name, service_name)
254
-
255
-        data = r_helper.build_stop_service_request(service_name)
256
-        resp = self.put(url, data=jsonutils.dumps(data))
257
-        self.wait_ambari_request(self.req_id(resp), cluster_name)
258
-
259
-        data = r_helper.build_start_service_request(service_name)
260
-        resp = self.put(url, data=jsonutils.dumps(data))
261
-        self.wait_ambari_request(self.req_id(resp), cluster_name)
262
-
263
-    def delete_host(self, cluster_name, instance):
264
-        url = self._base_url + "/clusters/%s/hosts/%s" % (cluster_name,
265
-                                                          instance.fqdn())
266
-        resp = self.delete(url)
267
-        return self.check_response(resp)
268
-
269
-    def check_request_status(self, cluster_name, req_id):
270
-        url = self._base_url + "/clusters/%s/requests/%d" % (cluster_name,
271
-                                                             req_id)
272
-        resp = self.get(url)
273
-        return self.check_response(resp).get("Requests")
274
-
275
-    def list_host_processes(self, cluster_name, instance):
276
-        url = self._base_url + "/clusters/%s/hosts/%s" % (
277
-            cluster_name, instance.fqdn())
278
-        resp = self.get(url)
279
-        body = jsonutils.loads(resp.text)
280
-
281
-        procs = [p["HostRoles"]["component_name"]
282
-                 for p in body["host_components"]]
283
-        return procs
284
-
285
-    def set_up_mirror(self, stack_version, os_type, repo_id, repo_url):
286
-        url = self._base_url + (
287
-            "/stacks/HDP/versions/%s/operating_systems/%s/repositories/%s") % (
288
-                stack_version, os_type, repo_id)
289
-        data = {
290
-            "Repositories": {
291
-                "base_url": repo_url,
292
-                "verify_base_url": True
293
-            }
294
-        }
295
-        resp = self.put(url, data=jsonutils.dumps(data))
296
-        self.check_response(resp)
297
-
298
-    def set_rack_info_for_instance(self, cluster_name, instance, rack_name):
299
-        url = self._base_url + "/clusters/%s/hosts/%s" % (
300
-            cluster_name, instance.fqdn())
301
-        data = {
302
-            "Hosts": {
303
-                "rack_info": rack_name
304
-            }
305
-        }
306
-        resp = self.put(url, data=jsonutils.dumps(data))
307
-        self.check_response(resp)
308
-
309
-    def get_request_info(self, cluster_name, request_id):
310
-        url = self._base_url + ("/clusters/%s/requests/%s" %
311
-                                (cluster_name, request_id))
312
-        resp = self.check_response(self.get(url))
313
-        return resp.get('Requests')
314
-
315
-    def wait_ambari_requests(self, requests, cluster_name):
316
-        requests = set(requests)
317
-        failed = []
318
-        context.sleep(20)
319
-        while len(requests) > 0:
320
-            completed, not_completed = set(), set()
321
-            for req_id in requests:
322
-                request = self.get_request_info(cluster_name, req_id)
323
-                status = request.get("request_status")
324
-                if status == 'COMPLETED':
325
-                    completed.add(req_id)
326
-                elif status in ['IN_PROGRESS', 'PENDING']:
327
-                    not_completed.add(req_id)
328
-                else:
329
-                    failed.append(request)
330
-            if failed:
331
-                msg = _("Some Ambari request(s) "
332
-                        "not in COMPLETED state: %(description)s.")
333
-                descrs = []
334
-                for req in failed:
335
-                    descr = _(
336
-                        "request %(id)d: %(name)s - in status %(status)s")
337
-                    descrs.append(descr %
338
-                                  {'id': req.get("id"),
339
-                                   'name': req.get("request_context"),
340
-                                   'status': req.get("request_status")})
341
-                raise p_exc.HadoopProvisionError(msg % {'description': descrs})
342
-            requests = not_completed
343
-            context.sleep(5)
344
-            LOG.debug("Waiting for %d ambari request(s) to be completed",
345
-                      len(not_completed))
346
-        LOG.debug("All ambari requests have been completed")
347
-
348
-    def wait_ambari_request(self, request_id, cluster_name):
349
-        context.sleep(20)
350
-        while True:
351
-            status = self.check_request_status(cluster_name, request_id)
352
-            LOG.debug("Task %(context)s in %(status)s state. "
353
-                      "Completed %(percent).1f%%",
354
-                      {'context': status["request_context"],
355
-                       'status': status["request_status"],
356
-                       'percent': status["progress_percent"]})
357
-            if status["request_status"] == "COMPLETED":
358
-                return
359
-            if status["request_status"] in ["IN_PROGRESS", "PENDING"]:
360
-                context.sleep(5)
361
-            else:
362
-                raise p_exc.HadoopProvisionError(
363
-                    _("Ambari request in %s state") % status["request_status"])

sahara/plugins/ambari/common.py (+0, -155)

@@ -1,155 +0,0 @@
1
-# Copyright (c) 2015 Mirantis Inc.
2
-#
3
-# Licensed under the Apache License, Version 2.0 (the "License");
4
-# you may not use this file except in compliance with the License.
5
-# You may obtain a copy of the License at
6
-#
7
-#    http://www.apache.org/licenses/LICENSE-2.0
8
-#
9
-# Unless required by applicable law or agreed to in writing, software
10
-# distributed under the License is distributed on an "AS IS" BASIS,
11
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
-# implied.
13
-# See the License for the specific language governing permissions and
14
-# limitations under the License.
15
-
16
-from sahara.plugins import kerberos
17
-
18
-# define service names
19
-
20
-AMBARI_SERVICE = "Ambari"
21
-FALCON_SERVICE = "Falcon"
22
-FLUME_SERVICE = "Flume"
23
-HBASE_SERVICE = "HBase"
24
-HDFS_SERVICE = "HDFS"
25
-HIVE_SERVICE = "Hive"
26
-KAFKA_SERVICE = "Kafka"
27
-KNOX_SERVICE = "Knox"
28
-MAPREDUCE2_SERVICE = "MAPREDUCE2"
29
-OOZIE_SERVICE = "Oozie"
30
-RANGER_SERVICE = "Ranger"
31
-SLIDER_SERVICE = "Slider"
32
-SPARK_SERVICE = "Spark"
33
-SQOOP_SERVICE = "Sqoop"
34
-STORM_SERVICE = "Storm"
35
-YARN_SERVICE = "YARN"
36
-ZOOKEEPER_SERVICE = "ZooKeeper"
37
-
38
-# define process names
39
-
40
-AMBARI_SERVER = "Ambari"
41
-APP_TIMELINE_SERVER = "YARN Timeline Server"
42
-DATANODE = "DataNode"
43
-DRPC_SERVER = "DRPC Server"
44
-FALCON_SERVER = "Falcon Server"
45
-FLUME_HANDLER = "Flume"
46
-HBASE_MASTER = "HBase Master"
47
-HBASE_REGIONSERVER = "HBase RegionServer"
48
-HISTORYSERVER = "MapReduce History Server"
49
-HIVE_METASTORE = "Hive Metastore"
50
-HIVE_SERVER = "HiveServer"
51
-KAFKA_BROKER = "Kafka Broker"
52
-KNOX_GATEWAY = "Knox Gateway"
53
-NAMENODE = "NameNode"
54
-NIMBUS = "Nimbus"
55
-NODEMANAGER = "NodeManager"
56
-OOZIE_SERVER = "Oozie"
57
-RANGER_ADMIN = "Ranger Admin"
58
-RANGER_USERSYNC = "Ranger Usersync"
59
-RESOURCEMANAGER = "ResourceManager"
60
-SECONDARY_NAMENODE = "SecondaryNameNode"
61
-SLIDER = "Slider"
62
-SPARK_JOBHISTORYSERVER = "Spark History Server"
63
-SQOOP = "Sqoop"
64
-STORM_UI_SERVER = "Storm UI Server"
65
-SUPERVISOR = "Supervisor"
66
-ZOOKEEPER_SERVER = "ZooKeeper"
67
-JOURNAL_NODE = "JournalNode"
68
-
69
-
70
-PROC_MAP = {
71
-    AMBARI_SERVER: ["METRICS_COLLECTOR"],
72
-    APP_TIMELINE_SERVER: ["APP_TIMELINE_SERVER"],
73
-    DATANODE: ["DATANODE"],
74
-    DRPC_SERVER: ["DRPC_SERVER"],
75
-    FALCON_SERVER: ["FALCON_SERVER"],
76
-    HBASE_MASTER: ["HBASE_MASTER"],
77
-    HBASE_REGIONSERVER: ["HBASE_REGIONSERVER"],
78
-    HISTORYSERVER: ["HISTORYSERVER"],
79
-    HIVE_METASTORE: ["HIVE_METASTORE"],
80
-    HIVE_SERVER: ["HIVE_SERVER", "MYSQL_SERVER", "WEBHCAT_SERVER"],
81
-    KAFKA_BROKER: ["KAFKA_BROKER"],
82
-    KNOX_GATEWAY: ["KNOX_GATEWAY"],
83
-    NAMENODE: ["NAMENODE"],
84
-    NIMBUS: ["NIMBUS"],
85
-    NODEMANAGER: ["NODEMANAGER"],
86
-    OOZIE_SERVER: ["OOZIE_SERVER", "PIG"],
87
-    RANGER_ADMIN: ["RANGER_ADMIN"],
88
-    RANGER_USERSYNC: ["RANGER_USERSYNC"],
89
-    RESOURCEMANAGER: ["RESOURCEMANAGER"],
90
-    SECONDARY_NAMENODE: ["SECONDARY_NAMENODE"],
91
-    SLIDER: ["SLIDER"],
92
-    SPARK_JOBHISTORYSERVER: ["SPARK_JOBHISTORYSERVER"],
93
-    SQOOP: ["SQOOP"],
94
-    STORM_UI_SERVER: ["STORM_UI_SERVER"],
95
-    SUPERVISOR: ["SUPERVISOR"],
96
-    ZOOKEEPER_SERVER: ["ZOOKEEPER_SERVER"],
97
-    JOURNAL_NODE: ["JOURNALNODE"]
98
-}
99
-
100
-CLIENT_MAP = {
101
-    APP_TIMELINE_SERVER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"],
102
-    DATANODE: ["HDFS_CLIENT"],
103
-    FALCON_SERVER: ["FALCON_CLIENT"],
104
-    FLUME_HANDLER: ["FLUME_HANDLER"],
105
-    HBASE_MASTER: ["HBASE_CLIENT"],
106
-    HBASE_REGIONSERVER: ["HBASE_CLIENT"],
107
-    HISTORYSERVER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"],
108
-    HIVE_METASTORE: ["HIVE_CLIENT"],
109
-    HIVE_SERVER: ["HIVE_CLIENT"],
110
-    NAMENODE: ["HDFS_CLIENT"],
111
-    NODEMANAGER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"],
112
-    OOZIE_SERVER: ["OOZIE_CLIENT", "TEZ_CLIENT"],
113
-    RESOURCEMANAGER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"],
114
-    SECONDARY_NAMENODE: ["HDFS_CLIENT"],
115
-    SPARK_JOBHISTORYSERVER: ["SPARK_CLIENT"],
116
-    ZOOKEEPER_SERVER: ["ZOOKEEPER_CLIENT"]
117
-}
118
-
119
-KERBEROS_CLIENT = 'KERBEROS_CLIENT'
120
-ALL_LIST = ["METRICS_MONITOR"]
121
-
122
-# types of HA
123
-NAMENODE_HA = "NameNode HA"
124
-RESOURCEMANAGER_HA = "ResourceManager HA"
125
-HBASE_REGIONSERVER_HA = "HBase RegionServer HA"
126
-
127
-
128
-def get_ambari_proc_list(node_group):
129
-    procs = []
130
-    for sp in node_group.node_processes:
131
-        procs.extend(PROC_MAP.get(sp, []))
132
-    return procs
133
-
134
-
135
-def get_clients(cluster):
136
-    procs = []
137
-    for ng in cluster.node_groups:
138
-        procs.extend(ng.node_processes)
139
-
140
-    clients = []
141
-    for proc in procs:
142
-        clients.extend(CLIENT_MAP.get(proc, []))
143
-    clients = list(set(clients))
144
-    clients.extend(ALL_LIST)
145
-    if kerberos.is_kerberos_security_enabled(cluster):
146
-        clients.append(KERBEROS_CLIENT)
147
-    return clients
148
-
149
-
150
-def instances_have_process(instances, process):
151
-    for i in instances:
152
-        if process in i.node_group.node_processes:
153
-            return True
154
-
155
-    return False

sahara/plugins/ambari/configs.py (+0, -334)

@@ -1,334 +0,0 @@
1
-# Copyright (c) 2015 Mirantis Inc.
2
-#
3
-# Licensed under the Apache License, Version 2.0 (the "License");
4
-# you may not use this file except in compliance with the License.
5
-# You may obtain a copy of the License at
6
-#
7
-#    http://www.apache.org/licenses/LICENSE-2.0
8
-#
9
-# Unless required by applicable law or agreed to in writing, software
10
-# distributed under the License is distributed on an "AS IS" BASIS,
11
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
-# implied.
13
-# See the License for the specific language governing permissions and
14
-# limitations under the License.
15
-
16
-
17
-from oslo_serialization import jsonutils
18
-import six
19
-
20
-from sahara.i18n import _
21
-from sahara.plugins.ambari import common
22
-from sahara.plugins import provisioning
23
-from sahara.plugins import utils
24
-from sahara.swift import swift_helper
25
-from sahara.utils import files
26
-
27
-
28
-CONFIGS = {}
29
-OBJ_CONFIGS = {}
30
-CFG_PROCESS_MAP = {
31
-    "admin-properties": common.RANGER_SERVICE,
32
-    "ams-env": common.AMBARI_SERVICE,
33
-    "ams-hbase-env": common.AMBARI_SERVICE,
34
-    "ams-hbase-policy": common.AMBARI_SERVICE,
35
-    "ams-hbase-security-site": common.AMBARI_SERVICE,
36
-    "ams-hbase-site": common.AMBARI_SERVICE,
37
-    "ams-site": common.AMBARI_SERVICE,
38
-    "capacity-scheduler": common.YARN_SERVICE,
39
-    "cluster-env": "general",
40
-    "core-site": common.HDFS_SERVICE,
41
-    "falcon-env": common.FALCON_SERVICE,
42
-    "falcon-runtime.properties": common.FALCON_SERVICE,
43
-    "falcon-startup.properties": common.FALCON_SERVICE,
44
-    "flume-env": common.FLUME_SERVICE,
45
-    "gateway-site": common.KNOX_SERVICE,
46
-    "hadoop-env": common.HDFS_SERVICE,
47
-    "hadoop-policy": common.HDFS_SERVICE,
48
-    "hbase-env": common.HBASE_SERVICE,
49
-    "hbase-policy": common.HBASE_SERVICE,
50
-    "hbase-site": common.HBASE_SERVICE,
51
-    "hdfs-site": common.HDFS_SERVICE,
52
-    "hive-env": common.HIVE_SERVICE,
53
-    "hive-site": common.HIVE_SERVICE,
54
-    "hiveserver2-site": common.HIVE_SERVICE,
55
-    "kafka-broker": common.KAFKA_SERVICE,
56
-    "kafka-env": common.KAFKA_SERVICE,
57
-    "knox-env": common.KNOX_SERVICE,
58
-    "mapred-env": common.YARN_SERVICE,
59
-    "mapred-site": common.YARN_SERVICE,
60
-    "oozie-env": common.OOZIE_SERVICE,
61
-    "oozie-site": common.OOZIE_SERVICE,
62
-    "ranger-env": common.RANGER_SERVICE,
63
-    "ranger-hbase-plugin-properties": common.HBASE_SERVICE,
64
-    "ranger-hdfs-plugin-properties": common.HDFS_SERVICE,
65
-    "ranger-hive-plugin-properties": common.HIVE_SERVICE,
66
-    "ranger-knox-plugin-properties": common.KNOX_SERVICE,
67
-    "ranger-site": common.RANGER_SERVICE,
68
-    "ranger-storm-plugin-properties": common.STORM_SERVICE,
69
-    "spark-defaults": common.SPARK_SERVICE,
70
-    "spark-env": common.SPARK_SERVICE,
71
-    "sqoop-env": common.SQOOP_SERVICE,
72
-    "storm-env": common.STORM_SERVICE,
73
-    "storm-site": common.STORM_SERVICE,
74
-    "tez-site": common.OOZIE_SERVICE,
75
-    "usersync-properties": common.RANGER_SERVICE,
76
-    "yarn-env": common.YARN_SERVICE,
77
-    "yarn-site": common.YARN_SERVICE,
78
-    "zoo.cfg": common.ZOOKEEPER_SERVICE,
79
-    "zookeeper-env": common.ZOOKEEPER_SERVICE
80
-}
81
-
82
-
83
-SERVICES_TO_CONFIGS_MAP = None
84
-
85
-
86
-def get_service_to_configs_map():
87
-    global SERVICES_TO_CONFIGS_MAP
88
-    if SERVICES_TO_CONFIGS_MAP:
89
-        return SERVICES_TO_CONFIGS_MAP
90
-    data = {}
91
-    for (key, item) in six.iteritems(CFG_PROCESS_MAP):
92
-        if item not in data:
93
-            data[item] = []
94
-        data[item].append(key)
95
-    SERVICES_TO_CONFIGS_MAP = data
96
-    return SERVICES_TO_CONFIGS_MAP
97
-
98
-
99
-ng_confs = [
100
-    "dfs.datanode.data.dir",
101
-    "dtnode_heapsize",
102
-    "mapreduce.map.java.opts",
103
-    "mapreduce.map.memory.mb",
104
-    "mapreduce.reduce.java.opts",
105
-    "mapreduce.reduce.memory.mb",
106
-    "mapreduce.task.io.sort.mb",
107
-    "nodemanager_heapsize",
108
-    "yarn.app.mapreduce.am.command-opts",
109
-    "yarn.app.mapreduce.am.resource.mb",
110
-    "yarn.nodemanager.resource.cpu-vcores",
111
-    "yarn.nodemanager.resource.memory-mb",
112
-    "yarn.scheduler.maximum-allocation-mb",
113
-    "yarn.scheduler.minimum-allocation-mb"
114
-]
115
-
116
-
117
-use_base_repos_cfg = provisioning.Config(
118
-    "Enable external repos on instances", 'general', 'cluster', priority=1,
119
-    default_value=True, config_type="bool")
120
-hdp_repo_cfg = provisioning.Config(
121
-    "HDP repo URL", "general", "cluster", priority=1, default_value="")
122
-hdp_utils_repo_cfg = provisioning.Config(
123
-    "HDP-UTILS repo URL", "general", "cluster", priority=1, default_value="")
124
-autoconfigs_strategy = provisioning.Config(
125
-    "Auto-configuration strategy", 'general', 'cluster', priority=1,
126
-    config_type='dropdown',
127
-    default_value='NEVER_APPLY',
128
-    config_values=[(v, v) for v in [
129
-        'NEVER_APPLY', 'ALWAYS_APPLY', 'ONLY_STACK_DEFAULTS_APPLY',
130
-    ]],
131
-)
132
-ambari_pkg_install_timeout = provisioning.Config(
133
-    "Ambari Agent Package Install timeout", "general", "cluster",
134
-    priority=1, default_value="1800")
135
-
136
-
137
-def _get_service_name(service):
138
-    return CFG_PROCESS_MAP.get(service, service)
139
-
140
-
141
-def _get_config_group(group, param, plugin_version):
142
-    if not CONFIGS or plugin_version not in CONFIGS:
143
-        load_configs(plugin_version)
144
-    for section, process in six.iteritems(CFG_PROCESS_MAP):
145
-        if process == group and param in CONFIGS[plugin_version][section]:
146
-            return section
147
-
148
-
149
-def _get_param_scope(param):
150
-    if param in ng_confs:
151
-        return "node"
152
-    else:
153
-        return "cluster"
154
-
155
-
156
-def _get_ha_params():
157
-    enable_namenode_ha = provisioning.Config(
158
-        name=common.NAMENODE_HA,
159
-        applicable_target="general",
160
-        scope="cluster",
161
-        config_type="bool",
162
-        default_value=False,
163
-        is_optional=True,
164
-        description=_("Enable NameNode HA"),
165
-        priority=1)
166
-
167
-    enable_resourcemanager_ha = provisioning.Config(
168
-        name=common.RESOURCEMANAGER_HA,
169
-        applicable_target="general",
170
-        scope="cluster",
171
-        config_type="bool",
172
-        default_value=False,
173
-        is_optional=True,
174
-        description=_("Enable ResourceManager HA"),
175
-        priority=1)
176
-
177
-    enable_regionserver_ha = provisioning.Config(
178
-        name=common.HBASE_REGIONSERVER_HA,
179
-        applicable_target="general",
180
-        scope="cluster",
181
-        config_type="bool",
182
-        default_value=False,
183
-        is_optional=True,
184
-        description=_("Enable HBase RegionServer HA"),
185
-        priority=1)
186
-
187
-    return [enable_namenode_ha,
188
-            enable_resourcemanager_ha,
189
-            enable_regionserver_ha]
190
-
191
-
192
-def load_configs(version):
193
-    if OBJ_CONFIGS.get(version):
194
-        return OBJ_CONFIGS[version]
195
-    cfg_path = "plugins/ambari/resources/configs-%s.json" % version
196
-    vanilla_cfg = jsonutils.loads(files.get_file_text(cfg_path))
197
-    CONFIGS[version] = vanilla_cfg
198
-    sahara_cfg = [hdp_repo_cfg, hdp_utils_repo_cfg, use_base_repos_cfg,
199
-                  autoconfigs_strategy, ambari_pkg_install_timeout]
200
-    for service, confs in vanilla_cfg.items():
201
-        for k, v in confs.items():
202
-            sahara_cfg.append(provisioning.Config(
203
-                k, _get_service_name(service), _get_param_scope(k),
204
-                default_value=v))
205
-
206
-    sahara_cfg.extend(_get_ha_params())
207
-    OBJ_CONFIGS[version] = sahara_cfg
208
-    return sahara_cfg
209
-
210
-
211
-def _get_config_value(cluster, key):
212
-    return cluster.cluster_configs.get("general", {}).get(key.name,
213
-                                                          key.default_value)
214
-
215
-
216
-def use_base_repos_needed(cluster):
217
-    return _get_config_value(cluster, use_base_repos_cfg)
218
-
219
-
220
-def get_hdp_repo_url(cluster):
221
-    return _get_config_value(cluster, hdp_repo_cfg)
222
-
223
-
224
-def get_hdp_utils_repo_url(cluster):
225
-    return _get_config_value(cluster, hdp_utils_repo_cfg)
226
-
227
-
228
-def get_auto_configuration_strategy(cluster):
229
-    return _get_config_value(cluster, autoconfigs_strategy)
230
-
231
-
232
-def get_ambari_pkg_install_timeout(cluster):
233
-    return _get_config_value(cluster, ambari_pkg_install_timeout)
234
-
235
-
236
-def _serialize_ambari_configs(configs):
237
-    return list(map(lambda x: {x: configs[x]}, configs))
238
-
239
-
240
-def _create_ambari_configs(sahara_configs, plugin_version):
241
-    configs = {}
242
-    for service, params in six.iteritems(sahara_configs):
243
-        if service == "general" or service == "Kerberos":
244
-            # General and Kerberos configs are designed for Sahara, not for
245
-            # the plugin
246
-            continue
247
-        for k, v in six.iteritems(params):
248
-            group = _get_config_group(service, k, plugin_version)
249
-            configs.setdefault(group, {})
250
-            configs[group].update({k: v})
251
-    return configs
252
-
253
-
254
-def _make_paths(dirs, suffix):
255
-    return ",".join([d + suffix for d in dirs])
256
-
257
-
258
-def get_instance_params_mapping(inst):
259
-    configs = _create_ambari_configs(inst.node_group.node_configs,
260
-                                     inst.node_group.cluster.hadoop_version)
261
-    storage_paths = inst.storage_paths()
262
-    configs.setdefault("hdfs-site", {})
263
-    configs["hdfs-site"]["dfs.datanode.data.dir"] = _make_paths(
264
-        storage_paths, "/hdfs/data")
265
-    configs["hdfs-site"]["dfs.journalnode.edits.dir"] = _make_paths(
266
-        storage_paths, "/hdfs/journalnode")
267
-    configs["hdfs-site"]["dfs.namenode.checkpoint.dir"] = _make_paths(
268
-        storage_paths, "/hdfs/namesecondary")
269
-    configs["hdfs-site"]["dfs.namenode.name.dir"] = _make_paths(
270
-        storage_paths, "/hdfs/namenode")
271
-    configs.setdefault("yarn-site", {})
272
-    configs["yarn-site"]["yarn.nodemanager.local-dirs"] = _make_paths(
273
-        storage_paths, "/yarn/local")
274
-    configs["yarn-site"]["yarn.nodemanager.log-dirs"] = _make_paths(
275
-        storage_paths, "/yarn/log")
276
-    configs["yarn-site"][
277
-        "yarn.timeline-service.leveldb-timeline-store.path"] = _make_paths(
278
-            storage_paths, "/yarn/timeline")
279
-    configs.setdefault("oozie-site", {})
280
-    configs["oozie-site"][
281
-        "oozie.service.AuthorizationService.security.enabled"] = "false"
282
-    return configs
283
-
284
-
285
-def get_instance_params(inst):
286
-    return _serialize_ambari_configs(get_instance_params_mapping(inst))
287
-
288
-
289
-def get_cluster_params(cluster):
290
-    configs = _create_ambari_configs(cluster.cluster_configs,
291
-                                     cluster.hadoop_version)
292
-    swift_configs = {x["name"]: x["value"]
293
-                     for x in swift_helper.get_swift_configs()}
294
-    configs.setdefault("core-site", {})
295
-    configs["core-site"].update(swift_configs)
296
-    if utils.get_instance(cluster, common.RANGER_ADMIN):
297
-        configs.setdefault("admin-properties", {})
298
-        configs["admin-properties"]["db_root_password"] = (
299
-            cluster.extra["ranger_db_password"])
300
-    return _serialize_ambari_configs(configs)
301
-
302
-
303
-def get_config_group(instance):
304
-    params = get_instance_params_mapping(instance)
305
-    groups = []
306
-    for (service, targets) in six.iteritems(get_service_to_configs_map()):
307
-        current_group = {
308
-            'cluster_name': instance.cluster.name,
309
-            'group_name': "%s:%s" % (
310
-                instance.cluster.name, instance.instance_name),
311
-            'tag': service,
312
-            'description': "Config group for scaled "
313
-                           "node %s" % instance.instance_name,
314
-            'hosts': [
315
-                {
316
-                    'host_name': instance.fqdn()
317
-                }
318
-            ],
319
-            'desired_configs': []
320
-        }
321
-        at_least_one_added = False
322
-        for target in targets:
323
-            configs = params.get(target, {})
324
-            if configs:
325
-                current_group['desired_configs'].append({
326
-                    'type': target,
327
-                    'properties': configs,
328
-                    'tag': instance.instance_name
329
-                })
330
-                at_least_one_added = True
331
-        if at_least_one_added:
332
-            # Config Group without overridden data is not interesting
333
-            groups.append({'ConfigGroup': current_group})
334
-    return groups

sahara/plugins/ambari/deploy.py (+0, -719)

@@ -1,719 +0,0 @@
1
-# Copyright (c) 2015 Mirantis Inc.
2
-#
3
-# Licensed under the Apache License, Version 2.0 (the "License");
4
-# you may not use this file except in compliance with the License.
5
-# You may obtain a copy of the License at
6
-#
7
-#    http://www.apache.org/licenses/LICENSE-2.0
8
-#
9
-# Unless required by applicable law or agreed to in writing, software
10
-# distributed under the License is distributed on an "AS IS" BASIS,
11
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
-# implied.
13
-# See the License for the specific language governing permissions and
14
-# limitations under the License.
15
-
16
-
17
-import functools
18
-import telnetlib  # nosec
19
-
20
-from oslo_log import log as logging
21
-from oslo_utils import uuidutils
22
-
23
-from sahara import conductor
24
-from sahara import context
25
-from sahara.i18n import _
26
-from sahara.plugins.ambari import client as ambari_client
27
-from sahara.plugins.ambari import common as p_common
28
-from sahara.plugins.ambari import configs
29
-from sahara.plugins.ambari import ha_helper
30
-from sahara.plugins import kerberos
31
-from sahara.plugins import utils as plugin_utils
32
-from sahara.topology import topology_helper as t_helper
33
-from sahara.utils import cluster_progress_ops as cpo
34
-from sahara.utils import poll_utils
35
-
36
-
37
-LOG = logging.getLogger(__name__)
38
-conductor = conductor.API
39
-
40
-
41
-repo_id_map = {
42
-    "2.3": {
43
-        "HDP": "HDP-2.3",
44
-        "HDP-UTILS": "HDP-UTILS-1.1.0.20"
45
-    },
46
-    "2.4": {
47
-        "HDP": "HDP-2.4",
48
-        "HDP-UTILS": "HDP-UTILS-1.1.0.20"
49
-    },
50
-    "2.5": {
51
-        "HDP": "HDP-2.5",
52
-        "HDP-UTILS": "HDP-UTILS-1.1.0.21"
53
-    },
54
-    "2.6": {
55
-        "HDP": "HDP-2.6",
56
-        "HDP-UTILS": "HDP-UTILS-1.1.0.22"
57
-    },
58
-}
59
-
60
-os_type_map = {
61
-    "centos6": "redhat6",
62
-    "redhat6": "redhat6",
63
-    "centos7": "redhat7",
64
-    "redhat7": "redhat7",
65
-    "ubuntu14": "ubuntu14"
66
-}
67
-
68
-
69
-@cpo.event_wrapper(True, step=_("Set up Ambari management console"),
70
-                   param=('cluster', 0))
71
-def setup_ambari(cluster):
72
-    LOG.debug("Set up Ambari management console")
73
-    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
74
-    ambari_settings = ("agent.package.install.task.timeout=%s"
75
-                       % configs.get_ambari_pkg_install_timeout(cluster))
76
-    with ambari.remote() as r:
77
-        sudo = functools.partial(r.execute_command, run_as_root=True)
78
-        sudo("rngd -r /dev/urandom -W 4096")
79
-        r.replace_remote_line("/etc/ambari-server/conf/ambari.properties",
80
-                              "agent.package.install.task.timeout=",
81
-                              ambari_settings)
82
-        sudo("ambari-server setup -s -j"
83
-             " `cut -f2 -d \"=\" /etc/profile.d/99-java.sh`", timeout=1800)
84
-        # the following change must be after ambari-setup, or it would be
85
-        # overwritten (probably because it's not part of the base set of
86
-        # keywords/values handled by ambari-setup).
87
-        r.append_to_file("/etc/ambari-server/conf/ambari.properties",
88
-                         "server.startup.web.timeout=180", run_as_root=True)
89
-        redirect_file = "/tmp/%s" % uuidutils.generate_uuid()
90
-        sudo("service ambari-server start >{rfile} && "
91
-             "cat {rfile} && rm {rfile}".format(rfile=redirect_file))
92
-    LOG.debug("Ambari management console installed")
93
-
94
-
95
-def setup_agents(cluster, instances=None):
96
-    LOG.debug("Set up Ambari agents")
97
-    manager_address = plugin_utils.get_instance(
98
-        cluster, p_common.AMBARI_SERVER).fqdn()
99
-    if not instances:
100
-        instances = plugin_utils.get_instances(cluster)
101
-    _setup_agents(instances, manager_address)
102
-
103
-
104
-def _setup_agents(instances, manager_address):
105
-    cpo.add_provisioning_step(
106
-        instances[0].cluster.id, _("Set up Ambari agents"), len(instances))
107
-    with context.ThreadGroup() as tg:
108
-        for inst in instances:
109
-            tg.spawn("hwx-agent-setup-%s" % inst.id,
110
-                     _setup_agent, inst, manager_address)
111
-    LOG.debug("Ambari agents have been installed")
112
-
113
-
114
-def _disable_repos_on_inst(instance):
115
-    with context.set_current_instance_id(instance_id=instance.instance_id):
116
-        with instance.remote() as r:
117
-            sudo = functools.partial(r.execute_command, run_as_root=True)
118
-            if r.get_os_distrib() == "ubuntu":
119
-                sudo("mv /etc/apt/sources.list /etc/apt/sources.list.tmp")
120
-            else:
121
-                tmp_name = "/tmp/yum.repos.d-%s" % instance.instance_id[:8]
122
-                # moving to other folder
123
-                sudo("mv /etc/yum.repos.d/ {fold_name}".format(
124
-                    fold_name=tmp_name))
125
-                sudo("mkdir /etc/yum.repos.d")
126
-
127
-
128
-def disable_repos(cluster):
129
-    if configs.use_base_repos_needed(cluster):
130
-        LOG.debug("Using base repos")
131
-        return
132
-    instances = plugin_utils.get_instances(cluster)
133
-    with context.ThreadGroup() as tg:
134
-        for inst in instances:
135
-            tg.spawn("disable-repos-%s" % inst.instance_name,
136
-                     _disable_repos_on_inst, inst)
137
-
138
-
139
-@cpo.event_wrapper(True)
140
-def _setup_agent(instance, ambari_address):
141
-    with instance.remote() as r:
142
-        sudo = functools.partial(r.execute_command, run_as_root=True)
143
-        r.replace_remote_string("/etc/ambari-agent/conf/ambari-agent.ini",
144
-                                "localhost", ambari_address)
145
-        try:
146
-            sudo("ambari-agent start")
147
-        except Exception as e:
148
-            # workaround for ubuntu, because on ubuntu the ambari agent
149
-            # starts automatically after image boot
150
-            msg = _("Restart of ambari-agent is needed for host {}, "
151
-                    "reason: {}").format(instance.fqdn(), e)
152
-            LOG.exception(msg)
153
-            sudo("ambari-agent restart")
154
-        # for correct installing packages
155
-        r.update_repository()
156
-
157
-
158
-@cpo.event_wrapper(True, step=_("Wait Ambari accessible"),
159
-                   param=('cluster', 0))
160
-def wait_ambari_accessible(cluster):
161
-    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
162
-    kwargs = {"host": ambari.management_ip, "port": 8080}
163
-    poll_utils.poll(_check_port_accessible, kwargs=kwargs, timeout=300)
164
-
165
-
166
-def _check_port_accessible(host, port):
167
-    try:
168
-        conn = telnetlib.Telnet(host, port)
169
-        conn.close()
170
-        return True
171
-    except IOError:
172
-        return False
173
-
174
-
175
-def resolve_package_conflicts(cluster, instances=None):
176
-    if not instances:
177
-        instances = plugin_utils.get_instances(cluster)
178
-    for instance in instances:
179
-        with instance.remote() as r:
180
-            if r.get_os_distrib() == 'ubuntu':
181
-                try:
182
-                    r.execute_command(
183
-                        "apt-get remove -y libmysql-java", run_as_root=True)
184
-                except Exception:
185
-                    LOG.warning("Can't remove libmysql-java, "
186
-                                "it's probably not installed")
187
-
188
-
189
-def _prepare_ranger(cluster):
190
-    ranger = plugin_utils.get_instance(cluster, p_common.RANGER_ADMIN)
191
-    if not ranger:
192
-        return
193
-    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
194
-    with ambari.remote() as r:
195
-        sudo = functools.partial(r.execute_command, run_as_root=True)
196
-        sudo("ambari-server setup --jdbc-db=mysql "
197
-             "--jdbc-driver=/usr/share/java/mysql-connector-java.jar")
198
-    init_db_template = (
199
-        "create user 'root'@'%' identified by '{password}';\n"
200
-        "set password for 'root'@'localhost' = password('{password}');")
201
-    password = uuidutils.generate_uuid()
202
-    extra = cluster.extra.to_dict() if cluster.extra else {}
203
-    extra["ranger_db_password"] = password
204
-    ctx = context.ctx()
205
-    conductor.cluster_update(ctx, cluster, {"extra": extra})
206
-    with ranger.remote() as r:
207
-        sudo = functools.partial(r.execute_command, run_as_root=True)
208
-        # TODO(sreshetnyak): add ubuntu support
209
-        sudo("yum install -y mysql-server")
210
-        sudo("service mysqld start")
211
-        r.write_file_to("/tmp/init.sql",
212
-                        init_db_template.format(password=password))
213
-        sudo("mysql < /tmp/init.sql")
214
-        sudo("rm /tmp/init.sql")
215
-
216
-
217
-@cpo.event_wrapper(True, step=_("Prepare Hive"), param=('cluster', 0))
218
-def prepare_hive(cluster):
219
-    hive = plugin_utils.get_instance(cluster, p_common.HIVE_SERVER)
220
-    if not hive:
221
-        return
222
-    with hive.remote() as r:
223
-        r.execute_command(
224
-            'sudo su - -c  "hadoop fs -mkdir /user/oozie/conf" hdfs')
225
-        r.execute_command(
226
-            'sudo su - -c  "hadoop fs -copyFromLocal '
227
-            '/etc/hive/conf/hive-site.xml '
228
-            '/user/oozie/conf/hive-site.xml" hdfs')
229
-
230
-
231
-@cpo.event_wrapper(True, step=_("Update default Ambari password"),
232
-                   param=('cluster', 0))
233
-def update_default_ambari_password(cluster):
234
-    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
235
-    new_password = uuidutils.generate_uuid()
236
-    with ambari_client.AmbariClient(ambari) as client:
237
-        client.update_user_password("admin", "admin", new_password)
238
-    extra = cluster.extra.to_dict() if cluster.extra else {}
239
-    extra["ambari_password"] = new_password
240
-    ctx = context.ctx()
241
-    conductor.cluster_update(ctx, cluster, {"extra": extra})
242
-    cluster = conductor.cluster_get(ctx, cluster.id)
243
-
244
-
245
-@cpo.event_wrapper(True, step=_("Wait registration of hosts"),
246
-                   param=('cluster', 0))
247
-def wait_host_registration(cluster, instances):
248
-    with _get_ambari_client(cluster) as client:
249
-        kwargs = {"client": client, "instances": instances}
250
-        poll_utils.poll(_check_host_registration, kwargs=kwargs, timeout=600)
251
-
252
-
253
-def _check_host_registration(client, instances):
254
-    hosts = client.get_registered_hosts()
255
-    registered_host_names = [h["Hosts"]["host_name"] for h in hosts]
256
-    for instance in instances:
257
-        if instance.fqdn() not in registered_host_names:
258
-            return False
259
-    return True
260
-
261
-
262
-@cpo.event_wrapper(True, step=_("Set up HDP repositories"),
263
-                   param=('cluster', 0))
264
-def _set_up_hdp_repos(cluster, hdp_repo, hdp_utils_repo):
265
-    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
266
-    pv = cluster.hadoop_version
267
-    repos = repo_id_map[pv]
268
-    with _get_ambari_client(cluster) as client:
269
-        os_type = os_type_map[client.get_host_info(ambari.fqdn())["os_type"]]
270
-        if hdp_repo:
271
-            client.set_up_mirror(pv, os_type, repos["HDP"], hdp_repo)
272
-        if hdp_utils_repo:
273
-            client.set_up_mirror(pv, os_type, repos["HDP-UTILS"],
274
-                                 hdp_utils_repo)
275
-
276
-
277
-def set_up_hdp_repos(cluster):
278
-    hdp_repo = configs.get_hdp_repo_url(cluster)
279
-    hdp_utils_repo = configs.get_hdp_utils_repo_url(cluster)
280
-    if hdp_repo or hdp_utils_repo:
281
-        _set_up_hdp_repos(cluster, hdp_repo, hdp_utils_repo)
282
-
283
-
284
-def get_kdc_server(cluster):
285
-    return plugin_utils.get_instance(
286
-        cluster, p_common.AMBARI_SERVER)
287
-
288
-
289
-def _prepare_kerberos(cluster, instances=None):
290
-    if instances is None:
291
-        kerberos.deploy_infrastructure(cluster, get_kdc_server(cluster))
292
-        kerberos.prepare_policy_files(cluster)
293
-    else:
294
-        server = None
295
-        if not kerberos.using_existing_kdc(cluster):
296
-            server = get_kdc_server(cluster)
297
-        kerberos.setup_clients(cluster, server)
298
-        kerberos.prepare_policy_files(cluster)
299
-
300
-
301
-def prepare_kerberos(cluster, instances=None):
302
-    if kerberos.is_kerberos_security_enabled(cluster):
303
-        _prepare_kerberos(cluster, instances)
304
-
305
-
306
-def _serialize_mit_kdc_kerberos_env(cluster):
307
-    return {
308
-        'kerberos-env': {
309
-            "realm": kerberos.get_realm_name(cluster),
310
-            "kdc_type": "mit-kdc",
311
-            "kdc_host": kerberos.get_kdc_host(
312
-                cluster, get_kdc_server(cluster)),
313
-            "admin_server_host": kerberos.get_kdc_host(
314
-                cluster, get_kdc_server(cluster)),
315
-            'encryption_types': 'aes256-cts-hmac-sha1-96',
316
-            'ldap_url': '', 'container_dn': '',
317
-        }
318
-    }
319
-
320
-
321
-def _serialize_krb5_configs(cluster):
322
-    return {
323
-        "krb5-conf": {
324
-            "properties_attributes": {},
325
-            "properties": {
326
-                "manage_krb5_conf": "false"
327
-            }
328
-        }
329
-    }
330
-
331
-
332
-def _get_credentials(cluster):
333
-    return [{
334
-        "alias": "kdc.admin.credential",
335
-        "principal": kerberos.get_admin_principal(cluster),
336
-        "key": kerberos.get_server_password(cluster),
337
-        "type": "TEMPORARY"
338
-    }]
339
-
340
-
341
-def get_host_group_components(cluster, processes):
342
-    result = []
343
-    for proc in processes:
344
-        result.append({'name': proc})
345
-    return result
346
-
347
-
348
-@cpo.event_wrapper(True, step=_("Create Ambari blueprint"),
349
-                   param=('cluster', 0))
350
-def create_blueprint(cluster):
351
-    _prepare_ranger(cluster)
352
-    cluster = conductor.cluster_get(context.ctx(), cluster.id)
353
-    host_groups = []
354
-    for ng in cluster.node_groups:
355
-        procs = p_common.get_ambari_proc_list(ng)
356
-        procs.extend(p_common.get_clients(cluster))
357
-        for instance in ng.instances:
358
-            hg = {
359
-                "name": instance.instance_name,
360
-                "configurations": configs.get_instance_params(instance),
361
-                "components": get_host_group_components(cluster, procs)
362
-            }
363
-            host_groups.append(hg)
364
-    bp = {
365
-        "Blueprints": {
366
-            "stack_name": "HDP",
367
-            "stack_version": cluster.hadoop_version,
368
-        },
369
-        "host_groups": host_groups,
370
-        "configurations": configs.get_cluster_params(cluster)
371
-    }
372
-
373
-    if kerberos.is_kerberos_security_enabled(cluster):
374
-        bp['configurations'].extend([
375
-            _serialize_mit_kdc_kerberos_env(cluster),
376
-            _serialize_krb5_configs(cluster)
377
-        ])
378
-        bp['Blueprints']['security'] = {'type': 'KERBEROS'}
379
-
380
-    general_configs = cluster.cluster_configs.get("general", {})
381
-    if (general_configs.get(p_common.NAMENODE_HA) or
382
-            general_configs.get(p_common.RESOURCEMANAGER_HA) or
383
-            general_configs.get(p_common.HBASE_REGIONSERVER_HA)):
384
-        bp = ha_helper.update_bp_ha_common(cluster, bp)
385
-
386
-    if general_configs.get(p_common.NAMENODE_HA):
387
-        bp = ha_helper.update_bp_for_namenode_ha(cluster, bp)
388
-
389
-    if general_configs.get(p_common.RESOURCEMANAGER_HA):
390
-        bp = ha_helper.update_bp_for_resourcemanager_ha(cluster, bp)
391
-
392
-    if general_configs.get(p_common.HBASE_REGIONSERVER_HA):
393
-        bp = ha_helper.update_bp_for_hbase_ha(cluster, bp)
394
-
395
-    with _get_ambari_client(cluster) as client:
396
-        return client.create_blueprint(cluster.name, bp)
397
-
398
-
399
-def _build_ambari_cluster_template(cluster):
400
-    cl_tmpl = {
401
-        "blueprint": cluster.name,
402
-        "default_password": uuidutils.generate_uuid(),
403
-        "host_groups": []
404
-    }
405
-
406
-    if cluster.use_autoconfig:
407
-        strategy = configs.get_auto_configuration_strategy(cluster)
408
-        cl_tmpl["config_recommendation_strategy"] = strategy
409
-
410
-    if kerberos.is_kerberos_security_enabled(cluster):
411
-        cl_tmpl["credentials"] = _get_credentials(cluster)
412
-        cl_tmpl["security"] = {"type": "KERBEROS"}
413
-    topology = _get_topology_data(cluster)
414
-    for ng in cluster.node_groups:
415
-        for instance in ng.instances:
416
-            host = {"fqdn": instance.fqdn()}
417
-            if t_helper.is_data_locality_enabled():
418
-                host["rack_info"] = topology[instance.instance_name]
419
-            cl_tmpl["host_groups"].append({
420
-                "name": instance.instance_name,
421
-                "hosts": [host]
422
-            })
423
-    return cl_tmpl
424
-
425
-
426
-@cpo.event_wrapper(True, step=_("Start cluster"), param=('cluster', 0))
427
-def start_cluster(cluster):
428
-    ambari_template = _build_ambari_cluster_template(cluster)
429
-    with _get_ambari_client(cluster) as client:
430
-        req_id = client.create_cluster(cluster.name, ambari_template)["id"]
431
-        client.wait_ambari_request(req_id, cluster.name)
432
-
433
-
434
-@cpo.event_wrapper(True)
435
-def _add_host_to_cluster(instance, client):
436
-    client.add_host_to_cluster(instance)
437
-
438
-
439
-def add_new_hosts(cluster, instances):
440
-    with _get_ambari_client(cluster) as client:
441
-        cpo.add_provisioning_step(
442
-            cluster.id, _("Add new hosts"), len(instances))
443
-        for inst in instances:
444
-            _add_host_to_cluster(inst, client)
445
-
446
-
447
-@cpo.event_wrapper(True, step=_("Generate config groups"),
448
-                   param=('cluster', 0))
449
-def manage_config_groups(cluster, instances):
450
-    groups = []
451
-    for instance in instances:
452
-        groups.extend(configs.get_config_group(instance))
453
-    with _get_ambari_client(cluster) as client:
454
-        client.create_config_group(cluster, groups)
455
-
456
-
457
-@cpo.event_wrapper(True, step=_("Cleanup config groups"),
458
-                   param=('cluster', 0))
459
-def cleanup_config_groups(cluster, instances):
460
-    to_remove = set()
461
-    for instance in instances:
462
-        cfg_name = "%s:%s" % (cluster.name, instance.instance_name)
463
-        to_remove.add(cfg_name)
464
-    with _get_ambari_client(cluster) as client:
465
-        config_groups = client.get_config_groups(cluster)
466
-        for group in config_groups['items']:
467
-            cfg_id = group['ConfigGroup']['id']
468
-            detailed = client.get_detailed_config_group(cluster, cfg_id)
469
-            cfg_name = detailed['ConfigGroup']['group_name']
470
-            # we have config group per host
471
-            if cfg_name in to_remove:
472
-                client.remove_config_group(cluster, cfg_id)
473
-
474
-
475
-@cpo.event_wrapper(True, step=_("Regenerate keytabs for Kerberos"),
476
-                   param=('cluster', 0))
477
-def _regenerate_keytabs(cluster):
478
-    with _get_ambari_client(cluster) as client:
479
-        alias = "kdc.admin.credential"
480
-        try:
481
-            client.get_credential(cluster.name, alias)
482
-        except ambari_client.AmbariNotFound:
483
-            # credentials are missing
484
-            data = {
485
-                'Credential': {
486
-                    "principal": kerberos.get_admin_principal(cluster),
487
-                    "key": kerberos.get_server_password(cluster),
488
-                    "type": "TEMPORARY"
489
-                }
490
-            }
491
-
492
-            client.import_credential(cluster.name, alias, data)
493
-
494
-        req_id = client.regenerate_keytabs(cluster.name)
495
-        client.wait_ambari_request(req_id, cluster.name)
496
-
497
-
498
-@cpo.event_wrapper(True, step=_("Install services on hosts"),
499
-                   param=('cluster', 0))
500
-def _install_services_to_hosts(cluster, instances):
501
-    requests_ids = []
502
-    with _get_ambari_client(cluster) as client:
503
-        clients = p_common.get_clients(cluster)
504
-        for instance in instances:
505
-            services = p_common.get_ambari_proc_list(instance.node_group)
506
-            services.extend(clients)
507
-            for service in services:
508
-                client.add_service_to_host(instance, service)
509
-                requests_ids.append(
510
-                    client.start_service_on_host(
511
-                        instance, service, 'INSTALLED'))
512
-        client.wait_ambari_requests(requests_ids, cluster.name)
513
-
514
-
515
-@cpo.event_wrapper(True, step=_("Start services on hosts"),
516
-                   param=('cluster', 0))
517
-def _start_services_on_hosts(cluster, instances):
518
-    with _get_ambari_client(cluster) as client:
519
-        # all services added and installed, let's start them
520
-        requests_ids = []
521
-        for instance in instances:
522
-            services = p_common.get_ambari_proc_list(instance.node_group)
523
-            services.extend(p_common.ALL_LIST)
524
-            for service in services:
525
-                requests_ids.append(
526
-                    client.start_service_on_host(
527
-                        instance, service, 'STARTED'))
528
-        client.wait_ambari_requests(requests_ids, cluster.name)
529
-
530
-
531
-def manage_host_components(cluster, instances):
532
-    _install_services_to_hosts(cluster, instances)
533
-    if kerberos.is_kerberos_security_enabled(cluster):
534
-        _regenerate_keytabs(cluster)
535
-    _start_services_on_hosts(cluster, instances)
536
-
537
-
538
-@cpo.event_wrapper(True, step=_("Decommission NodeManagers and DataNodes"),
539
-                   param=('cluster', 0))
540
-def decommission_hosts(cluster, instances):
541
-    nodemanager_instances = filter(
542
-        lambda i: p_common.NODEMANAGER in i.node_group.node_processes,
543
-        instances)
544
-    if len(nodemanager_instances) > 0:
545
-        decommission_nodemanagers(cluster, nodemanager_instances)
546
-
547
-    datanode_instances = filter(
548
-        lambda i: p_common.DATANODE in i.node_group.node_processes,
549
-        instances)
550
-    if len(datanode_instances) > 0:
551
-        decommission_datanodes(cluster, datanode_instances)
552
-
553
-
554
-def decommission_nodemanagers(cluster, instances):
555
-    with _get_ambari_client(cluster) as client:
556
-        client.decommission_nodemanagers(cluster.name, instances)
557
-
558
-
559
-def decommission_datanodes(cluster, instances):
560
-    with _get_ambari_client(cluster) as client:
561
-        client.decommission_datanodes(cluster.name, instances)
562
-
563
-
564
-def restart_namenode(cluster, instance):
565
-    with _get_ambari_client(cluster) as client:
566
-        client.restart_namenode(cluster.name, instance)
567
-
568
-
569
-def restart_resourcemanager(cluster, instance):
570
-    with _get_ambari_client(cluster) as client:
571
-        client.restart_resourcemanager(cluster.name, instance)
572
-
573
-
574
-@cpo.event_wrapper(True, step=_("Restart NameNodes and ResourceManagers"),
575
-                   param=('cluster', 0))
576
-def restart_nns_and_rms(cluster):
577
-    nns = plugin_utils.get_instances(cluster, p_common.NAMENODE)
578
-    for nn in nns:
579
-        restart_namenode(cluster, nn)
580
-
581
-    rms = plugin_utils.get_instances(cluster, p_common.RESOURCEMANAGER)
582
-    for rm in rms:
583
-        restart_resourcemanager(cluster, rm)
584
-
585
-
586
-def restart_service(cluster, service_name):
587
-    with _get_ambari_client(cluster) as client:
588
-        client.restart_service(cluster.name, service_name)
589
-
590
-
591
-@cpo.event_wrapper(True, step=_("Remove hosts"), param=('cluster', 0))
592
-def remove_services_from_hosts(cluster, instances):
593
-    for inst in instances:
594
-        LOG.debug("Stopping and removing processes from host %s", inst.fqdn())
595
-        _remove_services_from_host(cluster, inst)
596
-        LOG.debug("Removing the host %s", inst.fqdn())
597
-        _remove_host(cluster, inst)
598
-
599
-
600
-def _remove_services_from_host(cluster, instance):
601
-    with _get_ambari_client(cluster) as client:
602
-        hdp_processes = client.list_host_processes(cluster.name, instance)
603
-        for proc in hdp_processes:
604
-            LOG.debug("Stopping process %(proc)s on host %(fqdn)s ",
605
-                      {'proc': proc, 'fqdn': instance.fqdn()})
606
-            client.stop_process_on_host(cluster.name, instance, proc)
607
-
608
-            LOG.debug("Removing process %(proc)s from host %(fqdn)s ",
609
-                      {'proc': proc, 'fqdn': instance.fqdn()})
610
-            client.remove_process_from_host(cluster.name, instance, proc)
611
-
612
-    _wait_all_processes_removed(cluster, instance)
613
-
614
-
615
-def _remove_host(cluster, inst):
616
-    with _get_ambari_client(cluster) as client:
617
-        client.delete_host(cluster.name, inst)
618
-
619
-
620
-def _wait_all_processes_removed(cluster, instance):
621
-    with _get_ambari_client(cluster) as client:
622
-        while True:
623
-            hdp_processes = client.list_host_processes(cluster.name, instance)
624
-            if not hdp_processes:
625
-                return
626
-            context.sleep(5)
627
-
628
-
629
-def _get_ambari_client(cluster):
630
-    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
631
-    password = cluster.extra["ambari_password"]
632
-    return ambari_client.AmbariClient(ambari, password=password)
633
-
634
-
635
-def _get_topology_data(cluster):
636
-    if not t_helper.is_data_locality_enabled():
637
-        return {}
638
-
639
-    LOG.warning("Node group awareness is not implemented in YARN yet "
640
-                "so enable_hypervisor_awareness set to False "
641
-                "explicitly")
642
-    return t_helper.generate_topology_map(cluster, is_node_awareness=False)
643
-
644
-
645
-@cpo.event_wrapper(True)
646
-def _configure_topology_data(cluster, inst, client):
647
-    topology = _get_topology_data(cluster)
648
-    client.set_rack_info_for_instance(
649
-        cluster.name, inst, topology[inst.instance_name])
650
-
651
-
652
-@cpo.event_wrapper(True, step=_("Restart HDFS and MAPREDUCE2 services"),
653
-                   param=('cluster', 0))
654
-def _restart_hdfs_and_mapred_services(cluster, client):
655
-    client.restart_service(cluster.name, p_common.HDFS_SERVICE)
656
-    client.restart_service(cluster.name, p_common.MAPREDUCE2_SERVICE)
657
-
658
-
659
-def configure_rack_awareness(cluster, instances):
660
-    if not t_helper.is_data_locality_enabled():
661
-        return
662
-
663
-    with _get_ambari_client(cluster) as client:
664
-        cpo.add_provisioning_step(
665
-            cluster.id, _("Configure rack awareness"), len(instances))
666
-        for inst in instances:
667
-            _configure_topology_data(cluster, inst, client)
668
-        _restart_hdfs_and_mapred_services(cluster, client)
669
-
670
-
671
-@cpo.event_wrapper(True)
672
-def _add_hadoop_swift_jar(instance, new_jar):
673
-    with instance.remote() as r:
674
-        code, out = r.execute_command(
675
-            "test -f %s" % new_jar, raise_when_error=False)
676
-        if code == 0:
677
-            # get ambari hadoop version (e.g.: 2.7.1.2.3.4.0-3485)
678
-            code, amb_hadoop_version = r.execute_command(
679
-                "sudo hadoop version | grep 'Hadoop' | awk '{print $2}'")
680
-            amb_hadoop_version = amb_hadoop_version.strip()
681
-            # get special code of ambari hadoop version(e.g.:2.3.4.0-3485)
682
-            amb_code = '.'.join(amb_hadoop_version.split('.')[3:])
683
-            origin_jar = (
684
-                "/usr/hdp/{}/hadoop-mapreduce/hadoop-openstack-{}.jar".format(
685
-                    amb_code, amb_hadoop_version))
686
-            r.execute_command("sudo cp {} {}".format(new_jar, origin_jar))
687
-        else:
688
-            LOG.warning("The {jar_file} file cannot be found "
689
-                        "in the {dir} directory so Keystone API v3 "
690
-                        "is not enabled for this cluster."
691
-                        .format(jar_file="hadoop-openstack.jar",
692
-                                dir="/opt"))
693
-
694
-
695
-def add_hadoop_swift_jar(instances):
696
-    new_jar = "/opt/hadoop-openstack.jar"
697
-    cpo.add_provisioning_step(instances[0].cluster.id,
698
-                              _("Add Hadoop Swift jar to instances"),
699
-                              len(instances))
700
-    for inst in instances:
701
-        _add_hadoop_swift_jar(inst, new_jar)
702
-
703
-
704
-def deploy_kerberos_principals(cluster, instances=None):
705
-    if not kerberos.is_kerberos_security_enabled(cluster):
706
-        return
707
-    if instances is None:
708
-        instances = plugin_utils.get_instances(cluster)
709
-    mapper = {
710
-        'hdfs': plugin_utils.instances_with_services(
711
-            instances, [p_common.SECONDARY_NAMENODE, p_common.NAMENODE,
712
-                        p_common.DATANODE, p_common.JOURNAL_NODE]),
713
-        'spark': plugin_utils.instances_with_services(
714
-            instances, [p_common.SPARK_JOBHISTORYSERVER]),
715
-        'oozie': plugin_utils.instances_with_services(
716
-            instances, [p_common.OOZIE_SERVER]),
717
-    }
718
-
719
-    kerberos.create_keytabs_for_map(cluster, mapper)

sahara/plugins/ambari/edp_engine.py  (+ 0, - 130)

@@ -1,130 +0,0 @@
1
-# Copyright (c) 2015 Mirantis Inc.
2
-#
3
-# Licensed under the Apache License, Version 2.0 (the "License");
4
-# you may not use this file except in compliance with the License.
5
-# You may obtain a copy of the License at
6
-#
7
-#    http://www.apache.org/licenses/LICENSE-2.0
8
-#
9
-# Unless required by applicable law or agreed to in writing, software
10
-# distributed under the License is distributed on an "AS IS" BASIS,
11
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
-# implied.
13
-# See the License for the specific language governing permissions and
14
-# limitations under the License.
15
-
16
-from sahara import exceptions as exc
17
-from sahara.i18n import _
18
-from sahara.plugins.ambari import common as p_common
19
-from sahara.plugins import exceptions as pex
20
-from sahara.plugins import kerberos
21
-from sahara.plugins import utils as plugin_utils
22
-from sahara.service.edp import hdfs_helper
23
-from sahara.service.edp.oozie import engine as oozie_engine
24
-from sahara.service.edp.spark import engine as spark_engine
25
-
26
-
27
-def _get_lib_location(instance, lib_name):
28
-    with instance.remote() as r:
29
-        code, jar_path = r.execute_command(
30
-            ('find /usr/hdp -name "{lib_name}" 2>/dev/null '
31
-             '-print | head -n 1'.format(lib_name=lib_name)),
32
-            run_as_root=True)
33
-    # drop last whitespace character
34
-    return jar_path.rstrip()
35
-
36
-
37
-def _get_hadoop_openstack_jar_location(instance):
38
-    return _get_lib_location(instance, "hadoop-openstack*.jar")
39
-
40
-
41
-def _get_jackson_core(instance):
42
-    return _get_lib_location(instance, "jackson-core-asl-1.9*.jar")
43
-
44
-
45
-class EDPOozieEngine(oozie_engine.OozieJobEngine):
46
-    def get_hdfs_user(self):
47
-        return "oozie"
48
-
49
-    def get_client(self):
50
-        if kerberos.is_kerberos_security_enabled(self.cluster):
51
-            return super(EDPOozieEngine, self).get_remote_client()
52
-        return super(EDPOozieEngine, self).get_client()
53
-
54
-    def create_hdfs_dir(self, remote, dir_name):
55
-        hdfs_helper.create_dir_hadoop2(remote, dir_name, self.get_hdfs_user())
56
-
57
-    def get_oozie_server_uri(self, cluster):
58
-        oozie = plugin_utils.get_instance(cluster, p_common.OOZIE_SERVER)
59
-        return "http://%s:11000/oozie" % oozie.management_ip
60
-
61
-    def get_name_node_uri(self, cluster):
62
-        namenodes = plugin_utils.get_instances(cluster, p_common.NAMENODE)
63
-        if len(namenodes) == 1:
64
-            return "hdfs://%s:8020" % namenodes[0].fqdn()
65
-        else:
66
-            return "hdfs://hdfs-ha"
67
-
68
-    def get_resource_manager_uri(self, cluster):
69
-        resourcemanagers = plugin_utils.get_instances(cluster,
70
-                                                      p_common.RESOURCEMANAGER)
71
-        return "%s:8050" % resourcemanagers[0].fqdn()
72
-
73
-    def get_oozie_server(self, cluster):
74
-        return plugin_utils.get_instance(cluster, p_common.OOZIE_SERVER)
75
-
76
-    def validate_job_execution(self, cluster, job, data):
77
-        oozie_count = plugin_utils.get_instances_count(cluster,
78
-                                                       p_common.OOZIE_SERVER)
79
-        if oozie_count != 1:
80
-            raise pex.InvalidComponentCountException(
81
-                p_common.OOZIE_SERVER, "1", oozie_count)
82
-        super(EDPOozieEngine, self).validate_job_execution(cluster, job, data)
83
-
84
-    @staticmethod
85
-    def get_possible_job_config(job_type):
86
-        return {"job_config": []}
87
-
88
-
89
-class EDPSparkEngine(spark_engine.SparkJobEngine):
90
-    edp_base_version = "2.2"
91
-
92
-    def __init__(self, cluster):
93
-        super(EDPSparkEngine, self).__init__(cluster)
94
-        # searching for spark instance
95
-        self.master = plugin_utils.get_instance(
96
-            cluster, p_common.SPARK_JOBHISTORYSERVER)
97
-        self.plugin_params["spark-user"] = "sudo -u spark "
98
-        self.plugin_params["spark-submit"] = "spark-submit"
99
-        self.plugin_params["deploy-mode"] = "cluster"
100
-        self.plugin_params["master"] = "yarn-cluster"
101
-
102
-    @staticmethod
103
-    def edp_supported(version):
104
-        return version >= EDPSparkEngine.edp_base_version
105
-
106
-    def run_job(self, job_execution):
107
-        # calculate class-path dynamically
108
-        driver_classpath = [
109
-            _get_hadoop_openstack_jar_location(self.master),
110
-            _get_jackson_core(self.master)]
111
-        self.plugin_params['driver-class-path'] = ":".join(driver_classpath)
112
-        self.plugin_params['drivers-to-jars'] = driver_classpath
113
-
114
-        return super(EDPSparkEngine, self).run_job(job_execution)
115
-
116
-    def validate_job_execution(self, cluster, job, data):
117
-        if not self.edp_supported(cluster.hadoop_version):
118
-            raise exc.InvalidDataException(
119
-                _('Ambari plugin of {base} or higher required to run {type} '
120
-                  'jobs').format(
121
-                    base=EDPSparkEngine.edp_base_version, type=job.type))
122
-
123
-        spark_nodes_count = plugin_utils.get_instances_count(
124
-            cluster, p_common.SPARK_JOBHISTORYSERVER)
125
-        if spark_nodes_count != 1:
126
-            raise pex.InvalidComponentCountException(
127
-                p_common.SPARK_JOBHISTORYSERVER, '1', spark_nodes_count)
128
-
129
-        super(EDPSparkEngine, self).validate_job_execution(
130
-            cluster, job, data)

sahara/plugins/ambari/ha_helper.py  (+ 0, - 252)

@@ -1,252 +0,0 @@
1
-# Copyright (c) 2015 Mirantis Inc.
2
-#
3
-# Licensed under the Apache License, Version 2.0 (the "License");
4
-# you may not use this file except in compliance with the License.
5
-# You may obtain a copy of the License at
6
-#
7
-#    http://www.apache.org/licenses/LICENSE-2.0
8
-#
9
-# Unless required by applicable law or agreed to in writing, software
10
-# distributed under the License is distributed on an "AS IS" BASIS,
11
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
-# implied.
13
-# See the License for the specific language governing permissions and
14
-# limitations under the License.
15
-
16
-from sahara.plugins.ambari import common as p_common
17
-from sahara.plugins import utils
18
-
19
-
20
-CORE_SITE = "core-site"
21
-YARN_SITE = "yarn-site"
22
-HBASE_SITE = "hbase-site"
23
-HDFS_SITE = "hdfs-site"
24
-HADOOP_ENV = "hadoop-env"
25
-ZOO_CFG = "zoo.cfg"
26
-
27
-
28
-def update_bp_ha_common(cluster, blueprint):
29
-    blueprint = _set_default_fs(cluster, blueprint, p_common.NAMENODE_HA)
30
-    blueprint = _set_high_zk_limits(blueprint)
31
-
32
-    return blueprint
33
-
34
-
35
-def update_bp_for_namenode_ha(cluster, blueprint):
36
-    blueprint = _add_zkfc_to_namenodes(blueprint)
37
-    blueprint = _set_zk_quorum(cluster, blueprint, CORE_SITE)
38
-    blueprint = _configure_hdfs_site(cluster, blueprint)
39
-
40
-    return blueprint
41
-
42
-
43
-def update_bp_for_resourcemanager_ha(cluster, blueprint):
44
-    blueprint = _configure_yarn_site(cluster, blueprint)
45
-    blueprint = _set_zk_quorum(cluster, blueprint, YARN_SITE)
46
-    blueprint = _set_default_fs(cluster, blueprint,
47
-                                p_common.RESOURCEMANAGER_HA)
48
-    return blueprint
49
-
50
-
51
-def update_bp_for_hbase_ha(cluster, blueprint):
52
-    return _confgure_hbase_site(cluster, blueprint)
53
-
54
-
55
-def _add_zkfc_to_namenodes(blueprint):
56
-    for hg in blueprint["host_groups"]:
57
-        if {"name": "NAMENODE"} in hg["components"]:
58
-            hg["components"].append({"name": "ZKFC"})
59
-
60
-    return blueprint
61
-
62
-
63
-def _find_create_properties_section(blueprint, section_name):
64
-    for conf_group in blueprint["configurations"]:
65
-        if section_name in conf_group:
66
-            return conf_group[section_name]
67
-
68
-    new_group = {section_name: {}}
69
-    blueprint["configurations"].append(new_group)
70
-
71
-    return new_group[section_name]
72
-
73
-
74
-def _find_hdfs_site(blueprint):
75
-    return _find_create_properties_section(blueprint, HDFS_SITE)
76
-
77
-
78
-def _find_yarn_site(blueprint):
79
-    return _find_create_properties_section(blueprint, YARN_SITE)
80
-
81
-
82
-def _find_core_site(blueprint):
83
-    return _find_create_properties_section(blueprint, CORE_SITE)
84
-
85
-
86
-def _find_hadoop_env(blueprint):
87
-    return _find_create_properties_section(blueprint, HADOOP_ENV)
88
-
89
-
90
-def _find_zoo_cfg(blueprint):
91
-    return _find_create_properties_section(blueprint, ZOO_CFG)
92
-
93
-
94
-def _find_hbase_site(blueprint):
95
-    return _find_create_properties_section(blueprint, HBASE_SITE)
96
-
97
-
98
-def _set_default_fs(cluster, blueprint, ha_type):
99
-    if ha_type == p_common.NAMENODE_HA:
100
-        _find_core_site(blueprint)["fs.defaultFS"] = "hdfs://hdfs-ha"
101
-    elif ha_type == p_common.RESOURCEMANAGER_HA:
102
-        nn_instance = utils.get_instances(cluster, p_common.NAMENODE)[0]
103
-        _find_core_site(blueprint)["fs.defaultFS"] = (
104
-            "hdfs://%s:8020" % nn_instance.fqdn())
105
-    return blueprint
106
-
107
-
108
-def _set_zk_quorum(cluster, blueprint, conf_type):
109
-    zk_instances = utils.get_instances(cluster, p_common.ZOOKEEPER_SERVER)
110
-
111
-    value = ",".join(["%s:2181" % i.fqdn() for i in zk_instances])
112
-    if conf_type == CORE_SITE:
113
-        _find_core_site(blueprint)["ha.zookeeper.quorum"] = value
114
-    elif conf_type == YARN_SITE:
115
-        _find_yarn_site(blueprint)["hadoop.registry.zk.quorum"] = value
116
-
117
-    return blueprint
118
-
119
-
120
-def _set_high_zk_limits(blueprint):
121
-    props = _find_zoo_cfg(blueprint)
122
-    props["tickTime"] = "10000"
123
-
124
-    return blueprint
125
-
126
-
127
-def _set_primary_and_standby_namenode(cluster, blueprint):
128
-    props = _find_hadoop_env(blueprint)
129
-    nns = utils.get_instances(cluster, p_common.NAMENODE)
130
-    props["dfs_ha_initial_namenode_active"] = nns[0].fqdn()
131
-    props["dfs_ha_initial_namenode_standby"] = nns[1].fqdn()
132
-
133
-    return blueprint
134
-
135
-
136
-def _configure_hdfs_site(cluster, blueprint):
137
-    props = _find_hdfs_site(blueprint)
138
-
139
-    props["dfs.client.failover.proxy.provider.hdfs-ha"] = (
140
-        "org.apache.hadoop.hdfs.server.namenode.ha."
141
-        "ConfiguredFailoverProxyProvider")
142
-    props["dfs.ha.automatic-failover.enabled"] = "true"
143
-    props["dfs.ha.fencing.methods"] = "shell(/bin/true)"
144
-    props["dfs.nameservices"] = "hdfs-ha"
145
-
146
-    jns = utils.get_instances(cluster, p_common.JOURNAL_NODE)
147
-    journalnodes_concat = ";".join(
148
-        ["%s:8485" % i.fqdn() for i in jns])
149
-    journalnodes_value = "qjournal://%s/hdfs-ha" % journalnodes_concat
150
-    props["dfs.namenode.shared.edits.dir"] = journalnodes_value
151
-
152
-    nns = utils.get_instances(cluster, p_common.NAMENODE)
153
-    nn_id_concat = ",".join([i.instance_name for i in nns])
154
-    props["dfs.ha.namenodes.hdfs-ha"] = nn_id_concat
155
-
156
-    props["dfs.namenode.http-address"] = "%s:50070" % nns[0].fqdn()
157
-    props["dfs.namenode.https-address"] = "%s:50470" % nns[0].fqdn()
158
-    for i in nns:
159
-        props["dfs.namenode.http-address.hdfs-ha.%s" % i.instance_name] = (
160
-            "%s:50070" % i.fqdn())
161
-        props["dfs.namenode.https-address.hdfs-ha.%s" % i.instance_name] = (
162
-            "%s:50470" % i.fqdn())
163
-        props["dfs.namenode.rpc-address.hdfs-ha.%s" % i.instance_name] = (
164
-            "%s:8020" % i.fqdn())
165
-
166
-    return blueprint
167
-
168
-
169
-def _configure_yarn_site(cluster, blueprint):
170
-    props = _find_yarn_site(blueprint)
171
-    name = cluster.name
172
-    rm_instances = utils.get_instances(cluster, p_common.RESOURCEMANAGER)
173
-
174
-    props["hadoop.registry.rm.enabled"] = "false"
175
-
176
-    zk_instances = utils.get_instances(cluster, p_common.ZOOKEEPER_SERVER)
177
-
178
-    zks = ",".join(["%s:2181" % i.fqdn() for i in zk_instances])
179
-    props["yarn.resourcemanager.zk-address"] = zks
180
-
181
-    hs = utils.get_instance(cluster, p_common.HISTORYSERVER)
182
-    props["yarn.log.server.url"] = "%s:19888/jobhistory/logs/" % hs.fqdn()
183
-
184
-    props["yarn.resourcemanager.address"] = "%s:8050" % rm_instances[0].fqdn()
185
-    props["yarn.resourcemanager.admin.address"] = ("%s:8141" %
186
-                                                   rm_instances[0].fqdn())
187
-    props["yarn.resourcemanager.cluster-id"] = name
188
-    props["yarn.resourcemanager.ha.automatic-failover.zk-base-path"] = (
189
-        "/yarn-leader-election")
190
-    props["yarn.resourcemanager.ha.enabled"] = "true"
191
-
192
-    rm_id_concat = ",".join([i.instance_name for i in rm_instances])
193
-    props["yarn.resourcemanager.ha.rm-ids"] = rm_id_concat
194
-
195
-    for i in rm_instances:
196
-        props["yarn.resourcemanager.hostname.%s" % i.instance_name] = i.fqdn()
197
-        props["yarn.resourcemanager.webapp.address.%s" %
198
-              i.instance_name] = "%s:8088" % i.fqdn()
199
-        props["yarn.resourcemanager.webapp.https.address.%s" %
200
-              i.instance_name] = "%s:8090" % i.fqdn()
201
-
202
-    props["yarn.resourcemanager.hostname"] = rm_instances[0].fqdn()
203
-    props["yarn.resourcemanager.recovery.enabled"] = "true"
204
-    props["yarn.resourcemanager.resource-tracker.address"] = (
205
-        "%s:8025" % rm_instances[0].fqdn())
206
-    props["yarn.resourcemanager.scheduler.address"] = (
207
-        "%s:8030" % rm_instances[0].fqdn())
208
-    props["yarn.resourcemanager.store.class"] = (
209
-        "org.apache.hadoop.yarn.server.resourcemanager.recovery."
210
-        "ZKRMStateStore")
211
-    props["yarn.resourcemanager.webapp.address"] = (
212
-        "%s:8088" % rm_instances[0].fqdn())
213
-    props["yarn.resourcemanager.webapp.https.address"] = (
214
-        "%s:8090" % rm_instances[0].fqdn())
215
-
216
-    tls_instance = utils.get_instance(cluster, p_common.APP_TIMELINE_SERVER)
217
-    props["yarn.timeline-service.address"] = "%s:10200" % tls_instance.fqdn()
218
-    props["yarn.timeline-service.webapp.address"] = (
219
-        "%s:8188" % tls_instance.fqdn())
220
-    props["yarn.timeline-service.webapp.https.address"] = (
221
-        "%s:8190" % tls_instance.fqdn())
222
-
223
-    return blueprint
224
-
225
-
226
-def _confgure_hbase_site(cluster, blueprint):
227
-    props = _find_hbase_site(blueprint)
228
-
229
-    props["hbase.regionserver.global.memstore.lowerLimit"] = "0.38"
230
-    props["hbase.regionserver.global.memstore.upperLimit"] = "0.4"
231
-    props["hbase.regionserver.handler.count"] = "60"
232
-    props["hbase.regionserver.info.port"] = "16030"
233
-    props["hbase.regionserver.storefile.refresh.period"] = "20"
234
-
235
-    props["hbase.rootdir"] = "hdfs://hdfs-ha/apps/hbase/data"
236
-
237
-    props["hbase.security.authentication"] = "simple"
238
-    props["hbase.security.authorization"] = "false"
239
-    props["hbase.superuser"] = "hbase"
240
-    props["hbase.tmp.dir"] = "/hadoop/hbase"
241
-    props["hbase.zookeeper.property.clientPort"] = "2181"
242
-
243
-    zk_instances = utils.get_instances(cluster, p_common.ZOOKEEPER_SERVER)
244
-    zk_quorum_value = ",".join([i.fqdn() for i in zk_instances])
245
-    props["hbase.zookeeper.quorum"] = zk_quorum_value
246
-
247
-    props["hbase.zookeeper.useMulti"] = "true"
248
-    props["hfile.block.cache.size"] = "0.40"
249
-    props["zookeeper.session.timeout"] = "30000"
250
-    props["zookeeper.znode.parent"] = "/hbase-unsecure"
251
-
252
-    return blueprint

sahara/plugins/ambari/health.py  (+ 0, - 149)

@@ -1,149 +0,0 @@
1
-# Copyright (c) 2016 Mirantis Inc.
2
-#
3
-# Licensed under the Apache License, Version 2.0 (the "License");
4
-# you may not use this file except in compliance with the License.
5
-# You may obtain a copy of the License at
6
-#
7
-#    http://www.apache.org/licenses/LICENSE-2.0
8
-#
9
-# Unless required by applicable law or agreed to in writing, software
10
-# distributed under the License is distributed on an "AS IS" BASIS,
11
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
-# implied.
13
-# See the License for the specific language governing permissions and
14
-# limitations under the License.
15
-
16
-import collections
17
-import functools
18
-
19
-from oslo_log import log as logging
20
-import six
21
-
22
-from sahara.i18n import _
23
-from sahara.plugins.ambari import client
24
-from sahara.plugins.ambari import common as p_common
25
-from sahara.plugins import utils as plugin_utils
26
-from sahara.service.health import health_check_base
27
-
28
-
29
-LOG = logging.getLogger(__name__)
30
-
31
-
32
-class AlertsProvider(object):
33
-    def __init__(self, cluster):
34
-        self._data = None
35
-        self._cluster_services = None
36
-        self._exception_store = None
37
-        self.cluster = cluster
38
-        # calling to cache all data
39
-        self.get_alerts_data()
40
-
41
-    def get_cluster_services(self):
42
-        return self._cluster_services
43
-
44
-    def is_ambari_active(self):
45
-        if self._exception_store:
46
-            raise health_check_base.RedHealthError(self._exception_store)
47
-        return _("Ambari Monitor is healthy")
48
-
49
-    def get_alerts_data(self, service=None):
50
-        if self._data is not None:
51
-            # return cached data
52
-            return self._data.get(service, []) if service else self._data
53
-        self._data = {}
54
-        self._cluster_services = []
55
-        try:
56
-            ambari = plugin_utils.get_instance(
57
-                self.cluster, p_common.AMBARI_SERVER)
58
-            password = self.cluster.extra.get("ambari_password")
59
-            with client.AmbariClient(ambari, password=password) as ambari:
60
-                resp = ambari.get_alerts_data(self.cluster)
61
-            for alert in resp:
62
-                alert = alert.get('Alert', {})
63
-                service = alert.get('service_name').lower()
64
-                if service not in self._data:
65
-                    self._data[service] = []
66
-                    self._cluster_services.append(service)
67
-                self._data[service].append(alert)
68
-        except Exception as e:
69
-            prefix = _("Can't get response from Ambari Monitor")
70
-            msg = _("%(problem)s: %(description)s") % {
71
-                'problem': prefix, 'description': six.text_type(e)}
72
-            # don't put in exception to logs, it will be done by log.exception
73
-            LOG.exception(prefix)
74
-            self._exception_store = msg
75
-
76
-
77
-class AmbariHealthCheck(health_check_base.BasicHealthCheck):
78
-    def __init__(self, cluster, provider):
79
-        self.provider = provider
80
-        super(AmbariHealthCheck, self).__init__(cluster)
81
-
82
-    def get_health_check_name(self):
83
-        return "Ambari alerts health check"
84
-
85
-    def is_available(self):
86
-        return self.cluster.plugin_name == 'ambari'
87
-
88
-    def check_health(self):
89
-        return self.provider.is_ambari_active()
90
-
91
-
92
-class AmbariServiceHealthCheck(health_check_base.BasicHealthCheck):
93
-    def __init__(self, cluster, provider, service):
94
-        self.provider = provider
95
-        self.service = service.lower()
96
-        super(AmbariServiceHealthCheck, self).__init__(cluster)
97
-
98
-    def get_health_check_name(self):
99
-        return "Ambari alerts for %s Service" % self.service
100
-
101
-    def is_available(self):
102
-        return self.cluster.plugin_name == 'ambari'
103
-
104
-    def get_important_services(self):
105
-        return [
106
-            p_common.HDFS_SERVICE.lower(),
107
-            p_common.YARN_SERVICE.lower(),
108
-            p_common.OOZIE_SERVICE.lower(),
109
-            p_common.ZOOKEEPER_SERVICE.lower()
110
-        ]
111
-
112
-    def check_health(self):
113
-        imp_map = {'OK': 'GREEN', 'WARNING': 'YELLOW', 'CRITICAL': 'RED'}
114
-        other_map = {'OK': 'GREEN'}
115
-        color_counter = collections.Counter()
116
-        important_services = self.get_important_services()
117
-        for alert in self.provider.get_alerts_data(self.service):
118
-            alert_summary = alert.get('state', 'UNKNOWN')
119
-            if self.service in important_services:
120
-                target = imp_map.get(alert_summary, 'RED')
121
-            else:
122
-                target = other_map.get(alert_summary, 'YELLOW')
123
-            color_counter[target] += 1
124
-        if color_counter['RED'] > 0 and color_counter['YELLOW'] > 0:
125
-            raise health_check_base.RedHealthError(
126
-                _("Ambari Monitor has responded that cluster has "
127
-                  "%(red)d critical and %(yellow)d warning alert(s)")
128
-                % {'red': color_counter['RED'],
129
-                   'yellow': color_counter['YELLOW']})
130
-        elif color_counter['RED'] > 0:
131
-            raise health_check_base.RedHealthError(
132
-                _("Ambari Monitor has responded that cluster has "
133
-                  "%(red)d critical alert(s)")
134
-                % {'red': color_counter['RED']})
135
-        elif color_counter['YELLOW'] > 0:
136
-            raise health_check_base.YellowHealthError(
137
-                _("Ambari Monitor has responded that cluster "
138
-                  "has %d warning alert(s)")
139
-                % color_counter['YELLOW'])
140
-        return _("No alerts found")
141
-
142
-
143
-def get_health_checks(cluster):
144
-    provider = AlertsProvider(cluster)
145
-    checks = [functools.partial(AmbariHealthCheck, provider=provider)]
146
-    for service in provider.get_cluster_services():
147
-        checks.append(functools.partial(
148
-            AmbariServiceHealthCheck, provider=provider, service=service))
149
-    return checks

sahara/plugins/ambari/plugin.py  (+ 0, - 298)

@@ -1,298 +0,0 @@
1
-# Copyright (c) 2015 Mirantis Inc.
2
-#
3
-# Licensed under the Apache License, Version 2.0 (the "License");
4
-# you may not use this file except in compliance with the License.
5
-# You may obtain a copy of the License at
6
-#
7
-#    http://www.apache.org/licenses/LICENSE-2.0
8
-#
9
-# Unless required by applicable law or agreed to in writing, software
10
-# distributed under the License is distributed on an "AS IS" BASIS,
11
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
-# implied.
13
-# See the License for the specific language governing permissions and
14
-# limitations under the License.
15
-
16
-
17
-from sahara import conductor
18
-from sahara import context
19
-from sahara.i18n import _
20
-from sahara.plugins.ambari import common as p_common
21
-from sahara.plugins.ambari import configs
22
-from sahara.plugins.ambari import deploy
23
-from sahara.plugins.ambari import edp_engine
24
-from sahara.plugins.ambari import health
25
-from sahara.plugins.ambari import validation
26
-from sahara.plugins import images
27
-from sahara.plugins import kerberos
28
-from sahara.plugins import provisioning as p
29
-from sahara.plugins import utils as plugin_utils
30
-from sahara.swift import swift_helper
31
-
32
-
33
-conductor = conductor.API
34
-
35
-
36
-class AmbariPluginProvider(p.ProvisioningPluginBase):
37
-
38
-    def get_title(self):
39
-        return "HDP Plugin"
40
-
41
-    def get_description(self):
42
-        return _("The Ambari Sahara plugin provides the ability to launch "
43
-                 "clusters with Hortonworks Data Platform (HDP) on OpenStack "
44
-                 "using Apache Ambari")
45
-
46
-    def get_versions(self):
47
-        return ["2.3", "2.4", "2.5", "2.6"]
48
-
49
-    def get_node_processes(self, hadoop_version):
50
-        return {
51
-            p_common.AMBARI_SERVICE: [p_common.AMBARI_SERVER],
52
-            p_common.FALCON_SERVICE: [p_common.FALCON_SERVER],
53
-            p_common.FLUME_SERVICE: [p_common.FLUME_HANDLER],
54
-            p_common.HBASE_SERVICE: [p_common.HBASE_MASTER,
55
-                                     p_common.HBASE_REGIONSERVER],
56
-            p_common.HDFS_SERVICE: [p_common.DATANODE, p_common.NAMENODE,
57
-                                    p_common.SECONDARY_NAMENODE,
58
-                                    p_common.JOURNAL_NODE],
59
-            p_common.HIVE_SERVICE: [p_common.HIVE_METASTORE,
60
-                                    p_common.HIVE_SERVER],
61
-            p_common.KAFKA_SERVICE: [p_common.KAFKA_BROKER],
62
-            p_common.KNOX_SERVICE: [p_common.KNOX_GATEWAY],
63
-            p_common.OOZIE_SERVICE: [p_common.OOZIE_SERVER],
64
-            p_common.RANGER_SERVICE: [p_common.RANGER_ADMIN,
65
-                                      p_common.RANGER_USERSYNC],
66
-            p_common.SLIDER_SERVICE: [p_common.SLIDER],
67
-            p_common.SPARK_SERVICE: [p_common.SPARK_JOBHISTORYSERVER],
68
-            p_common.SQOOP_SERVICE: [p_common.SQOOP],
69
-            p_common.STORM_SERVICE: [
70
-                p_common.DRPC_SERVER, p_common.NIMBUS,
71
-                p_common.STORM_UI_SERVER, p_common.SUPERVISOR],
72
-            p_common.YARN_SERVICE: [
73
-                p_common.APP_TIMELINE_SERVER, p_common.HISTORYSERVER,
74
-                p_common.NODEMANAGER, p_common.RESOURCEMANAGER],
75
-            p_common.ZOOKEEPER_SERVICE: [p_common.ZOOKEEPER_SERVER],
76
-            'Kerberos': [],
77
-        }
78
-
79
-    def get_configs(self, hadoop_version):
80
-        cfgs = kerberos.get_config_list()
81
-        cfgs.extend(configs.load_configs(hadoop_version))
82
-        return cfgs
83
-
84
-    def configure_cluster(self, cluster):
85
-        deploy.disable_repos(cluster)
86
-        deploy.setup_ambari(cluster)
87
-        deploy.setup_agents(cluster)