From 3e0213e5c467bdc77a6b5c26ff3f6601dd3d5331 Mon Sep 17 00:00:00 2001
From: Ghanshyam Mann
Date: Fri, 10 May 2024 17:28:16 -0700
Subject: [PATCH] Retire Sahara: remove repo content

The Sahara project is retiring:
https://review.opendev.org/c/openstack/governance/+/919374

This commit removes the content of this project repo.

Depends-On: https://review.opendev.org/c/openstack/project-config/+/919376
Change-Id: I2ca927796262fc441a430514b7bf2ecedbbc4539
---
 .gitignore | 30 -
 .stestr.conf | 3 -
 .zuul.yaml | 10 -
 CONTRIBUTING.rst | 19 -
 LICENSE | 175 --
 README.rst | 44 +-
 babel.cfg | 1 -
 doc/requirements.txt | 9 -
 doc/source/conf.py | 214 --
 doc/source/contributor/contributing.rst | 14 -
 doc/source/contributor/index.rst | 8 -
 doc/source/index.rst | 8 -
 doc/source/user/ambari-plugin.rst | 162 --
 doc/source/user/index.rst | 8 -
 .../notes/drop-py2-7-75fab513d04abc88.yaml | 6 -
 .../fix-ambari-ubuntu-7915be74bdeaf730.yaml | 5 -
 releasenotes/source/2023.1.rst | 6 -
 releasenotes/source/_static/.placeholder | 0
 releasenotes/source/_templates/.placeholder | 0
 releasenotes/source/conf.py | 210 --
 releasenotes/source/index.rst | 17 -
 .../locale/de/LC_MESSAGES/releasenotes.po | 57 -
 .../locale/en_GB/LC_MESSAGES/releasenotes.po | 58 -
 .../locale/ne/LC_MESSAGES/releasenotes.po | 37 -
 .../locale/pt_BR/LC_MESSAGES/releasenotes.po | 34 -
 releasenotes/source/stein.rst | 6 -
 releasenotes/source/train.rst | 6 -
 releasenotes/source/unreleased.rst | 5 -
 releasenotes/source/ussuri.rst | 6 -
 releasenotes/source/victoria.rst | 6 -
 releasenotes/source/wallaby.rst | 6 -
 releasenotes/source/xena.rst | 6 -
 releasenotes/source/yoga.rst | 6 -
 releasenotes/source/zed.rst | 6 -
 requirements.txt | 18 -
 sahara_plugin_ambari/__init__.py | 0
 sahara_plugin_ambari/i18n.py | 26 -
 .../de/LC_MESSAGES/sahara_plugin_ambari.po | 215 --
 .../en_GB/LC_MESSAGES/sahara_plugin_ambari.po | 166 --
 .../id/LC_MESSAGES/sahara_plugin_ambari.po | 169 --
 sahara_plugin_ambari/plugins/__init__.py | 0
 .../plugins/ambari/__init__.py | 0
 sahara_plugin_ambari/plugins/ambari/client.py | 363 ---
 sahara_plugin_ambari/plugins/ambari/common.py | 155 --
 .../plugins/ambari/configs.py | 333 ---
 sahara_plugin_ambari/plugins/ambari/deploy.py | 723 ------
 .../plugins/ambari/edp_engine.py | 127 --
 .../plugins/ambari/ha_helper.py | 252 ---
 sahara_plugin_ambari/plugins/ambari/health.py | 148 --
 sahara_plugin_ambari/plugins/ambari/plugin.py | 297 ---
 .../plugins/ambari/requests_helper.py | 145 --
 .../plugins/ambari/resources/configs-2.3.json | 1276 -----------
 .../plugins/ambari/resources/configs-2.4.json | 1331 -----------
 .../plugins/ambari/resources/configs-2.5.json | 2008 -----------------
 .../plugins/ambari/resources/configs-2.6.json | 2008 -----------------
 .../ambari/resources/generate_config.py | 79 -
 .../resources/images/centos/disable_ambari | 8 -
 .../images/centos/disable_certificate_check | 12 -
 .../resources/images/centos/disable_firewall | 20 -
 .../resources/images/centos/disable_selinux | 12 -
 .../resources/images/centos/setup_java_home | 31 -
 .../centos/unlimited_security_artifacts | 11 -
 .../ambari/resources/images/centos/wget_repo | 9 -
 .../ambari/resources/images/common/add_jar | 31 -
 .../images/common/fix_tls_ambari_agent | 17 -
 .../images/common/mysql_connector_java_link | 14 -
 .../resources/images/common/oracle_java | 41 -
 .../ambari/resources/images/image.yaml | 140 --
 .../resources/images/ubuntu/setup_java_home | 33 -
 .../ambari/resources/images/ubuntu/wget_repo | 11 -
 .../plugins/ambari/validation.py | 223 --
 sahara_plugin_ambari/tests/__init__.py | 17 -
 sahara_plugin_ambari/tests/unit/__init__.py | 0
 sahara_plugin_ambari/tests/unit/base.py | 53 -
 .../tests/unit/plugins/__init__.py | 0
 .../tests/unit/plugins/ambari/__init__.py | 0
 .../tests/unit/plugins/ambari/test_client.py | 372 ---
 .../tests/unit/plugins/ambari/test_common.py | 69 -
 .../tests/unit/plugins/ambari/test_configs.py | 164 --
 .../tests/unit/plugins/ambari/test_deploy.py | 102 -
 .../unit/plugins/ambari/test_ha_helper.py | 263 ---
 .../tests/unit/plugins/ambari/test_health.py | 122 -
 .../unit/plugins/ambari/test_open_ports.py | 33 -
 .../tests/unit/plugins/ambari/test_plugin.py | 51 -
 .../plugins/ambari/test_requests_helper.py | 96 -
 .../unit/plugins/ambari/test_validation.py | 68 -
 sahara_plugin_ambari/utils/__init__.py | 0
 sahara_plugin_ambari/utils/patches.py | 108 -
 setup.cfg | 43 -
 setup.py | 20 -
 test-requirements.txt | 16 -
 tox.ini | 99 -
 92 files changed, 8 insertions(+), 13328 deletions(-)
 delete mode 100644 .gitignore
 delete mode 100644 .stestr.conf
 delete mode 100644 .zuul.yaml
 delete mode 100644 CONTRIBUTING.rst
 delete mode 100644 LICENSE
 delete mode 100644 babel.cfg
 delete mode 100644 doc/requirements.txt
 delete mode 100644 doc/source/conf.py
 delete mode 100644 doc/source/contributor/contributing.rst
 delete mode 100644 doc/source/contributor/index.rst
 delete mode 100644 doc/source/index.rst
 delete mode 100644 doc/source/user/ambari-plugin.rst
 delete mode 100644 doc/source/user/index.rst
 delete mode 100644 releasenotes/notes/drop-py2-7-75fab513d04abc88.yaml
 delete mode 100644 releasenotes/notes/fix-ambari-ubuntu-7915be74bdeaf730.yaml
 delete mode 100644 releasenotes/source/2023.1.rst
 delete mode 100644 releasenotes/source/_static/.placeholder
 delete mode 100644 releasenotes/source/_templates/.placeholder
 delete mode 100644 releasenotes/source/conf.py
 delete mode 100644 releasenotes/source/index.rst
 delete mode 100644 releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po
 delete mode 100644 releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
 delete mode 100644 releasenotes/source/locale/ne/LC_MESSAGES/releasenotes.po
 delete mode 100644 releasenotes/source/locale/pt_BR/LC_MESSAGES/releasenotes.po
 delete mode 100644 releasenotes/source/stein.rst
 delete mode 100644 releasenotes/source/train.rst
 delete mode 100644 releasenotes/source/unreleased.rst
 delete mode 100644 releasenotes/source/ussuri.rst
 delete mode 100644 releasenotes/source/victoria.rst
 delete mode 100644 releasenotes/source/wallaby.rst
 delete mode 100644 releasenotes/source/xena.rst
 delete mode 100644 releasenotes/source/yoga.rst
 delete mode 100644 releasenotes/source/zed.rst
 delete mode 100644 requirements.txt
 delete mode 100644 sahara_plugin_ambari/__init__.py
 delete mode 100644 sahara_plugin_ambari/i18n.py
 delete mode 100644 sahara_plugin_ambari/locale/de/LC_MESSAGES/sahara_plugin_ambari.po
 delete mode 100644 sahara_plugin_ambari/locale/en_GB/LC_MESSAGES/sahara_plugin_ambari.po
 delete mode 100644 sahara_plugin_ambari/locale/id/LC_MESSAGES/sahara_plugin_ambari.po
 delete mode 100644 sahara_plugin_ambari/plugins/__init__.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/__init__.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/client.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/common.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/configs.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/deploy.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/edp_engine.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/ha_helper.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/health.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/plugin.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/requests_helper.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/configs-2.3.json
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/configs-2.4.json
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/configs-2.5.json
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/configs-2.6.json
 delete mode 100755 sahara_plugin_ambari/plugins/ambari/resources/generate_config.py
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_ambari
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_certificate_check
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_firewall
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_selinux
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/centos/setup_java_home
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/centos/unlimited_security_artifacts
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/centos/wget_repo
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/common/add_jar
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/common/fix_tls_ambari_agent
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/common/mysql_connector_java_link
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/common/oracle_java
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/image.yaml
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/ubuntu/setup_java_home
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/resources/images/ubuntu/wget_repo
 delete mode 100644 sahara_plugin_ambari/plugins/ambari/validation.py
 delete mode 100644 sahara_plugin_ambari/tests/__init__.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/__init__.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/base.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/__init__.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/ambari/__init__.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/ambari/test_client.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/ambari/test_common.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/ambari/test_configs.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/ambari/test_deploy.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/ambari/test_ha_helper.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/ambari/test_health.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/ambari/test_open_ports.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/ambari/test_plugin.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/ambari/test_requests_helper.py
 delete mode 100644 sahara_plugin_ambari/tests/unit/plugins/ambari/test_validation.py
 delete mode 100644 sahara_plugin_ambari/utils/__init__.py
 delete mode 100644 sahara_plugin_ambari/utils/patches.py
 delete mode 100644 setup.cfg
 delete mode 100644 setup.py
 delete mode 100644 test-requirements.txt
 delete mode 100644 tox.ini

diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 5a05e6b..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,30 +0,0 @@
-*.egg-info
-*.egg[s] -*.log -*.py[co] -.coverage -.testrepository -.tox -.stestr -.venv -.idea -AUTHORS -ChangeLog -build -cover -develop-eggs -dist -doc/build -doc/html -eggs -etc/sahara.conf -etc/sahara/*.conf -etc/sahara/*.topology -sdist -target -tools/lintstack.head.py -tools/pylint_exceptions -doc/source/sample.config - -# Files created by releasenotes build -releasenotes/build diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index a4fd2c9..0000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=./sahara_plugin_ambari/tests/unit -top_dir=./ diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index 7fe9334..0000000 --- a/.zuul.yaml +++ /dev/null @@ -1,10 +0,0 @@ -- project: - templates: - - check-requirements - - openstack-python3-jobs - - publish-openstack-docs-pti - - release-notes-jobs-python3 - check: - jobs: - - sahara-buildimages-ambari: - voting: false diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 105514a..0000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,19 +0,0 @@ -The source repository for this project can be found at: - - https://opendev.org/openstack/sahara-plugin-ambari - -Pull requests submitted through GitHub are not monitored. - -To start contributing to OpenStack, follow the steps in the contribution guide -to set up and use Gerrit: - - https://docs.openstack.org/contributors/code-and-documentation/quick-start.html - -Bugs should be filed on Storyboard: - - https://storyboard.openstack.org/#!/project/openstack/sahara-plugin-ambari - -For more specific information about contributing to this repository, see the -sahara-plugin-ambari contributor guide: - - https://docs.openstack.org/sahara-plugin-ambari/latest/contributor/contributing.html diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 67db858..0000000 --- a/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/README.rst b/README.rst index 2713896..4ee2c5f 100644 --- a/README.rst +++ b/README.rst @@ -1,38 +1,10 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. -.. image:: https://governance.openstack.org/tc/badges/sahara.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html - -.. Change things from this point on - -OpenStack Data Processing ("Sahara") Ambari Plugin -=================================================== - -OpenStack Sahara Ambari Plugin provides the users the option to -start Ambari clusters on OpenStack Sahara. -Check out OpenStack Sahara documentation to see how to deploy the -Ambari Plugin. - -Sahara at wiki.openstack.org: https://wiki.openstack.org/wiki/Sahara - -Storyboard project: https://storyboard.openstack.org/#!/project/openstack/sahara-plugin-ambari - -Sahara docs site: https://docs.openstack.org/sahara/latest/ - -Quickstart guide: https://docs.openstack.org/sahara/latest/user/quickstart.html - -How to participate: https://docs.openstack.org/sahara/latest/contributor/how-to-participate.html - -Source: https://opendev.org/openstack/sahara-plugin-ambari - -Bugs and feature requests: https://storyboard.openstack.org/#!/project/openstack/sahara-plugin-ambari - -Release notes: https://docs.openstack.org/releasenotes/sahara-plugin-ambari/ - -License -------- - -Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. 
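The retrieval step described in the new README text above can be reproduced with standard git commands. A minimal sketch, assuming the repository URL listed in CONTRIBUTING.rst (HEAD^1 refers to the first parent of the retirement commit, i.e. the last commit that still contained the plugin code):

.. sourcecode:: console

    $ git clone https://opendev.org/openstack/sahara-plugin-ambari
    $ cd sahara-plugin-ambari
    $ git checkout HEAD^1    # detaches HEAD at the pre-retirement tree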
diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index efceab8..0000000 --- a/babel.cfg +++ /dev/null @@ -1 +0,0 @@ -[python: **.py] diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 40450dc..0000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -openstackdocstheme>=2.2.1 # Apache-2.0 -os-api-ref>=1.4.0 # Apache-2.0 -reno>=3.1.0 # Apache-2.0 -sphinx>=2.0.0,!=2.1.0 # BSD -sphinxcontrib-httpdomain>=1.3.0 # BSD -whereto>=0.3.0 # Apache-2.0 diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 7949253..0000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,214 +0,0 @@ -# -*- coding: utf-8 -*- -# -# sahara-plugin-ambari documentation build configuration file. -# -# -- General configuration ----------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'reno.sphinxext', - 'openstackdocstheme', -] - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/sahara-plugin-ambari' -openstackdocs_pdf_link = True -openstackdocs_use_storyboard = True - -openstackdocs_projects = [ - 'sahara' -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -copyright = '2015, Sahara team' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. 
-#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'saharaambariplugin-testsdoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'doc-sahara-plugin-ambari.tex', 'Sahara Ambari Plugin Documentation', - 'Sahara team', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - -smartquotes_excludes = {'builders': ['latex']} - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). 
-man_pages = [ - ('index', 'sahara-plugin-ambari', 'sahara-plugin-ambari Documentation', - ['Sahara team'], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------------ - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'sahara-plugin-ambari', 'sahara-plugin-ambari Documentation', - 'Sahara team', 'sahara-plugin-ambari', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst deleted file mode 100644 index 14e8c8c..0000000 --- a/doc/source/contributor/contributing.rst +++ /dev/null @@ -1,14 +0,0 @@ -============================ -So You Want to Contribute... -============================ - -For general information on contributing to OpenStack, please check out the -`contributor guide `_ to get started. -It covers all the basics that are common to all OpenStack projects: the -accounts you need, the basics of interacting with our Gerrit review system, how -we communicate as a community, etc. - -sahara-plugin-ambari is maintained by the OpenStack Sahara project. -To understand our development process and how you can contribute to it, please -look at the Sahara project's general contributor's page: -http://docs.openstack.org/sahara/latest/contributor/contributing.html diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst deleted file mode 100644 index 5534923..0000000 --- a/doc/source/contributor/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -================= -Contributor Guide -================= - -.. toctree:: - :maxdepth: 2 - - contributing diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 50733dc..0000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Ambari plugin for Sahara -======================== - -.. toctree:: - :maxdepth: 2 - - user/index - contributor/index diff --git a/doc/source/user/ambari-plugin.rst b/doc/source/user/ambari-plugin.rst deleted file mode 100644 index d6ea164..0000000 --- a/doc/source/user/ambari-plugin.rst +++ /dev/null @@ -1,162 +0,0 @@ - -Ambari Plugin -============= -The Ambari sahara plugin provides a way to provision -clusters with Hortonworks Data Platform on OpenStack using templates in a -single click and in an easily repeatable fashion. The sahara controller serves -as the glue between Hadoop and OpenStack. The Ambari plugin mediates between -the sahara controller and Apache Ambari in order to deploy and configure Hadoop -on OpenStack. Core to the HDP Plugin is Apache Ambari -which is used as the orchestrator for deploying HDP on OpenStack. The Ambari -plugin uses Ambari Blueprints for cluster provisioning. - -Apache Ambari Blueprints ------------------------- -Apache Ambari Blueprints is a portable document definition, which provides a -complete definition for an Apache Hadoop cluster, including cluster topology, -components, services and their configurations. Ambari Blueprints can be -consumed by the Ambari plugin to instantiate a Hadoop cluster on OpenStack. 
The -benefits of this approach is that it allows for Hadoop clusters to be -configured and deployed using an Ambari native format that can be used with as -well as outside of OpenStack allowing for clusters to be re-instantiated in a -variety of environments. - -Images ------- - -For cluster provisioning, prepared images should be used. - -.. list-table:: Support matrix for the `ambari` plugin - :widths: 15 15 20 15 35 - :header-rows: 1 - - * - Version - (image tag) - - Distribution - - Build method - - Version - (build parameter) - - Notes - - * - 2.6 - - Ubuntu 16.04, CentOS 7 - - sahara-image-pack - - 2.6 - - uses Ambari 2.6 - - * - 2.5 - - Ubuntu 16.04, CentOS 7 - - sahara-image-pack - - 2.5 - - uses Ambari 2.6 - - * - 2.4 - - Ubuntu 14.04, CentOS 7 - - sahara-image-pack - - 2.4 - - uses Ambari 2.6 - - * - 2.4 - - Ubuntu 14.04, CentOS 7 - - sahara-image-create - - 2.4 - - uses Ambari 2.2.1.0 - - * - 2.3 - - Ubuntu 14.04, CentOS 7 - - sahara-image-pack - - 2.3 - - uses Ambari 2.4 - - * - 2.3 - - Ubuntu 14.04, CentOS 7 - - sahara-image-create - - 2.3 - - uses Ambari 2.2.0.0 - -For more information about building image, refer to -:sahara-doc:`Sahara documentation `. - -HDP plugin requires an image to be tagged in sahara Image Registry with two -tags: 'ambari' and '' (e.g. '2.5'). - -The image requires a username. For more information, refer to the -:sahara-doc:`registering image ` section -of the Sahara documentation. - -To speed up provisioning, the HDP packages can be pre-installed on the image -used. The packages' versions depend on the HDP version required. - -High Availability for HDFS and YARN ------------------------------------ -High Availability (Using the Quorum Journal Manager) can be -deployed automatically with the Ambari plugin. You can deploy High Available -cluster through UI by selecting ``NameNode HA`` and/or ``ResourceManager HA`` -options in general configs of cluster template. - -The NameNode High Availability is deployed using 2 NameNodes, one active and -one standby. The NameNodes use a set of JournalNodes and Zookepeer Servers to -ensure the necessary synchronization. In case of ResourceManager HA 2 -ResourceManagers should be enabled in addition. - -A typical Highly available Ambari cluster uses 2 separate NameNodes, 2 separate -ResourceManagers and at least 3 JournalNodes and at least 3 Zookeeper Servers. - -HDP Version Support -------------------- -The HDP plugin currently supports deployment of HDP 2.3, 2.4 and 2.5. - -Cluster Validation ------------------- -Prior to Hadoop cluster creation, the HDP plugin will perform the following -validation checks to ensure a successful Hadoop deployment: - -* Ensure the existence of Ambari Server process in the cluster; -* Ensure the existence of a NameNode, Zookeeper, ResourceManagers processes - HistoryServer and App TimeLine Server in the cluster - -Enabling Kerberos security for cluster --------------------------------------- - -If you want to protect your clusters using MIT Kerberos security you have to -complete a few steps below. - -* If you would like to create a cluster protected by Kerberos security you - just need to enable Kerberos by checkbox in the ``General Parameters`` - section of the cluster configuration. If you prefer to use the OpenStack CLI - for cluster creation, you have to put the data below in the - ``cluster_configs`` section: - - .. 
sourcecode:: console - - "cluster_configs": { - "Enable Kerberos Security": true, - } - - Sahara in this case will correctly prepare KDC server and will create - principals along with keytabs to enable authentication for Hadoop services. - -* Ensure that you have the latest hadoop-openstack jar file distributed - on your cluster nodes. You can download one at - ``https://tarballs.openstack.org/sahara-extra/dist/`` - -* Sahara will create principals along with keytabs for system users - like ``oozie``, ``hdfs`` and ``spark`` so that you will not have to - perform additional auth operations to execute your jobs on top of the - cluster. - -Adjusting Ambari Agent Package Installation timeout Parameter -------------------------------------------------------------- - -For a cluster with large number of nodes or slow connectivity to HDP repo -server, a Sahara HDP Cluster creation may fail due to ambari agent -reaching the timeout threshold while installing the packages in the nodes. - -Such failures will occur during the "cluster start" stage which can be -monitored from Cluster Events tab of Sahara Dashboard. The timeout error will -be visible from the Ambari Dashboard as well. - -* To avoid the package installation timeout by ambari agent you need to change - the default value of ``Ambari Agent Package Install timeout`` parameter which - can be found in the ``General Parameters`` section of the cluster template - configuration. diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst deleted file mode 100644 index b26ffca..0000000 --- a/doc/source/user/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -========== -User Guide -========== - -.. toctree:: - :maxdepth: 2 - - ambari-plugin diff --git a/releasenotes/notes/drop-py2-7-75fab513d04abc88.yaml b/releasenotes/notes/drop-py2-7-75fab513d04abc88.yaml deleted file mode 100644 index 70927b4..0000000 --- a/releasenotes/notes/drop-py2-7-75fab513d04abc88.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - Python 2.7 support has been dropped. Last release of sahara and its plugins - to support python 2.7 is OpenStack Train. The minimum version of Python now - supported by sahara and its plugins is Python 3.6. diff --git a/releasenotes/notes/fix-ambari-ubuntu-7915be74bdeaf730.yaml b/releasenotes/notes/fix-ambari-ubuntu-7915be74bdeaf730.yaml deleted file mode 100644 index f0cf5b9..0000000 --- a/releasenotes/notes/fix-ambari-ubuntu-7915be74bdeaf730.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed several bugs which prevented sahara-image-pack from generating - Ambari-based Ubuntu images. diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst deleted file mode 100644 index d123847..0000000 --- a/releasenotes/source/2023.1.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -2023.1 Series Release Notes -=========================== - -.. 
release-notes:: - :branch: stable/2023.1 diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index b41ebb5..0000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,210 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Sahara Release Notes documentation build configuration file - -extensions = [ - 'reno.sphinxext', - 'openstackdocstheme' -] - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/sahara-plugin-ambari' -openstackdocs_use_storyboard = True - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -copyright = '2015, Sahara Developers' - -# Release do not need a version number in the title, they -# cover multiple versions. -# The full version, including alpha/beta/rc tags. -release = '' -# The short X.Y version. -version = '' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. 
These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'SaharaAmbariReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'SaharaAmbariReleaseNotes.tex', - 'Sahara Ambari Plugin Release Notes Documentation', - 'Sahara Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'saharaambarireleasenotes', - 'Sahara Ambari Plugin Release Notes Documentation', - ['Sahara Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'SaharaAmbariReleaseNotes', - 'Sahara Ambari Plugin Release Notes Documentation', - 'Sahara Developers', 'SaharaAmbariReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. 
-# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 72a170a..0000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -==================================== - Sahara Ambari Plugin Release Notes -==================================== - -.. toctree:: - :maxdepth: 1 - - unreleased - 2023.1 - zed - yoga - xena - wallaby - victoria - ussuri - train - stein diff --git a/releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po deleted file mode 100644 index eb4f925..0000000 --- a/releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,57 +0,0 @@ -# Andreas Jaeger , 2019. #zanata -# Andreas Jaeger , 2020. #zanata -msgid "" -msgstr "" -"Project-Id-Version: sahara-plugin-ambari\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2020-04-24 23:41+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2020-04-25 10:43+0000\n" -"Last-Translator: Andreas Jaeger \n" -"Language-Team: German\n" -"Language: de\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "1.0.0" -msgstr "1.0.0" - -msgid "Bug Fixes" -msgstr "Fehlerkorrekturen" - -msgid "Current Series Release Notes" -msgstr "Aktuelle Serie Releasenotes" - -msgid "" -"Fixed several bugs which prevented sahara-image-pack from generating Ambari-" -"based Ubuntu images." -msgstr "" -"Mehrere Fehler wurden gefixt welche sahara-image-pack hinderten Ambari-" -"basierte Ubuntu Abbilder zu erzeugen." - -msgid "" -"Python 2.7 support has been dropped. Last release of sahara and its plugins " -"to support python 2.7 is OpenStack Train. The minimum version of Python now " -"supported by sahara and its plugins is Python 3.6." -msgstr "" -"Python 2.7 Unterstützung wurde beendet. Der letzte Release von Sahara und " -"seinen Plugins der Python 2.7 unterstützt ist OpenStack Train. Die minimal " -"Python Version welche von Sahara und seinen Plugins unterstützt wird, ist " -"Python 3.6." - -msgid "Sahara Ambari Plugin Release Notes" -msgstr "Sahara Ambari Plugin Release Notes" - -msgid "Stein Series Release Notes" -msgstr "Stein Serie Releasenotes" - -msgid "Train Series Release Notes" -msgstr "Train Serie Releasenotes" - -msgid "Upgrade Notes" -msgstr "Aktualisierungsnotizen" - -msgid "Ussuri Series Release Notes" -msgstr "Ussuri Serie Releasenotes" diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po deleted file mode 100644 index 503dbcd..0000000 --- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,58 +0,0 @@ -# Andi Chandler , 2020. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: sahara-plugin-ambari\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2020-04-26 20:52+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2020-05-02 09:30+0000\n" -"Last-Translator: Andi Chandler \n" -"Language-Team: English (United Kingdom)\n" -"Language: en_GB\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "1.0.0" -msgstr "1.0.0" - -msgid "3.0.0.0rc1" -msgstr "3.0.0.0rc1" - -msgid "Bug Fixes" -msgstr "Bug Fixes" - -msgid "Current Series Release Notes" -msgstr "Current Series Release Notes" - -msgid "" -"Fixed several bugs which prevented sahara-image-pack from generating Ambari-" -"based Ubuntu images." -msgstr "" -"Fixed several bugs which prevented sahara-image-pack from generating Ambari-" -"based Ubuntu images." - -msgid "" -"Python 2.7 support has been dropped. Last release of sahara and its plugins " -"to support python 2.7 is OpenStack Train. The minimum version of Python now " -"supported by sahara and its plugins is Python 3.6." -msgstr "" -"Python 2.7 support has been dropped. Last release of sahara and its plugins " -"to support python 2.7 is OpenStack Train. The minimum version of Python now " -"supported by sahara and its plugins is Python 3.6." - -msgid "Sahara Ambari Plugin Release Notes" -msgstr "Sahara Ambari Plugin Release Notes" - -msgid "Stein Series Release Notes" -msgstr "Stein Series Release Notes" - -msgid "Train Series Release Notes" -msgstr "Train Series Release Notes" - -msgid "Upgrade Notes" -msgstr "Upgrade Notes" - -msgid "Ussuri Series Release Notes" -msgstr "Ussuri Series Release Notes" diff --git a/releasenotes/source/locale/ne/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/ne/LC_MESSAGES/releasenotes.po deleted file mode 100644 index b16838e..0000000 --- a/releasenotes/source/locale/ne/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,37 +0,0 @@ -# Surit Aryal , 2019. #zanata -msgid "" -msgstr "" -"Project-Id-Version: sahara-plugin-ambari\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2019-07-23 14:26+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2019-08-02 09:12+0000\n" -"Last-Translator: Surit Aryal \n" -"Language-Team: Nepali\n" -"Language: ne\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "1.0.0" -msgstr "१.०.०" - -msgid "Bug Fixes" -msgstr "बग फिक्स" - -msgid "Current Series Release Notes" -msgstr "Current Series रिलीज नोट्स" - -msgid "" -"Fixed several bugs which prevented sahara-image-pack from generating Ambari-" -"based Ubuntu images." -msgstr "" -"धेरै बगहरू स्थिर गरियो जसले sahara-image-packलाई Ambari-based Ubuntu छविहरू " -"उत्पादन गर्नबाट रोक्छ।" - -msgid "Sahara Ambari Plugin Release Notes" -msgstr "Sahara Ambari प्लगइन रिलीज नोट्स" - -msgid "Stein Series Release Notes" -msgstr "Stein Series रिलीज नोट्स" diff --git a/releasenotes/source/locale/pt_BR/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/pt_BR/LC_MESSAGES/releasenotes.po deleted file mode 100644 index 295ad13..0000000 --- a/releasenotes/source/locale/pt_BR/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,34 +0,0 @@ -# Rodrigo Loures , 2019. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: sahara-plugin-ambari\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2019-04-22 11:43+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2019-04-18 09:33+0000\n" -"Last-Translator: Rodrigo Loures \n" -"Language-Team: Portuguese (Brazil)\n" -"Language: pt_BR\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "Bug Fixes" -msgstr "Correção de erros" - -msgid "Current Series Release Notes" -msgstr "Atual - Série de Notas de Versão" - -msgid "" -"Fixed several bugs which prevented sahara-image-pack from generating Ambari-" -"based Ubuntu images." -msgstr "" -"Correção de alguns erros aos quais impediam sahara-image-pack de gerar " -"imagens Ubuntu baseadas em Ambari. " - -msgid "Sahara Ambari Plugin Release Notes" -msgstr "Notas de versão do plugin Sahara Ambari" - -msgid "Stein Series Release Notes" -msgstr "Notas de versão da Série Stein" diff --git a/releasenotes/source/stein.rst b/releasenotes/source/stein.rst deleted file mode 100644 index efaceb6..0000000 --- a/releasenotes/source/stein.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Stein Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/stein diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst deleted file mode 100644 index 5839003..0000000 --- a/releasenotes/source/train.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================== -Train Series Release Notes -========================== - -.. release-notes:: - :branch: stable/train diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index cd22aab..0000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - -.. release-notes:: diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst deleted file mode 100644 index e21e50e..0000000 --- a/releasenotes/source/ussuri.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Ussuri Series Release Notes -=========================== - -.. release-notes:: - :branch: stable/ussuri diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst deleted file mode 100644 index 4efc7b6..0000000 --- a/releasenotes/source/victoria.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================= -Victoria Series Release Notes -============================= - -.. release-notes:: - :branch: stable/victoria diff --git a/releasenotes/source/wallaby.rst b/releasenotes/source/wallaby.rst deleted file mode 100644 index d77b565..0000000 --- a/releasenotes/source/wallaby.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================ -Wallaby Series Release Notes -============================ - -.. release-notes:: - :branch: stable/wallaby diff --git a/releasenotes/source/xena.rst b/releasenotes/source/xena.rst deleted file mode 100644 index 1be85be..0000000 --- a/releasenotes/source/xena.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================= -Xena Series Release Notes -========================= - -.. 
release-notes:: - :branch: stable/xena diff --git a/releasenotes/source/yoga.rst b/releasenotes/source/yoga.rst deleted file mode 100644 index 7cd5e90..0000000 --- a/releasenotes/source/yoga.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================= -Yoga Series Release Notes -========================= - -.. release-notes:: - :branch: stable/yoga diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst deleted file mode 100644 index 9608c05..0000000 --- a/releasenotes/source/zed.rst +++ /dev/null @@ -1,6 +0,0 @@ -======================== -Zed Series Release Notes -======================== - -.. release-notes:: - :branch: stable/zed diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 45195a5..0000000 --- a/requirements.txt +++ /dev/null @@ -1,18 +0,0 @@ -# Requirements lower bounds listed here are our best effort to keep them up to -# date but we do not test them so no guarantee of having them all correct. If -# you find any incorrect lower bounds, let us know or propose a fix. - -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -pbr!=2.1.0,>=2.0.0 # Apache-2.0 - -Babel!=2.4.0,>=2.3.4 # BSD -eventlet>=0.26.0 # MIT -oslo.i18n>=3.15.3 # Apache-2.0 -oslo.log>=3.36.0 # Apache-2.0 -oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 -oslo.utils>=3.33.0 # Apache-2.0 -requests>=2.14.2 # Apache-2.0 -sahara>=10.0.0.0b1 diff --git a/sahara_plugin_ambari/__init__.py b/sahara_plugin_ambari/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/sahara_plugin_ambari/i18n.py b/sahara_plugin_ambari/i18n.py deleted file mode 100644 index 891cd7c..0000000 --- a/sahara_plugin_ambari/i18n.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) 2014 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# It's based on oslo.i18n usage in OpenStack Keystone project and -# recommendations from https://docs.openstack.org/oslo.i18n/latest/ -# user/usage.html - -import oslo_i18n - - -_translators = oslo_i18n.TranslatorFactory(domain='sahara_plugin_ambari') - -# The primary translation function using the well-known name "_" -_ = _translators.primary diff --git a/sahara_plugin_ambari/locale/de/LC_MESSAGES/sahara_plugin_ambari.po b/sahara_plugin_ambari/locale/de/LC_MESSAGES/sahara_plugin_ambari.po deleted file mode 100644 index b1d313a..0000000 --- a/sahara_plugin_ambari/locale/de/LC_MESSAGES/sahara_plugin_ambari.po +++ /dev/null @@ -1,215 +0,0 @@ -# Andreas Jaeger , 2019. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: sahara-plugin-ambari VERSION\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2019-09-20 17:28+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2019-09-25 06:06+0000\n" -"Last-Translator: Andreas Jaeger \n" -"Language-Team: German\n" -"Language: de\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -#, python-format -msgid "%(problem)s: %(description)s" -msgstr "%(problem)s: %(description)s" - -# auto translated by TM merge from project: sahara, version: master, DocId: sahara/locale/sahara -msgid "0 or 1" -msgstr "0 oder 1" - -# auto translated by TM merge from project: sahara, version: master, DocId: sahara/locale/sahara -msgid "1 or more" -msgstr "1 oder mehr" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "3 or more. Odd number" -msgstr "3 oder mehr. Ungerade Zahl" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Add Hadoop Swift jar to instances" -msgstr "Füge Hadoop Swift-Jar zu Instanzen hinzu" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Add new hosts" -msgstr "Fügen Sie neue Hosts hinzu" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -#, python-format -msgid "Ambari Monitor has responded that cluster has %(red)d critical alert(s)" -msgstr "" -"Ambari Monitor hat geantwortet, dass der Cluster %(red)d kritische Alarme hat" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -#, python-format -msgid "" -"Ambari Monitor has responded that cluster has %(red)d critical and " -"%(yellow)d warning alert(s)" -msgstr "" -"Ambari Monitor hat geantwortet, dass der Cluster %(red)d kritisch und " -"%(yellow)d Warnmeldung(en) hat" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -#, python-format -msgid "Ambari Monitor has responded that cluster has %d warning alert(s)" -msgstr "" -"Ambari Monitor hat geantwortet, dass der Cluster %d-Warnmeldung(en) enthält" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Ambari Monitor is healthy" -msgstr "Ambari Monitor ist gesund" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Ambari plugin of {base} or higher required to run {type} jobs" -msgstr "" -"Ambari-Plugin von {base} oder höher, das zum Ausführen von {type} Jobs " -"erforderlich ist" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -#, python-format -msgid "Ambari request in %s state" -msgstr "Ambari Anfrage in %s Zustand" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "At least 3 JournalNodes are required for HA" -msgstr "Mindestens 3 JournalNodes sind für HA erforderlich" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "At least 3 ZooKeepers are required for HA" -msgstr "Für HA sind mindestens 3 
ZooKeeper erforderlich" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Can't get response from Ambari Monitor" -msgstr "Antwort von Ambari Monitor nicht möglich" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Cleanup config groups" -msgstr "Konfigurationsgruppen bereinigen" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Configure rack awareness" -msgstr "Rack-Erkennung konfigurieren" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Create Ambari blueprint" -msgstr "Erstellen Sie Ambari Blueprint" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Decommission NodeManagers and DataNodes" -msgstr "NodeManagers und DataNodes außer Betrieb setzen" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Enable HBase RegionServer HA" -msgstr "Aktivieren Sie HBase RegionServer HA" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Enable NameNode HA" -msgstr "Aktivieren Sie NameNode HA" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Enable ResourceManager HA" -msgstr "Aktivieren Sie ResourceManager HA" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Generate config groups" -msgstr "Generieren Sie Konfigurationsgruppen" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Install services on hosts" -msgstr "Installiere Dienste auf Hosts" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "No alerts found" -msgstr "Keine Alarme gefunden" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Odd number" -msgstr "Ungerade Zahl" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Odd number of JournalNodes are required for HA" -msgstr "Eine ungerade Anzahl von JournalNodes ist für HA erforderlich" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Odd number of ZooKeepers are required for HA" -msgstr "Für HA ist eine ungerade Anzahl von ZooKeepern erforderlich" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Prepare Hive" -msgstr "Bereite Hive vor" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Regenerate keytabs for Kerberos" -msgstr "Generieren Sie Keytabs für Kerberos neu" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Remove hosts" -msgstr "Entferne Hosts" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Restart HDFS and MAPREDUCE2 services" -msgstr "Starte die HDFS- und MAPREDUCE2-Dienste neu" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Restart NameNodes and ResourceManagers" 
-msgstr "Starte NameNodes und ResourceManagers neu" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Restart of ambari-agent is needed for host {}, reason: {}" -msgstr "Neustart von ambari-agent wird für Host {} benötigt, Grund: {}" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Set up Ambari agents" -msgstr "Richten Sie Ambari-Agenten ein" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Set up Ambari management console" -msgstr "Richten Sie die Ambari-Verwaltungskonsole ein" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Set up HDP repositories" -msgstr "Richten Sie HDP-Repositorys ein" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -#, python-format -msgid "Some Ambari request(s) not in COMPLETED state: %(description)s." -msgstr "" -"Einige Ambari-Anfragen sind nicht im Status COMPLETED: %(description)s." - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Start cluster" -msgstr "Cluster starten" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Start services on hosts" -msgstr "Starte Dienste auf Hosts" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "" -"The Ambari Sahara plugin provides the ability to launch clusters with " -"Hortonworks Data Platform (HDP) on OpenStack using Apache Ambari" -msgstr "" -"Das Ambari Sahara-Plugin bietet die Möglichkeit, Cluster mit Hortonworks " -"Data Platform (HDP) auf OpenStack mit Apache Ambari zu starten" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Update default Ambari password" -msgstr "Aktualisieren Sie das Standard-Ambari-Passwort" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Wait Ambari accessible" -msgstr "Warte auf Ambari zugänglich" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -msgid "Wait registration of hosts" -msgstr "Warten Sie die Registrierung der Hosts" - -# auto translated by TM merge from project: sahara, version: stable-queens, DocId: sahara/locale/sahara -#, python-format -msgid "request %(id)d: %(name)s - in status %(status)s" -msgstr "Anfrage %(id)d: %(name)s - in Status %(status)s" diff --git a/sahara_plugin_ambari/locale/en_GB/LC_MESSAGES/sahara_plugin_ambari.po b/sahara_plugin_ambari/locale/en_GB/LC_MESSAGES/sahara_plugin_ambari.po deleted file mode 100644 index a5b3512..0000000 --- a/sahara_plugin_ambari/locale/en_GB/LC_MESSAGES/sahara_plugin_ambari.po +++ /dev/null @@ -1,166 +0,0 @@ -# Andi Chandler , 2020. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: sahara-plugin-ambari VERSION\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2020-04-26 20:52+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2020-05-02 09:33+0000\n" -"Last-Translator: Andi Chandler \n" -"Language-Team: English (United Kingdom)\n" -"Language: en_GB\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "%(problem)s: %(description)s" -msgstr "%(problem)s: %(description)s" - -msgid "0 or 1" -msgstr "0 or 1" - -msgid "1 or more" -msgstr "1 or more" - -msgid "3 or more. Odd number" -msgstr "3 or more. Odd number" - -msgid "Add Hadoop Swift jar to instances" -msgstr "Add Hadoop Swift jar to instances" - -msgid "Add new hosts" -msgstr "Add new hosts" - -#, python-format -msgid "Ambari Monitor has responded that cluster has %(red)d critical alert(s)" -msgstr "" -"Ambari Monitor has responded that cluster has %(red)d critical alert(s)" - -#, python-format -msgid "" -"Ambari Monitor has responded that cluster has %(red)d critical and " -"%(yellow)d warning alert(s)" -msgstr "" -"Ambari Monitor has responded that cluster has %(red)d critical and " -"%(yellow)d warning alert(s)" - -#, python-format -msgid "Ambari Monitor has responded that cluster has %d warning alert(s)" -msgstr "Ambari Monitor has responded that cluster has %d warning alert(s)" - -msgid "Ambari Monitor is healthy" -msgstr "Ambari Monitor is healthy" - -msgid "Ambari plugin of {base} or higher required to run {type} jobs" -msgstr "Ambari plugin of {base} or higher required to run {type} jobs" - -#, python-format -msgid "Ambari request in %s state" -msgstr "Ambari request in %s state" - -msgid "At least 3 JournalNodes are required for HA" -msgstr "At least 3 JournalNodes are required for HA" - -msgid "At least 3 ZooKeepers are required for HA" -msgstr "At least 3 ZooKeepers are required for HA" - -msgid "Can't get response from Ambari Monitor" -msgstr "Can't get response from Ambari Monitor" - -msgid "Cleanup config groups" -msgstr "Cleanup config groups" - -msgid "Configure rack awareness" -msgstr "Configure rack awareness" - -msgid "Create Ambari blueprint" -msgstr "Create Ambari blueprint" - -msgid "Decommission NodeManagers and DataNodes" -msgstr "Decommission NodeManagers and DataNodes" - -msgid "Enable HBase RegionServer HA" -msgstr "Enable HBase RegionServer HA" - -msgid "Enable NameNode HA" -msgstr "Enable NameNode HA" - -msgid "Enable ResourceManager HA" -msgstr "Enable ResourceManager HA" - -msgid "Generate config groups" -msgstr "Generate config groups" - -msgid "Install services on hosts" -msgstr "Install services on hosts" - -msgid "No alerts found" -msgstr "No alerts found" - -msgid "Odd number" -msgstr "Odd number" - -msgid "Odd number of JournalNodes are required for HA" -msgstr "Odd number of JournalNodes are required for HA" - -msgid "Odd number of ZooKeepers are required for HA" -msgstr "Odd number of ZooKeepers are required for HA" - -msgid "Prepare Hive" -msgstr "Prepare Hive" - -msgid "Regenerate keytabs for Kerberos" -msgstr "Regenerate keytabs for Kerberos" - -msgid "Remove hosts" -msgstr "Remove hosts" - -msgid "Restart HDFS and MAPREDUCE2 services" -msgstr "Restart HDFS and MAPREDUCE2 services" - -msgid "Restart NameNodes and ResourceManagers" -msgstr "Restart NameNodes and ResourceManagers" - -msgid "Restart of ambari-agent is needed for host {}, reason: {}" 
-msgstr "Restart of ambari-agent is needed for host {}, reason: {}" - -msgid "Set up Ambari agents" -msgstr "Set up Ambari agents" - -msgid "Set up Ambari management console" -msgstr "Set up Ambari management console" - -msgid "Set up HDP repositories" -msgstr "Set up HDP repositories" - -#, python-format -msgid "Some Ambari request(s) not in COMPLETED state: %(description)s." -msgstr "Some Ambari request(s) not in COMPLETED state: %(description)s." - -msgid "Start cluster" -msgstr "Start cluster" - -msgid "Start services on hosts" -msgstr "Start services on hosts" - -msgid "" -"The Ambari Sahara plugin provides the ability to launch clusters with " -"Hortonworks Data Platform (HDP) on OpenStack using Apache Ambari" -msgstr "" -"The Ambari Sahara plugin provides the ability to launch clusters with " -"Hortonworks Data Platform (HDP) on OpenStack using Apache Ambari" - -msgid "Update default Ambari password" -msgstr "Update default Ambari password" - -msgid "Wait Ambari accessible" -msgstr "Wait Ambari accessible" - -msgid "Wait registration of hosts" -msgstr "Wait registration of hosts" - -#, python-format -msgid "request %(id)d: %(name)s - in status %(status)s" -msgstr "request %(id)d: %(name)s - in status %(status)s" diff --git a/sahara_plugin_ambari/locale/id/LC_MESSAGES/sahara_plugin_ambari.po b/sahara_plugin_ambari/locale/id/LC_MESSAGES/sahara_plugin_ambari.po deleted file mode 100644 index 0a78294..0000000 --- a/sahara_plugin_ambari/locale/id/LC_MESSAGES/sahara_plugin_ambari.po +++ /dev/null @@ -1,169 +0,0 @@ -# suhartono , 2019. #zanata -msgid "" -msgstr "" -"Project-Id-Version: sahara-plugin-ambari VERSION\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2019-09-30 09:30+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2019-10-06 02:53+0000\n" -"Last-Translator: suhartono \n" -"Language-Team: Indonesian\n" -"Language: id\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#, python-format -msgid "%(problem)s: %(description)s" -msgstr "%(problem)s: %(description)s" - -msgid "0 or 1" -msgstr "0 or 1" - -msgid "1 or more" -msgstr "1 atau lebih" - -msgid "3 or more. Odd number" -msgstr "3 atau lebih. 
Angka ganjil" - -msgid "Add Hadoop Swift jar to instances" -msgstr "Tambahkan jar Hadoop Swift ke instance" - -msgid "Add new hosts" -msgstr "Tambahkan host baru" - -#, python-format -msgid "Ambari Monitor has responded that cluster has %(red)d critical alert(s)" -msgstr "" -"Ambari Monitor has responded that cluster has %(red)d critical alert(s)" - -#, python-format -msgid "" -"Ambari Monitor has responded that cluster has %(red)d critical and " -"%(yellow)d warning alert(s)" -msgstr "" -"Ambari Monitor telah merespons bahwa cluster telah %(red)d critical dan " -"%(yellow)d warning alert(s)" - -#, python-format -msgid "Ambari Monitor has responded that cluster has %d warning alert(s)" -msgstr "Ambari Monitor telah merespons bahwa cluster telah %d warning alert(s)" - -msgid "Ambari Monitor is healthy" -msgstr "Ambari Monitor sehat" - -msgid "Ambari plugin of {base} or higher required to run {type} jobs" -msgstr "" -"Plugin Ambari dari {base} atau lebih tinggi diperlukan untuk menjalankan " -"jobs {type}" - -#, python-format -msgid "Ambari request in %s state" -msgstr "Ambari meminta dalam %s state" - -msgid "At least 3 JournalNodes are required for HA" -msgstr "Setidaknya 3 JournalNodes diperlukan untuk HA" - -msgid "At least 3 ZooKeepers are required for HA" -msgstr "Setidaknya 3 ZooKeepers diperlukan untuk HA" - -msgid "Can't get response from Ambari Monitor" -msgstr "Tidak dapat mendapat respons dari Ambari Monitor" - -msgid "Cleanup config groups" -msgstr "Bersihkan grup konfigurasi" - -msgid "Configure rack awareness" -msgstr "Konfigurasikan rack awareness" - -msgid "Create Ambari blueprint" -msgstr "Buat cetak biru Ambari" - -msgid "Decommission NodeManagers and DataNodes" -msgstr "Decommission NodeManagers dan DataNodes" - -msgid "Enable HBase RegionServer HA" -msgstr "Aktifkan HBase RegionServer HA" - -msgid "Enable NameNode HA" -msgstr "Aktifkan NameNode HA" - -msgid "Enable ResourceManager HA" -msgstr "Aktifkan ResourceManager HA" - -msgid "Generate config groups" -msgstr "Hasilkan grup konfigurasi" - -msgid "Install services on hosts" -msgstr "Instal layanan di host" - -msgid "No alerts found" -msgstr "Tidak ada lansiran (alerts) yang ditemukan" - -msgid "Odd number" -msgstr "Angka ganjil" - -msgid "Odd number of JournalNodes are required for HA" -msgstr "Jumlah Aneh JournalNodes diperlukan untuk HA" - -msgid "Odd number of ZooKeepers are required for HA" -msgstr "Angka ganjil dari ZooKeepers diperlukan untuk HA" - -msgid "Prepare Hive" -msgstr "Siapkan Hive" - -msgid "Regenerate keytabs for Kerberos" -msgstr "Regenerasi keytabs untuk Kerberos" - -msgid "Remove hosts" -msgstr "Hapus host" - -msgid "Restart HDFS and MAPREDUCE2 services" -msgstr "Restart layanan HDFS dan MAPREDUCE2" - -msgid "Restart NameNodes and ResourceManagers" -msgstr "Restart NameNodes dan ResourceManagers" - -msgid "Restart of ambari-agent is needed for host {}, reason: {}" -msgstr "Restart agen ambari diperlukan untuk host {}, reason: {}" - -msgid "Set up Ambari agents" -msgstr "Menyiapkan agen Ambari" - -msgid "Set up Ambari management console" -msgstr "Siapkan konsol manajemen Ambari" - -msgid "Set up HDP repositories" -msgstr "Siapkan repositori HDP" - -#, python-format -msgid "Some Ambari request(s) not in COMPLETED state: %(description)s." -msgstr "" -"Beberapa permintaan Ambari tidak dalam keadaan COMPLETED: %(description)s." 
- -msgid "Start cluster" -msgstr "Mulai cluster" - -msgid "Start services on hosts" -msgstr "Mulai layanan di host" - -msgid "" -"The Ambari Sahara plugin provides the ability to launch clusters with " -"Hortonworks Data Platform (HDP) on OpenStack using Apache Ambari" -msgstr "" -"Plugin Ambari Sahara menyediakan kemampuan untuk meluncurkan cluster dengan " -"Hortonworks Data Platform (HDP) di OpenStack menggunakan Apache Ambari" - -msgid "Update default Ambari password" -msgstr "Perbarui kata sandi Ambari standar" - -msgid "Wait Ambari accessible" -msgstr "Tunggu Ambari dapat diakses" - -msgid "Wait registration of hosts" -msgstr "Tunggu pendaftaran host" - -#, python-format -msgid "request %(id)d: %(name)s - in status %(status)s" -msgstr "permintaan %(id)d: %(name)s - dalam status %(status)s" diff --git a/sahara_plugin_ambari/plugins/__init__.py b/sahara_plugin_ambari/plugins/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/sahara_plugin_ambari/plugins/ambari/__init__.py b/sahara_plugin_ambari/plugins/ambari/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/sahara_plugin_ambari/plugins/ambari/client.py b/sahara_plugin_ambari/plugins/ambari/client.py deleted file mode 100644 index 5d5fb9d..0000000 --- a/sahara_plugin_ambari/plugins/ambari/client.py +++ /dev/null @@ -1,363 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from oslo_log import log as logging -from oslo_serialization import jsonutils -from requests import auth - -from sahara.plugins import context -from sahara.plugins import exceptions as p_exc -from sahara_plugin_ambari.i18n import _ -from sahara_plugin_ambari.plugins.ambari import requests_helper as r_helper - - -LOG = logging.getLogger(__name__) - - -class AmbariNotFound(Exception): - pass - - -class AmbariClient(object): - def __init__(self, instance, port="8080", **kwargs): - kwargs.setdefault("username", "admin") - kwargs.setdefault("password", "admin") - - self._port = port - self._base_url = "http://{host}:{port}/api/v1".format( - host=instance.management_ip, port=port) - self._instance = instance - self._http_client = instance.remote().get_http_client(port) - self._headers = {"X-Requested-By": "sahara"} - self._auth = auth.HTTPBasicAuth(kwargs["username"], kwargs["password"]) - self._default_client_args = {"verify": False, "auth": self._auth, - "headers": self._headers} - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def close(self): - self._instance.remote().close_http_session(self._port) - - def get(self, *args, **kwargs): - kwargs.update(self._default_client_args) - return self._http_client.get(*args, **kwargs) - - def post(self, *args, **kwargs): - kwargs.update(self._default_client_args) - return self._http_client.post(*args, **kwargs) - - def put(self, *args, **kwargs): - kwargs.update(self._default_client_args) - return self._http_client.put(*args, **kwargs) - - def delete(self, *args, **kwargs): - kwargs.update(self._default_client_args) - return self._http_client.delete(*args, **kwargs) - - def get_alerts_data(self, cluster): - url = self._base_url + "/clusters/%s/alerts?fields=*" % cluster.name - resp = self.get(url) - data = self.check_response(resp) - return data.get('items', []) - - @staticmethod - def check_response(resp, handle_not_found=False): - if handle_not_found and resp.status_code == 404: - raise AmbariNotFound() - resp.raise_for_status() - if resp.text: - return jsonutils.loads(resp.text) - - @staticmethod - def req_id(response): - if not response.text: - raise p_exc.HadoopProvisionError("Cannot find request id. " - "No response body") - body = jsonutils.loads(response.text) - if "Requests" not in body or "id" not in body["Requests"]: - raise p_exc.HadoopProvisionError("Cannot find request id. 
" - "Unexpected response format") - return body["Requests"]["id"] - - def import_credential(self, cl_name, alias, data): - url = self._base_url + "/clusters/%s/credentials/%s" % (cl_name, alias) - resp = self.post(url, data=jsonutils.dumps(data)) - self.check_response(resp) - - def get_credential(self, cl_name, alias): - url = self._base_url + "/clusters/%s/credentials/%s" % (cl_name, alias) - resp = self.get(url) - self.check_response(resp, handle_not_found=True) - - def regenerate_keytabs(self, cl_name): - url = (self._base_url + - "/clusters/%s?regenerate_keytabs=missing" % cl_name) - data = jsonutils.dumps({"Clusters": {"security_type": "KERBEROS"}}) - resp = self.put(url, data=data) - self.check_response(resp) - return self.req_id(resp) - - def get_registered_hosts(self): - url = self._base_url + "/hosts" - resp = self.get(url) - data = self.check_response(resp) - return data.get("items", []) - - def get_host_info(self, host): - url = self._base_url + "/hosts/%s" % host - resp = self.get(url) - data = self.check_response(resp) - return data.get("Hosts", {}) - - def update_user_password(self, user, old_password, new_password): - url = self._base_url + "/users/%s" % user - data = jsonutils.dumps({ - "Users": { - "old_password": old_password, - "password": new_password - } - }) - resp = self.put(url, data=data) - self.check_response(resp) - - def create_blueprint(self, name, data): - url = self._base_url + "/blueprints/%s" % name - resp = self.post(url, data=jsonutils.dumps(data)) - return self.check_response(resp) - - def create_cluster(self, name, data): - url = self._base_url + "/clusters/%s" % name - resp = self.post(url, data=jsonutils.dumps(data)) - return self.check_response(resp).get("Requests") - - def add_host_to_cluster(self, instance): - cluster_name = instance.cluster.name - hostname = instance.fqdn() - url = self._base_url + "/clusters/{cluster}/hosts/{hostname}".format( - cluster=cluster_name, hostname=hostname) - resp = self.post(url) - self.check_response(resp) - - def get_config_groups(self, cluster): - url = self._base_url + "/clusters/%s/config_groups" % cluster.name - resp = self.get(url) - return self.check_response(resp) - - def get_detailed_config_group(self, cluster, cfg_id): - url = self._base_url + "/clusters/%s/config_groups/%s" % ( - cluster.name, cfg_id) - resp = self.get(url) - return self.check_response(resp) - - def remove_config_group(self, cluster, cfg_id): - url = self._base_url + "/clusters/%s/config_groups/%s" % ( - cluster.name, cfg_id) - resp = self.delete(url) - return self.check_response(resp) - - def create_config_group(self, cluster, data): - url = self._base_url + "/clusters/%s/config_groups" % cluster.name - resp = self.post(url, data=jsonutils.dumps(data)) - return self.check_response(resp) - - def add_service_to_host(self, inst, service): - url = "{pref}/clusters/{cluster}/hosts/{host}/host_components/{proc}" - url = url.format(pref=self._base_url, cluster=inst.cluster.name, - host=inst.fqdn(), proc=service) - self.check_response(self.post(url)) - - def start_service_on_host(self, inst, service, final_state): - url = "{pref}/clusters/{cluster}/hosts/{host}/host_components/{proc}" - url = url.format( - pref=self._base_url, cluster=inst.cluster.name, host=inst.fqdn(), - proc=service) - data = { - 'HostRoles': { - 'state': final_state - }, - 'RequestInfo': { - 'context': "Starting service {service}, moving to state " - "{state}".format(service=service, state=final_state) - } - } - resp = self.put(url, data=jsonutils.dumps(data)) - 
self.check_response(resp) - # return req_id to check health of request - return self.req_id(resp) - - def decommission_nodemanagers(self, cluster_name, instances): - url = self._base_url + "/clusters/%s/requests" % cluster_name - data = r_helper.build_nodemanager_decommission_request(cluster_name, - instances) - resp = self.post(url, data=jsonutils.dumps(data)) - self.wait_ambari_request(self.req_id(resp), cluster_name) - - def decommission_datanodes(self, cluster_name, instances): - url = self._base_url + "/clusters/%s/requests" % cluster_name - data = r_helper.build_datanode_decommission_request(cluster_name, - instances) - resp = self.post(url, data=jsonutils.dumps(data)) - self.wait_ambari_request(self.req_id(resp), cluster_name) - - def remove_process_from_host(self, cluster_name, instance, process): - url = self._base_url + "/clusters/%s/hosts/%s/host_components/%s" % ( - cluster_name, instance.fqdn(), process) - resp = self.delete(url) - - return self.check_response(resp) - - def stop_process_on_host(self, cluster_name, instance, process): - url = self._base_url + "/clusters/%s/hosts/%s/host_components/%s" % ( - cluster_name, instance.fqdn(), process) - check_installed_resp = self.check_response(self.get(url)) - - if check_installed_resp["HostRoles"]["state"] != "INSTALLED": - data = {"HostRoles": {"state": "INSTALLED"}, - "RequestInfo": {"context": "Stopping %s" % process}} - resp = self.put(url, data=jsonutils.dumps(data)) - - self.wait_ambari_request(self.req_id(resp), cluster_name) - - def restart_namenode(self, cluster_name, instance): - url = self._base_url + "/clusters/%s/requests" % cluster_name - data = r_helper.build_namenode_restart_request(cluster_name, instance) - resp = self.post(url, data=jsonutils.dumps(data)) - self.wait_ambari_request(self.req_id(resp), cluster_name) - - def restart_resourcemanager(self, cluster_name, instance): - url = self._base_url + "/clusters/%s/requests" % cluster_name - data = r_helper.build_resourcemanager_restart_request(cluster_name, - instance) - resp = self.post(url, data=jsonutils.dumps(data)) - self.wait_ambari_request(self.req_id(resp), cluster_name) - - def restart_service(self, cluster_name, service_name): - url = self._base_url + "/clusters/{}/services/{}".format( - cluster_name, service_name) - - data = r_helper.build_stop_service_request(service_name) - resp = self.put(url, data=jsonutils.dumps(data)) - self.wait_ambari_request(self.req_id(resp), cluster_name) - - data = r_helper.build_start_service_request(service_name) - resp = self.put(url, data=jsonutils.dumps(data)) - self.wait_ambari_request(self.req_id(resp), cluster_name) - - def delete_host(self, cluster_name, instance): - url = self._base_url + "/clusters/%s/hosts/%s" % (cluster_name, - instance.fqdn()) - resp = self.delete(url) - return self.check_response(resp) - - def check_request_status(self, cluster_name, req_id): - url = self._base_url + "/clusters/%s/requests/%d" % (cluster_name, - req_id) - resp = self.get(url) - return self.check_response(resp).get("Requests") - - def list_host_processes(self, cluster_name, instance): - url = self._base_url + "/clusters/%s/hosts/%s" % ( - cluster_name, instance.fqdn()) - resp = self.get(url) - body = jsonutils.loads(resp.text) - - procs = [p["HostRoles"]["component_name"] - for p in body["host_components"]] - return procs - - def set_up_mirror(self, stack_version, os_type, repo_id, repo_url): - url = self._base_url + ( - "/stacks/HDP/versions/%s/operating_systems/%s/repositories/%s") % ( - stack_version, os_type, repo_id) - 
data = { - "Repositories": { - "base_url": repo_url, - "verify_base_url": True - } - } - resp = self.put(url, data=jsonutils.dumps(data)) - self.check_response(resp) - - def set_rack_info_for_instance(self, cluster_name, instance, rack_name): - url = self._base_url + "/clusters/%s/hosts/%s" % ( - cluster_name, instance.fqdn()) - data = { - "Hosts": { - "rack_info": rack_name - } - } - resp = self.put(url, data=jsonutils.dumps(data)) - self.check_response(resp) - - def get_request_info(self, cluster_name, request_id): - url = self._base_url + ("/clusters/%s/requests/%s" % - (cluster_name, request_id)) - resp = self.check_response(self.get(url)) - return resp.get('Requests') - - def wait_ambari_requests(self, requests, cluster_name): - requests = set(requests) - failed = [] - context.sleep(20) - while len(requests) > 0: - completed, not_completed = set(), set() - for req_id in requests: - request = self.get_request_info(cluster_name, req_id) - status = request.get("request_status") - if status == 'COMPLETED': - completed.add(req_id) - elif status in ['IN_PROGRESS', 'PENDING']: - not_completed.add(req_id) - else: - failed.append(request) - if failed: - msg = _("Some Ambari request(s) " - "not in COMPLETED state: %(description)s.") - descrs = [] - for req in failed: - descr = _( - "request %(id)d: %(name)s - in status %(status)s") - descrs.append(descr % - {'id': req.get("id"), - 'name': req.get("request_context"), - 'status': req.get("request_status")}) - raise p_exc.HadoopProvisionError(msg % {'description': descrs}) - requests = not_completed - context.sleep(5) - LOG.debug("Waiting for %d ambari request(s) to be completed", - len(not_completed)) - LOG.debug("All ambari requests have been completed") - - def wait_ambari_request(self, request_id, cluster_name): - context.sleep(20) - while True: - status = self.check_request_status(cluster_name, request_id) - LOG.debug("Task %(context)s in %(status)s state. " - "Completed %(percent).1f%%", - {'context': status["request_context"], - 'status': status["request_status"], - 'percent': status["progress_percent"]}) - if status["request_status"] == "COMPLETED": - return - if status["request_status"] in ["IN_PROGRESS", "PENDING"]: - context.sleep(5) - else: - raise p_exc.HadoopProvisionError( - _("Ambari request in %s state") % status["request_status"]) diff --git a/sahara_plugin_ambari/plugins/ambari/common.py b/sahara_plugin_ambari/plugins/ambari/common.py deleted file mode 100644 index 8250858..0000000 --- a/sahara_plugin_ambari/plugins/ambari/common.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
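The wait_ambari_request and wait_ambari_requests loops at the end of client.py above follow a standard polling pattern: an initial grace period, then repeated status checks until a terminal state is reached. A generic, self-contained sketch of the same idea, with illustrative names only:

import time

def wait_until_completed(get_status, initial_delay=20, interval=5):
    # Mirrors AmbariClient.wait_ambari_request: COMPLETED succeeds,
    # IN_PROGRESS/PENDING keep polling, any other state is a failure.
    time.sleep(initial_delay)
    while True:
        status = get_status()
        if status == "COMPLETED":
            return
        if status in ("IN_PROGRESS", "PENDING"):
            time.sleep(interval)
        else:
            raise RuntimeError("Ambari request in %s state" % status)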
- -from sahara.plugins import kerberos - -# define service names - -AMBARI_SERVICE = "Ambari" -FALCON_SERVICE = "Falcon" -FLUME_SERVICE = "Flume" -HBASE_SERVICE = "HBase" -HDFS_SERVICE = "HDFS" -HIVE_SERVICE = "Hive" -KAFKA_SERVICE = "Kafka" -KNOX_SERVICE = "Knox" -MAPREDUCE2_SERVICE = "MAPREDUCE2" -OOZIE_SERVICE = "Oozie" -RANGER_SERVICE = "Ranger" -SLIDER_SERVICE = "Slider" -SPARK_SERVICE = "Spark" -SQOOP_SERVICE = "Sqoop" -STORM_SERVICE = "Storm" -YARN_SERVICE = "YARN" -ZOOKEEPER_SERVICE = "ZooKeeper" - -# define process names - -AMBARI_SERVER = "Ambari" -APP_TIMELINE_SERVER = "YARN Timeline Server" -DATANODE = "DataNode" -DRPC_SERVER = "DRPC Server" -FALCON_SERVER = "Falcon Server" -FLUME_HANDLER = "Flume" -HBASE_MASTER = "HBase Master" -HBASE_REGIONSERVER = "HBase RegionServer" -HISTORYSERVER = "MapReduce History Server" -HIVE_METASTORE = "Hive Metastore" -HIVE_SERVER = "HiveServer" -KAFKA_BROKER = "Kafka Broker" -KNOX_GATEWAY = "Knox Gateway" -NAMENODE = "NameNode" -NIMBUS = "Nimbus" -NODEMANAGER = "NodeManager" -OOZIE_SERVER = "Oozie" -RANGER_ADMIN = "Ranger Admin" -RANGER_USERSYNC = "Ranger Usersync" -RESOURCEMANAGER = "ResourceManager" -SECONDARY_NAMENODE = "SecondaryNameNode" -SLIDER = "Slider" -SPARK_JOBHISTORYSERVER = "Spark History Server" -SQOOP = "Sqoop" -STORM_UI_SERVER = "Storm UI Server" -SUPERVISOR = "Supervisor" -ZOOKEEPER_SERVER = "ZooKeeper" -JOURNAL_NODE = "JournalNode" - - -PROC_MAP = { - AMBARI_SERVER: ["METRICS_COLLECTOR"], - APP_TIMELINE_SERVER: ["APP_TIMELINE_SERVER"], - DATANODE: ["DATANODE"], - DRPC_SERVER: ["DRPC_SERVER"], - FALCON_SERVER: ["FALCON_SERVER"], - HBASE_MASTER: ["HBASE_MASTER"], - HBASE_REGIONSERVER: ["HBASE_REGIONSERVER"], - HISTORYSERVER: ["HISTORYSERVER"], - HIVE_METASTORE: ["HIVE_METASTORE"], - HIVE_SERVER: ["HIVE_SERVER", "MYSQL_SERVER", "WEBHCAT_SERVER"], - KAFKA_BROKER: ["KAFKA_BROKER"], - KNOX_GATEWAY: ["KNOX_GATEWAY"], - NAMENODE: ["NAMENODE"], - NIMBUS: ["NIMBUS"], - NODEMANAGER: ["NODEMANAGER"], - OOZIE_SERVER: ["OOZIE_SERVER", "PIG"], - RANGER_ADMIN: ["RANGER_ADMIN"], - RANGER_USERSYNC: ["RANGER_USERSYNC"], - RESOURCEMANAGER: ["RESOURCEMANAGER"], - SECONDARY_NAMENODE: ["SECONDARY_NAMENODE"], - SLIDER: ["SLIDER"], - SPARK_JOBHISTORYSERVER: ["SPARK_JOBHISTORYSERVER"], - SQOOP: ["SQOOP"], - STORM_UI_SERVER: ["STORM_UI_SERVER"], - SUPERVISOR: ["SUPERVISOR"], - ZOOKEEPER_SERVER: ["ZOOKEEPER_SERVER"], - JOURNAL_NODE: ["JOURNALNODE"] -} - -CLIENT_MAP = { - APP_TIMELINE_SERVER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"], - DATANODE: ["HDFS_CLIENT"], - FALCON_SERVER: ["FALCON_CLIENT"], - FLUME_HANDLER: ["FLUME_HANDLER"], - HBASE_MASTER: ["HBASE_CLIENT"], - HBASE_REGIONSERVER: ["HBASE_CLIENT"], - HISTORYSERVER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"], - HIVE_METASTORE: ["HIVE_CLIENT"], - HIVE_SERVER: ["HIVE_CLIENT"], - NAMENODE: ["HDFS_CLIENT"], - NODEMANAGER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"], - OOZIE_SERVER: ["OOZIE_CLIENT", "TEZ_CLIENT"], - RESOURCEMANAGER: ["MAPREDUCE2_CLIENT", "YARN_CLIENT"], - SECONDARY_NAMENODE: ["HDFS_CLIENT"], - SPARK_JOBHISTORYSERVER: ["SPARK_CLIENT"], - ZOOKEEPER_SERVER: ["ZOOKEEPER_CLIENT"] -} - -KERBEROS_CLIENT = 'KERBEROS_CLIENT' -ALL_LIST = ["METRICS_MONITOR"] - -# types of HA -NAMENODE_HA = "NameNode HA" -RESOURCEMANAGER_HA = "ResourceManager HA" -HBASE_REGIONSERVER_HA = "HBase RegionServer HA" - - -def get_ambari_proc_list(node_group): - procs = [] - for sp in node_group.node_processes: - procs.extend(PROC_MAP.get(sp, [])) - return procs - - -def get_clients(cluster): - procs = [] - for ng in cluster.node_groups: - 
procs.extend(ng.node_processes) - - clients = [] - for proc in procs: - clients.extend(CLIENT_MAP.get(proc, [])) - clients = list(set(clients)) - clients.extend(ALL_LIST) - if kerberos.is_kerberos_security_enabled(cluster): - clients.append(KERBEROS_CLIENT) - return clients - - -def instances_have_process(instances, process): - for i in instances: - if process in i.node_group.node_processes: - return True - - return False diff --git a/sahara_plugin_ambari/plugins/ambari/configs.py b/sahara_plugin_ambari/plugins/ambari/configs.py deleted file mode 100644 index ec63256..0000000 --- a/sahara_plugin_ambari/plugins/ambari/configs.py +++ /dev/null @@ -1,333 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from oslo_serialization import jsonutils - -from sahara.plugins import provisioning -from sahara.plugins import swift_helper -from sahara.plugins import utils -from sahara_plugin_ambari.i18n import _ -from sahara_plugin_ambari.plugins.ambari import common - - -CONFIGS = {} -OBJ_CONFIGS = {} -CFG_PROCESS_MAP = { - "admin-properties": common.RANGER_SERVICE, - "ams-env": common.AMBARI_SERVICE, - "ams-hbase-env": common.AMBARI_SERVICE, - "ams-hbase-policy": common.AMBARI_SERVICE, - "ams-hbase-security-site": common.AMBARI_SERVICE, - "ams-hbase-site": common.AMBARI_SERVICE, - "ams-site": common.AMBARI_SERVICE, - "capacity-scheduler": common.YARN_SERVICE, - "cluster-env": "general", - "core-site": common.HDFS_SERVICE, - "falcon-env": common.FALCON_SERVICE, - "falcon-runtime.properties": common.FALCON_SERVICE, - "falcon-startup.properties": common.FALCON_SERVICE, - "flume-env": common.FLUME_SERVICE, - "gateway-site": common.KNOX_SERVICE, - "hadoop-env": common.HDFS_SERVICE, - "hadoop-policy": common.HDFS_SERVICE, - "hbase-env": common.HBASE_SERVICE, - "hbase-policy": common.HBASE_SERVICE, - "hbase-site": common.HBASE_SERVICE, - "hdfs-site": common.HDFS_SERVICE, - "hive-env": common.HIVE_SERVICE, - "hive-site": common.HIVE_SERVICE, - "hiveserver2-site": common.HIVE_SERVICE, - "kafka-broker": common.KAFKA_SERVICE, - "kafka-env": common.KAFKA_SERVICE, - "knox-env": common.KNOX_SERVICE, - "mapred-env": common.YARN_SERVICE, - "mapred-site": common.YARN_SERVICE, - "oozie-env": common.OOZIE_SERVICE, - "oozie-site": common.OOZIE_SERVICE, - "ranger-env": common.RANGER_SERVICE, - "ranger-hbase-plugin-properties": common.HBASE_SERVICE, - "ranger-hdfs-plugin-properties": common.HDFS_SERVICE, - "ranger-hive-plugin-properties": common.HIVE_SERVICE, - "ranger-knox-plugin-properties": common.KNOX_SERVICE, - "ranger-site": common.RANGER_SERVICE, - "ranger-storm-plugin-properties": common.STORM_SERVICE, - "spark-defaults": common.SPARK_SERVICE, - "spark-env": common.SPARK_SERVICE, - "sqoop-env": common.SQOOP_SERVICE, - "storm-env": common.STORM_SERVICE, - "storm-site": common.STORM_SERVICE, - "tez-site": common.OOZIE_SERVICE, - "usersync-properties": common.RANGER_SERVICE, - "yarn-env": common.YARN_SERVICE, - "yarn-site": common.YARN_SERVICE, - "zoo.cfg": 
common.ZOOKEEPER_SERVICE, - "zookeeper-env": common.ZOOKEEPER_SERVICE -} - - -SERVICES_TO_CONFIGS_MAP = None - - -def get_service_to_configs_map(): - global SERVICES_TO_CONFIGS_MAP - if SERVICES_TO_CONFIGS_MAP: - return SERVICES_TO_CONFIGS_MAP - data = {} - for (key, item) in CFG_PROCESS_MAP.items(): - if item not in data: - data[item] = [] - data[item].append(key) - SERVICES_TO_CONFIGS_MAP = data - return SERVICES_TO_CONFIGS_MAP - - -ng_confs = [ - "dfs.datanode.data.dir", - "dtnode_heapsize", - "mapreduce.map.java.opts", - "mapreduce.map.memory.mb", - "mapreduce.reduce.java.opts", - "mapreduce.reduce.memory.mb", - "mapreduce.task.io.sort.mb", - "nodemanager_heapsize", - "yarn.app.mapreduce.am.command-opts", - "yarn.app.mapreduce.am.resource.mb", - "yarn.nodemanager.resource.cpu-vcores", - "yarn.nodemanager.resource.memory-mb", - "yarn.scheduler.maximum-allocation-mb", - "yarn.scheduler.minimum-allocation-mb" -] - - -use_base_repos_cfg = provisioning.Config( - "Enable external repos on instances", 'general', 'cluster', priority=1, - default_value=True, config_type="bool") -hdp_repo_cfg = provisioning.Config( - "HDP repo URL", "general", "cluster", priority=1, default_value="") -hdp_utils_repo_cfg = provisioning.Config( - "HDP-UTILS repo URL", "general", "cluster", priority=1, default_value="") -autoconfigs_strategy = provisioning.Config( - "Auto-configuration strategy", 'general', 'cluster', priority=1, - config_type='dropdown', - default_value='NEVER_APPLY', - config_values=[(v, v) for v in [ - 'NEVER_APPLY', 'ALWAYS_APPLY', 'ONLY_STACK_DEFAULTS_APPLY', - ]], -) -ambari_pkg_install_timeout = provisioning.Config( - "Ambari Agent Package Install timeout", "general", "cluster", - priority=1, default_value="1800") - - -def _get_service_name(service): - return CFG_PROCESS_MAP.get(service, service) - - -def _get_config_group(group, param, plugin_version): - if not CONFIGS or plugin_version not in CONFIGS: - load_configs(plugin_version) - for section, process in CFG_PROCESS_MAP.items(): - if process == group and param in CONFIGS[plugin_version][section]: - return section - - -def _get_param_scope(param): - if param in ng_confs: - return "node" - else: - return "cluster" - - -def _get_ha_params(): - enable_namenode_ha = provisioning.Config( - name=common.NAMENODE_HA, - applicable_target="general", - scope="cluster", - config_type="bool", - default_value=False, - is_optional=True, - description=_("Enable NameNode HA"), - priority=1) - - enable_resourcemanager_ha = provisioning.Config( - name=common.RESOURCEMANAGER_HA, - applicable_target="general", - scope="cluster", - config_type="bool", - default_value=False, - is_optional=True, - description=_("Enable ResourceManager HA"), - priority=1) - - enable_regionserver_ha = provisioning.Config( - name=common.HBASE_REGIONSERVER_HA, - applicable_target="general", - scope="cluster", - config_type="bool", - default_value=False, - is_optional=True, - description=_("Enable HBase RegionServer HA"), - priority=1) - - return [enable_namenode_ha, - enable_resourcemanager_ha, - enable_regionserver_ha] - - -def load_configs(version): - if OBJ_CONFIGS.get(version): - return OBJ_CONFIGS[version] - cfg_path = "plugins/ambari/resources/configs-%s.json" % version - vanilla_cfg = jsonutils.loads(utils.get_file_text(cfg_path, - 'sahara_plugin_ambari')) - CONFIGS[version] = vanilla_cfg - sahara_cfg = [hdp_repo_cfg, hdp_utils_repo_cfg, use_base_repos_cfg, - autoconfigs_strategy, ambari_pkg_install_timeout] - for service, confs in vanilla_cfg.items(): - for k, v in 
confs.items(): - sahara_cfg.append(provisioning.Config( - k, _get_service_name(service), _get_param_scope(k), - default_value=v)) - - sahara_cfg.extend(_get_ha_params()) - OBJ_CONFIGS[version] = sahara_cfg - return sahara_cfg - - -def _get_config_value(cluster, key): - return cluster.cluster_configs.get("general", {}).get(key.name, - key.default_value) - - -def use_base_repos_needed(cluster): - return _get_config_value(cluster, use_base_repos_cfg) - - -def get_hdp_repo_url(cluster): - return _get_config_value(cluster, hdp_repo_cfg) - - -def get_hdp_utils_repo_url(cluster): - return _get_config_value(cluster, hdp_utils_repo_cfg) - - -def get_auto_configuration_strategy(cluster): - return _get_config_value(cluster, autoconfigs_strategy) - - -def get_ambari_pkg_install_timeout(cluster): - return _get_config_value(cluster, ambari_pkg_install_timeout) - - -def _serialize_ambari_configs(configs): - return list(map(lambda x: {x: configs[x]}, configs)) - - -def _create_ambari_configs(sahara_configs, plugin_version): - configs = {} - for service, params in sahara_configs.items(): - if service == "general" or service == "Kerberos": - # General and Kerberos configs are designed for Sahara, not for - # the plugin - continue - for k, v in params.items(): - group = _get_config_group(service, k, plugin_version) - configs.setdefault(group, {}) - configs[group].update({k: v}) - return configs - - -def _make_paths(dirs, suffix): - return ",".join([d + suffix for d in dirs]) - - -def get_instance_params_mapping(inst): - configs = _create_ambari_configs(inst.node_group.node_configs, - inst.node_group.cluster.hadoop_version) - storage_paths = inst.storage_paths() - configs.setdefault("hdfs-site", {}) - configs["hdfs-site"]["dfs.datanode.data.dir"] = _make_paths( - storage_paths, "/hdfs/data") - configs["hdfs-site"]["dfs.journalnode.edits.dir"] = _make_paths( - storage_paths, "/hdfs/journalnode") - configs["hdfs-site"]["dfs.namenode.checkpoint.dir"] = _make_paths( - storage_paths, "/hdfs/namesecondary") - configs["hdfs-site"]["dfs.namenode.name.dir"] = _make_paths( - storage_paths, "/hdfs/namenode") - configs.setdefault("yarn-site", {}) - configs["yarn-site"]["yarn.nodemanager.local-dirs"] = _make_paths( - storage_paths, "/yarn/local") - configs["yarn-site"]["yarn.nodemanager.log-dirs"] = _make_paths( - storage_paths, "/yarn/log") - configs["yarn-site"][ - "yarn.timeline-service.leveldb-timeline-store.path"] = _make_paths( - storage_paths, "/yarn/timeline") - configs.setdefault("oozie-site", {}) - configs["oozie-site"][ - "oozie.service.AuthorizationService.security.enabled"] = "false" - return configs - - -def get_instance_params(inst): - return _serialize_ambari_configs(get_instance_params_mapping(inst)) - - -def get_cluster_params(cluster): - configs = _create_ambari_configs(cluster.cluster_configs, - cluster.hadoop_version) - swift_configs = {x["name"]: x["value"] - for x in swift_helper.get_swift_configs()} - configs.setdefault("core-site", {}) - configs["core-site"].update(swift_configs) - if utils.get_instance(cluster, common.RANGER_ADMIN): - configs.setdefault("admin-properties", {}) - configs["admin-properties"]["db_root_password"] = ( - cluster.extra["ranger_db_password"]) - return _serialize_ambari_configs(configs) - - -def get_config_group(instance): - params = get_instance_params_mapping(instance) - groups = [] - for (service, targets) in get_service_to_configs_map().items(): - current_group = { - 'cluster_name': instance.cluster.name, - 'group_name': "%s:%s" % ( - instance.cluster.name, 
instance.instance_name), - 'tag': service, - 'description': "Config group for scaled " - "node %s" % instance.instance_name, - 'hosts': [ - { - 'host_name': instance.fqdn() - } - ], - 'desired_configs': [] - } - at_least_one_added = False - for target in targets: - configs = params.get(target, {}) - if configs: - current_group['desired_configs'].append({ - 'type': target, - 'properties': configs, - 'tag': instance.instance_name - }) - at_least_one_added = True - if at_least_one_added: - # Config Group without overridden data is not interesting - groups.append({'ConfigGroup': current_group}) - return groups diff --git a/sahara_plugin_ambari/plugins/ambari/deploy.py b/sahara_plugin_ambari/plugins/ambari/deploy.py deleted file mode 100644 index 32ab067..0000000 --- a/sahara_plugin_ambari/plugins/ambari/deploy.py +++ /dev/null @@ -1,723 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import functools -import telnetlib # nosec - -from oslo_log import log as logging -from oslo_utils import uuidutils - -from sahara.plugins import conductor -from sahara.plugins import context -from sahara.plugins import kerberos -from sahara.plugins import topology_helper as t_helper -from sahara.plugins import utils as plugin_utils -from sahara_plugin_ambari.i18n import _ -from sahara_plugin_ambari.plugins.ambari import client as ambari_client -from sahara_plugin_ambari.plugins.ambari import common as p_common -from sahara_plugin_ambari.plugins.ambari import configs -from sahara_plugin_ambari.plugins.ambari import ha_helper - - -LOG = logging.getLogger(__name__) - - -repo_id_map = { - "2.3": { - "HDP": "HDP-2.3", - "HDP-UTILS": "HDP-UTILS-1.1.0.20" - }, - "2.4": { - "HDP": "HDP-2.4", - "HDP-UTILS": "HDP-UTILS-1.1.0.20" - }, - "2.5": { - "HDP": "HDP-2.5", - "HDP-UTILS": "HDP-UTILS-1.1.0.21" - }, - "2.6": { - "HDP": "HDP-2.6", - "HDP-UTILS": "HDP-UTILS-1.1.0.22" - }, -} - -os_type_map = { - "centos6": "redhat6", - "redhat6": "redhat6", - "centos7": "redhat7", - "redhat7": "redhat7", - "ubuntu14": "ubuntu14" -} - - -@plugin_utils.event_wrapper(True, step=_("Set up Ambari management console"), - param=('cluster', 0)) -def setup_ambari(cluster): - LOG.debug("Set up Ambari management console") - ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER) - ambari_settings = ("agent.package.install.task.timeout=%s" - % configs.get_ambari_pkg_install_timeout(cluster)) - with ambari.remote() as r: - sudo = functools.partial(r.execute_command, run_as_root=True) - sudo("rngd -r /dev/urandom -W 4096") - r.replace_remote_line("/etc/ambari-server/conf/ambari.properties", - "agent.package.install.task.timeout=", - ambari_settings) - sudo("ambari-server setup -s -j" - " `cut -f2 -d \"=\" /etc/profile.d/99-java.sh`", timeout=1800) - # the following change must be after ambari-setup, or it would be - # overwritten (probably because it's not part of the base set of - # keywords/values handled by ambari-setup). 
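# Concretely: "ambari-server setup" appears to regenerate ambari.properties
# from its own template, so appending server.startup.web.timeout before the
# setup call would silently discard the value.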
- r.append_to_file("/etc/ambari-server/conf/ambari.properties", - "server.startup.web.timeout=180", run_as_root=True) - redirect_file = "/tmp/%s" % uuidutils.generate_uuid() - sudo("service ambari-server start >{rfile} && " - "cat {rfile} && rm {rfile}".format(rfile=redirect_file)) - LOG.debug("Ambari management console installed") - - -def setup_agents(cluster, instances=None): - LOG.debug("Set up Ambari agents") - manager_address = plugin_utils.get_instance( - cluster, p_common.AMBARI_SERVER).fqdn() - if not instances: - instances = plugin_utils.get_instances(cluster) - _setup_agents(instances, manager_address) - - -def _setup_agents(instances, manager_address): - plugin_utils.add_provisioning_step( - instances[0].cluster.id, _("Set up Ambari agents"), len(instances)) - with context.PluginsThreadGroup() as tg: - for inst in instances: - tg.spawn("hwx-agent-setup-%s" % inst.id, - _setup_agent, inst, manager_address) - LOG.debug("Ambari agents have been installed") - - -def _disable_repos_on_inst(instance): - with context.set_current_instance_id(instance_id=instance.instance_id): - with instance.remote() as r: - sudo = functools.partial(r.execute_command, run_as_root=True) - if r.get_os_distrib() == "ubuntu": - sudo("mv /etc/apt/sources.list /etc/apt/sources.list.tmp") - else: - tmp_name = "/tmp/yum.repos.d-%s" % instance.instance_id[:8] - # moving to other folder - sudo("mv /etc/yum.repos.d/ {fold_name}".format( - fold_name=tmp_name)) - sudo("mkdir /etc/yum.repos.d") - - -def disable_repos(cluster): - if configs.use_base_repos_needed(cluster): - LOG.debug("Using base repos") - return - instances = plugin_utils.get_instances(cluster) - with context.PluginsThreadGroup() as tg: - for inst in instances: - tg.spawn("disable-repos-%s" % inst.instance_name, - _disable_repos_on_inst, inst) - - -@plugin_utils.event_wrapper(True) -def _setup_agent(instance, ambari_address): - with instance.remote() as r: - sudo = functools.partial(r.execute_command, run_as_root=True) - r.replace_remote_string("/etc/ambari-agent/conf/ambari-agent.ini", - "localhost", ambari_address) - try: - sudo("ambari-agent start") - except Exception as e: - # workaround for ubuntu, because on ubuntu the ambari agent - # starts automatically after image boot - msg = _("Restart of ambari-agent is needed for host {}, " - "reason: {}").format(instance.fqdn(), e) - LOG.exception(msg) - sudo("ambari-agent restart") - # for correct installing packages - r.update_repository() - - -@plugin_utils.event_wrapper(True, step=_("Wait Ambari accessible"), - param=('cluster', 0)) -def wait_ambari_accessible(cluster): - ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER) - kwargs = {"host": ambari.management_ip, "port": 8080} - plugin_utils.poll(_check_port_accessible, kwargs=kwargs, timeout=300) - - -def _check_port_accessible(host, port): - try: - conn = telnetlib.Telnet(host, port) - conn.close() - return True - except IOError: - return False - - -def resolve_package_conflicts(cluster, instances=None): - if not instances: - instances = plugin_utils.get_instances(cluster) - for instance in instances: - with instance.remote() as r: - if r.get_os_distrib() == 'ubuntu': - try: - r.execute_command( - "apt-get remove -y libmysql-java", run_as_root=True) - except Exception: - LOG.warning("Can't remove libmysql-java, " - "it's probably not installed") - - -def _prepare_ranger(cluster): - ranger = plugin_utils.get_instance(cluster, p_common.RANGER_ADMIN) - if not ranger: - return - ambari = plugin_utils.get_instance(cluster, 
p_common.AMBARI_SERVER) - with ambari.remote() as r: - sudo = functools.partial(r.execute_command, run_as_root=True) - sudo("ambari-server setup --jdbc-db=mysql " - "--jdbc-driver=/usr/share/java/mysql-connector-java.jar") - init_db_template = ( - "create user 'root'@'%' identified by '{password}';\n" - "set password for 'root'@'localhost' = password('{password}');") - password = uuidutils.generate_uuid() - extra = cluster.extra.to_dict() if cluster.extra else {} - extra["ranger_db_password"] = password - ctx = context.ctx() - conductor.cluster_update(ctx, cluster, {"extra": extra}) - with ranger.remote() as r: - sudo = functools.partial(r.execute_command, run_as_root=True) - # TODO(sreshetnyak): add ubuntu support - sudo("yum install -y mysql-server") - sudo("service mysqld start") - r.write_file_to("/tmp/init.sql", - init_db_template.format(password=password)) - sudo("mysql < /tmp/init.sql") - sudo("rm /tmp/init.sql") - - -@plugin_utils.event_wrapper(True, - step=_("Prepare Hive"), param=('cluster', 0)) -def prepare_hive(cluster): - hive = plugin_utils.get_instance(cluster, p_common.HIVE_SERVER) - if not hive: - return - with hive.remote() as r: - r.execute_command( - 'sudo su - -c "hadoop fs -mkdir /user/oozie/conf" hdfs') - r.execute_command( - 'sudo su - -c "hadoop fs -copyFromLocal ' - '/etc/hive/conf/hive-site.xml ' - '/user/oozie/conf/hive-site.xml" hdfs') - - -@plugin_utils.event_wrapper(True, step=_("Update default Ambari password"), - param=('cluster', 0)) -def update_default_ambari_password(cluster): - ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER) - new_password = uuidutils.generate_uuid() - with ambari_client.AmbariClient(ambari) as client: - client.update_user_password("admin", "admin", new_password) - extra = cluster.extra.to_dict() if cluster.extra else {} - extra["ambari_password"] = new_password - ctx = context.ctx() - conductor.cluster_update(ctx, cluster, {"extra": extra}) - cluster = conductor.cluster_get(ctx, cluster.id) - - -@plugin_utils.event_wrapper(True, step=_("Wait registration of hosts"), - param=('cluster', 0)) -def wait_host_registration(cluster, instances): - with _get_ambari_client(cluster) as client: - kwargs = {"client": client, "instances": instances} - plugin_utils.poll(_check_host_registration, kwargs=kwargs, - timeout=600) - - -def _check_host_registration(client, instances): - hosts = client.get_registered_hosts() - registered_host_names = [h["Hosts"]["host_name"] for h in hosts] - for instance in instances: - if instance.fqdn() not in registered_host_names: - return False - return True - - -@plugin_utils.event_wrapper(True, step=_("Set up HDP repositories"), - param=('cluster', 0)) -def _set_up_hdp_repos(cluster, hdp_repo, hdp_utils_repo): - ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER) - pv = cluster.hadoop_version - repos = repo_id_map[pv] - with _get_ambari_client(cluster) as client: - os_type = os_type_map[client.get_host_info(ambari.fqdn())["os_type"]] - if hdp_repo: - client.set_up_mirror(pv, os_type, repos["HDP"], hdp_repo) - if hdp_utils_repo: - client.set_up_mirror(pv, os_type, repos["HDP-UTILS"], - hdp_utils_repo) - - -def set_up_hdp_repos(cluster): - hdp_repo = configs.get_hdp_repo_url(cluster) - hdp_utils_repo = configs.get_hdp_utils_repo_url(cluster) - if hdp_repo or hdp_utils_repo: - _set_up_hdp_repos(cluster, hdp_repo, hdp_utils_repo) - - -def get_kdc_server(cluster): - return plugin_utils.get_instance( - cluster, p_common.AMBARI_SERVER) - - -def _prepare_kerberos(cluster, instances=None): 
- if instances is None: - kerberos.deploy_infrastructure(cluster, get_kdc_server(cluster)) - kerberos.prepare_policy_files(cluster) - else: - server = None - if not kerberos.using_existing_kdc(cluster): - server = get_kdc_server(cluster) - kerberos.setup_clients(cluster, server) - kerberos.prepare_policy_files(cluster) - - -def prepare_kerberos(cluster, instances=None): - if kerberos.is_kerberos_security_enabled(cluster): - _prepare_kerberos(cluster, instances) - - -def _serialize_mit_kdc_kerberos_env(cluster): - return { - 'kerberos-env': { - "realm": kerberos.get_realm_name(cluster), - "kdc_type": "mit-kdc", - "kdc_host": kerberos.get_kdc_host( - cluster, get_kdc_server(cluster)), - "admin_server_host": kerberos.get_kdc_host( - cluster, get_kdc_server(cluster)), - 'encryption_types': 'aes256-cts-hmac-sha1-96', - 'ldap_url': '', 'container_dn': '', - } - } - - -def _serialize_krb5_configs(cluster): - return { - "krb5-conf": { - "properties_attributes": {}, - "properties": { - "manage_krb5_conf": "false" - } - } - } - - -def _get_credentials(cluster): - return [{ - "alias": "kdc.admin.credential", - "principal": kerberos.get_admin_principal(cluster), - "key": kerberos.get_server_password(cluster), - "type": "TEMPORARY" - }] - - -def get_host_group_components(cluster, processes): - result = [] - for proc in processes: - result.append({'name': proc}) - return result - - -@plugin_utils.event_wrapper(True, step=_("Create Ambari blueprint"), - param=('cluster', 0)) -def create_blueprint(cluster): - _prepare_ranger(cluster) - cluster = conductor.cluster_get(context.ctx(), cluster.id) - host_groups = [] - for ng in cluster.node_groups: - procs = p_common.get_ambari_proc_list(ng) - procs.extend(p_common.get_clients(cluster)) - for instance in ng.instances: - hg = { - "name": instance.instance_name, - "configurations": configs.get_instance_params(instance), - "components": get_host_group_components(cluster, procs) - } - host_groups.append(hg) - bp = { - "Blueprints": { - "stack_name": "HDP", - "stack_version": cluster.hadoop_version, - }, - "host_groups": host_groups, - "configurations": configs.get_cluster_params(cluster) - } - - if kerberos.is_kerberos_security_enabled(cluster): - bp['configurations'].extend([ - _serialize_mit_kdc_kerberos_env(cluster), - _serialize_krb5_configs(cluster) - ]) - bp['Blueprints']['security'] = {'type': 'KERBEROS'} - - general_configs = cluster.cluster_configs.get("general", {}) - if (general_configs.get(p_common.NAMENODE_HA) or - general_configs.get(p_common.RESOURCEMANAGER_HA) or - general_configs.get(p_common.HBASE_REGIONSERVER_HA)): - bp = ha_helper.update_bp_ha_common(cluster, bp) - - if general_configs.get(p_common.NAMENODE_HA): - bp = ha_helper.update_bp_for_namenode_ha(cluster, bp) - - if general_configs.get(p_common.RESOURCEMANAGER_HA): - bp = ha_helper.update_bp_for_resourcemanager_ha(cluster, bp) - - if general_configs.get(p_common.HBASE_REGIONSERVER_HA): - bp = ha_helper.update_bp_for_hbase_ha(cluster, bp) - - with _get_ambari_client(cluster) as client: - return client.create_blueprint(cluster.name, bp) - - -def _build_ambari_cluster_template(cluster): - cl_tmpl = { - "blueprint": cluster.name, - "default_password": uuidutils.generate_uuid(), - "host_groups": [] - } - - if cluster.use_autoconfig: - strategy = configs.get_auto_configuration_strategy(cluster) - cl_tmpl["config_recommendation_strategy"] = strategy - - if kerberos.is_kerberos_security_enabled(cluster): - cl_tmpl["credentials"] = _get_credentials(cluster) - cl_tmpl["security"] = {"type": 
"KERBEROS"} - topology = _get_topology_data(cluster) - for ng in cluster.node_groups: - for instance in ng.instances: - host = {"fqdn": instance.fqdn()} - if t_helper.is_data_locality_enabled(): - host["rack_info"] = topology[instance.instance_name] - cl_tmpl["host_groups"].append({ - "name": instance.instance_name, - "hosts": [host] - }) - return cl_tmpl - - -@plugin_utils.event_wrapper(True, - step=_("Start cluster"), param=('cluster', 0)) -def start_cluster(cluster): - ambari_template = _build_ambari_cluster_template(cluster) - with _get_ambari_client(cluster) as client: - req_id = client.create_cluster(cluster.name, ambari_template)["id"] - client.wait_ambari_request(req_id, cluster.name) - - -@plugin_utils.event_wrapper(True) -def _add_host_to_cluster(instance, client): - client.add_host_to_cluster(instance) - - -def add_new_hosts(cluster, instances): - with _get_ambari_client(cluster) as client: - plugin_utils.add_provisioning_step( - cluster.id, _("Add new hosts"), len(instances)) - for inst in instances: - _add_host_to_cluster(inst, client) - - -@plugin_utils.event_wrapper(True, step=_("Generate config groups"), - param=('cluster', 0)) -def manage_config_groups(cluster, instances): - groups = [] - for instance in instances: - groups.extend(configs.get_config_group(instance)) - with _get_ambari_client(cluster) as client: - client.create_config_group(cluster, groups) - - -@plugin_utils.event_wrapper(True, step=_("Cleanup config groups"), - param=('cluster', 0)) -def cleanup_config_groups(cluster, instances): - to_remove = set() - for instance in instances: - cfg_name = "%s:%s" % (cluster.name, instance.instance_name) - to_remove.add(cfg_name) - with _get_ambari_client(cluster) as client: - config_groups = client.get_config_groups(cluster) - for group in config_groups['items']: - cfg_id = group['ConfigGroup']['id'] - detailed = client.get_detailed_config_group(cluster, cfg_id) - cfg_name = detailed['ConfigGroup']['group_name'] - # we have config group per host - if cfg_name in to_remove: - client.remove_config_group(cluster, cfg_id) - - -@plugin_utils.event_wrapper(True, step=_("Regenerate keytabs for Kerberos"), - param=('cluster', 0)) -def _regenerate_keytabs(cluster): - with _get_ambari_client(cluster) as client: - alias = "kdc.admin.credential" - try: - client.get_credential(cluster.name, alias) - except ambari_client.AmbariNotFound: - # credentials are missing - data = { - 'Credential': { - "principal": kerberos.get_admin_principal(cluster), - "key": kerberos.get_server_password(cluster), - "type": "TEMPORARY" - } - } - - client.import_credential(cluster.name, alias, data) - - req_id = client.regenerate_keytabs(cluster.name) - client.wait_ambari_request(req_id, cluster.name) - - -@plugin_utils.event_wrapper(True, step=_("Install services on hosts"), - param=('cluster', 0)) -def _install_services_to_hosts(cluster, instances): - requests_ids = [] - with _get_ambari_client(cluster) as client: - clients = p_common.get_clients(cluster) - for instance in instances: - services = p_common.get_ambari_proc_list(instance.node_group) - services.extend(clients) - for service in services: - client.add_service_to_host(instance, service) - requests_ids.append( - client.start_service_on_host( - instance, service, 'INSTALLED')) - client.wait_ambari_requests(requests_ids, cluster.name) - - -@plugin_utils.event_wrapper(True, step=_("Start services on hosts"), - param=('cluster', 0)) -def _start_services_on_hosts(cluster, instances): - with _get_ambari_client(cluster) as client: - # all services 
added and installed, let's start them - requests_ids = [] - for instance in instances: - services = p_common.get_ambari_proc_list(instance.node_group) - services.extend(p_common.ALL_LIST) - for service in services: - requests_ids.append( - client.start_service_on_host( - instance, service, 'STARTED')) - client.wait_ambari_requests(requests_ids, cluster.name) - - -def manage_host_components(cluster, instances): - _install_services_to_hosts(cluster, instances) - if kerberos.is_kerberos_security_enabled(cluster): - _regenerate_keytabs(cluster) - _start_services_on_hosts(cluster, instances) - - -@plugin_utils.event_wrapper(True, - step=_("Decommission NodeManagers and DataNodes"), - param=('cluster', 0)) -def decommission_hosts(cluster, instances): - nodemanager_instances = filter( - lambda i: p_common.NODEMANAGER in i.node_group.node_processes, - instances) - if len(nodemanager_instances) > 0: - decommission_nodemanagers(cluster, nodemanager_instances) - - datanode_instances = filter( - lambda i: p_common.DATANODE in i.node_group.node_processes, - instances) - if len(datanode_instances) > 0: - decommission_datanodes(cluster, datanode_instances) - - -def decommission_nodemanagers(cluster, instances): - with _get_ambari_client(cluster) as client: - client.decommission_nodemanagers(cluster.name, instances) - - -def decommission_datanodes(cluster, instances): - with _get_ambari_client(cluster) as client: - client.decommission_datanodes(cluster.name, instances) - - -def restart_namenode(cluster, instance): - with _get_ambari_client(cluster) as client: - client.restart_namenode(cluster.name, instance) - - -def restart_resourcemanager(cluster, instance): - with _get_ambari_client(cluster) as client: - client.restart_resourcemanager(cluster.name, instance) - - -@plugin_utils.event_wrapper(True, - step=_("Restart NameNodes and ResourceManagers"), - param=('cluster', 0)) -def restart_nns_and_rms(cluster): - nns = plugin_utils.get_instances(cluster, p_common.NAMENODE) - for nn in nns: - restart_namenode(cluster, nn) - - rms = plugin_utils.get_instances(cluster, p_common.RESOURCEMANAGER) - for rm in rms: - restart_resourcemanager(cluster, rm) - - -def restart_service(cluster, service_name): - with _get_ambari_client(cluster) as client: - client.restart_service(cluster.name, service_name) - - -@plugin_utils.event_wrapper(True, - step=_("Remove hosts"), param=('cluster', 0)) -def remove_services_from_hosts(cluster, instances): - for inst in instances: - LOG.debug("Stopping and removing processes from host %s", inst.fqdn()) - _remove_services_from_host(cluster, inst) - LOG.debug("Removing the host %s", inst.fqdn()) - _remove_host(cluster, inst) - - -def _remove_services_from_host(cluster, instance): - with _get_ambari_client(cluster) as client: - hdp_processes = client.list_host_processes(cluster.name, instance) - for proc in hdp_processes: - LOG.debug("Stopping process %(proc)s on host %(fqdn)s ", - {'proc': proc, 'fqdn': instance.fqdn()}) - client.stop_process_on_host(cluster.name, instance, proc) - - LOG.debug("Removing process %(proc)s from host %(fqdn)s ", - {'proc': proc, 'fqdn': instance.fqdn()}) - client.remove_process_from_host(cluster.name, instance, proc) - - _wait_all_processes_removed(cluster, instance) - - -def _remove_host(cluster, inst): - with _get_ambari_client(cluster) as client: - client.delete_host(cluster.name, inst) - - -def _wait_all_processes_removed(cluster, instance): - with _get_ambari_client(cluster) as client: - while True: - hdp_processes = 
client.list_host_processes(cluster.name, instance) - if not hdp_processes: - return - context.sleep(5) - - -def _get_ambari_client(cluster): - ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER) - password = cluster.extra["ambari_password"] - return ambari_client.AmbariClient(ambari, password=password) - - -def _get_topology_data(cluster): - if not t_helper.is_data_locality_enabled(): - return {} - - LOG.warning("Node group awareness is not implemented in YARN yet " - "so enable_hypervisor_awareness set to False " - "explicitly") - return t_helper.generate_topology_map(cluster, is_node_awareness=False) - - -@plugin_utils.event_wrapper(True) -def _configure_topology_data(cluster, inst, client): - topology = _get_topology_data(cluster) - client.set_rack_info_for_instance( - cluster.name, inst, topology[inst.instance_name]) - - -@plugin_utils.event_wrapper(True, - step=_("Restart HDFS and MAPREDUCE2 services"), - param=('cluster', 0)) -def _restart_hdfs_and_mapred_services(cluster, client): - client.restart_service(cluster.name, p_common.HDFS_SERVICE) - client.restart_service(cluster.name, p_common.MAPREDUCE2_SERVICE) - - -def configure_rack_awareness(cluster, instances): - if not t_helper.is_data_locality_enabled(): - return - - with _get_ambari_client(cluster) as client: - plugin_utils.add_provisioning_step( - cluster.id, _("Configure rack awareness"), len(instances)) - for inst in instances: - _configure_topology_data(cluster, inst, client) - _restart_hdfs_and_mapred_services(cluster, client) - - -@plugin_utils.event_wrapper(True) -def _add_hadoop_swift_jar(instance, new_jar): - with instance.remote() as r: - code, out = r.execute_command( - "test -f %s" % new_jar, raise_when_error=False) - if code == 0: - # get ambari hadoop version (e.g.: 2.7.1.2.3.4.0-3485) - code, amb_hadoop_version = r.execute_command( - "sudo hadoop version | grep 'Hadoop' | awk '{print $2}'") - amb_hadoop_version = amb_hadoop_version.strip() - # get special code of ambari hadoop version(e.g.:2.3.4.0-3485) - amb_code = '.'.join(amb_hadoop_version.split('.')[3:]) - origin_jar = ( - "/usr/hdp/{}/hadoop-mapreduce/hadoop-openstack-{}.jar".format( - amb_code, amb_hadoop_version)) - r.execute_command("sudo cp {} {}".format(new_jar, origin_jar)) - else: - LOG.warning("The {jar_file} file cannot be found " - "in the {dir} directory so Keystone API v3 " - "is not enabled for this cluster." 
- .format(jar_file="hadoop-openstack.jar", - dir="/opt")) - - -def add_hadoop_swift_jar(instances): - new_jar = "/opt/hadoop-openstack.jar" - plugin_utils.add_provisioning_step(instances[0].cluster.id, - _("Add Hadoop Swift jar to instances"), - len(instances)) - for inst in instances: - _add_hadoop_swift_jar(inst, new_jar) - - -def deploy_kerberos_principals(cluster, instances=None): - if not kerberos.is_kerberos_security_enabled(cluster): - return - if instances is None: - instances = plugin_utils.get_instances(cluster) - mapper = { - 'hdfs': plugin_utils.instances_with_services( - instances, [p_common.SECONDARY_NAMENODE, p_common.NAMENODE, - p_common.DATANODE, p_common.JOURNAL_NODE]), - 'spark': plugin_utils.instances_with_services( - instances, [p_common.SPARK_JOBHISTORYSERVER]), - 'oozie': plugin_utils.instances_with_services( - instances, [p_common.OOZIE_SERVER]), - } - - kerberos.create_keytabs_for_map(cluster, mapper) diff --git a/sahara_plugin_ambari/plugins/ambari/edp_engine.py b/sahara_plugin_ambari/plugins/ambari/edp_engine.py deleted file mode 100644 index 24c7837..0000000 --- a/sahara_plugin_ambari/plugins/ambari/edp_engine.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
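The deploy_kerberos_principals hunk above builds its keytab mapper with plugin_utils.instances_with_services: each service name maps to the subset of instances running a process that needs that service's keytab. A minimal standalone sketch of that selection, with a stand-in Instance class rather than Sahara's real instance objects:

    # Stand-in for Sahara's instance objects; not the real API.
    class Instance:
        def __init__(self, name, processes):
            self.name = name
            self.processes = processes

    def instances_with_services(instances, services):
        # Keep any instance that runs at least one of the wanted processes.
        wanted = set(services)
        return [i for i in instances if wanted & set(i.processes)]

    nodes = [
        Instance("master-0", ["NAMENODE", "OOZIE_SERVER"]),
        Instance("worker-0", ["DATANODE"]),
    ]
    mapper = {
        "hdfs": instances_with_services(nodes, ["NAMENODE", "DATANODE"]),
        "oozie": instances_with_services(nodes, ["OOZIE_SERVER"]),
    }
    print({k: [i.name for i in v] for k, v in mapper.items()})
    # {'hdfs': ['master-0', 'worker-0'], 'oozie': ['master-0']}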
- -from sahara.plugins import edp -from sahara.plugins import exceptions as pex -from sahara.plugins import kerberos -from sahara.plugins import utils as plugin_utils -from sahara_plugin_ambari.i18n import _ -from sahara_plugin_ambari.plugins.ambari import common as p_common - - -def _get_lib_location(instance, lib_name): - with instance.remote() as r: - code, jar_path = r.execute_command( - ('find /usr/hdp -name "{lib_name}" 2>/dev/null ' - '-print | head -n 1'.format(lib_name=lib_name)), - run_as_root=True) - # drop last whitespace character - return jar_path.rstrip() - - -def _get_hadoop_openstack_jar_location(instance): - return _get_lib_location(instance, "hadoop-openstack*.jar") - - -def _get_jackson_core(instance): - return _get_lib_location(instance, "jackson-core-asl-1.9*.jar") - - -class EDPOozieEngine(edp.PluginsOozieJobEngine): - def get_hdfs_user(self): - return "oozie" - - def get_client(self): - if kerberos.is_kerberos_security_enabled(self.cluster): - return super(EDPOozieEngine, self).get_remote_client() - return super(EDPOozieEngine, self).get_client() - - def create_hdfs_dir(self, remote, dir_name): - edp.create_dir_hadoop2(remote, dir_name, self.get_hdfs_user()) - - def get_oozie_server_uri(self, cluster): - oozie = plugin_utils.get_instance(cluster, p_common.OOZIE_SERVER) - return "http://%s:11000/oozie" % oozie.management_ip - - def get_name_node_uri(self, cluster): - namenodes = plugin_utils.get_instances(cluster, p_common.NAMENODE) - if len(namenodes) == 1: - return "hdfs://%s:8020" % namenodes[0].fqdn() - else: - return "hdfs://hdfs-ha" - - def get_resource_manager_uri(self, cluster): - resourcemanagers = plugin_utils.get_instances(cluster, - p_common.RESOURCEMANAGER) - return "%s:8050" % resourcemanagers[0].fqdn() - - def get_oozie_server(self, cluster): - return plugin_utils.get_instance(cluster, p_common.OOZIE_SERVER) - - def validate_job_execution(self, cluster, job, data): - oozie_count = plugin_utils.get_instances_count(cluster, - p_common.OOZIE_SERVER) - if oozie_count != 1: - raise pex.InvalidComponentCountException( - p_common.OOZIE_SERVER, "1", oozie_count) - super(EDPOozieEngine, self).validate_job_execution(cluster, job, data) - - @staticmethod - def get_possible_job_config(job_type): - return {"job_config": []} - - -class EDPSparkEngine(edp.PluginsSparkJobEngine): - edp_base_version = "2.2" - - def __init__(self, cluster): - super(EDPSparkEngine, self).__init__(cluster) - # searching for spark instance - self.master = plugin_utils.get_instance( - cluster, p_common.SPARK_JOBHISTORYSERVER) - self.plugin_params["spark-user"] = "sudo -u spark " - self.plugin_params["spark-submit"] = "spark-submit" - self.plugin_params["deploy-mode"] = "cluster" - self.plugin_params["master"] = "yarn-cluster" - - @staticmethod - def edp_supported(version): - return version >= EDPSparkEngine.edp_base_version - - def run_job(self, job_execution): - # calculate class-path dynamically - driver_classpath = [ - _get_hadoop_openstack_jar_location(self.master), - _get_jackson_core(self.master)] - self.plugin_params['driver-class-path'] = ":".join(driver_classpath) - self.plugin_params['drivers-to-jars'] = driver_classpath - - return super(EDPSparkEngine, self).run_job(job_execution) - - def validate_job_execution(self, cluster, job, data): - if not self.edp_supported(cluster.hadoop_version): - raise pex.PluginInvalidDataException( - _('Ambari plugin of {base} or higher required to run {type} ' - 'jobs').format( - base=EDPSparkEngine.edp_base_version, type=job.type)) - - 
spark_nodes_count = plugin_utils.get_instances_count( - cluster, p_common.SPARK_JOBHISTORYSERVER) - if spark_nodes_count != 1: - raise pex.InvalidComponentCountException( - p_common.SPARK_JOBHISTORYSERVER, '1', spark_nodes_count) - - super(EDPSparkEngine, self).validate_job_execution( - cluster, job, data) diff --git a/sahara_plugin_ambari/plugins/ambari/ha_helper.py b/sahara_plugin_ambari/plugins/ambari/ha_helper.py deleted file mode 100644 index 73ad57a..0000000 --- a/sahara_plugin_ambari/plugins/ambari/ha_helper.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from sahara.plugins import utils -from sahara_plugin_ambari.plugins.ambari import common as p_common - - -CORE_SITE = "core-site" -YARN_SITE = "yarn-site" -HBASE_SITE = "hbase-site" -HDFS_SITE = "hdfs-site" -HADOOP_ENV = "hadoop-env" -ZOO_CFG = "zoo.cfg" - - -def update_bp_ha_common(cluster, blueprint): - blueprint = _set_default_fs(cluster, blueprint, p_common.NAMENODE_HA) - blueprint = _set_high_zk_limits(blueprint) - - return blueprint - - -def update_bp_for_namenode_ha(cluster, blueprint): - blueprint = _add_zkfc_to_namenodes(blueprint) - blueprint = _set_zk_quorum(cluster, blueprint, CORE_SITE) - blueprint = _configure_hdfs_site(cluster, blueprint) - - return blueprint - - -def update_bp_for_resourcemanager_ha(cluster, blueprint): - blueprint = _configure_yarn_site(cluster, blueprint) - blueprint = _set_zk_quorum(cluster, blueprint, YARN_SITE) - blueprint = _set_default_fs(cluster, blueprint, - p_common.RESOURCEMANAGER_HA) - return blueprint - - -def update_bp_for_hbase_ha(cluster, blueprint): - return _confgure_hbase_site(cluster, blueprint) - - -def _add_zkfc_to_namenodes(blueprint): - for hg in blueprint["host_groups"]: - if {"name": "NAMENODE"} in hg["components"]: - hg["components"].append({"name": "ZKFC"}) - - return blueprint - - -def _find_create_properties_section(blueprint, section_name): - for conf_group in blueprint["configurations"]: - if section_name in conf_group: - return conf_group[section_name] - - new_group = {section_name: {}} - blueprint["configurations"].append(new_group) - - return new_group[section_name] - - -def _find_hdfs_site(blueprint): - return _find_create_properties_section(blueprint, HDFS_SITE) - - -def _find_yarn_site(blueprint): - return _find_create_properties_section(blueprint, YARN_SITE) - - -def _find_core_site(blueprint): - return _find_create_properties_section(blueprint, CORE_SITE) - - -def _find_hadoop_env(blueprint): - return _find_create_properties_section(blueprint, HADOOP_ENV) - - -def _find_zoo_cfg(blueprint): - return _find_create_properties_section(blueprint, ZOO_CFG) - - -def _find_hbase_site(blueprint): - return _find_create_properties_section(blueprint, HBASE_SITE) - - -def _set_default_fs(cluster, blueprint, ha_type): - if ha_type == p_common.NAMENODE_HA: - _find_core_site(blueprint)["fs.defaultFS"] = "hdfs://hdfs-ha" - elif ha_type == p_common.RESOURCEMANAGER_HA: - nn_instance = 
utils.get_instances(cluster, p_common.NAMENODE)[0] - _find_core_site(blueprint)["fs.defaultFS"] = ( - "hdfs://%s:8020" % nn_instance.fqdn()) - return blueprint - - -def _set_zk_quorum(cluster, blueprint, conf_type): - zk_instances = utils.get_instances(cluster, p_common.ZOOKEEPER_SERVER) - - value = ",".join(["%s:2181" % i.fqdn() for i in zk_instances]) - if conf_type == CORE_SITE: - _find_core_site(blueprint)["ha.zookeeper.quorum"] = value - elif conf_type == YARN_SITE: - _find_yarn_site(blueprint)["hadoop.registry.zk.quorum"] = value - - return blueprint - - -def _set_high_zk_limits(blueprint): - props = _find_zoo_cfg(blueprint) - props["tickTime"] = "10000" - - return blueprint - - -def _set_primary_and_standby_namenode(cluster, blueprint): - props = _find_hadoop_env(blueprint) - nns = utils.get_instances(cluster, p_common.NAMENODE) - props["dfs_ha_initial_namenode_active"] = nns[0].fqdn() - props["dfs_ha_initial_namenode_standby"] = nns[1].fqdn() - - return blueprint - - -def _configure_hdfs_site(cluster, blueprint): - props = _find_hdfs_site(blueprint) - - props["dfs.client.failover.proxy.provider.hdfs-ha"] = ( - "org.apache.hadoop.hdfs.server.namenode.ha." - "ConfiguredFailoverProxyProvider") - props["dfs.ha.automatic-failover.enabled"] = "true" - props["dfs.ha.fencing.methods"] = "shell(/bin/true)" - props["dfs.nameservices"] = "hdfs-ha" - - jns = utils.get_instances(cluster, p_common.JOURNAL_NODE) - journalnodes_concat = ";".join( - ["%s:8485" % i.fqdn() for i in jns]) - journalnodes_value = "qjournal://%s/hdfs-ha" % journalnodes_concat - props["dfs.namenode.shared.edits.dir"] = journalnodes_value - - nns = utils.get_instances(cluster, p_common.NAMENODE) - nn_id_concat = ",".join([i.instance_name for i in nns]) - props["dfs.ha.namenodes.hdfs-ha"] = nn_id_concat - - props["dfs.namenode.http-address"] = "%s:50070" % nns[0].fqdn() - props["dfs.namenode.https-address"] = "%s:50470" % nns[0].fqdn() - for i in nns: - props["dfs.namenode.http-address.hdfs-ha.%s" % i.instance_name] = ( - "%s:50070" % i.fqdn()) - props["dfs.namenode.https-address.hdfs-ha.%s" % i.instance_name] = ( - "%s:50470" % i.fqdn()) - props["dfs.namenode.rpc-address.hdfs-ha.%s" % i.instance_name] = ( - "%s:8020" % i.fqdn()) - - return blueprint - - -def _configure_yarn_site(cluster, blueprint): - props = _find_yarn_site(blueprint) - name = cluster.name - rm_instances = utils.get_instances(cluster, p_common.RESOURCEMANAGER) - - props["hadoop.registry.rm.enabled"] = "false" - - zk_instances = utils.get_instances(cluster, p_common.ZOOKEEPER_SERVER) - - zks = ",".join(["%s:2181" % i.fqdn() for i in zk_instances]) - props["yarn.resourcemanager.zk-address"] = zks - - hs = utils.get_instance(cluster, p_common.HISTORYSERVER) - props["yarn.log.server.url"] = "%s:19888/jobhistory/logs/" % hs.fqdn() - - props["yarn.resourcemanager.address"] = "%s:8050" % rm_instances[0].fqdn() - props["yarn.resourcemanager.admin.address"] = ("%s:8141" % - rm_instances[0].fqdn()) - props["yarn.resourcemanager.cluster-id"] = name - props["yarn.resourcemanager.ha.automatic-failover.zk-base-path"] = ( - "/yarn-leader-election") - props["yarn.resourcemanager.ha.enabled"] = "true" - - rm_id_concat = ",".join([i.instance_name for i in rm_instances]) - props["yarn.resourcemanager.ha.rm-ids"] = rm_id_concat - - for i in rm_instances: - props["yarn.resourcemanager.hostname.%s" % i.instance_name] = i.fqdn() - props["yarn.resourcemanager.webapp.address.%s" % - i.instance_name] = "%s:8088" % i.fqdn() - props["yarn.resourcemanager.webapp.https.address.%s" % - 
i.instance_name] = "%s:8090" % i.fqdn() - - props["yarn.resourcemanager.hostname"] = rm_instances[0].fqdn() - props["yarn.resourcemanager.recovery.enabled"] = "true" - props["yarn.resourcemanager.resource-tracker.address"] = ( - "%s:8025" % rm_instances[0].fqdn()) - props["yarn.resourcemanager.scheduler.address"] = ( - "%s:8030" % rm_instances[0].fqdn()) - props["yarn.resourcemanager.store.class"] = ( - "org.apache.hadoop.yarn.server.resourcemanager.recovery." - "ZKRMStateStore") - props["yarn.resourcemanager.webapp.address"] = ( - "%s:8088" % rm_instances[0].fqdn()) - props["yarn.resourcemanager.webapp.https.address"] = ( - "%s:8090" % rm_instances[0].fqdn()) - - tls_instance = utils.get_instance(cluster, p_common.APP_TIMELINE_SERVER) - props["yarn.timeline-service.address"] = "%s:10200" % tls_instance.fqdn() - props["yarn.timeline-service.webapp.address"] = ( - "%s:8188" % tls_instance.fqdn()) - props["yarn.timeline-service.webapp.https.address"] = ( - "%s:8190" % tls_instance.fqdn()) - - return blueprint - - -def _confgure_hbase_site(cluster, blueprint): - props = _find_hbase_site(blueprint) - - props["hbase.regionserver.global.memstore.lowerLimit"] = "0.38" - props["hbase.regionserver.global.memstore.upperLimit"] = "0.4" - props["hbase.regionserver.handler.count"] = "60" - props["hbase.regionserver.info.port"] = "16030" - props["hbase.regionserver.storefile.refresh.period"] = "20" - - props["hbase.rootdir"] = "hdfs://hdfs-ha/apps/hbase/data" - - props["hbase.security.authentication"] = "simple" - props["hbase.security.authorization"] = "false" - props["hbase.superuser"] = "hbase" - props["hbase.tmp.dir"] = "/hadoop/hbase" - props["hbase.zookeeper.property.clientPort"] = "2181" - - zk_instances = utils.get_instances(cluster, p_common.ZOOKEEPER_SERVER) - zk_quorum_value = ",".join([i.fqdn() for i in zk_instances]) - props["hbase.zookeeper.quorum"] = zk_quorum_value - - props["hbase.zookeeper.useMulti"] = "true" - props["hfile.block.cache.size"] = "0.40" - props["zookeeper.session.timeout"] = "30000" - props["zookeeper.znode.parent"] = "/hbase-unsecure" - - return blueprint diff --git a/sahara_plugin_ambari/plugins/ambari/health.py b/sahara_plugin_ambari/plugins/ambari/health.py deleted file mode 100644 index 8bdc523..0000000 --- a/sahara_plugin_ambari/plugins/ambari/health.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (c) 2016 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import collections -import functools - -from oslo_log import log as logging - -from sahara.plugins import health_check_base -from sahara.plugins import utils as plugin_utils -from sahara_plugin_ambari.i18n import _ -from sahara_plugin_ambari.plugins.ambari import client -from sahara_plugin_ambari.plugins.ambari import common as p_common - - -LOG = logging.getLogger(__name__) - - -class AlertsProvider(object): - def __init__(self, cluster): - self._data = None - self._cluster_services = None - self._exception_store = None - self.cluster = cluster - # calling to cache all data - self.get_alerts_data() - - def get_cluster_services(self): - return self._cluster_services - - def is_ambari_active(self): - if self._exception_store: - raise health_check_base.RedHealthError(self._exception_store) - return _("Ambari Monitor is healthy") - - def get_alerts_data(self, service=None): - if self._data is not None: - # return cached data - return self._data.get(service, []) if service else self._data - self._data = {} - self._cluster_services = [] - try: - ambari = plugin_utils.get_instance( - self.cluster, p_common.AMBARI_SERVER) - password = self.cluster.extra.get("ambari_password") - with client.AmbariClient(ambari, password=password) as ambari: - resp = ambari.get_alerts_data(self.cluster) - for alert in resp: - alert = alert.get('Alert', {}) - service = alert.get('service_name').lower() - if service not in self._data: - self._data[service] = [] - self._cluster_services.append(service) - self._data[service].append(alert) - except Exception as e: - prefix = _("Can't get response from Ambari Monitor") - msg = _("%(problem)s: %(description)s") % { - 'problem': prefix, 'description': str(e)} - # don't put in exception to logs, it will be done by log.exception - LOG.exception(prefix) - self._exception_store = msg - - -class AmbariHealthCheck(health_check_base.BasicHealthCheck): - def __init__(self, cluster, provider): - self.provider = provider - super(AmbariHealthCheck, self).__init__(cluster) - - def get_health_check_name(self): - return "Ambari alerts health check" - - def is_available(self): - return self.cluster.plugin_name == 'ambari' - - def check_health(self): - return self.provider.is_ambari_active() - - -class AmbariServiceHealthCheck(health_check_base.BasicHealthCheck): - def __init__(self, cluster, provider, service): - self.provider = provider - self.service = service.lower() - super(AmbariServiceHealthCheck, self).__init__(cluster) - - def get_health_check_name(self): - return "Ambari alerts for %s Service" % self.service - - def is_available(self): - return self.cluster.plugin_name == 'ambari' - - def get_important_services(self): - return [ - p_common.HDFS_SERVICE.lower(), - p_common.YARN_SERVICE.lower(), - p_common.OOZIE_SERVICE.lower(), - p_common.ZOOKEEPER_SERVICE.lower() - ] - - def check_health(self): - imp_map = {'OK': 'GREEN', 'WARNING': 'YELLOW', 'CRITICAL': 'RED'} - other_map = {'OK': 'GREEN'} - color_counter = collections.Counter() - important_services = self.get_important_services() - for alert in self.provider.get_alerts_data(self.service): - alert_summary = alert.get('state', 'UNKNOWN') - if self.service in important_services: - target = imp_map.get(alert_summary, 'RED') - else: - target = other_map.get(alert_summary, 'YELLOW') - color_counter[target] += 1 - if color_counter['RED'] > 0 and color_counter['YELLOW'] > 0: - raise health_check_base.RedHealthError( - _("Ambari Monitor has responded that cluster has " - "%(red)d critical and %(yellow)d warning alert(s)") - % 
{'red': color_counter['RED'], - 'yellow': color_counter['YELLOW']}) - elif color_counter['RED'] > 0: - raise health_check_base.RedHealthError( - _("Ambari Monitor has responded that cluster has " - "%(red)d critical alert(s)") - % {'red': color_counter['RED']}) - elif color_counter['YELLOW'] > 0: - raise health_check_base.YellowHealthError( - _("Ambari Monitor has responded that cluster " - "has %d warning alert(s)") - % color_counter['YELLOW']) - return _("No alerts found") - - -def get_health_checks(cluster): - provider = AlertsProvider(cluster) - checks = [functools.partial(AmbariHealthCheck, provider=provider)] - for service in provider.get_cluster_services(): - checks.append(functools.partial( - AmbariServiceHealthCheck, provider=provider, service=service)) - return checks diff --git a/sahara_plugin_ambari/plugins/ambari/plugin.py b/sahara_plugin_ambari/plugins/ambari/plugin.py deleted file mode 100644 index d1ff3e5..0000000 --- a/sahara_plugin_ambari/plugins/ambari/plugin.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from sahara.plugins import conductor -from sahara.plugins import context -from sahara.plugins import images -from sahara.plugins import kerberos -from sahara.plugins import provisioning as p -from sahara.plugins import swift_helper -from sahara.plugins import utils as plugin_utils -from sahara_plugin_ambari.i18n import _ -from sahara_plugin_ambari.plugins.ambari import common as p_common -from sahara_plugin_ambari.plugins.ambari import configs -from sahara_plugin_ambari.plugins.ambari import deploy -from sahara_plugin_ambari.plugins.ambari import edp_engine -from sahara_plugin_ambari.plugins.ambari import health -from sahara_plugin_ambari.plugins.ambari import validation - - -class AmbariPluginProvider(p.ProvisioningPluginBase): - - def get_title(self): - return "HDP Plugin" - - def get_description(self): - return _("The Ambari Sahara plugin provides the ability to launch " - "clusters with Hortonworks Data Platform (HDP) on OpenStack " - "using Apache Ambari") - - def get_versions(self): - return ["2.3", "2.4", "2.5", "2.6"] - - def get_node_processes(self, hadoop_version): - return { - p_common.AMBARI_SERVICE: [p_common.AMBARI_SERVER], - p_common.FALCON_SERVICE: [p_common.FALCON_SERVER], - p_common.FLUME_SERVICE: [p_common.FLUME_HANDLER], - p_common.HBASE_SERVICE: [p_common.HBASE_MASTER, - p_common.HBASE_REGIONSERVER], - p_common.HDFS_SERVICE: [p_common.DATANODE, p_common.NAMENODE, - p_common.SECONDARY_NAMENODE, - p_common.JOURNAL_NODE], - p_common.HIVE_SERVICE: [p_common.HIVE_METASTORE, - p_common.HIVE_SERVER], - p_common.KAFKA_SERVICE: [p_common.KAFKA_BROKER], - p_common.KNOX_SERVICE: [p_common.KNOX_GATEWAY], - p_common.OOZIE_SERVICE: [p_common.OOZIE_SERVER], - p_common.RANGER_SERVICE: [p_common.RANGER_ADMIN, - p_common.RANGER_USERSYNC], - p_common.SLIDER_SERVICE: [p_common.SLIDER], - p_common.SPARK_SERVICE: [p_common.SPARK_JOBHISTORYSERVER], - p_common.SQOOP_SERVICE: [p_common.SQOOP], - 
p_common.STORM_SERVICE: [ - p_common.DRPC_SERVER, p_common.NIMBUS, - p_common.STORM_UI_SERVER, p_common.SUPERVISOR], - p_common.YARN_SERVICE: [ - p_common.APP_TIMELINE_SERVER, p_common.HISTORYSERVER, - p_common.NODEMANAGER, p_common.RESOURCEMANAGER], - p_common.ZOOKEEPER_SERVICE: [p_common.ZOOKEEPER_SERVER], - 'Kerberos': [], - } - - def get_configs(self, hadoop_version): - cfgs = kerberos.get_config_list() - cfgs.extend(configs.load_configs(hadoop_version)) - return cfgs - - def configure_cluster(self, cluster): - deploy.disable_repos(cluster) - deploy.setup_ambari(cluster) - deploy.setup_agents(cluster) - deploy.wait_ambari_accessible(cluster) - deploy.update_default_ambari_password(cluster) - cluster = conductor.cluster_get(context.ctx(), cluster.id) - deploy.wait_host_registration(cluster, - plugin_utils.get_instances(cluster)) - deploy.prepare_kerberos(cluster) - deploy.set_up_hdp_repos(cluster) - deploy.resolve_package_conflicts(cluster) - deploy.create_blueprint(cluster) - - def start_cluster(self, cluster): - self._set_cluster_info(cluster) - deploy.start_cluster(cluster) - cluster_instances = plugin_utils.get_instances(cluster) - swift_helper.install_ssl_certs(cluster_instances) - deploy.add_hadoop_swift_jar(cluster_instances) - deploy.prepare_hive(cluster) - deploy.deploy_kerberos_principals(cluster) - - def _set_cluster_info(self, cluster): - ambari_ip = plugin_utils.get_instance( - cluster, p_common.AMBARI_SERVER).get_ip_or_dns_name() - ambari_port = "8080" - info = { - p_common.AMBARI_SERVER: { - "Web UI": "http://{host}:{port}".format(host=ambari_ip, - port=ambari_port), - "Username": "admin", - "Password": cluster.extra["ambari_password"] - } - } - nns = plugin_utils.get_instances(cluster, p_common.NAMENODE) - info[p_common.NAMENODE] = {} - for idx, namenode in enumerate(nns): - info[p_common.NAMENODE][ - "Web UI %s" % (idx + 1)] = ( - "http://%s:50070" % namenode.get_ip_or_dns_name()) - - rms = plugin_utils.get_instances(cluster, p_common.RESOURCEMANAGER) - info[p_common.RESOURCEMANAGER] = {} - for idx, resourcemanager in enumerate(rms): - info[p_common.RESOURCEMANAGER][ - "Web UI %s" % (idx + 1)] = ( - "http://%s:8088" % resourcemanager.get_ip_or_dns_name()) - - historyserver = plugin_utils.get_instance(cluster, - p_common.HISTORYSERVER) - if historyserver: - info[p_common.HISTORYSERVER] = { - "Web UI": "http://%s:19888" % - historyserver.get_ip_or_dns_name() - } - atlserver = plugin_utils.get_instance(cluster, - p_common.APP_TIMELINE_SERVER) - if atlserver: - info[p_common.APP_TIMELINE_SERVER] = { - "Web UI": "http://%s:8188" % atlserver.get_ip_or_dns_name() - } - oozie = plugin_utils.get_instance(cluster, p_common.OOZIE_SERVER) - if oozie: - info[p_common.OOZIE_SERVER] = { - "Web UI": "http://%s:11000/oozie" % oozie.get_ip_or_dns_name() - } - hbase_master = plugin_utils.get_instance(cluster, - p_common.HBASE_MASTER) - if hbase_master: - info[p_common.HBASE_MASTER] = { - "Web UI": "http://%s:16010" % hbase_master.get_ip_or_dns_name() - } - falcon = plugin_utils.get_instance(cluster, p_common.FALCON_SERVER) - if falcon: - info[p_common.FALCON_SERVER] = { - "Web UI": "http://%s:15000" % falcon.get_ip_or_dns_name() - } - storm_ui = plugin_utils.get_instance(cluster, p_common.STORM_UI_SERVER) - if storm_ui: - info[p_common.STORM_UI_SERVER] = { - "Web UI": "http://%s:8744" % storm_ui.get_ip_or_dns_name() - } - ranger_admin = plugin_utils.get_instance(cluster, - p_common.RANGER_ADMIN) - if ranger_admin: - info[p_common.RANGER_ADMIN] = { - "Web UI": "http://%s:6080" % 
ranger_admin.get_ip_or_dns_name(), - "Username": "admin", - "Password": "admin" - } - spark_hs = plugin_utils.get_instance(cluster, - p_common.SPARK_JOBHISTORYSERVER) - if spark_hs: - info[p_common.SPARK_JOBHISTORYSERVER] = { - "Web UI": "http://%s:18080" % spark_hs.get_ip_or_dns_name() - } - info.update(cluster.info.to_dict()) - ctx = context.ctx() - conductor.cluster_update(ctx, cluster, {"info": info}) - cluster = conductor.cluster_get(ctx, cluster.id) - - def validate(self, cluster): - validation.validate(cluster.id) - - def scale_cluster(self, cluster, instances): - deploy.prepare_kerberos(cluster, instances) - deploy.setup_agents(cluster, instances) - cluster = conductor.cluster_get(context.ctx(), cluster.id) - deploy.wait_host_registration(cluster, instances) - deploy.resolve_package_conflicts(cluster, instances) - deploy.add_new_hosts(cluster, instances) - deploy.manage_config_groups(cluster, instances) - deploy.manage_host_components(cluster, instances) - deploy.configure_rack_awareness(cluster, instances) - swift_helper.install_ssl_certs(instances) - deploy.add_hadoop_swift_jar(instances) - deploy.deploy_kerberos_principals(cluster, instances) - - def decommission_nodes(self, cluster, instances): - deploy.decommission_hosts(cluster, instances) - deploy.remove_services_from_hosts(cluster, instances) - deploy.restart_nns_and_rms(cluster) - deploy.cleanup_config_groups(cluster, instances) - - def validate_scaling(self, cluster, existing, additional): - validation.validate(cluster.id) - - def get_edp_engine(self, cluster, job_type): - if job_type in edp_engine.EDPSparkEngine.get_supported_job_types(): - return edp_engine.EDPSparkEngine(cluster) - if job_type in edp_engine.EDPOozieEngine.get_supported_job_types(): - return edp_engine.EDPOozieEngine(cluster) - return None - - def get_edp_job_types(self, versions=None): - res = {} - for version in self.get_versions(): - if not versions or version in versions: - oozie_engine = edp_engine.EDPOozieEngine - spark_engine = edp_engine.EDPSparkEngine - res[version] = (oozie_engine.get_supported_job_types() + - spark_engine.get_supported_job_types()) - return res - - def get_edp_config_hints(self, job_type, version): - if job_type in edp_engine.EDPSparkEngine.get_supported_job_types(): - return edp_engine.EDPSparkEngine.get_possible_job_config(job_type) - if job_type in edp_engine.EDPOozieEngine.get_supported_job_types(): - return edp_engine.EDPOozieEngine.get_possible_job_config(job_type) - - def get_open_ports(self, node_group): - ports_map = { - p_common.AMBARI_SERVER: [8080], - p_common.APP_TIMELINE_SERVER: [8188, 8190, 10200], - p_common.DATANODE: [50075, 50475], - p_common.DRPC_SERVER: [3772, 3773], - p_common.FALCON_SERVER: [15000], - p_common.FLUME_HANDLER: [8020, 41414], - p_common.HBASE_MASTER: [16000, 16010], - p_common.HBASE_REGIONSERVER: [16020, 16030], - p_common.HISTORYSERVER: [10020, 19888], - p_common.HIVE_METASTORE: [9933], - p_common.HIVE_SERVER: [9999, 10000], - p_common.KAFKA_BROKER: [6667], - p_common.NAMENODE: [8020, 9000, 50070, 50470], - p_common.NIMBUS: [6627], - p_common.NODEMANAGER: [8042, 8044, 45454], - p_common.OOZIE_SERVER: [11000, 11443], - p_common.RANGER_ADMIN: [6080], - p_common.RESOURCEMANAGER: [8025, 8030, 8050, 8088, 8141], - p_common.SECONDARY_NAMENODE: [50090], - p_common.SPARK_JOBHISTORYSERVER: [18080], - p_common.STORM_UI_SERVER: [8000, 8080, 8744], - p_common.ZOOKEEPER_SERVER: [2181], - } - ports = [] - for service in node_group.node_processes: - ports.extend(ports_map.get(service, [])) - return ports 
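get_open_ports above is a straight fan-out over ports_map: the firewall openings for a node group are the union of the port lists of every process it runs, and unknown processes contribute nothing. A self-contained illustration with an abridged map, where a plain list stands in for node_group.node_processes:

    # Abridged version of the ports_map from the deleted plugin.py.
    ports_map = {
        "Ambari": [8080],
        "NameNode": [8020, 9000, 50070, 50470],
        "ResourceManager": [8025, 8030, 8050, 8088, 8141],
    }

    def get_open_ports(node_processes):
        # Collect the ports of every known process; skip anything unmapped.
        ports = []
        for proc in node_processes:
            ports.extend(ports_map.get(proc, []))
        return ports

    print(get_open_ports(["Ambari", "NameNode", "unknown-service"]))
    # [8080, 8020, 9000, 50070, 50470]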
- - def get_health_checks(self, cluster): - return health.get_health_checks(cluster) - - validator = images.SaharaImageValidator.from_yaml( - 'plugins/ambari/resources/images/image.yaml', - resource_roots=['plugins/ambari/resources/images'], - package='sahara_plugin_ambari') - - def get_image_arguments(self, hadoop_version): - if hadoop_version not in self.get_versions(): - return NotImplemented - return self.validator.get_argument_list() - - def pack_image(self, hadoop_version, remote, - test_only=False, image_arguments=None): - if hadoop_version == '2.3': - image_arguments['ambari_version'] = '2.4.3.0' - - self.validator.validate(remote, test_only=test_only, - image_arguments=image_arguments) - - def validate_images(self, cluster, test_only=False, image_arguments=None): - image_arguments = self.get_image_arguments(cluster['hadoop_version']) - if cluster['hadoop_version'] == '2.3': - for arguments in image_arguments: - if arguments.name == 'ambari_version': - arguments.default = '2.4.3.0' - - if not test_only: - instances = plugin_utils.get_instances(cluster) - else: - instances = plugin_utils.get_instances(cluster)[0] - for instance in instances: - with instance.remote() as r: - self.validator.validate(r, test_only=test_only, - image_arguments=image_arguments) diff --git a/sahara_plugin_ambari/plugins/ambari/requests_helper.py b/sahara_plugin_ambari/plugins/ambari/requests_helper.py deleted file mode 100644 index 9488b99..0000000 --- a/sahara_plugin_ambari/plugins/ambari/requests_helper.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
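requests_helper.py, which follows, builds every Ambari REST body from a module-level template plus copy.deepcopy, so per-request mutation can never leak back into the shared constant. A condensed sketch of that pattern (the template here is abridged, not the full _COMMON_DECOMMISSION_TEMPLATE):

    import copy

    # Shared, never-mutated skeleton of a decommission request body.
    _DECOMMISSION_TEMPLATE = {
        "RequestInfo": {"context": "", "parameters": {"excluded_hosts": ""}},
    }

    def build_request(context, hosts):
        # Work on a fresh deep copy so nested dicts are not shared.
        tmpl = copy.deepcopy(_DECOMMISSION_TEMPLATE)
        tmpl["RequestInfo"]["context"] = context
        tmpl["RequestInfo"]["parameters"]["excluded_hosts"] = ",".join(hosts)
        return tmpl

    first = build_request("Decommission DataNodes", ["n1.example.com"])
    second = build_request("Decommission NodeManagers", [])
    assert first["RequestInfo"]["parameters"]["excluded_hosts"] == "n1.example.com"
    assert _DECOMMISSION_TEMPLATE["RequestInfo"]["context"] == ""  # template untouched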
- -import copy - - -_COMMON_DECOMMISSION_TEMPLATE = { - "RequestInfo": { - "context": "", - "command": "DECOMMISSION", - "parameters": { - "slave_type": "", - "excluded_hosts": "" - }, - "operation_level": { - "level": "HOST_COMPONENT", - "cluster_name": "" - } - }, - "Requests/resource_filters": [ - { - "service_name": "", - "component_name": "" - } - ] -} - -_COMMON_RESTART_TEMPLATE = { - "RequestInfo": { - "context": "", - "command": "RESTART", - "operation_level": { - "level": "HOST", - "cluster_name": "" - } - }, - "Requests/resource_filters": [ - { - "service_name": "", - "component_name": "", - "hosts": "" - } - ] -} - -_COMMON_RESTART_SERVICE_TEMPLATE = { - "RequestInfo": { - "context": "", - }, - "Body": { - "ServiceInfo": { - "state": "" - } - } -} - - -def build_datanode_decommission_request(cluster_name, instances): - tmpl = copy.deepcopy(_COMMON_DECOMMISSION_TEMPLATE) - - tmpl["RequestInfo"]["context"] = "Decommission DataNodes" - - tmpl["RequestInfo"]["parameters"]["slave_type"] = "DATANODE" - tmpl["RequestInfo"]["parameters"]["excluded_hosts"] = ",".join( - [i.fqdn() for i in instances]) - - tmpl["RequestInfo"]["operation_level"]["cluster_name"] = cluster_name - - tmpl["Requests/resource_filters"][0]["service_name"] = "HDFS" - tmpl["Requests/resource_filters"][0]["component_name"] = "NAMENODE" - - return tmpl - - -def build_nodemanager_decommission_request(cluster_name, instances): - tmpl = copy.deepcopy(_COMMON_DECOMMISSION_TEMPLATE) - - tmpl["RequestInfo"]["context"] = "Decommission NodeManagers" - - tmpl["RequestInfo"]["parameters"]["slave_type"] = "NODEMANAGER" - tmpl["RequestInfo"]["parameters"]["excluded_hosts"] = ",".join( - [i.fqdn() for i in instances]) - - tmpl["RequestInfo"]["operation_level"]["cluster_name"] = cluster_name - - tmpl["Requests/resource_filters"][0]["service_name"] = "YARN" - tmpl["Requests/resource_filters"][0]["component_name"] = "RESOURCEMANAGER" - - return tmpl - - -def build_namenode_restart_request(cluster_name, nn_instance): - tmpl = copy.deepcopy(_COMMON_RESTART_TEMPLATE) - - tmpl["RequestInfo"]["context"] = "Restart NameNode" - - tmpl["RequestInfo"]["operation_level"]["cluster_name"] = cluster_name - - tmpl["Requests/resource_filters"][0]["service_name"] = "HDFS" - tmpl["Requests/resource_filters"][0]["component_name"] = "NAMENODE" - tmpl["Requests/resource_filters"][0]["hosts"] = nn_instance.fqdn() - - return tmpl - - -def build_resourcemanager_restart_request(cluster_name, rm_instance): - tmpl = copy.deepcopy(_COMMON_RESTART_TEMPLATE) - - tmpl["RequestInfo"]["context"] = "Restart ResourceManager" - - tmpl["RequestInfo"]["operation_level"]["cluster_name"] = cluster_name - - tmpl["Requests/resource_filters"][0]["service_name"] = "YARN" - tmpl["Requests/resource_filters"][0]["component_name"] = "RESOURCEMANAGER" - tmpl["Requests/resource_filters"][0]["hosts"] = rm_instance.fqdn() - - return tmpl - - -def build_stop_service_request(service_name): - tmpl = copy.deepcopy(_COMMON_RESTART_SERVICE_TEMPLATE) - tmpl["RequestInfo"]["context"] = ( - "Restart %s service (stopping)" % service_name) - tmpl["Body"]["ServiceInfo"]["state"] = "INSTALLED" - return tmpl - - -def build_start_service_request(service_name): - tmpl = copy.deepcopy(_COMMON_RESTART_SERVICE_TEMPLATE) - tmpl["RequestInfo"]["context"] = ( - "Restart %s service (starting)" % service_name) - tmpl["Body"]["ServiceInfo"]["state"] = "STARTED" - return tmpl diff --git a/sahara_plugin_ambari/plugins/ambari/resources/configs-2.3.json 
b/sahara_plugin_ambari/plugins/ambari/resources/configs-2.3.json deleted file mode 100644 index 1fcc2c8..0000000 --- a/sahara_plugin_ambari/plugins/ambari/resources/configs-2.3.json +++ /dev/null @@ -1,1276 +0,0 @@ -{ - "accumulo-env": { - "accumulo_gc_heapsize": "256", - "accumulo_instance_name": "hdp-accumulo-instance", - "accumulo_log_dir": "/var/log/accumulo", - "accumulo_master_heapsize": "1024", - "accumulo_monitor_bind_all": "false", - "accumulo_monitor_heapsize": "1024", - "accumulo_other_heapsize": "1024", - "accumulo_pid_dir": "/var/run/accumulo", - "accumulo_tserver_heapsize": "1536", - "accumulo_user": "accumulo", - "instance_secret": "pass", - "server_content": "#! /usr/bin/env bash export HADOOP_PREFIX={{hadoop_prefix}} export HADOOP_CONF_DIR={{hadoop_conf_dir}} export JAVA_HOME={{java64_home}} export ZOOKEEPER_HOME={{zookeeper_home}} export ACCUMULO_PID_DIR={{pid_dir}} export ACCUMULO_LOG_DIR={{log_dir}} export ACCUMULO_CONF_DIR={{server_conf_dir}} export ACCUMULO_TSERVER_OPTS=\"-Xmx{{accumulo_tserver_heapsize}}m -Xms{{accumulo_tserver_heapsize}}m\" export ACCUMULO_MASTER_OPTS=\"-Xmx{{accumulo_master_heapsize}}m -Xms{{accumulo_master_heapsize}}m\" export ACCUMULO_MONITOR_OPTS=\"-Xmx{{accumulo_monitor_heapsize}}m -Xms{{accumulo_monitor_heapsize}}m\" export ACCUMULO_GC_OPTS=\"-Xmx{{accumulo_gc_heapsize}}m -Xms{{accumulo_gc_heapsize}}m\" export ACCUMULO_GENERAL_OPTS=\"-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -Djava.net.preferIPv4Stack=true ${ACCUMULO_GENERAL_OPTS}\" export ACCUMULO_OTHER_OPTS=\"-Xmx{{accumulo_other_heapsize}}m -Xms{{accumulo_other_heapsize}}m ${ACCUMULO_OTHER_OPTS}\" export ACCUMULO_MONITOR_BIND_ALL={{monitor_bind_str}} # what do when the JVM runs out of heap memory export ACCUMULO_KILL_CMD='kill -9 %p'" - }, - "accumulo-log4j": { - "audit_log_level": "OFF", - "debug_log_size": "512M", - "debug_num_logs": "10", - "info_log_size": "512M", - "info_num_logs": "10", - "monitor_forwarding_log_level": "WARN" - }, - "accumulo-site": { - "gc.port.client": "50092", - "general.classpaths": "$ACCUMULO_HOME/lib/accumulo-server.jar, $ACCUMULO_HOME/lib/accumulo-core.jar, $ACCUMULO_HOME/lib/accumulo-start.jar, $ACCUMULO_HOME/lib/accumulo-fate.jar, $ACCUMULO_HOME/lib/accumulo-proxy.jar, $ACCUMULO_HOME/lib/[^.].*.jar, $ZOOKEEPER_HOME/zookeeper[^.].*.jar, $HADOOP_CONF_DIR, /usr/hdp/current/hadoop-client/[^.].*.jar, /usr/hdp/current/hadoop-client/lib/(?!slf4j)[^.].*.jar, /usr/hdp/current/hadoop-hdfs-client/[^.].*.jar, /usr/hdp/current/hadoop-mapreduce-client/[^.].*.jar, /usr/hdp/current/hadoop-yarn-client/[^.].*.jar, /usr/hdp/current/hadoop-yarn-client/lib/jersey.*.jar, /usr/hdp/current/hive-client/lib/hive-accumulo-handler.jar,", - "instance.volumes": "hdfs://%HOSTGROUP::host_group_1%:8020/apps/accumulo/data", - "instance.zookeeper.host": "%HOSTGROUP::host_group_1%:2181", - "instance.zookeeper.timeout": "30s", - "master.port.client": "9999", - "monitor.port.client": "50095", - "monitor.port.log4j": "4560", - "trace.port.client": "12234", - "trace.user": "trace", - "tserver.cache.data.size": "128M", - "tserver.cache.index.size": "256M", - "tserver.memory.maps.max": "1G", - "tserver.memory.maps.native.enabled": "true", - "tserver.port.client": "9997", - "tserver.sort.buffer.size": "200M", - "tserver.walog.max.size": "1G" - }, - "ams-env": { - "ambari_metrics_user": "ams", - "metrics_collector_heapsize": "512m", - "metrics_collector_log_dir": "/var/log/ambari-metrics-collector", - "metrics_collector_pid_dir": "/var/run/ambari-metrics-collector", - 
"metrics_monitor_log_dir": "/var/log/ambari-metrics-monitor", - "metrics_monitor_pid_dir": "/var/run/ambari-metrics-monitor" - }, - "ams-hbase-env": { - "hbase_log_dir": "/var/log/ambari-metrics-collector", - "hbase_master_heapsize": "1024m", - "hbase_master_maxperm_size": "128m", - "hbase_master_xmn_size": "256m", - "hbase_pid_dir": "/var/run/ambari-metrics-collector/", - "hbase_regionserver_heapsize": "1024m", - "hbase_regionserver_xmn_ratio": "0.2", - "max_open_files_limit": "32768", - "regionserver_xmn_size": "256m" - }, - "ams-hbase-policy": { - "security.admin.protocol.acl": "*", - "security.client.protocol.acl": "*", - "security.masterregion.protocol.acl": "*" - }, - "ams-hbase-security-site": { - "ams.zookeeper.keytab": "", - "ams.zookeeper.principal": "", - "hadoop.security.authentication": "", - "hbase.coprocessor.master.classes": "", - "hbase.coprocessor.region.classes": "", - "hbase.master.kerberos.principal": "", - "hbase.master.keytab.file": "", - "hbase.myclient.keytab": "", - "hbase.myclient.principal": "", - "hbase.regionserver.kerberos.principal": "", - "hbase.regionserver.keytab.file": "", - "hbase.security.authentication": "", - "hbase.security.authorization": "", - "hbase.zookeeper.property.authProvider.1": "", - "hbase.zookeeper.property.jaasLoginRenew": "", - "hbase.zookeeper.property.kerberos.removeHostFromPrincipal": "", - "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal": "", - "zookeeper.znode.parent": "" - }, - "ams-hbase-site": { - "hbase.client.scanner.caching": "10000", - "hbase.client.scanner.timeout.period": "900000", - "hbase.cluster.distributed": "false", - "hbase.hregion.majorcompaction": "0", - "hbase.hregion.memstore.block.multiplier": "4", - "hbase.hregion.memstore.flush.size": "134217728", - "hbase.hstore.blockingStoreFiles": "200", - "hbase.hstore.flusher.count": "2", - "hbase.local.dir": "${hbase.tmp.dir}/local", - "hbase.master.info.bindAddress": "0.0.0.0", - "hbase.master.info.port": "61310", - "hbase.master.port": "61300", - "hbase.master.wait.on.regionservers.mintostart": "1", - "hbase.regionserver.global.memstore.lowerLimit": "0.4", - "hbase.regionserver.global.memstore.upperLimit": "0.5", - "hbase.regionserver.info.port": "61330", - "hbase.regionserver.port": "61320", - "hbase.regionserver.thread.compaction.large": "2", - "hbase.regionserver.thread.compaction.small": "3", - "hbase.replication": "false", - "hbase.rootdir": "file:///var/lib/ambari-metrics-collector/hbase", - "hbase.snapshot.enabled": "false", - "hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp", - "hbase.zookeeper.leaderport": "61388", - "hbase.zookeeper.peerport": "61288", - "hbase.zookeeper.property.clientPort": "61181", - "hbase.zookeeper.property.dataDir": "${hbase.tmp.dir}/zookeeper", - "hbase.zookeeper.quorum": "{{zookeeper_quorum_hosts}}", - "hbase_master_xmn_size": "128m", - "hfile.block.cache.size": "0.3", - "phoenix.groupby.maxCacheSize": "307200000", - "phoenix.query.maxGlobalMemoryPercentage": "15", - "phoenix.query.spoolThresholdBytes": "12582912", - "phoenix.query.timeoutMs": "1200000", - "phoenix.sequence.saltBuckets": "2", - "phoenix.spool.directory": "${hbase.tmp.dir}/phoenix-spool", - "zookeeper.session.timeout": "120000", - "zookeeper.session.timeout.localHBaseCluster": "20000" - }, - "ams-site": { - "phoenix.query.maxGlobalMemoryPercentage": "25", - "phoenix.spool.directory": "/tmp", - "timeline.metrics.aggregator.checkpoint.dir": "/var/lib/ambari-metrics-collector/checkpoint", - 
"timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier": "1", - "timeline.metrics.cluster.aggregator.daily.disabled": "false", - "timeline.metrics.cluster.aggregator.daily.interval": "86400", - "timeline.metrics.cluster.aggregator.daily.ttl": "63072000", - "timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.hourly.disabled": "false", - "timeline.metrics.cluster.aggregator.hourly.interval": "3600", - "timeline.metrics.cluster.aggregator.hourly.ttl": "31536000", - "timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.minute.disabled": "false", - "timeline.metrics.cluster.aggregator.minute.interval": "120", - "timeline.metrics.cluster.aggregator.minute.timeslice.interval": "30", - "timeline.metrics.cluster.aggregator.minute.ttl": "2592000", - "timeline.metrics.daily.aggregator.minute.interval": "86400", - "timeline.metrics.hbase.compression.scheme": "SNAPPY", - "timeline.metrics.hbase.data.block.encoding": "FAST_DIFF", - "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier": "1", - "timeline.metrics.host.aggregator.daily.disabled": "false", - "timeline.metrics.host.aggregator.daily.ttl": "31536000", - "timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier": "2", - "timeline.metrics.host.aggregator.hourly.disabled": "false", - "timeline.metrics.host.aggregator.hourly.interval": "3600", - "timeline.metrics.host.aggregator.hourly.ttl": "2592000", - "timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier": "2", - "timeline.metrics.host.aggregator.minute.disabled": "false", - "timeline.metrics.host.aggregator.minute.interval": "120", - "timeline.metrics.host.aggregator.minute.ttl": "604800", - "timeline.metrics.host.aggregator.ttl": "86400", - "timeline.metrics.service.checkpointDelay": "60", - "timeline.metrics.service.cluster.aggregator.appIds": "datanode,nodemanager,hbase", - "timeline.metrics.service.default.result.limit": "5760", - "timeline.metrics.service.operation.mode": "embedded", - "timeline.metrics.service.resultset.fetchSize": "2000", - "timeline.metrics.service.rpc.address": "0.0.0.0:60200", - "timeline.metrics.service.webapp.address": "0.0.0.0:6188" - }, - "application-properties": { - "atlas.authentication.keytab": "/etc/security/keytabs/atlas.service.keytab", - "atlas.authentication.method": "simple", - "atlas.authentication.principal": "atlas", - "atlas.enableTLS": "false", - "atlas.graph.index.search.backend": "elasticsearch", - "atlas.graph.index.search.directory": "/var/lib/atlas/data/es", - "atlas.graph.index.search.elasticsearch.client-only": "false", - "atlas.graph.index.search.elasticsearch.local-mode": "true", - "atlas.graph.storage.backend": "berkeleyje", - "atlas.graph.storage.directory": "/var/lib/atlas/data/berkeley", - "atlas.http.authentication.enabled": "false", - "atlas.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", - "atlas.http.authentication.kerberos.name.rules": "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*// DEFAULT", - "atlas.http.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", - "atlas.http.authentication.type": "simple", - "atlas.lineage.hive.process.inputs.name": "inputs", - "atlas.lineage.hive.process.outputs.name": "outputs", - "atlas.lineage.hive.process.type.name": "Process", - "atlas.lineage.hive.table.schema.query.Table": "Table where name='%s'\\, columns", - "atlas.lineage.hive.table.schema.query.hive_table": "hive_table where 
name='%s'\\, columns", - "atlas.lineage.hive.table.type.name": "DataSet", - "atlas.server.bind.address": "localhost" - }, - "atlas-env": { - "metadata_classpath": "", - "metadata_data_dir": "/var/lib/atlas/data", - "metadata_expanded_war_dir": "./server/webapp", - "metadata_log_dir": "/var/log/atlas", - "metadata_opts": "-Xmx1024m", - "metadata_pid_dir": "/var/run/atlas", - "metadata_port": "21000", - "metadata_user": "atlas" - }, - "capacity-scheduler": { - "yarn.scheduler.capacity.default.minimum-user-limit-percent": "100", - "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", - "yarn.scheduler.capacity.maximum-applications": "10000", - "yarn.scheduler.capacity.node-locality-delay": "40", - "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator", - "yarn.scheduler.capacity.root.accessible-node-labels": "*", - "yarn.scheduler.capacity.root.acl_administer_queue": "*", - "yarn.scheduler.capacity.root.capacity": "100", - "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", - "yarn.scheduler.capacity.root.default.acl_submit_applications": "*", - "yarn.scheduler.capacity.root.default.capacity": "100", - "yarn.scheduler.capacity.root.default.maximum-capacity": "100", - "yarn.scheduler.capacity.root.default.state": "RUNNING", - "yarn.scheduler.capacity.root.default.user-limit-factor": "1", - "yarn.scheduler.capacity.root.queues": "default" - }, - "cluster-env": { - "command_retry_enabled": "true", - "command_retry_max_time_in_sec": "600", - "commands_to_retry": "INSTALL,START", - "ignore_groupsusers_create": "false", - "kerberos_domain": "EXAMPLE.COM", - "repo_suse_rhel_template": "[{{repo_id}}] name={{repo_id}} {% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %} path=/ enabled=1 gpgcheck=0", - "repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}", - "security_enabled": "false", - "smokeuser": "ambari-qa", - "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", - "user_group": "hadoop" - }, - "core-site": { - "fs.defaultFS": "hdfs://%HOSTGROUP::host_group_1%:8020", - "fs.swift.impl": "org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem", - "fs.swift.service.sahara.auth.endpoint.prefix": "/endpoints/AUTH_", - "fs.swift.service.sahara.auth.url": "http://172.18.168.2:5000/v2.0/tokens/", - "fs.swift.service.sahara.http.port": "8080", - "fs.swift.service.sahara.https.port": "443", - "fs.swift.service.sahara.public": "true", - "fs.swift.service.sahara.tenant": "devs", - "fs.trash.interval": "360", - "ha.failover-controller.active-standby-elector.zk.op.retries": "120", - "hadoop.http.authentication.simple.anonymous.allowed": "true", - "hadoop.proxyuser.falcon.groups": "users", - "hadoop.proxyuser.falcon.hosts": "*", - "hadoop.proxyuser.hcat.groups": "users", - "hadoop.proxyuser.hcat.hosts": "%HOSTGROUP::host_group_2%", - "hadoop.proxyuser.hdfs.groups": "*", - "hadoop.proxyuser.hdfs.hosts": "*", - "hadoop.proxyuser.hive.groups": "users", - "hadoop.proxyuser.hive.hosts": "%HOSTGROUP::host_group_2%", - "hadoop.proxyuser.oozie.groups": "users", - "hadoop.proxyuser.oozie.hosts": "*", - "hadoop.security.auth_to_local": "DEFAULT", - "hadoop.security.authentication": "simple", - "hadoop.security.authorization": "false", - "hadoop.security.key.provider.path": "", - "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", - "io.file.buffer.size": "131072", - 
"io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", - "ipc.client.connect.max.retries": "50", - "ipc.client.connection.maxidletime": "30000", - "ipc.client.idlethreshold": "8000", - "ipc.server.tcpnodelay": "true", - "mapreduce.jobtracker.webinterface.trusted": "false", - "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", - "proxyuser_group": "users" - }, - "falcon-env": { - "falcon.embeddedmq": "true", - "falcon.embeddedmq.data": "/hadoop/falcon/embeddedmq/data", - "falcon.emeddedmq.port": "61616", - "falcon_local_dir": "/hadoop/falcon", - "falcon_log_dir": "/var/log/falcon", - "falcon_pid_dir": "/var/run/falcon", - "falcon_port": "15000", - "falcon_store_uri": "file:///hadoop/falcon/store", - "falcon_user": "falcon", - "supports_hive_dr": "true" - }, - "falcon-runtime.properties": { - "*.domain": "${falcon.app.type}", - "*.log.cleanup.frequency.days.retention": "days(7)", - "*.log.cleanup.frequency.hours.retention": "minutes(1)", - "*.log.cleanup.frequency.minutes.retention": "hours(6)", - "*.log.cleanup.frequency.months.retention": "months(3)" - }, - "falcon-startup.properties": { - "*.ConfigSyncService.impl": "org.apache.falcon.resource.ConfigSyncService", - "*.ProcessInstanceManager.impl": "org.apache.falcon.resource.InstanceManager", - "*.SchedulableEntityManager.impl": "org.apache.falcon.resource.SchedulableEntityManager", - "*.application.services": "org.apache.falcon.security.AuthenticationInitializationService,\\ org.apache.falcon.workflow.WorkflowJobEndNotificationService, \\ org.apache.falcon.service.ProcessSubscriberService,\\ org.apache.falcon.entity.store.ConfigurationStore,\\ org.apache.falcon.rerun.service.RetryService,\\ org.apache.falcon.rerun.service.LateRunService,\\ org.apache.falcon.service.LogCleanupService,\\ org.apache.falcon.metadata.MetadataMappingService", - "*.broker.impl.class": "org.apache.activemq.ActiveMQConnectionFactory", - "*.broker.ttlInMins": "4320", - "*.broker.url": "tcp://%HOSTGROUP::host_group_2%:61616", - "*.catalog.service.impl": "org.apache.falcon.catalog.HiveCatalogService", - "*.config.store.uri": "file:///hadoop/falcon/store", - "*.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph,\\ org.apache.falcon.entity.ColoClusterRelation,\\ org.apache.falcon.group.FeedGroupMap,\\ org.apache.falcon.service.SharedLibraryHostingService", - "*.domain": "${falcon.app.type}", - "*.entity.topic": "FALCON.ENTITY.TOPIC", - "*.falcon.authentication.type": "simple", - "*.falcon.cleanup.service.frequency": "days(1)", - "*.falcon.enableTLS": "false", - "*.falcon.graph.blueprints.graph": "com.thinkaurelius.titan.core.TitanFactory", - "*.falcon.graph.preserve.history": "false", - "*.falcon.graph.serialize.path": "/hadoop/falcon/data/lineage", - "*.falcon.graph.storage.backend": "berkeleyje", - "*.falcon.graph.storage.directory": "/hadoop/falcon/data/lineage/graphdb", - "*.falcon.http.authentication.blacklisted.users": "", - "*.falcon.http.authentication.cookie.domain": "EXAMPLE.COM", - "*.falcon.http.authentication.kerberos.name.rules": "DEFAULT", - "*.falcon.http.authentication.signature.secret": "falcon", - "*.falcon.http.authentication.simple.anonymous.allowed": "true", - "*.falcon.http.authentication.token.validity": "36000", - "*.falcon.http.authentication.type": "simple", - "*.falcon.security.authorization.admin.groups": "falcon", - "*.falcon.security.authorization.admin.users": "falcon,ambari-qa", - "*.falcon.security.authorization.enabled": "false", - "*.falcon.security.authorization.provider": 
"org.apache.falcon.security.DefaultAuthorizationProvider", - "*.falcon.security.authorization.superusergroup": "falcon", - "*.hive.shared.libs": "hive-exec,hive-metastore,hive-common,hive-service,hive-hcatalog-server-extensions,\\ hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client", - "*.internal.queue.size": "1000", - "*.journal.impl": "org.apache.falcon.transaction.SharedFileSystemJournal", - "*.max.retry.failure.count": "1", - "*.oozie.feed.workflow.builder": "org.apache.falcon.workflow.OozieFeedWorkflowBuilder", - "*.oozie.process.workflow.builder": "org.apache.falcon.workflow.OozieProcessWorkflowBuilder", - "*.retry.recorder.path": "${falcon.log.dir}/retry", - "*.shared.libs": "activemq-core,ant,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3,commons-el", - "*.system.lib.location": "${falcon.home}/server/webapp/${falcon.app.type}/WEB-INF/lib", - "*.workflow.engine.impl": "org.apache.falcon.workflow.engine.OozieWorkflowEngine", - "prism.application.services": "org.apache.falcon.entity.store.ConfigurationStore", - "prism.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph,\\ org.apache.falcon.entity.ColoClusterRelation,\\ org.apache.falcon.group.FeedGroupMap" - }, - "flume-env": { - "flume_conf_dir": "/etc/flume/conf", - "flume_log_dir": "/var/log/flume", - "flume_run_dir": "/var/run/flume", - "flume_user": "flume" - }, - "gateway-site": { - "gateway.gateway.conf.dir": "deployments", - "gateway.hadoop.kerberos.secured": "false", - "gateway.path": "gateway", - "gateway.port": "8443", - "java.security.auth.login.config": "/etc/knox/conf/krb5JAASLogin.conf", - "java.security.krb5.conf": "/etc/knox/conf/krb5.conf", - "sun.security.krb5.debug": "true" - }, - "hadoop-env": { - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", - "dtnode_heapsize": "1024m", - "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "hadoop_root_logger": "INFO,RFA", - "hdfs_log_dir_prefix": "/var/log/hadoop", - "hdfs_user": "hdfs", - "keyserver_host": "", - "keyserver_port": "", - "namenode_heapsize": "1024m", - "namenode_opt_maxnewsize": "200m", - "namenode_opt_maxpermsize": "256m", - "namenode_opt_newsize": "200m", - "namenode_opt_permsize": "128m", - "nfsgateway_heapsize": "1024", - "proxyuser_group": "users" - }, - "hadoop-policy": { - "security.admin.operations.protocol.acl": "hadoop", - "security.client.datanode.protocol.acl": "*", - "security.client.protocol.acl": "*", - "security.datanode.protocol.acl": "*", - "security.inter.datanode.protocol.acl": "*", - "security.inter.tracker.protocol.acl": "*", - "security.job.client.protocol.acl": "*", - "security.job.task.protocol.acl": "*", - "security.namenode.protocol.acl": "*", - "security.refresh.policy.protocol.acl": "hadoop", - "security.refresh.usertogroups.mappings.protocol.acl": "hadoop" - }, - "hbase-env": { - "hbase_log_dir": "/var/log/hbase", - "hbase_master_heapsize": "1024m", - "hbase_pid_dir": "/var/run/hbase", - "hbase_regionserver_heapsize": "1024m", - "hbase_regionserver_xmn_max": "512", - "hbase_regionserver_xmn_ratio": "0.2", - "hbase_user": "hbase", - "override_hbase_uid": "true", - "phoenix_sql_enabled": "false" - }, - "hbase-policy": { - "security.admin.protocol.acl": "*", - "security.client.protocol.acl": "*", - "security.masterregion.protocol.acl": "*" - }, - "hbase-site": { - "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", - "hbase.bulkload.staging.dir": "/apps/hbase/staging", - "hbase.client.keyvalue.maxsize": "1048576", - 
"hbase.client.retries.number": "35", - "hbase.client.scanner.caching": "100", - "hbase.cluster.distributed": "true", - "hbase.coprocessor.master.classes": "", - "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint", - "hbase.coprocessor.regionserver.classes": "", - "hbase.defaults.for.version.skip": "true", - "hbase.hregion.majorcompaction": "604800000", - "hbase.hregion.majorcompaction.jitter": "0.50", - "hbase.hregion.max.filesize": "10737418240", - "hbase.hregion.memstore.block.multiplier": "4", - "hbase.hregion.memstore.flush.size": "134217728", - "hbase.hregion.memstore.mslab.enabled": "true", - "hbase.hstore.blockingStoreFiles": "10", - "hbase.hstore.compaction.max": "10", - "hbase.hstore.compactionThreshold": "3", - "hbase.local.dir": "${hbase.tmp.dir}/local", - "hbase.master.info.bindAddress": "0.0.0.0", - "hbase.master.info.port": "16010", - "hbase.master.port": "16000", - "hbase.regionserver.global.memstore.size": "0.4", - "hbase.regionserver.handler.count": "30", - "hbase.regionserver.info.port": "16030", - "hbase.regionserver.port": "16020", - "hbase.regionserver.wal.codec": "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec", - "hbase.rootdir": "hdfs://%HOSTGROUP::host_group_1%:8020/apps/hbase/data", - "hbase.rpc.controllerfactory.class": "", - "hbase.rpc.protection": "authentication", - "hbase.rpc.timeout": "90000", - "hbase.security.authentication": "simple", - "hbase.security.authorization": "false", - "hbase.superuser": "hbase", - "hbase.tmp.dir": "/tmp/hbase-${user.name}", - "hbase.zookeeper.property.clientPort": "2181", - "hbase.zookeeper.quorum": "%HOSTGROUP::host_group_1%", - "hbase.zookeeper.useMulti": "true", - "hfile.block.cache.size": "0.40", - "phoenix.functions.allowUserDefinedFunctions": "", - "phoenix.query.timeoutMs": "60000", - "zookeeper.session.timeout": "90000", - "zookeeper.znode.parent": "/hbase-unsecure" - }, - "hdfs-site": { - "dfs.block.access.token.enable": "true", - "dfs.blockreport.initialDelay": "120", - "dfs.blocksize": "134217728", - "dfs.client.read.shortcircuit": "true", - "dfs.client.read.shortcircuit.streams.cache.size": "4096", - "dfs.client.retry.policy.enabled": "false", - "dfs.cluster.administrators": "hdfs", - "dfs.datanode.address": "0.0.0.0:50010", - "dfs.datanode.balance.bandwidthPerSec": "6250000", - "dfs.datanode.data.dir": "/hadoop/hdfs/data", - "dfs.datanode.data.dir.perm": "750", - "dfs.datanode.du.reserved": "1073741824", - "dfs.datanode.failed.volumes.tolerated": "0", - "dfs.datanode.http.address": "0.0.0.0:50075", - "dfs.datanode.https.address": "0.0.0.0:50475", - "dfs.datanode.ipc.address": "0.0.0.0:8010", - "dfs.datanode.max.transfer.threads": "1024", - "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", - "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", - "dfs.encryption.key.provider.uri": "", - "dfs.heartbeat.interval": "3", - "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", - "dfs.http.policy": "HTTP_ONLY", - "dfs.https.port": "50470", - "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", - "dfs.journalnode.http-address": "0.0.0.0:8480", - "dfs.journalnode.https-address": "0.0.0.0:8481", - "dfs.namenode.accesstime.precision": "0", - "dfs.namenode.audit.log.async": "true", - "dfs.namenode.avoid.read.stale.datanode": "true", - "dfs.namenode.avoid.write.stale.datanode": "true", - "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", - "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", - 
"dfs.namenode.checkpoint.period": "21600", - "dfs.namenode.checkpoint.txns": "1000000", - "dfs.namenode.fslock.fair": "false", - "dfs.namenode.handler.count": "100", - "dfs.namenode.http-address": "%HOSTGROUP::host_group_1%:50070", - "dfs.namenode.https-address": "%HOSTGROUP::host_group_1%:50470", - "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", - "dfs.namenode.name.dir.restore": "true", - "dfs.namenode.rpc-address": "%HOSTGROUP::host_group_1%:8020", - "dfs.namenode.safemode.threshold-pct": "0.999", - "dfs.namenode.secondary.http-address": "%HOSTGROUP::host_group_1%:50090", - "dfs.namenode.stale.datanode.interval": "30000", - "dfs.namenode.startup.delay.block.deletion.sec": "3600", - "dfs.namenode.write.stale.datanode.ratio": "1.0f", - "dfs.permissions.enabled": "true", - "dfs.permissions.superusergroup": "hdfs", - "dfs.replication": "3", - "dfs.replication.max": "50", - "dfs.support.append": "true", - "dfs.webhdfs.enabled": "true", - "fs.permissions.umask-mode": "022", - "nfs.exports.allowed.hosts": "* rw", - "nfs.file.dump.dir": "/tmp/.hdfs-nfs" - }, - "hive-env": { - "cost_based_optimizer": "On", - "hcat_log_dir": "/var/log/webhcat", - "hcat_pid_dir": "/var/run/webhcat", - "hcat_user": "hcat", - "hive_database": "New MySQL Database", - "hive_database_name": "hive", - "hive_database_type": "mysql", - "hive_exec_orc_storage_strategy": "SPEED", - "hive_log_dir": "/var/log/hive", - "hive_pid_dir": "/var/run/hive", - "hive_security_authorization": "None", - "hive_timeline_logging_enabled": "true", - "hive_txn_acid": "off", - "hive_user": "hive", - "webhcat_user": "hcat" - }, - "hive-site": { - "ambari.hive.db.schema.name": "hive", - "datanucleus.autoCreateSchema": "false", - "datanucleus.cache.level2.type": "none", - "hive.auto.convert.join": "true", - "hive.auto.convert.join.noconditionaltask": "true", - "hive.auto.convert.join.noconditionaltask.size": "59419306", - "hive.auto.convert.sortmerge.join": "true", - "hive.auto.convert.sortmerge.join.to.mapjoin": "false", - "hive.cbo.enable": "true", - "hive.cli.print.header": "false", - "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore", - "hive.cluster.delegation.token.store.zookeeper.connectString": "%HOSTGROUP::host_group_1%:2181", - "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation", - "hive.compactor.abortedtxn.threshold": "1000", - "hive.compactor.check.interval": "300L", - "hive.compactor.delta.num.threshold": "10", - "hive.compactor.delta.pct.threshold": "0.1f", - "hive.compactor.initiator.on": "false", - "hive.compactor.worker.threads": "0", - "hive.compactor.worker.timeout": "86400L", - "hive.compute.query.using.stats": "true", - "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role", - "hive.convert.join.bucket.mapjoin.tez": "false", - "hive.default.fileformat": "TextFile", - "hive.default.fileformat.managed": "TextFile", - "hive.enforce.bucketing": "false", - "hive.enforce.sorting": "true", - "hive.enforce.sortmergebucketmapjoin": "true", - "hive.exec.compress.intermediate": "false", - "hive.exec.compress.output": "false", - "hive.exec.dynamic.partition": "true", - "hive.exec.dynamic.partition.mode": "strict", - "hive.exec.failure.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook", - "hive.exec.max.created.files": "100000", - "hive.exec.max.dynamic.partitions": "5000", - "hive.exec.max.dynamic.partitions.pernode": "2000", - "hive.exec.orc.compression.strategy": "SPEED", - 
"hive.exec.orc.default.compress": "ZLIB", - "hive.exec.orc.default.stripe.size": "67108864", - "hive.exec.orc.encoding.strategy": "SPEED", - "hive.exec.parallel": "false", - "hive.exec.parallel.thread.number": "8", - "hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook", - "hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook", - "hive.exec.reducers.bytes.per.reducer": "67108864", - "hive.exec.reducers.max": "1009", - "hive.exec.scratchdir": "/tmp/hive", - "hive.exec.submit.local.task.via.child": "true", - "hive.exec.submitviachild": "false", - "hive.execution.engine": "tez", - "hive.fetch.task.aggr": "false", - "hive.fetch.task.conversion": "more", - "hive.fetch.task.conversion.threshold": "1073741824", - "hive.limit.optimize.enable": "true", - "hive.limit.pushdown.memory.usage": "0.04", - "hive.map.aggr": "true", - "hive.map.aggr.hash.force.flush.memory.threshold": "0.9", - "hive.map.aggr.hash.min.reduction": "0.5", - "hive.map.aggr.hash.percentmemory": "0.5", - "hive.mapjoin.bucket.cache.size": "10000", - "hive.mapjoin.optimized.hashtable": "true", - "hive.mapred.reduce.tasks.speculative.execution": "false", - "hive.merge.mapfiles": "true", - "hive.merge.mapredfiles": "false", - "hive.merge.orcfile.stripe.level": "true", - "hive.merge.rcfile.block.level": "true", - "hive.merge.size.per.task": "256000000", - "hive.merge.smallfiles.avgsize": "16000000", - "hive.merge.tezfiles": "false", - "hive.metastore.authorization.storage.checks": "false", - "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order", - "hive.metastore.client.connect.retry.delay": "5s", - "hive.metastore.client.socket.timeout": "1800s", - "hive.metastore.connect.retries": "24", - "hive.metastore.execute.setugi": "true", - "hive.metastore.failure.retries": "24", - "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab", - "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM", - "hive.metastore.pre.event.listeners": "org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener", - "hive.metastore.sasl.enabled": "false", - "hive.metastore.server.max.threads": "100000", - "hive.metastore.uris": "thrift://%HOSTGROUP::host_group_2%:9083", - "hive.metastore.warehouse.dir": "/apps/hive/warehouse", - "hive.optimize.bucketmapjoin": "true", - "hive.optimize.bucketmapjoin.sortedmerge": "false", - "hive.optimize.constant.propagation": "true", - "hive.optimize.index.filter": "true", - "hive.optimize.metadataonly": "true", - "hive.optimize.null.scan": "true", - "hive.optimize.reducededuplication": "true", - "hive.optimize.reducededuplication.min.reducer": "4", - "hive.optimize.sort.dynamic.partition": "false", - "hive.orc.compute.splits.num.threads": "10", - "hive.orc.splits.include.file.footer": "false", - "hive.prewarm.enabled": "false", - "hive.prewarm.numcontainers": "3", - "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator", - "hive.security.authorization.enabled": "false", - "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory", - "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator", - "hive.security.metastore.authorization.auth.reads": "true", - "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", - "hive.server2.allow.user.substitution": "true", - 
"hive.server2.authentication": "NONE", - "hive.server2.authentication.spnego.keytab": "HTTP/_HOST@EXAMPLE.COM", - "hive.server2.authentication.spnego.principal": "/etc/security/keytabs/spnego.service.keytab", - "hive.server2.enable.doAs": "true", - "hive.server2.logging.operation.enabled": "true", - "hive.server2.logging.operation.log.location": "${system:java.io.tmpdir}/${system:user.name}/operation_logs", - "hive.server2.support.dynamic.service.discovery": "true", - "hive.server2.table.type.mapping": "CLASSIC", - "hive.server2.tez.default.queues": "default", - "hive.server2.tez.initialize.default.sessions": "false", - "hive.server2.tez.sessions.per.default.queue": "1", - "hive.server2.thrift.http.path": "cliservice", - "hive.server2.thrift.http.port": "10001", - "hive.server2.thrift.max.worker.threads": "500", - "hive.server2.thrift.port": "10000", - "hive.server2.thrift.sasl.qop": "auth", - "hive.server2.transport.mode": "binary", - "hive.server2.use.SSL": "false", - "hive.server2.zookeeper.namespace": "hiveserver2", - "hive.smbjoin.cache.rows": "10000", - "hive.stats.autogather": "true", - "hive.stats.dbclass": "fs", - "hive.stats.fetch.column.stats": "true", - "hive.stats.fetch.partition.stats": "true", - "hive.support.concurrency": "false", - "hive.tez.auto.reducer.parallelism": "true", - "hive.tez.container.size": "170", - "hive.tez.cpu.vcores": "-1", - "hive.tez.dynamic.partition.pruning": "true", - "hive.tez.dynamic.partition.pruning.max.data.size": "104857600", - "hive.tez.dynamic.partition.pruning.max.event.size": "1048576", - "hive.tez.input.format": "org.apache.hadoop.hive.ql.io.HiveInputFormat", - "hive.tez.java.opts": "-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps", - "hive.tez.log.level": "INFO", - "hive.tez.max.partition.factor": "2.0", - "hive.tez.min.partition.factor": "0.25", - "hive.tez.smb.number.waves": "0.5", - "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager", - "hive.txn.max.open.batch": "1000", - "hive.txn.timeout": "300", - "hive.user.install.directory": "/user/", - "hive.vectorized.execution.enabled": "true", - "hive.vectorized.execution.reduce.enabled": "false", - "hive.vectorized.groupby.checkinterval": "4096", - "hive.vectorized.groupby.flush.percent": "0.1", - "hive.vectorized.groupby.maxentries": "100000", - "hive.zookeeper.client.port": "2181", - "hive.zookeeper.namespace": "hive_zookeeper_namespace", - "hive.zookeeper.quorum": "%HOSTGROUP::host_group_1%:2181", - "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", - "javax.jdo.option.ConnectionURL": "jdbc:mysql://%HOSTGROUP::host_group_2%/hive?createDatabaseIfNotExist=true", - "javax.jdo.option.ConnectionUserName": "hive" - }, - "hiveserver2-site": { - "hive.security.authorization.enabled": "false" - }, - "kafka-broker": { - "auto.create.topics.enable": "true", - "auto.leader.rebalance.enable": "true", - "compression.type": "producer", - "controlled.shutdown.enable": "true", - "controlled.shutdown.max.retries": "3", - "controlled.shutdown.retry.backoff.ms": "5000", - "controller.message.queue.size": "10", - "controller.socket.timeout.ms": "30000", - "default.replication.factor": "1", - "delete.topic.enable": "false", - "fetch.purgatory.purge.interval.requests": "10000", - "kafka.ganglia.metrics.group": "kafka", - "kafka.ganglia.metrics.port": "8671", - "kafka.ganglia.metrics.reporter.enabled": "true", - "kafka.metrics.reporters": "{{kafka_metrics_reporters}}", - 
"kafka.timeline.metrics.host": "{{metric_collector_host}}", - "kafka.timeline.metrics.maxRowCacheSize": "10000", - "kafka.timeline.metrics.port": "{{metric_collector_port}}", - "kafka.timeline.metrics.reporter.enabled": "true", - "kafka.timeline.metrics.reporter.sendInterval": "5900", - "leader.imbalance.check.interval.seconds": "300", - "leader.imbalance.per.broker.percentage": "10", - "listeners": "PLAINTEXT://localhost:6667", - "log.cleanup.interval.mins": "10", - "log.dirs": "/kafka-logs", - "log.index.interval.bytes": "4096", - "log.index.size.max.bytes": "10485760", - "log.retention.bytes": "-1", - "log.retention.hours": "168", - "log.roll.hours": "168", - "log.segment.bytes": "1073741824", - "message.max.bytes": "1000000", - "min.insync.replicas": "1", - "num.io.threads": "8", - "num.network.threads": "3", - "num.partitions": "1", - "num.recovery.threads.per.data.dir": "1", - "num.replica.fetchers": "1", - "offset.metadata.max.bytes": "4096", - "offsets.commit.required.acks": "-1", - "offsets.commit.timeout.ms": "5000", - "offsets.load.buffer.size": "5242880", - "offsets.retention.check.interval.ms": "600000", - "offsets.retention.minutes": "86400000", - "offsets.topic.compression.codec": "0", - "offsets.topic.num.partitions": "50", - "offsets.topic.replication.factor": "3", - "offsets.topic.segment.bytes": "104857600", - "producer.purgatory.purge.interval.requests": "10000", - "queued.max.requests": "500", - "replica.fetch.max.bytes": "1048576", - "replica.fetch.min.bytes": "1", - "replica.fetch.wait.max.ms": "500", - "replica.high.watermark.checkpoint.interval.ms": "5000", - "replica.lag.max.messages": "4000", - "replica.lag.time.max.ms": "10000", - "replica.socket.receive.buffer.bytes": "65536", - "replica.socket.timeout.ms": "30000", - "socket.receive.buffer.bytes": "102400", - "socket.request.max.bytes": "104857600", - "socket.send.buffer.bytes": "102400", - "zookeeper.connect": "%HOSTGROUP::host_group_1%:2181", - "zookeeper.connection.timeout.ms": "15000", - "zookeeper.session.timeout.ms": "30000", - "zookeeper.sync.time.ms": "2000" - }, - "kafka-env": { - "is_supported_kafka_ranger": "true", - "kafka_log_dir": "/var/log/kafka", - "kafka_pid_dir": "/var/run/kafka", - "kafka_user": "kafka" - }, - "knox-env": { - "knox_group": "knox", - "knox_master_secret": "pass", - "knox_pid_dir": "/var/run/knox", - "knox_user": "knox" - }, - "mahout-env": { - "mahout_user": "mahout" - }, - "mapred-env": { - "jobhistory_heapsize": "900", - "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce", - "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", - "mapred_user": "mapred" - }, - "mapred-site": { - "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "mapreduce.am.max-attempts": "2", - "mapreduce.application.classpath": 
"$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", - "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework", - "mapreduce.cluster.administrators": "hadoop", - "mapreduce.framework.name": "yarn", - "mapreduce.job.counters.max": "130", - "mapreduce.job.emit-timeline-data": "false", - "mapreduce.job.reduce.slowstart.completedmaps": "0.05", - "mapreduce.jobhistory.address": "%HOSTGROUP::host_group_1%:10020", - "mapreduce.jobhistory.bind-host": "0.0.0.0", - "mapreduce.jobhistory.done-dir": "/mr-history/done", - "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", - "mapreduce.jobhistory.recovery.enable": "true", - "mapreduce.jobhistory.recovery.store.class": "org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService", - "mapreduce.jobhistory.recovery.store.leveldb.path": "/hadoop/mapreduce/jhs", - "mapreduce.jobhistory.webapp.address": "%HOSTGROUP::host_group_1%:19888", - "mapreduce.map.java.opts": "-Xmx410m", - "mapreduce.map.log.level": "INFO", - "mapreduce.map.memory.mb": "512", - "mapreduce.map.output.compress": "false", - "mapreduce.map.sort.spill.percent": "0.7", - "mapreduce.map.speculative": "false", - "mapreduce.output.fileoutputformat.compress": "false", - "mapreduce.output.fileoutputformat.compress.type": "BLOCK", - "mapreduce.reduce.input.buffer.percent": "0.0", - "mapreduce.reduce.java.opts": "-Xmx756m", - "mapreduce.reduce.log.level": "INFO", - "mapreduce.reduce.memory.mb": "1024", - "mapreduce.reduce.shuffle.fetch.retry.enabled": "1", - "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000", - "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000", - "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", - "mapreduce.reduce.shuffle.merge.percent": "0.66", - "mapreduce.reduce.shuffle.parallelcopies": "30", - "mapreduce.reduce.speculative": "false", - "mapreduce.shuffle.port": "13562", - "mapreduce.task.io.sort.factor": "100", - "mapreduce.task.io.sort.mb": "358", - "mapreduce.task.timeout": "300000", - "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}", - "yarn.app.mapreduce.am.command-opts": "-Xmx410m", - "yarn.app.mapreduce.am.log.level": "INFO", - "yarn.app.mapreduce.am.resource.mb": "512", - "yarn.app.mapreduce.am.staging-dir": "/user" - }, - "oozie-env": { - "oozie_admin_port": "11001", - "oozie_admin_users": "{oozie_user}, oozie-admin", - "oozie_data_dir": "/hadoop/oozie/data", - "oozie_database": "New Derby Database", - "oozie_derby_database": "Derby", - "oozie_heapsize": "2048m", - "oozie_hostname": "%HOSTGROUP::host_group_1%", - "oozie_log_dir": "/var/log/oozie", - "oozie_permsize": "256m", - "oozie_pid_dir": "/var/run/oozie", - "oozie_user": "oozie" - }, - "oozie-site": { - "oozie.authentication.kerberos.name.rules": "", - "oozie.authentication.simple.anonymous.allowed": "true", - "oozie.authentication.type": "simple", - "oozie.base.url": "http://%HOSTGROUP::host_group_1%:11000/oozie", - "oozie.credentials.credentialclasses": 
"hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials", - "oozie.db.schema.name": "oozie", - "oozie.service.AuthorizationService.security.enabled": "true", - "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf", - "oozie.service.HadoopAccessorService.kerberos.enabled": "false", - "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver", - "oozie.service.JPAService.jdbc.username": "oozie", - "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler", - "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService" - }, - "ranger-hdfs-audit": { - "xasecure.audit.credential.provider.file": "jceks://file{{credential_file}}", - "xasecure.audit.destination.db": "false", - "xasecure.audit.destination.db.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/db/spool", - "xasecure.audit.destination.db.jdbc.driver": "{{jdbc_driver}}", - "xasecure.audit.destination.db.jdbc.url": "{{audit_jdbc_url}}", - "xasecure.audit.destination.db.user": "{{xa_audit_db_user}}", - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "{{ranger_audit_solr_urls}}", - "xasecure.audit.destination.solr.zookeepers": "none", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "false" - }, - "ranger-hdfs-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "hadoop", - "common.name.for.certificate": "", - "hadoop.rpc.protection": "", - "policy_user": "ambari-qa", - "ranger-hdfs-plugin-enabled": "No" - }, - "ranger-hdfs-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}" - }, - "ranger-hdfs-security": { - "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", - "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", - "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.hdfs.service.name": "{{repo_name}}", - "xasecure.add-hadoop-authorization": "true" - }, - "ranger-yarn-audit": { - "xasecure.audit.credential.provider.file": "jceks://file{{credential_file}}", - "xasecure.audit.destination.db": "false", - "xasecure.audit.destination.db.batch.filespool.dir": "/var/log/hadoop/yarn/audit/db/spool", - "xasecure.audit.destination.db.jdbc.driver": "{{jdbc_driver}}", - "xasecure.audit.destination.db.jdbc.url": "{{audit_jdbc_url}}", - "xasecure.audit.destination.db.user": "{{xa_audit_db_user}}", - "xasecure.audit.destination.hdfs": "true", - 
"xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/yarn/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/yarn/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "{{ranger_audit_solr_urls}}", - "xasecure.audit.destination.solr.zookeepers": "none", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "false" - }, - "ranger-yarn-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "yarn", - "common.name.for.certificate": "", - "hadoop.rpc.protection": "", - "policy_user": "ambari-qa", - "ranger-yarn-plugin-enabled": "No" - }, - "ranger-yarn-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}" - }, - "ranger-yarn-security": { - "ranger.plugin.yarn.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.yarn.policy.pollIntervalMs": "30000", - "ranger.plugin.yarn.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml", - "ranger.plugin.yarn.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.yarn.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.yarn.service.name": "{{repo_name}}" - }, - "spark-defaults": { - "spark.driver.extraJavaOptions": "-Dhdp.version={{hdp_full_version}}", - "spark.history.kerberos.keytab": "none", - "spark.history.kerberos.principal": "none", - "spark.history.provider": "org.apache.spark.deploy.yarn.history.YarnHistoryProvider", - "spark.history.ui.port": "18080", - "spark.yarn.am.extraJavaOptions": "-Dhdp.version={{hdp_full_version}}", - "spark.yarn.applicationMaster.waitTries": "10", - "spark.yarn.containerLauncherMaxThreads": "25", - "spark.yarn.driver.memoryOverhead": "384", - "spark.yarn.executor.memoryOverhead": "384", - "spark.yarn.historyServer.address": "{{spark_history_server_host}}:{{spark_history_ui_port}}", - "spark.yarn.max.executor.failures": "3", - "spark.yarn.preserve.staging.files": "false", - "spark.yarn.queue": "default", - "spark.yarn.scheduler.heartbeat.interval-ms": "5000", - "spark.yarn.services": "org.apache.spark.deploy.yarn.history.YarnHistoryService", - "spark.yarn.submit.file.replication": "3" - }, - "spark-env": { - "spark_group": "spark", - "spark_log_dir": "/var/log/spark", - "spark_pid_dir": "/var/run/spark", - "spark_user": "spark" - }, - "sqoop-env": { - "jdbc_drivers": "", - "sqoop_user": "sqoop" - }, - "ssl-client": { - "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", - "ssl.client.keystore.type": "jks", - "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", - "ssl.client.truststore.reload.interval": "10000", - "ssl.client.truststore.type": "jks" - }, - "ssl-server": { - "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", - "ssl.server.keystore.type": "jks", - "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", - "ssl.server.truststore.reload.interval": "10000", - "ssl.server.truststore.type": "jks" - }, - "storm-env": { - "jmxremote_port": "56431", 
- "nimbus_seeds_supported": "true", - "storm_log_dir": "/var/log/storm", - "storm_pid_dir": "/var/run/storm", - "storm_user": "storm" - }, - "storm-site": { - "_storm.min.ruid": "null", - "_storm.thrift.nonsecure.transport": "backtype.storm.security.auth.SimpleTransportPlugin", - "_storm.thrift.secure.transport": "backtype.storm.security.auth.kerberos.KerberosSaslTransportPlugin", - "dev.zookeeper.path": "/tmp/dev-storm-zookeeper", - "drpc.childopts": "-Xmx768m _JAAS_PLACEHOLDER", - "drpc.invocations.port": "3773", - "drpc.port": "3772", - "drpc.queue.size": "128", - "drpc.request.timeout.secs": "600", - "drpc.worker.threads": "64", - "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib", - "logviewer.appender.name": "A1", - "logviewer.childopts": "-Xmx128m _JAAS_PLACEHOLDER", - "logviewer.port": "8000", - "metrics.reporter.register": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter", - "nimbus.cleanup.inbox.freq.secs": "600", - "nimbus.file.copy.expiration.secs": "600", - "nimbus.inbox.jar.expiration.secs": "3600", - "nimbus.monitor.freq.secs": "10", - "nimbus.reassign": "true", - "nimbus.seeds": "['%HOSTGROUP::host_group_2%']", - "nimbus.supervisor.timeout.secs": "60", - "nimbus.task.launch.secs": "120", - "nimbus.task.timeout.secs": "30", - "nimbus.thrift.max_buffer_size": "1048576", - "nimbus.thrift.port": "6627", - "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator", - "storm.cluster.mode": "distributed", - "storm.local.dir": "/hadoop/storm", - "storm.local.mode.zmq": "false", - "storm.log.dir": "{{log_dir}}", - "storm.messaging.netty.buffer_size": "5242880", - "storm.messaging.netty.client_worker_threads": "1", - "storm.messaging.netty.max_retries": "30", - "storm.messaging.netty.max_wait_ms": "1000", - "storm.messaging.netty.min_wait_ms": "100", - "storm.messaging.netty.server_worker_threads": "1", - "storm.messaging.transport": "backtype.storm.messaging.netty.Context", - "storm.thrift.transport": "{{storm_thrift_transport}}", - "storm.zookeeper.connection.timeout": "15000", - "storm.zookeeper.port": "2181", - "storm.zookeeper.retry.interval": "1000", - "storm.zookeeper.retry.intervalceiling.millis": "30000", - "storm.zookeeper.retry.times": "5", - "storm.zookeeper.root": "/storm", - "storm.zookeeper.servers": "['%HOSTGROUP::host_group_1%']", - "storm.zookeeper.session.timeout": "20000", - "supervisor.heartbeat.frequency.secs": "5", - "supervisor.monitor.frequency.secs": "3", - "supervisor.slots.ports": "[6700, 6701]", - "supervisor.worker.start.timeout.secs": "120", - "supervisor.worker.timeout.secs": "30", - "task.heartbeat.frequency.secs": "3", - "task.refresh.poll.secs": "10", - "topology.acker.executors": "null", - "topology.builtin.metrics.bucket.size.secs": "60", - "topology.debug": "false", - "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy", - "topology.enable.message.timeouts": "true", - "topology.error.throttle.interval.secs": "10", - "topology.executor.receive.buffer.size": "1024", - "topology.executor.send.buffer.size": "1024", - "topology.fall.back.on.java.serialization": "true", - "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory", - "topology.max.error.report.per.interval": "5", - "topology.max.replication.wait.time.sec": "{{actual_topology_max_replication_wait_time_sec}}", - "topology.max.replication.wait.time.sec.default": "60", - "topology.max.spout.pending": "null", - "topology.max.task.parallelism": "null", - 
"topology.message.timeout.secs": "30", - "topology.metrics.consumer.register": "[{'class': 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsSink', 'parallelism.hint': 1}]", - "topology.min.replication.count": "{{actual_topology_min_replication_count}}", - "topology.min.replication.count.default": "1", - "topology.optimize": "true", - "topology.receiver.buffer.size": "8", - "topology.skip.missing.kryo.registrations": "false", - "topology.sleep.spout.wait.strategy.time.ms": "1", - "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy", - "topology.state.synchronization.timeout.secs": "60", - "topology.stats.sample.rate": "0.05", - "topology.tick.tuple.freq.secs": "null", - "topology.transfer.buffer.size": "1024", - "topology.trident.batch.emit.interval.millis": "500", - "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer", - "topology.worker.childopts": "null", - "topology.worker.shared.thread.pool.size": "4", - "topology.workers": "1", - "transactional.zookeeper.port": "null", - "transactional.zookeeper.root": "/transactional", - "transactional.zookeeper.servers": "null", - "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER", - "ui.filter": "null", - "ui.port": "8744", - "worker.heartbeat.frequency.secs": "1", - "zmq.hwm": "0", - "zmq.linger.millis": "5000", - "zmq.threads": "1" - }, - "tez-env": { - "tez_user": "tez" - }, - "tez-site": { - "tez.am.am-rm.heartbeat.interval-ms.max": "250", - "tez.am.container.idle.release-timeout-max.millis": "20000", - "tez.am.container.idle.release-timeout-min.millis": "10000", - "tez.am.container.reuse.enabled": "true", - "tez.am.container.reuse.locality.delay-allocation-millis": "250", - "tez.am.container.reuse.non-local-fallback.enabled": "false", - "tez.am.container.reuse.rack-fallback.enabled": "true", - "tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", - "tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "tez.am.log.level": "INFO", - "tez.am.max.app.attempts": "2", - "tez.am.maxtaskfailures.per.node": "10", - "tez.am.resource.memory.mb": "1536", - "tez.am.tez-ui.history-url.template": "__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__", - "tez.am.view-acls": "*", - "tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", - "tez.counters.max": "2000", - "tez.counters.max.groups": "1000", - "tez.generate.debug.artifacts": "false", - "tez.grouping.max-size": "1073741824", - "tez.grouping.min-size": "16777216", - "tez.grouping.split-waves": "1.7", - "tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService", - "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz", - "tez.runtime.compress": "true", - "tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec", - "tez.runtime.convert.user-payload.to.history-text": "false", - "tez.runtime.io.sort.mb": "272", - "tez.runtime.optimize.local.fetch": "true", - "tez.runtime.pipelined.sorter.sort.threads": "2", - "tez.runtime.sorter.class": "PIPELINED", - "tez.runtime.unordered.output.buffer.size-mb": "100", - "tez.session.am.dag.submit.timeout.secs": "300", - "tez.session.client.timeout.secs": "-1", - 
"tez.shuffle-vertex-manager.max-src-fraction": "0.4", - "tez.shuffle-vertex-manager.min-src-fraction": "0.2", - "tez.staging-dir": "/tmp/${user.name}/staging", - "tez.task.am.heartbeat.counter.interval-ms.max": "4000", - "tez.task.generate.counters.per.io": "true", - "tez.task.get-task.sleep.interval-ms.max": "200", - "tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", - "tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "tez.task.max-events-per-heartbeat": "500", - "tez.task.resource.memory.mb": "1536", - "tez.use.cluster.hadoop-libs": "false" - }, - "webhcat-site": { - "templeton.exec.timeout": "60000", - "templeton.hadoop": "/usr/hdp/${hdp.version}/hadoop/bin/hadoop", - "templeton.hadoop.conf.dir": "/etc/hadoop/conf", - "templeton.hcat": "/usr/hdp/${hdp.version}/hive/bin/hcat", - "templeton.hcat.home": "hive.tar.gz/hive/hcatalog", - "templeton.hive.archive": "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz", - "templeton.hive.extra.files": "/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib", - "templeton.hive.home": "hive.tar.gz/hive", - "templeton.hive.path": "hive.tar.gz/hive/bin/hive", - "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://%HOSTGROUP::host_group_2%:9083,hive.metastore.sasl.enabled=false,hive.metastore.execute.setugi=true", - "templeton.jar": "/usr/hdp/${hdp.version}/hive/share/webhcat/svr/lib/hive-webhcat-*.jar", - "templeton.libjars": "/usr/hdp/${hdp.version}/zookeeper/zookeeper.jar,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar", - "templeton.override.enabled": "false", - "templeton.pig.archive": "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz", - "templeton.pig.path": "pig.tar.gz/pig/bin/pig", - "templeton.port": "50111", - "templeton.python": "${env.PYTHON_CMD}", - "templeton.sqoop.archive": "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz", - "templeton.sqoop.home": "sqoop.tar.gz/sqoop", - "templeton.sqoop.path": "sqoop.tar.gz/sqoop/bin/sqoop", - "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage", - "templeton.streaming.jar": "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar", - "templeton.zookeeper.hosts": "%HOSTGROUP::host_group_1%:2181" - }, - "yarn-env": { - "apptimelineserver_heapsize": "1024", - "is_supported_yarn_ranger": "true", - "min_user_id": "1000", - "nodemanager_heapsize": "1024", - "resourcemanager_heapsize": "1024", - "yarn_cgroups_enabled": "false", - "yarn_heapsize": "1024", - "yarn_log_dir_prefix": "/var/log/hadoop-yarn", - "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", - "yarn_user": "yarn" - }, - "yarn-site": { - "hadoop.registry.rm.enabled": "true", - "hadoop.registry.zk.quorum": "%HOSTGROUP::host_group_1%:2181", - "yarn.acl.enable": "false", - "yarn.admin.acl": "yarn", - "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*", - "yarn.client.nodemanager-connect.max-wait-ms": "60000", - "yarn.client.nodemanager-connect.retry-interval-ms": "10000", - "yarn.http.policy": "HTTP_ONLY", - 
"yarn.log-aggregation-enable": "true", - "yarn.log-aggregation.retain-seconds": "2592000", - "yarn.log.server.url": "http://%HOSTGROUP::host_group_1%:19888/jobhistory/logs", - "yarn.node-labels.enabled": "false", - "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500", - "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels", - "yarn.nodemanager.address": "0.0.0.0:45454", - "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", - "yarn.nodemanager.aux-services": "mapreduce_shuffle", - "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler", - "yarn.nodemanager.bind-host": "0.0.0.0", - "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", - "yarn.nodemanager.container-monitor.interval-ms": "3000", - "yarn.nodemanager.delete.debug-delay-sec": "0", - "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90", - "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000", - "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", - "yarn.nodemanager.health-checker.interval-ms": "135000", - "yarn.nodemanager.health-checker.script.timeout-ms": "60000", - "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn", - "yarn.nodemanager.linux-container-executor.cgroups.mount": "false", - "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false", - "yarn.nodemanager.linux-container-executor.group": "hadoop", - "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler", - "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", - "yarn.nodemanager.log-aggregation.compression-type": "gz", - "yarn.nodemanager.log-aggregation.debug-enabled": "false", - "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30", - "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1", - "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", - "yarn.nodemanager.log.retain-second": "604800", - "yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state", - "yarn.nodemanager.recovery.enabled": "true", - "yarn.nodemanager.remote-app-log-dir": "/app-logs", - "yarn.nodemanager.remote-app-log-dir-suffix": "logs", - "yarn.nodemanager.resource.cpu-vcores": "8", - "yarn.nodemanager.resource.memory-mb": "5120", - "yarn.nodemanager.resource.percentage-physical-cpu-limit": "80", - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.nodemanager.vmem-pmem-ratio": "2.1", - "yarn.resourcemanager.address": "%HOSTGROUP::host_group_1%:8050", - "yarn.resourcemanager.admin.address": "%HOSTGROUP::host_group_1%:8141", - "yarn.resourcemanager.am.max-attempts": "2", - "yarn.resourcemanager.bind-host": "0.0.0.0", - "yarn.resourcemanager.connect.max-wait.ms": "900000", - "yarn.resourcemanager.connect.retry-interval.ms": "30000", - "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500", - "yarn.resourcemanager.fs.state-store.uri": "", - "yarn.resourcemanager.ha.enabled": "false", - "yarn.resourcemanager.hostname": "%HOSTGROUP::host_group_1%", - "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", - "yarn.resourcemanager.recovery.enabled": "true", - "yarn.resourcemanager.resource-tracker.address": "%HOSTGROUP::host_group_1%:8025", - "yarn.resourcemanager.scheduler.address": "%HOSTGROUP::host_group_1%:8030", - "yarn.resourcemanager.scheduler.class": 
"org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", - "yarn.resourcemanager.scheduler.monitor.enable": "false", - "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}", - "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore", - "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10", - "yarn.resourcemanager.system-metrics-publisher.enabled": "true", - "yarn.resourcemanager.webapp.address": "%HOSTGROUP::host_group_1%:8088", - "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false", - "yarn.resourcemanager.webapp.https.address": "%HOSTGROUP::host_group_1%:8090", - "yarn.resourcemanager.work-preserving-recovery.enabled": "true", - "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000", - "yarn.resourcemanager.zk-acl": "world:anyone:rwcda", - "yarn.resourcemanager.zk-address": "%HOSTGROUP::host_group_1%:2181", - "yarn.resourcemanager.zk-num-retries": "1000", - "yarn.resourcemanager.zk-retry-interval-ms": "1000", - "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore", - "yarn.resourcemanager.zk-timeout-ms": "10000", - "yarn.scheduler.maximum-allocation-mb": "5120", - "yarn.scheduler.maximum-allocation-vcores": "8", - "yarn.scheduler.minimum-allocation-mb": "512", - "yarn.scheduler.minimum-allocation-vcores": "1", - "yarn.timeline-service.address": "%HOSTGROUP::host_group_1%:10200", - "yarn.timeline-service.bind-host": "0.0.0.0", - "yarn.timeline-service.client.max-retries": "30", - "yarn.timeline-service.client.retry-interval-ms": "1000", - "yarn.timeline-service.enabled": "true", - "yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore", - "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true", - "yarn.timeline-service.http-authentication.type": "simple", - "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline", - "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline", - "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600", - "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000", - "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000", - "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000", - "yarn.timeline-service.recovery.enabled": "true", - "yarn.timeline-service.state-store-class": "org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore", - "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore", - "yarn.timeline-service.ttl-enable": "true", - "yarn.timeline-service.ttl-ms": "2678400000", - "yarn.timeline-service.webapp.address": "%HOSTGROUP::host_group_1%:8188", - "yarn.timeline-service.webapp.https.address": "%HOSTGROUP::host_group_1%:8190" - }, - "zoo.cfg": { - "autopurge.purgeInterval": "24", - "autopurge.snapRetainCount": "30", - "clientPort": "2181", - "dataDir": "/hadoop/zookeeper", - "initLimit": "10", - "syncLimit": "5", - "tickTime": "2000" - }, - "zookeeper-env": { - "zk_log_dir": "/var/log/zookeeper", - "zk_pid_dir": "/var/run/zookeeper", - "zk_user": "zookeeper" - } -} \ No newline at end of file diff --git a/sahara_plugin_ambari/plugins/ambari/resources/configs-2.4.json b/sahara_plugin_ambari/plugins/ambari/resources/configs-2.4.json deleted 
file mode 100644 index 4facd67..0000000 --- a/sahara_plugin_ambari/plugins/ambari/resources/configs-2.4.json +++ /dev/null @@ -1,1331 +0,0 @@ -{ - "accumulo-env": { - "accumulo_gc_heapsize": "256", - "accumulo_instance_name": "hdp-accumulo-instance", - "accumulo_log_dir": "/var/log/accumulo", - "accumulo_master_heapsize": "1024", - "accumulo_monitor_bind_all": "false", - "accumulo_monitor_heapsize": "1024", - "accumulo_other_heapsize": "1024", - "accumulo_pid_dir": "/var/run/accumulo", - "accumulo_tserver_heapsize": "1536", - "accumulo_user": "accumulo", - "instance_secret": "SECRET:accumulo-env:1:instance_secret", - "server_content": "#! /usr/bin/env bash export HADOOP_PREFIX={{hadoop_prefix}} export HADOOP_CONF_DIR={{hadoop_conf_dir}} export JAVA_HOME={{java64_home}} export ZOOKEEPER_HOME={{zookeeper_home}} export ACCUMULO_PID_DIR={{pid_dir}} export ACCUMULO_LOG_DIR={{log_dir}} export ACCUMULO_CONF_DIR={{server_conf_dir}} export ACCUMULO_TSERVER_OPTS=\"-Xmx{{accumulo_tserver_heapsize}}m -Xms{{accumulo_tserver_heapsize}}m\" export ACCUMULO_MASTER_OPTS=\"-Xmx{{accumulo_master_heapsize}}m -Xms{{accumulo_master_heapsize}}m\" export ACCUMULO_MONITOR_OPTS=\"-Xmx{{accumulo_monitor_heapsize}}m -Xms{{accumulo_monitor_heapsize}}m\" export ACCUMULO_GC_OPTS=\"-Xmx{{accumulo_gc_heapsize}}m -Xms{{accumulo_gc_heapsize}}m\" export ACCUMULO_GENERAL_OPTS=\"-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -Djava.net.preferIPv4Stack=true ${ACCUMULO_GENERAL_OPTS}\" export ACCUMULO_OTHER_OPTS=\"-Xmx{{accumulo_other_heapsize}}m -Xms{{accumulo_other_heapsize}}m ${ACCUMULO_OTHER_OPTS}\" export ACCUMULO_MONITOR_BIND_ALL={{monitor_bind_str}} # what do when the JVM runs out of heap memory export ACCUMULO_KILL_CMD='kill -9 %p'" - }, - "accumulo-log4j": { - "audit_log_level": "OFF", - "debug_log_size": "512M", - "debug_num_logs": "10", - "info_log_size": "512M", - "info_num_logs": "10", - "monitor_forwarding_log_level": "WARN" - }, - "accumulo-site": { - "gc.port.client": "50092", - "general.classpaths": "$ACCUMULO_HOME/lib/accumulo-server.jar, $ACCUMULO_HOME/lib/accumulo-core.jar, $ACCUMULO_HOME/lib/accumulo-start.jar, $ACCUMULO_HOME/lib/accumulo-fate.jar, $ACCUMULO_HOME/lib/accumulo-proxy.jar, $ACCUMULO_HOME/lib/[^.].*.jar, $ZOOKEEPER_HOME/zookeeper[^.].*.jar, $HADOOP_CONF_DIR, /usr/hdp/current/hadoop-client/[^.].*.jar, /usr/hdp/current/hadoop-client/lib/(?!slf4j)[^.].*.jar, /usr/hdp/current/hadoop-hdfs-client/[^.].*.jar, /usr/hdp/current/hadoop-mapreduce-client/[^.].*.jar, /usr/hdp/current/hadoop-yarn-client/[^.].*.jar, /usr/hdp/current/hadoop-yarn-client/lib/jersey.*.jar, /usr/hdp/current/hive-client/lib/hive-accumulo-handler.jar,", - "instance.volumes": "hdfs://%HOSTGROUP::host_group_1%:8020/apps/accumulo/data", - "instance.zookeeper.host": "%HOSTGROUP::host_group_1%:2181", - "instance.zookeeper.timeout": "30s", - "master.port.client": "9999", - "monitor.port.client": "50095", - "monitor.port.log4j": "4560", - "trace.port.client": "12234", - "trace.user": "trace", - "tserver.cache.data.size": "128M", - "tserver.cache.index.size": "256M", - "tserver.memory.maps.max": "1G", - "tserver.memory.maps.native.enabled": "true", - "tserver.port.client": "9997", - "tserver.sort.buffer.size": "200M", - "tserver.walog.max.size": "1G" - }, - "ams-env": { - "ambari_metrics_user": "ams", - "metrics_collector_heapsize": "512", - "metrics_collector_log_dir": "/var/log/ambari-metrics-collector", - "metrics_collector_pid_dir": "/var/run/ambari-metrics-collector", - "metrics_monitor_log_dir": 
"/var/log/ambari-metrics-monitor", - "metrics_monitor_pid_dir": "/var/run/ambari-metrics-monitor" - }, - "ams-hbase-env": { - "hbase_classpath_additional": "", - "hbase_log_dir": "/var/log/ambari-metrics-collector", - "hbase_master_heapsize": "768", - "hbase_master_maxperm_size": "128", - "hbase_master_xmn_size": "192", - "hbase_pid_dir": "/var/run/ambari-metrics-collector/", - "hbase_regionserver_heapsize": "512", - "hbase_regionserver_xmn_ratio": "0.2", - "max_open_files_limit": "32768", - "regionserver_xmn_size": "256" - }, - "ams-hbase-policy": { - "security.admin.protocol.acl": "*", - "security.client.protocol.acl": "*", - "security.masterregion.protocol.acl": "*" - }, - "ams-hbase-security-site": { - "ams.zookeeper.keytab": "", - "ams.zookeeper.principal": "", - "hadoop.security.authentication": "", - "hbase.coprocessor.master.classes": "", - "hbase.coprocessor.region.classes": "", - "hbase.master.kerberos.principal": "", - "hbase.master.keytab.file": "", - "hbase.myclient.keytab": "", - "hbase.myclient.principal": "", - "hbase.regionserver.kerberos.principal": "", - "hbase.regionserver.keytab.file": "", - "hbase.security.authentication": "", - "hbase.security.authorization": "", - "hbase.zookeeper.property.authProvider.1": "", - "hbase.zookeeper.property.jaasLoginRenew": "", - "hbase.zookeeper.property.kerberos.removeHostFromPrincipal": "", - "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal": "" - }, - "ams-hbase-site": { - "dfs.client.read.shortcircuit": "true", - "hbase.client.scanner.caching": "10000", - "hbase.client.scanner.timeout.period": "900000", - "hbase.cluster.distributed": "false", - "hbase.hregion.majorcompaction": "0", - "hbase.hregion.max.filesize": "4294967296", - "hbase.hregion.memstore.block.multiplier": "4", - "hbase.hregion.memstore.flush.size": "134217728", - "hbase.hstore.blockingStoreFiles": "200", - "hbase.hstore.flusher.count": "2", - "hbase.local.dir": "${hbase.tmp.dir}/local", - "hbase.master.info.bindAddress": "0.0.0.0", - "hbase.master.info.port": "61310", - "hbase.master.normalizer.class": "org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer", - "hbase.master.port": "61300", - "hbase.master.wait.on.regionservers.mintostart": "1", - "hbase.normalizer.enabled": "true", - "hbase.normalizer.period": "600000", - "hbase.regionserver.global.memstore.lowerLimit": "0.3", - "hbase.regionserver.global.memstore.upperLimit": "0.35", - "hbase.regionserver.info.port": "61330", - "hbase.regionserver.port": "61320", - "hbase.regionserver.thread.compaction.large": "2", - "hbase.regionserver.thread.compaction.small": "3", - "hbase.replication": "false", - "hbase.rootdir": "file:///var/lib/ambari-metrics-collector/hbase", - "hbase.snapshot.enabled": "false", - "hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp", - "hbase.zookeeper.leaderport": "61388", - "hbase.zookeeper.peerport": "61288", - "hbase.zookeeper.property.clientPort": "{{zookeeper_clientPort}}", - "hbase.zookeeper.property.dataDir": "${hbase.tmp.dir}/zookeeper", - "hbase.zookeeper.property.tickTime": "6000", - "hbase.zookeeper.quorum": "{{zookeeper_quorum_hosts}}", - "hfile.block.cache.size": "0.3", - "phoenix.coprocessor.maxMetaDataCacheSize": "20480000", - "phoenix.coprocessor.maxServerCacheTimeToLiveMs": "60000", - "phoenix.groupby.maxCacheSize": "307200000", - "phoenix.mutate.batchSize": "10000", - "phoenix.query.maxGlobalMemoryPercentage": "15", - "phoenix.query.rowKeyOrderSaltedTable": "true", - "phoenix.query.spoolThresholdBytes": "12582912", - 
"phoenix.query.timeoutMs": "1200000", - "phoenix.sequence.saltBuckets": "2", - "phoenix.spool.directory": "${hbase.tmp.dir}/phoenix-spool", - "zookeeper.session.timeout": "120000", - "zookeeper.session.timeout.localHBaseCluster": "120000", - "zookeeper.znode.parent": "/ams-hbase-unsecure" - }, - "ams-site": { - "phoenix.query.maxGlobalMemoryPercentage": "25", - "phoenix.spool.directory": "/tmp", - "timeline.metrics.aggregator.checkpoint.dir": "/var/lib/ambari-metrics-collector/checkpoint", - "timeline.metrics.cluster.aggregate.splitpoints": "", - "timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier": "1", - "timeline.metrics.cluster.aggregator.daily.disabled": "false", - "timeline.metrics.cluster.aggregator.daily.interval": "86400", - "timeline.metrics.cluster.aggregator.daily.ttl": "63072000", - "timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.hourly.disabled": "false", - "timeline.metrics.cluster.aggregator.hourly.interval": "3600", - "timeline.metrics.cluster.aggregator.hourly.ttl": "31536000", - "timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.minute.disabled": "false", - "timeline.metrics.cluster.aggregator.minute.interval": "300", - "timeline.metrics.cluster.aggregator.minute.ttl": "7776000", - "timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.second.disabled": "false", - "timeline.metrics.cluster.aggregator.second.interval": "120", - "timeline.metrics.cluster.aggregator.second.timeslice.interval": "30", - "timeline.metrics.cluster.aggregator.second.ttl": "2592000", - "timeline.metrics.daily.aggregator.minute.interval": "86400", - "timeline.metrics.hbase.compression.scheme": "SNAPPY", - "timeline.metrics.hbase.data.block.encoding": "FAST_DIFF", - "timeline.metrics.hbase.fifo.compaction.enabled": "true", - "timeline.metrics.host.aggregate.splitpoints": "", - "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier": "1", - "timeline.metrics.host.aggregator.daily.disabled": "false", - "timeline.metrics.host.aggregator.daily.ttl": "31536000", - "timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier": "2", - "timeline.metrics.host.aggregator.hourly.disabled": "false", - "timeline.metrics.host.aggregator.hourly.interval": "3600", - "timeline.metrics.host.aggregator.hourly.ttl": "2592000", - "timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier": "2", - "timeline.metrics.host.aggregator.minute.disabled": "false", - "timeline.metrics.host.aggregator.minute.interval": "300", - "timeline.metrics.host.aggregator.minute.ttl": "604800", - "timeline.metrics.host.aggregator.ttl": "86400", - "timeline.metrics.service.checkpointDelay": "60", - "timeline.metrics.service.cluster.aggregator.appIds": "datanode,nodemanager,hbase", - "timeline.metrics.service.default.result.limit": "15840", - "timeline.metrics.service.operation.mode": "embedded", - "timeline.metrics.service.resultset.fetchSize": "2000", - "timeline.metrics.service.rpc.address": "0.0.0.0:60200", - "timeline.metrics.service.use.groupBy.aggregators": "true", - "timeline.metrics.service.watcher.delay": "30", - "timeline.metrics.service.watcher.initial.delay": "600", - "timeline.metrics.service.watcher.timeout": "30", - "timeline.metrics.service.webapp.address": "0.0.0.0:6188", - "timeline.metrics.sink.collection.period": "60", - "timeline.metrics.sink.report.interval": "60" - }, - 
"anonymization-rules": { - "anonymization-rules-content": "{ \"rules\":[ { \"name\":\"ip_address\", \"path\":null, \"pattern\": \"[ :\\\\/]?[0-9]{1,3}\\\\.[0-9]{1,3}\\\\.[0-9]{1,3}\\\\.[0-9]{1,3}[ :\\\\/]?\", \"extract\": \"[ :\\\\/]?([0-9\\\\.]+)[ :\\\\/]?\", \"shared\":true }, { \"name\":\"domain\", \"path\":null, \"pattern\": \"$DOMAIN_RULE$\", \"shared\":true }, { \"name\":\"delete_oozie_jdbc_password\", \"path\":\"oozie-site.xml\", \"property\": \"oozie.service.JPAService.jdbc.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_sqoop_metastore_password\", \"path\":\"sqoop-site.xml\", \"property\": \"sqoop.metastore.client.autoconnect.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_hive_metastore_password\", \"path\":\"hive-site.xml\", \"property\": \"javax.jdo.option.ConnectionPassword\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" } ] }" - }, - "application-properties": { - "atlas.authentication.keytab": "/etc/security/keytabs/atlas.service.keytab", - "atlas.authentication.method": "simple", - "atlas.authentication.principal": "atlas", - "atlas.enableTLS": "false", - "atlas.graph.index.search.backend": "elasticsearch", - "atlas.graph.index.search.directory": "/var/lib/atlas/data/es", - "atlas.graph.index.search.elasticsearch.client-only": "false", - "atlas.graph.index.search.elasticsearch.local-mode": "true", - "atlas.graph.storage.backend": "berkeleyje", - "atlas.graph.storage.directory": "/var/lib/atlas/data/berkeley", - "atlas.http.authentication.enabled": "false", - "atlas.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", - "atlas.http.authentication.kerberos.name.rules": "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*// DEFAULT", - "atlas.http.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", - "atlas.http.authentication.type": "simple", - "atlas.lineage.hive.process.inputs.name": "inputs", - "atlas.lineage.hive.process.outputs.name": "outputs", - "atlas.lineage.hive.process.type.name": "Process", - "atlas.lineage.hive.table.schema.query.Table": "Table where name='%s'\\, columns", - "atlas.lineage.hive.table.schema.query.hive_table": "hive_table where name='%s'\\, columns", - "atlas.lineage.hive.table.type.name": "DataSet", - "atlas.server.bind.address": "localhost", - "atlas.server.http.port": "21000", - "atlas.server.https.port": "21443" - }, - "atlas-env": { - "metadata_classpath": "", - "metadata_data_dir": "/var/lib/atlas/data", - "metadata_expanded_war_dir": "./server/webapp", - "metadata_log_dir": "/var/log/atlas", - "metadata_opts": "-Xmx1024m -Dlog4j.configuration=atlas-log4j.xml", - "metadata_pid_dir": "/var/run/atlas", - "metadata_user": "atlas" - }, - "capacity-scheduler": { - "yarn.scheduler.capacity.default.minimum-user-limit-percent": "100", - "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", - "yarn.scheduler.capacity.maximum-applications": "10000", - "yarn.scheduler.capacity.node-locality-delay": "40", - "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator", - "yarn.scheduler.capacity.root.accessible-node-labels": "*", - "yarn.scheduler.capacity.root.acl_administer_queue": "*", - "yarn.scheduler.capacity.root.capacity": "100", - "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", - "yarn.scheduler.capacity.root.default.acl_submit_applications": "*", - "yarn.scheduler.capacity.root.default.capacity": "100", - "yarn.scheduler.capacity.root.default.maximum-capacity": "100", - 
"yarn.scheduler.capacity.root.default.state": "RUNNING", - "yarn.scheduler.capacity.root.default.user-limit-factor": "1", - "yarn.scheduler.capacity.root.queues": "default" - }, - "capture-levels": { - "capture-levels-content": "[ { \"name\":\"L1\", \"description\":\"Configurations\", \"filter\":\"$type == 'CONF' or ($type == 'REPORTS' and $service is None) or ($type == 'LOG' and $service is None)\" }, { \"name\":\"L2\", \"description\":\"Configurations and Metrics\", \"filter\":\"$type in ['CONF', 'REPORTS'] or ($type == 'REPORTS' and $service is None) or ($type == 'LOG' and $service is None)\" }, { \"name\":\"L3\", \"description\":\"Configurations, Metrics and Logs\", \"filter\":\"$type in ['CONF', 'REPORTS', 'LOG'] or ($type == 'REPORTS' and $service is None) or ($type == 'LOG' and $service is None)\" } ]" - }, - "cluster-env": { - "fetch_nonlocal_groups": "true", - "ignore_groupsusers_create": "false", - "kerberos_domain": "EXAMPLE.COM", - "override_uid": "true", - "repo_suse_rhel_template": "[{{repo_id}}] name={{repo_id}} {% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %} path=/ enabled=1 gpgcheck=0", - "repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}", - "security_enabled": "false", - "smokeuser": "ambari-qa", - "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", - "user_group": "hadoop" - }, - "core-site": { - "fs.defaultFS": "hdfs://%HOSTGROUP::host_group_1%:8020", - "fs.trash.interval": "360", - "ha.failover-controller.active-standby-elector.zk.op.retries": "120", - "hadoop.http.authentication.simple.anonymous.allowed": "true", - "hadoop.proxyuser.falcon.groups": "*", - "hadoop.proxyuser.falcon.hosts": "*", - "hadoop.proxyuser.hcat.groups": "*", - "hadoop.proxyuser.hcat.hosts": "%HOSTGROUP::host_group_1%", - "hadoop.proxyuser.hdfs.groups": "*", - "hadoop.proxyuser.hdfs.hosts": "*", - "hadoop.proxyuser.hive.groups": "*", - "hadoop.proxyuser.hive.hosts": "%HOSTGROUP::host_group_1%", - "hadoop.proxyuser.oozie.groups": "*", - "hadoop.proxyuser.oozie.hosts": "%HOSTGROUP::host_group_1%", - "hadoop.security.auth_to_local": "DEFAULT", - "hadoop.security.authentication": "simple", - "hadoop.security.authorization": "false", - "hadoop.security.key.provider.path": "", - "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", - "io.file.buffer.size": "131072", - "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", - "ipc.client.connect.max.retries": "50", - "ipc.client.connection.maxidletime": "30000", - "ipc.client.idlethreshold": "8000", - "ipc.server.tcpnodelay": "true", - "mapreduce.jobtracker.webinterface.trusted": "false", - "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py" - }, - "falcon-env": { - "falcon.embeddedmq": "true", - "falcon.embeddedmq.data": "/hadoop/falcon/embeddedmq/data", - "falcon.emeddedmq.port": "61616", - "falcon_local_dir": "/hadoop/falcon", - "falcon_log_dir": "/var/log/falcon", - "falcon_pid_dir": "/var/run/falcon", - "falcon_port": "15000", - "falcon_store_uri": "file:///hadoop/falcon/store", - "falcon_user": "falcon", - "supports_hive_dr": "true" - }, - "falcon-runtime.properties": { - "*.domain": "${falcon.app.type}", - "*.log.cleanup.frequency.days.retention": "days(7)", - "*.log.cleanup.frequency.hours.retention": "minutes(1)", - "*.log.cleanup.frequency.minutes.retention": "hours(6)", - "*.log.cleanup.frequency.months.retention": "months(3)" 
- }, - "falcon-startup.properties": { - "*.ConfigSyncService.impl": "org.apache.falcon.resource.ConfigSyncService", - "*.ProcessInstanceManager.impl": "org.apache.falcon.resource.InstanceManager", - "*.SchedulableEntityManager.impl": "org.apache.falcon.resource.SchedulableEntityManager", - "*.application.services": "org.apache.falcon.security.AuthenticationInitializationService,\\ org.apache.falcon.workflow.WorkflowJobEndNotificationService, \\ org.apache.falcon.service.ProcessSubscriberService,\\ org.apache.falcon.entity.store.ConfigurationStore,\\ org.apache.falcon.rerun.service.RetryService,\\ org.apache.falcon.rerun.service.LateRunService,\\ org.apache.falcon.service.LogCleanupService,\\ org.apache.falcon.metadata.MetadataMappingService", - "*.broker.impl.class": "org.apache.activemq.ActiveMQConnectionFactory", - "*.broker.ttlInMins": "4320", - "*.broker.url": "tcp://%HOSTGROUP::host_group_1%:61616", - "*.catalog.service.impl": "org.apache.falcon.catalog.HiveCatalogService", - "*.config.store.uri": "file:///hadoop/falcon/store", - "*.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph,\\ org.apache.falcon.entity.ColoClusterRelation,\\ org.apache.falcon.group.FeedGroupMap,\\ org.apache.falcon.service.SharedLibraryHostingService", - "*.domain": "${falcon.app.type}", - "*.entity.topic": "FALCON.ENTITY.TOPIC", - "*.falcon.authentication.type": "simple", - "*.falcon.cleanup.service.frequency": "days(1)", - "*.falcon.enableTLS": "false", - "*.falcon.graph.blueprints.graph": "com.thinkaurelius.titan.core.TitanFactory", - "*.falcon.graph.preserve.history": "false", - "*.falcon.graph.serialize.path": "/hadoop/falcon/data/lineage", - "*.falcon.graph.storage.backend": "berkeleyje", - "*.falcon.graph.storage.directory": "/hadoop/falcon/data/lineage/graphdb", - "*.falcon.http.authentication.blacklisted.users": "", - "*.falcon.http.authentication.cookie.domain": "EXAMPLE.COM", - "*.falcon.http.authentication.kerberos.name.rules": "DEFAULT", - "*.falcon.http.authentication.signature.secret": "falcon", - "*.falcon.http.authentication.simple.anonymous.allowed": "true", - "*.falcon.http.authentication.token.validity": "36000", - "*.falcon.http.authentication.type": "simple", - "*.falcon.security.authorization.admin.groups": "falcon", - "*.falcon.security.authorization.admin.users": "falcon,ambari-qa", - "*.falcon.security.authorization.enabled": "false", - "*.falcon.security.authorization.provider": "org.apache.falcon.security.DefaultAuthorizationProvider", - "*.falcon.security.authorization.superusergroup": "falcon", - "*.hive.shared.libs": "hive-exec,hive-metastore,hive-common,hive-service,hive-hcatalog-server-extensions,\\ hive-hcatalog-core,hive-jdbc,hive-webhcat-java-client", - "*.internal.queue.size": "1000", - "*.journal.impl": "org.apache.falcon.transaction.SharedFileSystemJournal", - "*.max.retry.failure.count": "1", - "*.oozie.feed.workflow.builder": "org.apache.falcon.workflow.OozieFeedWorkflowBuilder", - "*.oozie.process.workflow.builder": "org.apache.falcon.workflow.OozieProcessWorkflowBuilder", - "*.retry.recorder.path": "${falcon.log.dir}/retry", - "*.shared.libs": "activemq-core,ant,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3,commons-el", - "*.system.lib.location": "${falcon.home}/server/webapp/${falcon.app.type}/WEB-INF/lib", - "*.workflow.engine.impl": "org.apache.falcon.workflow.engine.OozieWorkflowEngine", - "prism.application.services": "org.apache.falcon.entity.store.ConfigurationStore", - "prism.configstore.listeners": 
"org.apache.falcon.entity.v0.EntityGraph,\\ org.apache.falcon.entity.ColoClusterRelation,\\ org.apache.falcon.group.FeedGroupMap" - }, - "flume-env": { - "flume_conf_dir": "/etc/flume/conf", - "flume_log_dir": "/var/log/flume", - "flume_run_dir": "/var/run/flume", - "flume_user": "flume" - }, - "gateway-site": { - "gateway.gateway.conf.dir": "deployments", - "gateway.hadoop.kerberos.secured": "false", - "gateway.path": "gateway", - "gateway.port": "8443", - "java.security.auth.login.config": "/etc/knox/conf/krb5JAASLogin.conf", - "java.security.krb5.conf": "/etc/knox/conf/krb5.conf", - "sun.security.krb5.debug": "true" - }, - "hadoop-env": { - "dtnode_heapsize": "1024m", - "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "hadoop_root_logger": "INFO,RFA", - "hdfs_log_dir_prefix": "/var/log/hadoop", - "hdfs_user": "hdfs", - "hdfs_user_nofile_limit": "128000", - "hdfs_user_nproc_limit": "65536", - "keyserver_host": "", - "keyserver_port": "", - "namenode_heapsize": "1024m", - "namenode_opt_maxnewsize": "128m", - "namenode_opt_maxpermsize": "256m", - "namenode_opt_newsize": "128m", - "namenode_opt_permsize": "128m", - "nfsgateway_heapsize": "1024", - "proxyuser_group": "users" - }, - "hadoop-policy": { - "security.admin.operations.protocol.acl": "hadoop", - "security.client.datanode.protocol.acl": "*", - "security.client.protocol.acl": "*", - "security.datanode.protocol.acl": "*", - "security.inter.datanode.protocol.acl": "*", - "security.inter.tracker.protocol.acl": "*", - "security.job.client.protocol.acl": "*", - "security.job.task.protocol.acl": "*", - "security.namenode.protocol.acl": "*", - "security.refresh.policy.protocol.acl": "hadoop", - "security.refresh.usertogroups.mappings.protocol.acl": "hadoop" - }, - "hbase-env": { - "hbase_java_io_tmpdir": "/tmp", - "hbase_log_dir": "/var/log/hbase", - "hbase_master_heapsize": "8192", - "hbase_pid_dir": "/var/run/hbase", - "hbase_regionserver_heapsize": "8192", - "hbase_regionserver_xmn_max": "512", - "hbase_regionserver_xmn_ratio": "0.2", - "hbase_user": "hbase", - "hbase_user_nofile_limit": "32000", - "hbase_user_nproc_limit": "16000", - "phoenix_sql_enabled": "false" - }, - "hbase-policy": { - "security.admin.protocol.acl": "*", - "security.client.protocol.acl": "*", - "security.masterregion.protocol.acl": "*" - }, - "hbase-site": { - "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", - "hbase.bulkload.staging.dir": "/apps/hbase/staging", - "hbase.client.keyvalue.maxsize": "1048576", - "hbase.client.retries.number": "35", - "hbase.client.scanner.caching": "100", - "hbase.cluster.distributed": "true", - "hbase.coprocessor.master.classes": "", - "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint", - "hbase.defaults.for.version.skip": "true", - "hbase.hregion.majorcompaction": "604800000", - "hbase.hregion.majorcompaction.jitter": "0.50", - "hbase.hregion.max.filesize": "10737418240", - "hbase.hregion.memstore.block.multiplier": "4", - "hbase.hregion.memstore.flush.size": "134217728", - "hbase.hregion.memstore.mslab.enabled": "true", - "hbase.hstore.blockingStoreFiles": "10", - "hbase.hstore.compaction.max": "10", - "hbase.hstore.compactionThreshold": "3", - "hbase.local.dir": "${hbase.tmp.dir}/local", - "hbase.master.info.bindAddress": "0.0.0.0", - "hbase.master.info.port": "16010", - "hbase.master.port": "16000", - "hbase.regionserver.global.memstore.size": "0.4", - "hbase.regionserver.handler.count": "30", - "hbase.regionserver.info.port": "16030", - 
"hbase.regionserver.port": "16020", - "hbase.regionserver.wal.codec": "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec", - "hbase.rootdir": "hdfs://%HOSTGROUP::host_group_1%:8020/apps/hbase/data", - "hbase.rpc.protection": "authentication", - "hbase.rpc.timeout": "90000", - "hbase.security.authentication": "simple", - "hbase.security.authorization": "false", - "hbase.superuser": "hbase", - "hbase.tmp.dir": "/tmp/hbase-${user.name}", - "hbase.zookeeper.property.clientPort": "2181", - "hbase.zookeeper.quorum": "%HOSTGROUP::host_group_1%", - "hbase.zookeeper.useMulti": "true", - "hfile.block.cache.size": "0.4", - "phoenix.query.timeoutMs": "60000", - "zookeeper.session.timeout": "90000", - "zookeeper.znode.parent": "/hbase-unsecure" - }, - "hdfs-site": { - "dfs.block.access.token.enable": "true", - "dfs.blockreport.initialDelay": "120", - "dfs.blocksize": "134217728", - "dfs.client.read.shortcircuit": "true", - "dfs.client.read.shortcircuit.streams.cache.size": "4096", - "dfs.client.retry.policy.enabled": "false", - "dfs.cluster.administrators": "hdfs", - "dfs.content-summary.limit": "5000", - "dfs.datanode.address": "0.0.0.0:50010", - "dfs.datanode.balance.bandwidthPerSec": "6250000", - "dfs.datanode.data.dir": "/hadoop/hdfs/data", - "dfs.datanode.data.dir.perm": "750", - "dfs.datanode.du.reserved": "1073741824", - "dfs.datanode.failed.volumes.tolerated": "0", - "dfs.datanode.http.address": "0.0.0.0:50075", - "dfs.datanode.https.address": "0.0.0.0:50475", - "dfs.datanode.ipc.address": "0.0.0.0:8010", - "dfs.datanode.max.transfer.threads": "16384", - "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", - "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", - "dfs.encryption.key.provider.uri": "", - "dfs.heartbeat.interval": "3", - "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", - "dfs.http.policy": "HTTP_ONLY", - "dfs.https.port": "50470", - "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", - "dfs.journalnode.http-address": "0.0.0.0:8480", - "dfs.journalnode.https-address": "0.0.0.0:8481", - "dfs.namenode.accesstime.precision": "0", - "dfs.namenode.audit.log.async": "true", - "dfs.namenode.avoid.read.stale.datanode": "true", - "dfs.namenode.avoid.write.stale.datanode": "true", - "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", - "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", - "dfs.namenode.checkpoint.period": "21600", - "dfs.namenode.checkpoint.txns": "1000000", - "dfs.namenode.fslock.fair": "false", - "dfs.namenode.handler.count": "800", - "dfs.namenode.http-address": "%HOSTGROUP::host_group_1%:50070", - "dfs.namenode.https-address": "%HOSTGROUP::host_group_1%:50470", - "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", - "dfs.namenode.name.dir.restore": "true", - "dfs.namenode.rpc-address": "%HOSTGROUP::host_group_1%:8020", - "dfs.namenode.safemode.threshold-pct": "1", - "dfs.namenode.secondary.http-address": "%HOSTGROUP::host_group_1%:50090", - "dfs.namenode.stale.datanode.interval": "30000", - "dfs.namenode.startup.delay.block.deletion.sec": "3600", - "dfs.namenode.write.stale.datanode.ratio": "1.0f", - "dfs.permissions.enabled": "true", - "dfs.permissions.superusergroup": "hdfs", - "dfs.replication": "3", - "dfs.replication.max": "50", - "dfs.support.append": "true", - "dfs.webhdfs.enabled": "true", - "fs.permissions.umask-mode": "022", - "nfs.exports.allowed.hosts": "* rw", - "nfs.file.dump.dir": "/tmp/.hdfs-nfs" - }, - "hive-env": { - "cost_based_optimizer": "On", - "hcat_log_dir": "/var/log/webhcat", - 
"hcat_pid_dir": "/var/run/webhcat", - "hcat_user": "hcat", - "hive.client.heapsize": "1024", - "hive.heapsize": "11963", - "hive.metastore.heapsize": "3987", - "hive_ambari_database": "MySQL", - "hive_database": "New MySQL Database", - "hive_database_name": "hive", - "hive_database_type": "mysql", - "hive_exec_orc_storage_strategy": "SPEED", - "hive_log_dir": "/var/log/hive", - "hive_pid_dir": "/var/run/hive", - "hive_security_authorization": "None", - "hive_timeline_logging_enabled": "true", - "hive_txn_acid": "off", - "hive_user": "hive", - "hive_user_nofile_limit": "32000", - "hive_user_nproc_limit": "16000", - "webhcat_user": "hcat" - }, - "hive-site": { - "ambari.hive.db.schema.name": "hive", - "atlas.cluster.name": "default", - "atlas.hook.hive.maxThreads": "1", - "atlas.hook.hive.minThreads": "1", - "atlas.rest.address": "", - "datanucleus.autoCreateSchema": "false", - "datanucleus.cache.level2.type": "none", - "datanucleus.fixedDatastore": "true", - "hive.auto.convert.join": "true", - "hive.auto.convert.join.noconditionaltask": "true", - "hive.auto.convert.join.noconditionaltask.size": "715827882", - "hive.auto.convert.sortmerge.join": "true", - "hive.auto.convert.sortmerge.join.to.mapjoin": "false", - "hive.cbo.enable": "true", - "hive.cli.print.header": "false", - "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore", - "hive.cluster.delegation.token.store.zookeeper.connectString": "%HOSTGROUP::host_group_1%:2181", - "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation", - "hive.compactor.abortedtxn.threshold": "1000", - "hive.compactor.check.interval": "300L", - "hive.compactor.delta.num.threshold": "10", - "hive.compactor.delta.pct.threshold": "0.1f", - "hive.compactor.initiator.on": "false", - "hive.compactor.worker.threads": "0", - "hive.compactor.worker.timeout": "86400L", - "hive.compute.query.using.stats": "true", - "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role", - "hive.convert.join.bucket.mapjoin.tez": "false", - "hive.default.fileformat": "TextFile", - "hive.default.fileformat.managed": "TextFile", - "hive.enforce.bucketing": "false", - "hive.enforce.sorting": "true", - "hive.enforce.sortmergebucketmapjoin": "true", - "hive.exec.compress.intermediate": "false", - "hive.exec.compress.output": "false", - "hive.exec.dynamic.partition": "true", - "hive.exec.dynamic.partition.mode": "strict", - "hive.exec.failure.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook", - "hive.exec.max.created.files": "100000", - "hive.exec.max.dynamic.partitions": "5000", - "hive.exec.max.dynamic.partitions.pernode": "2000", - "hive.exec.orc.compression.strategy": "SPEED", - "hive.exec.orc.default.compress": "ZLIB", - "hive.exec.orc.default.stripe.size": "67108864", - "hive.exec.orc.encoding.strategy": "SPEED", - "hive.exec.parallel": "false", - "hive.exec.parallel.thread.number": "8", - "hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook,org.apache.atlas.hive.hook.HiveHook", - "hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook", - "hive.exec.reducers.bytes.per.reducer": "67108864", - "hive.exec.reducers.max": "1009", - "hive.exec.scratchdir": "/tmp/hive", - "hive.exec.submit.local.task.via.child": "true", - "hive.exec.submitviachild": "false", - "hive.execution.engine": "tez", - "hive.fetch.task.aggr": "false", - "hive.fetch.task.conversion": "more", - "hive.fetch.task.conversion.threshold": "1073741824", - 
"hive.limit.optimize.enable": "true", - "hive.limit.pushdown.memory.usage": "0.04", - "hive.map.aggr": "true", - "hive.map.aggr.hash.force.flush.memory.threshold": "0.9", - "hive.map.aggr.hash.min.reduction": "0.5", - "hive.map.aggr.hash.percentmemory": "0.5", - "hive.mapjoin.bucket.cache.size": "10000", - "hive.mapjoin.optimized.hashtable": "true", - "hive.mapred.reduce.tasks.speculative.execution": "false", - "hive.merge.mapfiles": "true", - "hive.merge.mapredfiles": "false", - "hive.merge.orcfile.stripe.level": "true", - "hive.merge.rcfile.block.level": "true", - "hive.merge.size.per.task": "256000000", - "hive.merge.smallfiles.avgsize": "16000000", - "hive.merge.tezfiles": "false", - "hive.metastore.authorization.storage.checks": "false", - "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order", - "hive.metastore.client.connect.retry.delay": "5s", - "hive.metastore.client.socket.timeout": "1800s", - "hive.metastore.connect.retries": "24", - "hive.metastore.execute.setugi": "true", - "hive.metastore.failure.retries": "24", - "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab", - "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM", - "hive.metastore.pre.event.listeners": "org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener", - "hive.metastore.sasl.enabled": "false", - "hive.metastore.server.max.threads": "100000", - "hive.metastore.uris": "thrift://%HOSTGROUP::host_group_1%:9083", - "hive.metastore.warehouse.dir": "/apps/hive/warehouse", - "hive.optimize.bucketmapjoin": "true", - "hive.optimize.bucketmapjoin.sortedmerge": "false", - "hive.optimize.constant.propagation": "true", - "hive.optimize.index.filter": "true", - "hive.optimize.metadataonly": "true", - "hive.optimize.null.scan": "true", - "hive.optimize.reducededuplication": "true", - "hive.optimize.reducededuplication.min.reducer": "4", - "hive.optimize.sort.dynamic.partition": "false", - "hive.orc.compute.splits.num.threads": "10", - "hive.orc.splits.include.file.footer": "false", - "hive.prewarm.enabled": "false", - "hive.prewarm.numcontainers": "3", - "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator", - "hive.security.authorization.enabled": "false", - "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory", - "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator", - "hive.security.metastore.authorization.auth.reads": "true", - "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", - "hive.server2.allow.user.substitution": "true", - "hive.server2.authentication": "NONE", - "hive.server2.authentication.spnego.keytab": "HTTP/_HOST@EXAMPLE.COM", - "hive.server2.authentication.spnego.principal": "/etc/security/keytabs/spnego.service.keytab", - "hive.server2.enable.doAs": "true", - "hive.server2.logging.operation.enabled": "true", - "hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs", - "hive.server2.support.dynamic.service.discovery": "true", - "hive.server2.table.type.mapping": "CLASSIC", - "hive.server2.tez.default.queues": "default", - "hive.server2.tez.initialize.default.sessions": "false", - "hive.server2.tez.sessions.per.default.queue": "1", - "hive.server2.thrift.http.path": "cliservice", - "hive.server2.thrift.http.port": "10001", - 
"hive.server2.thrift.max.worker.threads": "500", - "hive.server2.thrift.port": "10000", - "hive.server2.thrift.sasl.qop": "auth", - "hive.server2.transport.mode": "binary", - "hive.server2.use.SSL": "false", - "hive.server2.zookeeper.namespace": "hiveserver2", - "hive.smbjoin.cache.rows": "10000", - "hive.stats.autogather": "true", - "hive.stats.dbclass": "fs", - "hive.stats.fetch.column.stats": "true", - "hive.stats.fetch.partition.stats": "true", - "hive.support.concurrency": "false", - "hive.tez.auto.reducer.parallelism": "true", - "hive.tez.container.size": "2048", - "hive.tez.cpu.vcores": "-1", - "hive.tez.dynamic.partition.pruning": "true", - "hive.tez.dynamic.partition.pruning.max.data.size": "104857600", - "hive.tez.dynamic.partition.pruning.max.event.size": "1048576", - "hive.tez.input.format": "org.apache.hadoop.hive.ql.io.HiveInputFormat", - "hive.tez.java.opts": "-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps", - "hive.tez.log.level": "INFO", - "hive.tez.max.partition.factor": "2.0", - "hive.tez.min.partition.factor": "0.25", - "hive.tez.smb.number.waves": "0.5", - "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager", - "hive.txn.max.open.batch": "1000", - "hive.txn.timeout": "300", - "hive.user.install.directory": "/user/", - "hive.vectorized.execution.enabled": "true", - "hive.vectorized.execution.reduce.enabled": "false", - "hive.vectorized.groupby.checkinterval": "4096", - "hive.vectorized.groupby.flush.percent": "0.1", - "hive.vectorized.groupby.maxentries": "100000", - "hive.zookeeper.client.port": "2181", - "hive.zookeeper.namespace": "hive_zookeeper_namespace", - "hive.zookeeper.quorum": "%HOSTGROUP::host_group_1%:2181", - "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", - "javax.jdo.option.ConnectionURL": "jdbc:mysql://%HOSTGROUP::host_group_1%/hive?createDatabaseIfNotExist=true", - "javax.jdo.option.ConnectionUserName": "hive" - }, - "hiveserver2-site": { - "hive.security.authorization.enabled": "false" - }, - "hst-agent-conf": { - "agent.loglevel": "INFO", - "agent.tmp_dir": "/var/lib/smartsense/hst-agent/data/tmp", - "bundle.compress_captured_log_locally": "false", - "bundle.logs_to_capture": "(.*).log$|(.*).log.1$|(.*).out$", - "management.patch.auto.apply.enabled": "true", - "server.connection_retry_count": "50", - "server.connection_retry_interval": "5", - "upload.retry_count": "50", - "upload.retry_interval": "15" - }, - "hst-common-conf": { - "agent.capture.level": "L2", - "security.anonymization.enabled": "true" - }, - "hst-log4j": { - "hst-log4j-content": "# Copyright 2014 The Apache Software Foundation # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # \"License\"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Define some default values that can be overridden by system properties # Root logger option log4j.rootLogger=INFO,file log4j.appender.file=org.apache.log4j.RollingFileAppender log4j.appender.file.File=/var/log/hst/${log.file.name} log4j.appender.file.MaxFileSize=80MB log4j.appender.file.MaxBackupIndex=60 log4j.appender.file.layout=org.apache.log4j.PatternLayout log4j.appender.file.layout.ConversionPattern=%d{DATE} %5p [%t] %c{1}:%L - %m%n # HST logger log4j.logger.com.hortonworks=DEBUG com.github.oxo42.stateless4j=WARN log4j.logger.com.sun.jersey=WARN log4j.logger.org.eclipse.jetty.server=INFO" - }, - "hst-server-conf": { - "agent.request.processing.timeout": "1800", - "agent.request.syncup.interval": "60", - "bundle.monitor.interval": "20", - "client.password_less_ssh.enabled": "false", - "client.threadpool.size.max": "50", - "customer.account.name": "", - "customer.notification.email": "", - "customer.smartsense.id": "", - "gateway.enabled": "false", - "gateway.host": "Please Specify", - "gateway.port": "9451", - "gateway.registration.port": "9450", - "server.http.session.inactive_timeout": "1800", - "server.min_required_storage": "1", - "server.port": "9000", - "server.storage.dir": "/var/lib/smartsense/hst-server/data", - "server.tmp.dir": "/var/lib/smartsense/hst-server/tmp" - }, - "kafka-broker": { - "auto.create.topics.enable": "true", - "auto.leader.rebalance.enable": "true", - "compression.type": "producer", - "controlled.shutdown.enable": "true", - "controlled.shutdown.max.retries": "3", - "controlled.shutdown.retry.backoff.ms": "5000", - "controller.message.queue.size": "10", - "controller.socket.timeout.ms": "30000", - "default.replication.factor": "1", - "delete.topic.enable": "false", - "external.kafka.metrics.exclude.prefix": "kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec", - "external.kafka.metrics.include.prefix": "kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile,kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile,kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile,kafka.network.RequestMetrics.RequestsPerSec.request", - "fetch.purgatory.purge.interval.requests": "10000", - "kafka.ganglia.metrics.group": "kafka", - "kafka.ganglia.metrics.port": "8671", - "kafka.ganglia.metrics.reporter.enabled": "true", - "kafka.timeline.metrics.host": "{{metric_collector_host}}", - "kafka.timeline.metrics.maxRowCacheSize": "10000", - "kafka.timeline.metrics.port": "{{metric_collector_port}}", - "kafka.timeline.metrics.reporter.enabled": "true", - "kafka.timeline.metrics.reporter.sendInterval": "5900", - "leader.imbalance.check.interval.seconds": "300", - "leader.imbalance.per.broker.percentage": "10", - "listeners": "PLAINTEXT://localhost:6667", - "log.cleanup.interval.mins": "10", - "log.dirs": "/kafka-logs", - "log.index.interval.bytes": "4096", - "log.index.size.max.bytes": "10485760", - "log.retention.bytes": "-1", - "log.retention.hours": "168", - "log.roll.hours": "168", - "log.segment.bytes": "1073741824", - "message.max.bytes": "1000000", - "min.insync.replicas": "1", - "num.io.threads": "8", - "num.network.threads": "3", - "num.partitions": "1", - "num.recovery.threads.per.data.dir": "1", - "num.replica.fetchers": "1", - "offset.metadata.max.bytes": "4096", - "offsets.commit.required.acks": "-1", - "offsets.commit.timeout.ms": "5000", - "offsets.load.buffer.size": "5242880", - "offsets.retention.check.interval.ms": "600000", 
- "offsets.retention.minutes": "86400000", - "offsets.topic.compression.codec": "0", - "offsets.topic.num.partitions": "50", - "offsets.topic.replication.factor": "3", - "offsets.topic.segment.bytes": "104857600", - "producer.purgatory.purge.interval.requests": "10000", - "queued.max.requests": "500", - "replica.fetch.max.bytes": "1048576", - "replica.fetch.min.bytes": "1", - "replica.fetch.wait.max.ms": "500", - "replica.high.watermark.checkpoint.interval.ms": "5000", - "replica.lag.max.messages": "4000", - "replica.lag.time.max.ms": "10000", - "replica.socket.receive.buffer.bytes": "65536", - "replica.socket.timeout.ms": "30000", - "socket.receive.buffer.bytes": "102400", - "socket.request.max.bytes": "104857600", - "socket.send.buffer.bytes": "102400", - "zookeeper.connect": "%HOSTGROUP::host_group_1%:2181", - "zookeeper.connection.timeout.ms": "25000", - "zookeeper.session.timeout.ms": "30000", - "zookeeper.sync.time.ms": "2000" - }, - "kafka-env": { - "is_supported_kafka_ranger": "true", - "kafka_log_dir": "/var/log/kafka", - "kafka_pid_dir": "/var/run/kafka", - "kafka_user": "kafka", - "kafka_user_nofile_limit": "32000", - "kafka_user_nproc_limit": "16000" - }, - "knox-env": { - "knox_group": "knox", - "knox_master_secret": "SECRET:knox-env:1:knox_master_secret", - "knox_pid_dir": "/var/run/knox", - "knox_user": "knox" - }, - "mahout-env": { - "mahout_user": "mahout" - }, - "mapred-env": { - "jobhistory_heapsize": "900", - "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce", - "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", - "mapred_user": "mapred", - "mapred_user_nofile_limit": "32768", - "mapred_user_nproc_limit": "65536" - }, - "mapred-site": { - "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "mapreduce.am.max-attempts": "2", - "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", - "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework", - "mapreduce.cluster.administrators": "hadoop", - "mapreduce.framework.name": "yarn", - "mapreduce.job.counters.max": "130", - "mapreduce.job.emit-timeline-data": "false", - "mapreduce.job.reduce.slowstart.completedmaps": "0.05", - "mapreduce.jobhistory.address": "%HOSTGROUP::host_group_1%:10020", - "mapreduce.jobhistory.bind-host": "0.0.0.0", - "mapreduce.jobhistory.done-dir": "/mr-history/done", - "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", - "mapreduce.jobhistory.recovery.enable": "true", - "mapreduce.jobhistory.recovery.store.class": "org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService", - "mapreduce.jobhistory.recovery.store.leveldb.path": "/hadoop/mapreduce/jhs", - 
"mapreduce.jobhistory.webapp.address": "%HOSTGROUP::host_group_1%:19888", - "mapreduce.map.java.opts": "-Xmx1638m", - "mapreduce.map.log.level": "INFO", - "mapreduce.map.memory.mb": "2048", - "mapreduce.map.output.compress": "false", - "mapreduce.map.sort.spill.percent": "0.7", - "mapreduce.map.speculative": "false", - "mapreduce.output.fileoutputformat.compress": "false", - "mapreduce.output.fileoutputformat.compress.type": "BLOCK", - "mapreduce.reduce.input.buffer.percent": "0.0", - "mapreduce.reduce.java.opts": "-Xmx3276m", - "mapreduce.reduce.log.level": "INFO", - "mapreduce.reduce.memory.mb": "4096", - "mapreduce.reduce.shuffle.fetch.retry.enabled": "1", - "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000", - "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000", - "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", - "mapreduce.reduce.shuffle.merge.percent": "0.66", - "mapreduce.reduce.shuffle.parallelcopies": "30", - "mapreduce.reduce.speculative": "false", - "mapreduce.shuffle.port": "13562", - "mapreduce.task.io.sort.factor": "100", - "mapreduce.task.io.sort.mb": "1146", - "mapreduce.task.timeout": "300000", - "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}", - "yarn.app.mapreduce.am.command-opts": "-Xmx1638m -Dhdp.version=${hdp.version}", - "yarn.app.mapreduce.am.log.level": "INFO", - "yarn.app.mapreduce.am.resource.mb": "2048", - "yarn.app.mapreduce.am.staging-dir": "/user" - }, - "oozie-env": { - "oozie_admin_port": "11001", - "oozie_admin_users": "{oozie_user}, oozie-admin", - "oozie_data_dir": "/hadoop/oozie/data", - "oozie_database": "New Derby Database", - "oozie_derby_database": "Derby", - "oozie_heapsize": "2048m", - "oozie_hostname": "%HOSTGROUP::host_group_1%", - "oozie_log_dir": "/var/log/oozie", - "oozie_permsize": "256m", - "oozie_pid_dir": "/var/run/oozie", - "oozie_user": "oozie" - }, - "oozie-site": { - "oozie.authentication.simple.anonymous.allowed": "true", - "oozie.authentication.type": "simple", - "oozie.base.url": "http://%HOSTGROUP::host_group_1%:11000/oozie", - "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials", - "oozie.db.schema.name": "oozie", - "oozie.service.AuthorizationService.security.enabled": "true", - "oozie.service.ELService.ext.functions.coord-action-create": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now, today=org.apache.oozie.extensions.OozieELExtensions#ph2_today, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear, latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo, formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-action-create-inst": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst, today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst, 
yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek_inst, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek_inst, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst, latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo, formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-action-start": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now, today=org.apache.oozie.extensions.OozieELExtensions#ph2_today, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear, latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest, future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future, dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn, instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime, dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset, formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-job-submit-data": "now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo, today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo, dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo, instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap, formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo, dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-job-submit-instances": "now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo, today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo, 
currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo, formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo, latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo", - "oozie.service.ELService.ext.functions.coord-sla-create": "instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-sla-submit": "instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.HadoopAccessorService.hadoop.configurations": "*={{hadoop_conf_dir}}", - "oozie.service.HadoopAccessorService.kerberos.enabled": "false", - "oozie.service.HadoopAccessorService.supported.filesystems": "*", - "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver", - "oozie.service.JPAService.jdbc.username": "oozie", - "oozie.service.ProxyUserService.proxyuser.falcon.groups": "*", - "oozie.service.ProxyUserService.proxyuser.falcon.hosts": "*", - "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler", - "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService" - }, - "ranger-hdfs-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "hadoop", - "common.name.for.certificate": "", - "hadoop.rpc.protection": "", - "policy_user": "ambari-qa", - "ranger-hdfs-plugin-enabled": "No" - }, - "spark-defaults": { - "spark.eventLog.dir": "hdfs:///spark-history", - "spark.eventLog.enabled": "true", - "spark.history.fs.logDirectory": "hdfs:///spark-history", - "spark.history.kerberos.keytab": "none", - "spark.history.kerberos.principal": "none", - "spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider", - "spark.history.ui.port": "18080", - "spark.yarn.containerLauncherMaxThreads": "25", - "spark.yarn.driver.memoryOverhead": "384", - "spark.yarn.executor.memoryOverhead": "384", - "spark.yarn.historyServer.address": "{{spark_history_server_host}}:{{spark_history_ui_port}}", - "spark.yarn.max.executor.failures": "3", - "spark.yarn.preserve.staging.files": "false", - "spark.yarn.queue": "default", - "spark.yarn.scheduler.heartbeat.interval-ms": "5000", - "spark.yarn.submit.file.replication": "3" - }, - "spark-env": { - "spark_group": "spark", - "spark_log_dir": "/var/log/spark", - "spark_pid_dir": "/var/run/spark", - "spark_thrift_cmd_opts": "", - "spark_user": "spark" - }, - "spark-hive-site-override": { - "hive.metastore.client.connect.retry.delay": "5", - "hive.metastore.client.socket.timeout": "1800", - "hive.server2.enable.doAs": "false", - "hive.server2.thrift.port": "10015", - "hive.server2.transport.mode": "binary" - }, - "spark-thrift-fairscheduler": { - "fairscheduler_content": " FAIR 1 2 " - }, - "spark-thrift-sparkconf": { - "spark.dynamicAllocation.enabled": "true", - "spark.dynamicAllocation.initialExecutors": "0", - "spark.dynamicAllocation.maxExecutors": "10", - "spark.dynamicAllocation.minExecutors": "0", - "spark.eventLog.dir": 
"{{spark_history_dir}}", - "spark.eventLog.enabled": "true", - "spark.executor.memory": "1g", - "spark.history.fs.logDirectory": "{{spark_history_dir}}", - "spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider", - "spark.master": "{{spark_thrift_master}}", - "spark.scheduler.allocation.file": "{{spark_conf}}/spark-thrift-fairscheduler.xml", - "spark.scheduler.mode": "FAIR", - "spark.shuffle.service.enabled": "true", - "spark.yarn.am.memory": "512m", - "spark.yarn.queue": "default" - }, - "sqoop-env": { - "jdbc_drivers": "", - "sqoop_user": "sqoop" - }, - "ssl-client": { - "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", - "ssl.client.keystore.type": "jks", - "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", - "ssl.client.truststore.reload.interval": "10000", - "ssl.client.truststore.type": "jks" - }, - "ssl-server": { - "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", - "ssl.server.keystore.type": "jks", - "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", - "ssl.server.truststore.reload.interval": "10000", - "ssl.server.truststore.type": "jks" - }, - "storm-env": { - "jmxremote_port": "56431", - "nimbus_seeds_supported": "true", - "storm_log_dir": "/var/log/storm", - "storm_logs_supported": "true", - "storm_pid_dir": "/var/run/storm", - "storm_user": "storm" - }, - "storm-site": { - "_storm.min.ruid": "null", - "_storm.thrift.nonsecure.transport": "backtype.storm.security.auth.SimpleTransportPlugin", - "_storm.thrift.secure.transport": "backtype.storm.security.auth.kerberos.KerberosSaslTransportPlugin", - "dev.zookeeper.path": "/tmp/dev-storm-zookeeper", - "drpc.childopts": "-Xmx768m _JAAS_PLACEHOLDER", - "drpc.invocations.port": "3773", - "drpc.port": "3772", - "drpc.queue.size": "128", - "drpc.request.timeout.secs": "600", - "drpc.worker.threads": "64", - "drpc_server_host": "[%HOSTGROUP::host_group_1%]", - "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib", - "logviewer.appender.name": "A1", - "logviewer.childopts": "-Xmx128m _JAAS_PLACEHOLDER", - "logviewer.port": "8000", - "nimbus.cleanup.inbox.freq.secs": "600", - "nimbus.file.copy.expiration.secs": "600", - "nimbus.inbox.jar.expiration.secs": "3600", - "nimbus.monitor.freq.secs": "120", - "nimbus.reassign": "true", - "nimbus.seeds": "['%HOSTGROUP::host_group_1%']", - "nimbus.supervisor.timeout.secs": "60", - "nimbus.task.launch.secs": "120", - "nimbus.task.timeout.secs": "30", - "nimbus.thrift.max_buffer_size": "1048576", - "nimbus.thrift.port": "6627", - "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator", - "nimbus_hosts": "[%HOSTGROUP::host_group_1%]", - "storm.cluster.mode": "distributed", - "storm.local.dir": "/hadoop/storm", - "storm.local.mode.zmq": "false", - "storm.log.dir": "{{log_dir}}", - "storm.messaging.netty.buffer_size": "5242880", - "storm.messaging.netty.client_worker_threads": "1", - "storm.messaging.netty.max_retries": "30", - "storm.messaging.netty.max_wait_ms": "1000", - "storm.messaging.netty.min_wait_ms": "100", - "storm.messaging.netty.server_worker_threads": "1", - "storm.messaging.transport": "backtype.storm.messaging.netty.Context", - "storm.thrift.transport": "{{storm_thrift_transport}}", - "storm.zookeeper.connection.timeout": "15000", - "storm.zookeeper.port": "2181", - "storm.zookeeper.retry.interval": "1000", - "storm.zookeeper.retry.intervalceiling.millis": "30000", - "storm.zookeeper.retry.times": "5", - 
"storm.zookeeper.root": "/storm", - "storm.zookeeper.servers": "['%HOSTGROUP::host_group_1%']", - "storm.zookeeper.session.timeout": "20000", - "storm_ui_server_host": "[%HOSTGROUP::host_group_1%]", - "supervisor.heartbeat.frequency.secs": "5", - "supervisor.monitor.frequency.secs": "3", - "supervisor.slots.ports": "[6700, 6701]", - "supervisor.worker.start.timeout.secs": "120", - "supervisor.worker.timeout.secs": "30", - "supervisor_hosts": "[%HOSTGROUP::host_group_1%]", - "task.heartbeat.frequency.secs": "3", - "task.refresh.poll.secs": "10", - "topology.acker.executors": "null", - "topology.builtin.metrics.bucket.size.secs": "60", - "topology.debug": "false", - "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy", - "topology.enable.message.timeouts": "true", - "topology.error.throttle.interval.secs": "10", - "topology.executor.receive.buffer.size": "1024", - "topology.executor.send.buffer.size": "1024", - "topology.fall.back.on.java.serialization": "true", - "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory", - "topology.max.error.report.per.interval": "5", - "topology.max.replication.wait.time.sec": "{{actual_topology_max_replication_wait_time_sec}}", - "topology.max.replication.wait.time.sec.default": "60", - "topology.max.spout.pending": "1000", - "topology.max.task.parallelism": "null", - "topology.message.timeout.secs": "30", - "topology.min.replication.count": "{{actual_topology_min_replication_count}}", - "topology.min.replication.count.default": "1", - "topology.optimize": "true", - "topology.receiver.buffer.size": "8", - "topology.skip.missing.kryo.registrations": "false", - "topology.sleep.spout.wait.strategy.time.ms": "1", - "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy", - "topology.state.synchronization.timeout.secs": "60", - "topology.stats.sample.rate": "0.05", - "topology.tick.tuple.freq.secs": "null", - "topology.transfer.buffer.size": "1024", - "topology.trident.batch.emit.interval.millis": "500", - "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer", - "topology.worker.childopts": "null", - "topology.worker.shared.thread.pool.size": "4", - "topology.workers": "1", - "transactional.zookeeper.port": "null", - "transactional.zookeeper.root": "/transactional", - "transactional.zookeeper.servers": "null", - "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER", - "ui.filter": "null", - "ui.port": "8744", - "worker.heartbeat.frequency.secs": "1", - "zmq.hwm": "0", - "zmq.linger.millis": "5000", - "zmq.threads": "1" - }, - "tez-env": { - "tez_user": "tez" - }, - "tez-site": { - "tez.am.am-rm.heartbeat.interval-ms.max": "250", - "tez.am.container.idle.release-timeout-max.millis": "20000", - "tez.am.container.idle.release-timeout-min.millis": "10000", - "tez.am.container.reuse.enabled": "true", - "tez.am.container.reuse.locality.delay-allocation-millis": "250", - "tez.am.container.reuse.non-local-fallback.enabled": "false", - "tez.am.container.reuse.rack-fallback.enabled": "true", - "tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB", - "tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "tez.am.log.level": "INFO", - "tez.am.max.app.attempts": "2", - "tez.am.maxtaskfailures.per.node": "10", - 
"tez.am.resource.memory.mb": "4096", - "tez.am.tez-ui.history-url.template": "__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__", - "tez.am.view-acls": "*", - "tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", - "tez.counters.max": "10000", - "tez.counters.max.groups": "3000", - "tez.generate.debug.artifacts": "false", - "tez.grouping.max-size": "1073741824", - "tez.grouping.min-size": "16777216", - "tez.grouping.split-waves": "1.7", - "tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSV15HistoryLoggingService", - "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz", - "tez.runtime.compress": "true", - "tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec", - "tez.runtime.convert.user-payload.to.history-text": "false", - "tez.runtime.io.sort.mb": "819", - "tez.runtime.optimize.local.fetch": "true", - "tez.runtime.pipelined.sorter.sort.threads": "2", - "tez.runtime.sorter.class": "PIPELINED", - "tez.runtime.unordered.output.buffer.size-mb": "153", - "tez.session.am.dag.submit.timeout.secs": "600", - "tez.session.client.timeout.secs": "-1", - "tez.shuffle-vertex-manager.max-src-fraction": "0.4", - "tez.shuffle-vertex-manager.min-src-fraction": "0.2", - "tez.staging-dir": "/tmp/${user.name}/staging", - "tez.task.am.heartbeat.counter.interval-ms.max": "4000", - "tez.task.generate.counters.per.io": "true", - "tez.task.get-task.sleep.interval-ms.max": "200", - "tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB", - "tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "tez.task.max-events-per-heartbeat": "500", - "tez.task.resource.memory.mb": "2048", - "tez.use.cluster.hadoop-libs": "false" - }, - "webhcat-site": { - "templeton.exec.timeout": "60000", - "templeton.hadoop": "/usr/hdp/${hdp.version}/hadoop/bin/hadoop", - "templeton.hadoop.conf.dir": "/etc/hadoop/conf", - "templeton.hcat": "/usr/hdp/${hdp.version}/hive/bin/hcat", - "templeton.hcat.home": "hive.tar.gz/hive/hcatalog", - "templeton.hive.archive": "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz", - "templeton.hive.extra.files": "/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib", - "templeton.hive.home": "hive.tar.gz/hive", - "templeton.hive.path": "hive.tar.gz/hive/bin/hive", - "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://%HOSTGROUP::host_group_1%:9083,hive.metastore.sasl.enabled=false,hive.metastore.execute.setugi=true", - "templeton.jar": "/usr/hdp/${hdp.version}/hive/share/webhcat/svr/lib/hive-webhcat-*.jar", - "templeton.libjars": "/usr/hdp/${hdp.version}/zookeeper/zookeeper.jar,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar", - "templeton.override.enabled": "false", - "templeton.pig.archive": "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz", - "templeton.pig.path": "pig.tar.gz/pig/bin/pig", - "templeton.port": "50111", - "templeton.python": "${env.PYTHON_CMD}", - "templeton.sqoop.archive": "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz", - "templeton.sqoop.home": "sqoop.tar.gz/sqoop", - "templeton.sqoop.path": "sqoop.tar.gz/sqoop/bin/sqoop", - "templeton.storage.class": 
"org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage", - "templeton.streaming.jar": "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar", - "templeton.zookeeper.hosts": "%HOSTGROUP::host_group_1%:2181" - }, - "yarn-env": { - "apptimelineserver_heapsize": "1024", - "is_supported_yarn_ranger": "true", - "min_user_id": "1000", - "nodemanager_heapsize": "1024", - "resourcemanager_heapsize": "1024", - "yarn_cgroups_enabled": "false", - "yarn_heapsize": "1024", - "yarn_log_dir_prefix": "/var/log/hadoop-yarn", - "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", - "yarn_user": "yarn", - "yarn_user_nofile_limit": "32768", - "yarn_user_nproc_limit": "65536" - }, - "yarn-site": { - "hadoop.registry.rm.enabled": "true", - "hadoop.registry.zk.quorum": "%HOSTGROUP::host_group_1%:2181", - "yarn.acl.enable": "false", - "yarn.admin.acl": "yarn", - "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*", - "yarn.client.nodemanager-connect.max-wait-ms": "60000", - "yarn.client.nodemanager-connect.retry-interval-ms": "10000", - "yarn.http.policy": "HTTP_ONLY", - "yarn.log-aggregation-enable": "true", - "yarn.log-aggregation.retain-seconds": "2592000", - "yarn.log.server.url": "http://%HOSTGROUP::host_group_1%:19888/jobhistory/logs", - "yarn.node-labels.enabled": "false", - "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500", - "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels", - "yarn.nodemanager.address": "0.0.0.0:45454", - "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", - "yarn.nodemanager.aux-services": "mapreduce_shuffle,spark_shuffle", - "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler", - "yarn.nodemanager.aux-services.spark_shuffle.class": "org.apache.spark.network.yarn.YarnShuffleService", - "yarn.nodemanager.bind-host": "0.0.0.0", - "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", - "yarn.nodemanager.container-monitor.interval-ms": "3000", - "yarn.nodemanager.delete.debug-delay-sec": "0", - "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90", - "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000", - "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", - "yarn.nodemanager.health-checker.interval-ms": "135000", - "yarn.nodemanager.health-checker.script.timeout-ms": "60000", - "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn", - "yarn.nodemanager.linux-container-executor.cgroups.mount": "false", - "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false", - "yarn.nodemanager.linux-container-executor.group": "hadoop", - "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler", - "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", - "yarn.nodemanager.log-aggregation.compression-type": "gz", - "yarn.nodemanager.log-aggregation.debug-enabled": "false", - "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30", - "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1", - "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", - "yarn.nodemanager.log.retain-second": "604800", - 
"yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state", - "yarn.nodemanager.recovery.enabled": "true", - "yarn.nodemanager.remote-app-log-dir": "/app-logs", - "yarn.nodemanager.remote-app-log-dir-suffix": "logs", - "yarn.nodemanager.resource.cpu-vcores": "25", - "yarn.nodemanager.resource.memory-mb": "16384", - "yarn.nodemanager.resource.percentage-physical-cpu-limit": "80", - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.nodemanager.vmem-pmem-ratio": "2.1", - "yarn.resourcemanager.address": "%HOSTGROUP::host_group_1%:8050", - "yarn.resourcemanager.admin.address": "%HOSTGROUP::host_group_1%:8141", - "yarn.resourcemanager.am.max-attempts": "2", - "yarn.resourcemanager.bind-host": "0.0.0.0", - "yarn.resourcemanager.connect.max-wait.ms": "900000", - "yarn.resourcemanager.connect.retry-interval.ms": "30000", - "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500", - "yarn.resourcemanager.fs.state-store.uri": "", - "yarn.resourcemanager.ha.enabled": "false", - "yarn.resourcemanager.hostname": "%HOSTGROUP::host_group_1%", - "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", - "yarn.resourcemanager.recovery.enabled": "true", - "yarn.resourcemanager.resource-tracker.address": "%HOSTGROUP::host_group_1%:8025", - "yarn.resourcemanager.scheduler.address": "%HOSTGROUP::host_group_1%:8030", - "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", - "yarn.resourcemanager.scheduler.monitor.enable": "false", - "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}", - "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore", - "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10", - "yarn.resourcemanager.system-metrics-publisher.enabled": "true", - "yarn.resourcemanager.webapp.address": "%HOSTGROUP::host_group_1%:8088", - "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false", - "yarn.resourcemanager.webapp.https.address": "%HOSTGROUP::host_group_1%:8090", - "yarn.resourcemanager.work-preserving-recovery.enabled": "true", - "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000", - "yarn.resourcemanager.zk-acl": "world:anyone:rwcda", - "yarn.resourcemanager.zk-address": "%HOSTGROUP::host_group_1%:2181", - "yarn.resourcemanager.zk-num-retries": "1000", - "yarn.resourcemanager.zk-retry-interval-ms": "1000", - "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore", - "yarn.resourcemanager.zk-timeout-ms": "10000", - "yarn.scheduler.maximum-allocation-mb": "16384", - "yarn.scheduler.maximum-allocation-vcores": "25", - "yarn.scheduler.minimum-allocation-mb": "2048", - "yarn.scheduler.minimum-allocation-vcores": "1", - "yarn.timeline-service.address": "%HOSTGROUP::host_group_1%:10200", - "yarn.timeline-service.bind-host": "0.0.0.0", - "yarn.timeline-service.client.max-retries": "30", - "yarn.timeline-service.client.retry-interval-ms": "1000", - "yarn.timeline-service.enabled": "true", - "yarn.timeline-service.entity-group-fs-store.active-dir": "/ats/active/", - "yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds": "3600", - "yarn.timeline-service.entity-group-fs-store.done-dir": "/ats/done/", - "yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes": "org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl", - 
"yarn.timeline-service.entity-group-fs-store.retain-seconds": "604800", - "yarn.timeline-service.entity-group-fs-store.scan-interval-seconds": "60", - "yarn.timeline-service.entity-group-fs-store.summary-store": "org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore", - "yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore", - "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true", - "yarn.timeline-service.http-authentication.type": "simple", - "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline", - "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline", - "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600", - "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000", - "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000", - "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000", - "yarn.timeline-service.recovery.enabled": "true", - "yarn.timeline-service.state-store-class": "org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore", - "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore", - "yarn.timeline-service.ttl-enable": "true", - "yarn.timeline-service.ttl-ms": "2678400000", - "yarn.timeline-service.version": "1.5", - "yarn.timeline-service.webapp.address": "%HOSTGROUP::host_group_1%:8188", - "yarn.timeline-service.webapp.https.address": "%HOSTGROUP::host_group_1%:8190" - }, - "zoo.cfg": { - "autopurge.purgeInterval": "24", - "autopurge.snapRetainCount": "30", - "clientPort": "2181", - "dataDir": "/hadoop/zookeeper", - "initLimit": "10", - "syncLimit": "5", - "tickTime": "2000" - }, - "zookeeper-env": { - "zk_log_dir": "/var/log/zookeeper", - "zk_pid_dir": "/var/run/zookeeper", - "zk_user": "zookeeper" - } -} diff --git a/sahara_plugin_ambari/plugins/ambari/resources/configs-2.5.json b/sahara_plugin_ambari/plugins/ambari/resources/configs-2.5.json deleted file mode 100644 index 71f42c4..0000000 --- a/sahara_plugin_ambari/plugins/ambari/resources/configs-2.5.json +++ /dev/null @@ -1,2008 +0,0 @@ -{ - "accumulo-env": { - "accumulo_gc_heapsize": "256", - "accumulo_instance_name": "hdp-accumulo-instance", - "accumulo_log_dir": "/var/log/accumulo", - "accumulo_master_heapsize": "1024", - "accumulo_monitor_bind_all": "false", - "accumulo_monitor_heapsize": "1024", - "accumulo_other_heapsize": "1024", - "accumulo_pid_dir": "/var/run/accumulo", - "accumulo_tserver_heapsize": "1536", - "accumulo_user": "accumulo", - "server_content": "#! 
/usr/bin/env bash export HADOOP_PREFIX={{hadoop_prefix}} export HADOOP_CONF_DIR={{hadoop_conf_dir}} export JAVA_HOME={{java64_home}} export ZOOKEEPER_HOME={{zookeeper_home}} export ACCUMULO_PID_DIR={{pid_dir}} export ACCUMULO_LOG_DIR={{log_dir}} export ACCUMULO_CONF_DIR={{server_conf_dir}} export ACCUMULO_TSERVER_OPTS=\"-Xmx{{accumulo_tserver_heapsize}}m -Xms{{accumulo_tserver_heapsize}}m\" export ACCUMULO_MASTER_OPTS=\"-Xmx{{accumulo_master_heapsize}}m -Xms{{accumulo_master_heapsize}}m\" export ACCUMULO_MONITOR_OPTS=\"-Xmx{{accumulo_monitor_heapsize}}m -Xms{{accumulo_monitor_heapsize}}m\" export ACCUMULO_GC_OPTS=\"-Xmx{{accumulo_gc_heapsize}}m -Xms{{accumulo_gc_heapsize}}m\" export ACCUMULO_GENERAL_OPTS=\"-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -Djava.net.preferIPv4Stack=true ${ACCUMULO_GENERAL_OPTS}\" export ACCUMULO_OTHER_OPTS=\"-Xmx{{accumulo_other_heapsize}}m -Xms{{accumulo_other_heapsize}}m ${ACCUMULO_OTHER_OPTS}\" export ACCUMULO_MONITOR_BIND_ALL={{monitor_bind_str}} # what do when the JVM runs out of heap memory export ACCUMULO_KILL_CMD='kill -9 %p'" - }, - "accumulo-log4j": { - "audit_log_level": "OFF", - "debug_log_size": "512M", - "debug_num_logs": "10", - "info_log_size": "512M", - "info_num_logs": "10", - "monitor_forwarding_log_level": "WARN" - }, - "accumulo-site": { - "gc.port.client": "50092", - "general.classpaths": "$ACCUMULO_HOME/lib/accumulo-server.jar, $ACCUMULO_HOME/lib/accumulo-core.jar, $ACCUMULO_HOME/lib/accumulo-start.jar, $ACCUMULO_HOME/lib/accumulo-fate.jar, $ACCUMULO_HOME/lib/accumulo-proxy.jar, $ACCUMULO_HOME/lib/[^.].*.jar, $ZOOKEEPER_HOME/zookeeper[^.].*.jar, $HADOOP_CONF_DIR, /usr/hdp/current/hadoop-client/[^.].*.jar, /usr/hdp/current/hadoop-client/lib/(?!slf4j)[^.].*.jar, /usr/hdp/current/hadoop-hdfs-client/[^.].*.jar, /usr/hdp/current/hadoop-mapreduce-client/[^.].*.jar, /usr/hdp/current/hadoop-yarn-client/[^.].*.jar, /usr/hdp/current/hadoop-yarn-client/lib/jersey.*.jar, /usr/hdp/current/hive-client/lib/hive-accumulo-handler.jar", - "instance.volumes": "hdfs://%HOSTGROUP::host_group_1%:8020/apps/accumulo/data", - "instance.zookeeper.host": "%HOSTGROUP::host_group_1%:2181", - "instance.zookeeper.timeout": "30s", - "master.port.client": "9999", - "monitor.port.client": "50095", - "monitor.port.log4j": "4560", - "trace.port.client": "12234", - "trace.user": "trace", - "tserver.cache.data.size": "128M", - "tserver.cache.index.size": "256M", - "tserver.memory.maps.max": "1G", - "tserver.memory.maps.native.enabled": "true", - "tserver.port.client": "9997", - "tserver.sort.buffer.size": "200M", - "tserver.walog.max.size": "1G" - }, - "activity-conf": { - "activity.explorer.user": "activity_explorer", - "global.activity.analyzer.user": "activity_analyzer", - "global.activity.processing.parallelism": "8", - "global.activity.processor.pool.max.wait.seconds": "60", - "hdfs.activity.watcher.enabled": "true", - "mr_job.activity.watcher.enabled": "true", - "mr_job.max.job.size.mb.for.parallel.execution": "500", - "phoenix.sink.batch.size": "100", - "phoenix.sink.flush.interval.seconds": "30", - "tez_job.activity.watcher.enabled": "true", - "tez_job.tmp.dir": "/var/lib/smartsense/activity-analyzer/tez/tmp/", - "yarn_app.activity.watcher.enabled": "true" - }, - "activity-env": { - "activity-env-content": "#!/bin/bash # Copyright 2014 The Apache Software Foundation # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # \"License\"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Enable verbose shell execution #set -xv ## Set HOME for various components export HADOOP_HOME=/usr/hdp/current/hadoop-client export HDFS_HOME=/usr/hdp/current/hadoop-hdfs-client export MAPREDUCE_HOME=/usr/hdp/current/hadoop-mapreduce-client export YARN_HOME=/usr/hdp/current/hadoop-yarn-client export HIVE_HOME=/usr/hdp/current/hive-client export HCAT_HOME=/usr/hdp/current/hive-webhcat export TEZ_HOME=/usr/hdp/current/tez-client export HBASE_HOME=/usr/hdp/current/hbase-client export PHOENIX_HOME=/usr/hdp/current/phoenix-client export ACTIVITY_ANALYZER_HOME=/usr/hdp/share/hst/activity-analyzer export AMS_COLLECTOR_HOME=/usr/lib/ambari-metrics-collector ## Set conf dir for various components export HADOOP_CONF_DIR=/etc/hadoop/conf/ export HIVE_CONF_DIR=/etc/hive/conf/ export HBASE_CONF_DIR=/etc/hbase/conf/ export TEZ_CONF_DIR=/etc/tez/conf/ export ACTIVITY_ANALYZER_CONF_DIR=/etc/smartsense-activity/conf/ export AMS_HBASE_CONF=/etc/ams-hbase/conf export DEBUG_ENABLED=false" - }, - "activity-log4j": { - "activity-log4j-content": "# Copyright 2014 The Apache Software Foundation # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # \"License\"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Define some default values that can be overridden by system properties # Root logger option log4j.rootLogger=INFO,file log4j.appender.file=org.apache.log4j.RollingFileAppender log4j.appender.file.File={{activity_log_dir}}/${log.file.name} log4j.appender.file.MaxFileSize=30MB log4j.appender.file.MaxBackupIndex=10 log4j.appender.file.layout=org.apache.log4j.PatternLayout log4j.appender.file.layout.ConversionPattern=%d{DATE} %5p [%t] %c{1}:%L - %m%n", - "activity_log_dir": "/var/log/smartsense-activity" - }, - "activity-zeppelin-env": { - "activity-zeppelin-env-content": "#!/bin/bash # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # export JAVA_HOME={{java_home}} # Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS=\"-Dspark.executor.memory=8g -Dspark.cores.max=16\" export ZEPPELIN_JAVA_OPTS=\"-Dhdp.version={{hdp_version}} -Dlog.file.name=activity-explorer.log -DSmartSenseActivityExplorer\" # export ZEPPELIN_MEM # Zeppelin jvm mem options Default -Xmx1024m -XX:MaxPermSize=512m # export ZEPPELIN_INTP_MEM # zeppelin interpreter process jvm mem options. Default = ZEPPELIN_MEM # export ZEPPELIN_INTP_JAVA_OPTS # zeppelin interpreter process jvm options. Default = ZEPPELIN_JAVA_OPTS export ZEPPELIN_LOG_DIR={{activity_log_dir}} export ZEPPELIN_PID_DIR=/var/run/smartsense-activity-explorer export ZEPPELIN_WAR_TEMPDIR=/var/lib/smartsense/activity-explorer/webapp export ZEPPELIN_NOTEBOOK_DIR=/var/lib/smartsense/activity-explorer/notebook export ZEPPELIN_CLASSPATH=\"/etc/ams-hbase/conf:${ZEPPELIN_CLASSPATH}\" export CLASSPATH=${ZEPPELIN_CLASSPATH}" - }, - "activity-zeppelin-interpreter": { - "activity-zeppelin-interpreter-content": "{ \"interpreterSettings\": { \"2BJB693M8\": { \"id\": \"2BJB693M8\", \"name\": \"phoenix\", \"group\": \"phoenix\", \"properties\": { \"phoenix.jdbc.url\": \"{{activity_explorer_jdbc_url}}\", \"phoenix.user\": \"\", \"phoenix.password\": \"\", \"phoenix.max.result\": \"1000\", \"phoenix.driver.name\": \"org.apache.phoenix.jdbc.PhoenixDriver\" }, \"interpreterGroup\": [ { \"class\": \"org.apache.zeppelin.phoenix.PhoenixInterpreter\", \"name\": \"sql\" } ], \"dependencies\": [], \"option\": { \"remote\": true, \"perNoteSession\": false } } }, \"interpreterBindings\": { \"2BNVQJUBK\": [ \"2BJB693M8\" ], \"2BPD7951H\": [ \"2BJB693M8\" ], \"2BQH91X36\": [ \"2BJB693M8\" ], \"2BTCVPTMH\": [ \"2BJB693M8\" ] }, \"interpreterRepositories\": [ { \"id\": \"central\", \"type\": \"default\", \"url\": \"http://repo1.maven.org/maven2/\", \"releasePolicy\": { \"enabled\": true, \"updatePolicy\": \"daily\", \"checksumPolicy\": \"warn\" }, \"snapshotPolicy\": { \"enabled\": true, \"updatePolicy\": \"daily\", \"checksumPolicy\": \"warn\" }, \"mirroredRepositories\": [], \"repositoryManager\": false } ] }" - }, - "activity-zeppelin-shiro": { - "main.securityManager.sessionManager": "$sessionManager", - "main.sessionManager": "org.apache.shiro.web.session.mgt.DefaultWebSessionManager", - "securityManager.sessionManager.globalSessionTimeout": "86400000" - }, - "activity-zeppelin-site": { - "zeppelin.anonymous.allowed": "false", - "zeppelin.interpreter.connect.timeout": "30000", - "zeppelin.interpreter.dir": "/usr/hdp/share/hst/activity-explorer/interpreter", - "zeppelin.interpreters": "org.apache.zeppelin.phoenix.PhoenixInterpreter", - "zeppelin.notebook.dir": "/var/lib/smartsense/activity-explorer/notebook", - "zeppelin.notebook.homescreen.hide": "false", - "zeppelin.notebook.storage": "org.apache.zeppelin.notebook.repo.VFSNotebookRepo", - "zeppelin.server.addr": "0.0.0.0", - "zeppelin.server.allowed.origins": "*", - "zeppelin.server.context.path": "/", - "zeppelin.server.port": "9060", - "zeppelin.ssl": "false", - "zeppelin.ssl.client.auth": "false", - "zeppelin.ssl.keystore.path": 
"/var/lib/smartsense/activity-explorer/keystore", - "zeppelin.ssl.keystore.type": "JKS", - "zeppelin.ssl.truststore.path": "/var/lib/smartsense/activity-explorer/truststore", - "zeppelin.ssl.truststore.type": "JKS", - "zeppelin.war.tempdir": "/var/lib/smartsense/activity-explorer/webapp", - "zeppelin.websocket.max.text.message.size": "1024000" - }, - "ams-env": { - "ambari_metrics_user": "ams", - "metrics_collector_heapsize": "512", - "metrics_collector_log_dir": "/var/log/ambari-metrics-collector", - "metrics_collector_pid_dir": "/var/run/ambari-metrics-collector", - "metrics_monitor_log_dir": "/var/log/ambari-metrics-monitor", - "metrics_monitor_pid_dir": "/var/run/ambari-metrics-monitor" - }, - "ams-grafana-env": { - "metrics_grafana_data_dir": "/var/lib/ambari-metrics-grafana", - "metrics_grafana_log_dir": "/var/log/ambari-metrics-grafana", - "metrics_grafana_pid_dir": "/var/run/ambari-metrics-grafana", - "metrics_grafana_username": "admin" - }, - "ams-grafana-ini": { - "cert_file": "/etc/ambari-metrics-grafana/conf/ams-grafana.crt", - "cert_key": "/etc/ambari-metrics-grafana/conf/ams-grafana.key", - "port": "3000", - "protocol": "http" - }, - "ams-hbase-env": { - "hbase_classpath_additional": "", - "hbase_log_dir": "/var/log/ambari-metrics-collector", - "hbase_master_heapsize": "768", - "hbase_master_maxperm_size": "128", - "hbase_master_xmn_size": "256", - "hbase_pid_dir": "/var/run/ambari-metrics-collector/", - "hbase_regionserver_heapsize": "768", - "hbase_regionserver_shutdown_timeout": "30", - "hbase_regionserver_xmn_ratio": "0.2", - "max_open_files_limit": "32768", - "regionserver_xmn_size": "128" - }, - "ams-hbase-policy": { - "security.admin.protocol.acl": "*", - "security.client.protocol.acl": "*", - "security.masterregion.protocol.acl": "*" - }, - "ams-hbase-security-site": { - "ams.zookeeper.keytab": "", - "ams.zookeeper.principal": "", - "hadoop.security.authentication": "", - "hbase.coprocessor.master.classes": "", - "hbase.coprocessor.region.classes": "", - "hbase.master.kerberos.principal": "", - "hbase.master.keytab.file": "", - "hbase.myclient.keytab": "", - "hbase.myclient.principal": "", - "hbase.regionserver.kerberos.principal": "", - "hbase.regionserver.keytab.file": "", - "hbase.security.authentication": "", - "hbase.security.authorization": "", - "hbase.zookeeper.property.authProvider.1": "", - "hbase.zookeeper.property.jaasLoginRenew": "", - "hbase.zookeeper.property.kerberos.removeHostFromPrincipal": "", - "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal": "" - }, - "ams-hbase-site": { - "dfs.client.read.shortcircuit": "true", - "hbase.client.scanner.caching": "10000", - "hbase.client.scanner.timeout.period": "300000", - "hbase.cluster.distributed": "false", - "hbase.hregion.majorcompaction": "0", - "hbase.hregion.max.filesize": "4294967296", - "hbase.hregion.memstore.block.multiplier": "4", - "hbase.hregion.memstore.flush.size": "134217728", - "hbase.hstore.blockingStoreFiles": "200", - "hbase.hstore.flusher.count": "2", - "hbase.local.dir": "${hbase.tmp.dir}/local", - "hbase.master.info.bindAddress": "0.0.0.0", - "hbase.master.info.port": "61310", - "hbase.master.normalizer.class": "org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer", - "hbase.master.port": "61300", - "hbase.master.wait.on.regionservers.mintostart": "1", - "hbase.normalizer.enabled": "false", - "hbase.normalizer.period": "600000", - "hbase.regionserver.global.memstore.lowerLimit": "0.3", - "hbase.regionserver.global.memstore.upperLimit": "0.35", - 
"hbase.regionserver.info.port": "61330", - "hbase.regionserver.port": "61320", - "hbase.regionserver.thread.compaction.large": "2", - "hbase.regionserver.thread.compaction.small": "3", - "hbase.replication": "false", - "hbase.rootdir": "file:///var/lib/ambari-metrics-collector/hbase", - "hbase.rpc.timeout": "300000", - "hbase.snapshot.enabled": "false", - "hbase.superuser": "activity_explorer,activity_analyzer", - "hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp", - "hbase.zookeeper.leaderport": "61388", - "hbase.zookeeper.peerport": "61288", - "hbase.zookeeper.property.clientPort": "{{zookeeper_clientPort}}", - "hbase.zookeeper.property.dataDir": "${hbase.tmp.dir}/zookeeper", - "hbase.zookeeper.property.tickTime": "6000", - "hbase.zookeeper.quorum": "{{zookeeper_quorum_hosts}}", - "hfile.block.cache.size": "0.3", - "phoenix.coprocessor.maxMetaDataCacheSize": "20480000", - "phoenix.coprocessor.maxServerCacheTimeToLiveMs": "60000", - "phoenix.groupby.maxCacheSize": "307200000", - "phoenix.mutate.batchSize": "10000", - "phoenix.query.keepAliveMs": "300000", - "phoenix.query.maxGlobalMemoryPercentage": "15", - "phoenix.query.rowKeyOrderSaltedTable": "true", - "phoenix.query.spoolThresholdBytes": "20971520", - "phoenix.query.timeoutMs": "300000", - "phoenix.sequence.saltBuckets": "2", - "phoenix.spool.directory": "${hbase.tmp.dir}/phoenix-spool", - "zookeeper.session.timeout": "120000", - "zookeeper.session.timeout.localHBaseCluster": "120000", - "zookeeper.znode.parent": "/ams-hbase-unsecure" - }, - "ams-site": { - "phoenix.query.maxGlobalMemoryPercentage": "25", - "phoenix.spool.directory": "/tmp", - "timeline.metrics.aggregator.checkpoint.dir": "/var/lib/ambari-metrics-collector/checkpoint", - "timeline.metrics.aggregators.skip.blockcache.enabled": "false", - "timeline.metrics.cache.commit.interval": "3", - "timeline.metrics.cache.enabled": "true", - "timeline.metrics.cache.size": "150", - "timeline.metrics.cluster.aggregate.splitpoints": "mapred.ShuffleMetrics.ShuffleOutputsFailed", - "timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.daily.disabled": "false", - "timeline.metrics.cluster.aggregator.daily.interval": "86400", - "timeline.metrics.cluster.aggregator.daily.ttl": "63072000", - "timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.hourly.disabled": "false", - "timeline.metrics.cluster.aggregator.hourly.interval": "3600", - "timeline.metrics.cluster.aggregator.hourly.ttl": "31536000", - "timeline.metrics.cluster.aggregator.interpolation.enabled": "true", - "timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.minute.disabled": "false", - "timeline.metrics.cluster.aggregator.minute.interval": "300", - "timeline.metrics.cluster.aggregator.minute.ttl": "2592000", - "timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.second.disabled": "false", - "timeline.metrics.cluster.aggregator.second.interval": "120", - "timeline.metrics.cluster.aggregator.second.timeslice.interval": "30", - "timeline.metrics.cluster.aggregator.second.ttl": "259200", - "timeline.metrics.daily.aggregator.minute.interval": "86400", - "timeline.metrics.hbase.compression.scheme": "SNAPPY", - "timeline.metrics.hbase.data.block.encoding": "FAST_DIFF", - "timeline.metrics.hbase.fifo.compaction.enabled": "true", - 
"timeline.metrics.hbase.init.check.enabled": "true", - "timeline.metrics.host.aggregate.splitpoints": "mapred.ShuffleMetrics.ShuffleOutputsFailed", - "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier": "2", - "timeline.metrics.host.aggregator.daily.disabled": "false", - "timeline.metrics.host.aggregator.daily.ttl": "31536000", - "timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier": "2", - "timeline.metrics.host.aggregator.hourly.disabled": "false", - "timeline.metrics.host.aggregator.hourly.interval": "3600", - "timeline.metrics.host.aggregator.hourly.ttl": "2592000", - "timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier": "2", - "timeline.metrics.host.aggregator.minute.disabled": "false", - "timeline.metrics.host.aggregator.minute.interval": "300", - "timeline.metrics.host.aggregator.minute.ttl": "604800", - "timeline.metrics.host.aggregator.ttl": "86400", - "timeline.metrics.service.checkpointDelay": "60", - "timeline.metrics.service.cluster.aggregator.appIds": "datanode,nodemanager,hbase", - "timeline.metrics.service.default.result.limit": "15840", - "timeline.metrics.service.handler.thread.count": "20", - "timeline.metrics.service.http.policy": "HTTP_ONLY", - "timeline.metrics.service.operation.mode": "embedded", - "timeline.metrics.service.resultset.fetchSize": "2000", - "timeline.metrics.service.rpc.address": "0.0.0.0:60200", - "timeline.metrics.service.use.groupBy.aggregators": "true", - "timeline.metrics.service.watcher.delay": "30", - "timeline.metrics.service.watcher.disabled": "false", - "timeline.metrics.service.watcher.initial.delay": "600", - "timeline.metrics.service.watcher.timeout": "30", - "timeline.metrics.service.webapp.address": "%HOSTGROUP::host_group_1%:6188", - "timeline.metrics.sink.collection.period": "10", - "timeline.metrics.sink.report.interval": "60" - }, - "ams-ssl-client": { - "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", - "ssl.client.truststore.type": "jks" - }, - "ams-ssl-server": { - "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", - "ssl.server.keystore.type": "jks", - "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", - "ssl.server.truststore.reload.interval": "10000", - "ssl.server.truststore.type": "jks" - }, - "anonymization-rules": { - "anonymization-rules-content": "{ \"rules\":[ { \"name\":\"ip_address\", \"ruleId\": \"Pattern\", \"path\":null, \"pattern\": \"([^a-z0-9\\\\.]|^)[0-9]{1,3}\\\\.[0-9]{1,3}\\\\.[0-9]{1,3}\\\\.[0-9]{1,3}([^a-z0-9\\\\.\\\\-]|(\\\\.[^0-9])|$)\", \"extract\": \"[ :\\\\/]?([0-9\\\\.]+)[ :\\\\/]?\", \"excludes\": [\"hdp-select*.*\", \"*version.txt\"], \"shared\":true }, { \"name\":\"domain\", \"ruleId\": \"Domain\", \"path\":null, \"pattern\": \"$DOMAIN_RULE$\", \"shared\":true }, { \"name\":\"delete_oozie_jdbc_password\", \"ruleId\": \"Property\", \"path\":\"oozie-site.xml\", \"property\": \"oozie.service.JPAService.jdbc.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_sqoop_metastore_password\", \"ruleId\": \"Property\", \"path\":\"sqoop-site.xml\", \"property\": \"sqoop.metastore.client.autoconnect.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_hive_metastore_password\", \"ruleId\": \"Property\", \"path\":\"hive-site.xml\", \"property\": \"javax.jdo.option.ConnectionPassword\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_s3_accesskey\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": 
\"fs.s3.awsAccessKeyId\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_s3_secret_accesskey\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": \"fs.s3.awsSecretAccessKey\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_s3n_accesskey\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": \"fs.s3n.awsAccessKeyId\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_s3n_secret_accesskey\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": \"fs.s3n.awsSecretAccessKey\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_azure_account_key\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": \"fs.azure.account.key.*\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_ldap_password\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": \"hadoop.security.group.mapping.ldap.bind.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_ssl_client_keystore_pwd\", \"ruleId\": \"Property\", \"path\":\"ssl-client.xml\", \"property\": \"ssl.client.keystore.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_ssl_client_truststore_pwd\", \"ruleId\": \"Property\", \"path\":\"ssl-client.xml\", \"property\": \"ssl.client.truststore.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_ssl_server_keystore_keypwd\", \"ruleId\": \"Property\", \"path\":\"ssl-server.xml\", \"property\": \"ssl.server.keystore.keypassword\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_ssl_server_keystore_pwd\", \"ruleId\": \"Property\", \"path\":\"ssl-server.xml\", \"property\": \"ssl.server.keystore.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_ssl_server_truststore_pwd\", \"ruleId\": \"Property\", \"path\":\"ssl-server.xml\", \"property\": \"ssl.server.truststore.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_oozie_pwd_in_java_process_info\", \"ruleId\": \"Pattern\", \"path\":\"java_process.txt\", \"pattern\": \"oozie.https.keystore.pass=([^ ]*)\", \"extract\": \"=([^ ]*)\", \"shared\":false }, { \"name\":\"hide_oozie_pwd_in_process_info\", \"ruleId\": \"Pattern\", \"path\":\"pid.txt\", \"pattern\": \"oozie.https.keystore.pass=([^ ]*)\", \"extract\": \"=([^ ]*)\", \"shared\":false }, { \"name\":\"hide_oozie_pwd_in_ambariagent_log\", \"ruleId\": \"Pattern\", \"path\":\"ambari-agent.log\", \"pattern\": \"oozie.https.keystore.pass=([^ ]*)\", \"extract\": \"=([^ ]*)\", \"shared\":false }, { \"name\":\"delete_oozie_https_keystore_pass\", \"ruleId\": \"Pattern\", \"path\":\"oozie-env.cmd\", \"pattern\":\"OOZIE_HTTPS_KEYSTORE_PASS=([^ ]*)\", \"extract\": \"=([^ ]*)\", \"shared\":false }, { \"name\":\"java_process_ganglia_password\", \"ruleId\": \"Pattern\", \"path\":\"java_process.txt\", \"pattern\":\"ganglia_password=([^ ]*)\", \"extract\": \"=([^ ]*)\", \"shared\":false }, { \"name\":\"hide_ssn_from_logs\", \"ruleId\": \"Pattern\", \"path\":\"*\\\\.log*\", \"pattern\": \"(^|[^0-9x])[0-9x]{3}-[0-9x]{2}-[0-9]{4}($|[^0-9x])\", \"extract\": \"(? 
7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:MetaspaceSize=1024m -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}", - "llap_log_level": "INFO", - "llap_queue_capacity": "0", - "num_llap_nodes": "1", - "num_retries_for_checking_llap_status": "10", - "slider_am_container_mb": "341" - }, - "hive-interactive-site": { - "hive.driver.parallel.compilation": "true", - "hive.exec.orc.split.strategy": "HYBRID", - "hive.execution.engine": "tez", - "hive.execution.mode": "llap", - "hive.llap.auto.allow.uber": "false", - "hive.llap.client.consistent.splits": "true", - "hive.llap.daemon.allow.permanent.fns": "false", - "hive.llap.daemon.num.executors": "1", - "hive.llap.daemon.queue.name": "default", - "hive.llap.daemon.rpc.port": "15001", - "hive.llap.daemon.service.hosts": "@llap0", - "hive.llap.daemon.task.scheduler.enable.preemption": "true", - "hive.llap.daemon.vcpus.per.instance": "${hive.llap.daemon.num.executors}", - "hive.llap.daemon.yarn.container.mb": "341", - "hive.llap.daemon.yarn.shuffle.port": "15551", - "hive.llap.execution.mode": "all", - "hive.llap.io.enabled": "true", - "hive.llap.io.memory.mode": "", - "hive.llap.io.memory.size": "0", - "hive.llap.io.threadpool.size": "2", - "hive.llap.io.use.lrfu": "true", - "hive.llap.management.rpc.port": "15004", - "hive.llap.object.cache.enabled": "true", - "hive.llap.task.scheduler.locality.delay": "-1", - "hive.llap.zk.sm.connectionString": "%HOSTGROUP::host_group_1%:2181", - "hive.mapjoin.hybridgrace.hashtable": "false", - "hive.metastore.event.listeners": "", - "hive.metastore.uris": "", - "hive.optimize.dynamic.partition.hashjoin": "true", - "hive.prewarm.enabled": "false", - "hive.server2.enable.doAs": "false", - "hive.server2.tez.default.queues": "default", - "hive.server2.tez.initialize.default.sessions": "true", - "hive.server2.tez.sessions.per.default.queue": "1", - "hive.server2.thrift.http.port": "10501", - "hive.server2.thrift.port": "10500", - "hive.server2.webui.port": "10502", - "hive.server2.webui.use.ssl": "false", - "hive.server2.zookeeper.namespace": "hiveserver2-hive2", - "hive.tez.bucket.pruning": "true", - "hive.tez.exec.print.summary": "true", - "hive.tez.input.generate.consistent.splits": "true", - "hive.vectorized.execution.mapjoin.minmax.enabled": "true", - "hive.vectorized.execution.mapjoin.native.enabled": "true", - "hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled": "true", - "hive.vectorized.execution.reduce.enabled": "true", - "llap.shuffle.connection-keep-alive.enable": "true", - "llap.shuffle.connection-keep-alive.timeout": "60" - }, - "hive-site": { - "ambari.hive.db.schema.name": "hive", - "atlas.hook.hive.maxThreads": "1", - "atlas.hook.hive.minThreads": "1", - "atlas.rest.address": "http://%HOSTGROUP::host_group_1%:21000", - "datanucleus.autoCreateSchema": "false", - "datanucleus.cache.level2.type": "none", - "datanucleus.fixedDatastore": "true", - "hive.auto.convert.join": "true", - "hive.auto.convert.join.noconditionaltask": "true", - "hive.auto.convert.join.noconditionaltask.size": "286331153", - "hive.auto.convert.sortmerge.join": "true", - "hive.auto.convert.sortmerge.join.to.mapjoin": "false", - "hive.cbo.enable": "true", - "hive.cli.print.header": "false", - "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore", - "hive.cluster.delegation.token.store.zookeeper.connectString": 
"%HOSTGROUP::host_group_1%:2181", - "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation", - "hive.compactor.abortedtxn.threshold": "1000", - "hive.compactor.check.interval": "300L", - "hive.compactor.delta.num.threshold": "10", - "hive.compactor.delta.pct.threshold": "0.1f", - "hive.compactor.initiator.on": "false", - "hive.compactor.worker.threads": "0", - "hive.compactor.worker.timeout": "86400L", - "hive.compute.query.using.stats": "true", - "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role", - "hive.convert.join.bucket.mapjoin.tez": "false", - "hive.default.fileformat": "TextFile", - "hive.default.fileformat.managed": "TextFile", - "hive.enforce.bucketing": "false", - "hive.enforce.sorting": "true", - "hive.enforce.sortmergebucketmapjoin": "true", - "hive.exec.compress.intermediate": "false", - "hive.exec.compress.output": "false", - "hive.exec.dynamic.partition": "true", - "hive.exec.dynamic.partition.mode": "strict", - "hive.exec.failure.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook", - "hive.exec.max.created.files": "100000", - "hive.exec.max.dynamic.partitions": "5000", - "hive.exec.max.dynamic.partitions.pernode": "2000", - "hive.exec.orc.compression.strategy": "SPEED", - "hive.exec.orc.default.compress": "ZLIB", - "hive.exec.orc.default.stripe.size": "67108864", - "hive.exec.orc.encoding.strategy": "SPEED", - "hive.exec.parallel": "false", - "hive.exec.parallel.thread.number": "8", - "hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook,org.apache.atlas.hive.hook.HiveHook", - "hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook", - "hive.exec.reducers.bytes.per.reducer": "67108864", - "hive.exec.reducers.max": "1009", - "hive.exec.scratchdir": "/tmp/hive", - "hive.exec.submit.local.task.via.child": "true", - "hive.exec.submitviachild": "false", - "hive.execution.engine": "tez", - "hive.fetch.task.aggr": "false", - "hive.fetch.task.conversion": "more", - "hive.fetch.task.conversion.threshold": "1073741824", - "hive.limit.optimize.enable": "true", - "hive.limit.pushdown.memory.usage": "0.04", - "hive.map.aggr": "true", - "hive.map.aggr.hash.force.flush.memory.threshold": "0.9", - "hive.map.aggr.hash.min.reduction": "0.5", - "hive.map.aggr.hash.percentmemory": "0.5", - "hive.mapjoin.bucket.cache.size": "10000", - "hive.mapjoin.optimized.hashtable": "true", - "hive.mapred.reduce.tasks.speculative.execution": "false", - "hive.merge.mapfiles": "true", - "hive.merge.mapredfiles": "false", - "hive.merge.orcfile.stripe.level": "true", - "hive.merge.rcfile.block.level": "true", - "hive.merge.size.per.task": "256000000", - "hive.merge.smallfiles.avgsize": "16000000", - "hive.merge.tezfiles": "false", - "hive.metastore.authorization.storage.checks": "false", - "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order", - "hive.metastore.client.connect.retry.delay": "5s", - "hive.metastore.client.socket.timeout": "1800s", - "hive.metastore.connect.retries": "24", - "hive.metastore.execute.setugi": "true", - "hive.metastore.failure.retries": "24", - "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab", - "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM", - "hive.metastore.pre.event.listeners": "org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener", - "hive.metastore.sasl.enabled": "false", - "hive.metastore.server.max.threads": "100000", - "hive.metastore.uris": 
"thrift://%HOSTGROUP::host_group_1%:9083", - "hive.metastore.warehouse.dir": "/apps/hive/warehouse", - "hive.optimize.bucketmapjoin": "true", - "hive.optimize.bucketmapjoin.sortedmerge": "false", - "hive.optimize.constant.propagation": "true", - "hive.optimize.index.filter": "true", - "hive.optimize.metadataonly": "true", - "hive.optimize.null.scan": "true", - "hive.optimize.reducededuplication": "true", - "hive.optimize.reducededuplication.min.reducer": "4", - "hive.optimize.sort.dynamic.partition": "false", - "hive.orc.compute.splits.num.threads": "10", - "hive.orc.splits.include.file.footer": "false", - "hive.prewarm.enabled": "false", - "hive.prewarm.numcontainers": "3", - "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator", - "hive.security.authorization.enabled": "false", - "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory", - "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator", - "hive.security.metastore.authorization.auth.reads": "true", - "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", - "hive.server2.allow.user.substitution": "true", - "hive.server2.authentication": "NONE", - "hive.server2.authentication.spnego.keytab": "HTTP/_HOST@EXAMPLE.COM", - "hive.server2.authentication.spnego.principal": "/etc/security/keytabs/spnego.service.keytab", - "hive.server2.enable.doAs": "true", - "hive.server2.logging.operation.enabled": "true", - "hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs", - "hive.server2.max.start.attempts": "5", - "hive.server2.support.dynamic.service.discovery": "true", - "hive.server2.table.type.mapping": "CLASSIC", - "hive.server2.tez.default.queues": "default", - "hive.server2.tez.initialize.default.sessions": "false", - "hive.server2.tez.sessions.per.default.queue": "1", - "hive.server2.thrift.http.path": "cliservice", - "hive.server2.thrift.http.port": "10001", - "hive.server2.thrift.max.worker.threads": "500", - "hive.server2.thrift.port": "10000", - "hive.server2.thrift.sasl.qop": "auth", - "hive.server2.transport.mode": "binary", - "hive.server2.use.SSL": "false", - "hive.server2.zookeeper.namespace": "hiveserver2", - "hive.smbjoin.cache.rows": "10000", - "hive.stats.autogather": "true", - "hive.stats.dbclass": "fs", - "hive.stats.fetch.column.stats": "true", - "hive.stats.fetch.partition.stats": "true", - "hive.support.concurrency": "false", - "hive.tez.auto.reducer.parallelism": "true", - "hive.tez.container.size": "1024", - "hive.tez.cpu.vcores": "-1", - "hive.tez.dynamic.partition.pruning": "true", - "hive.tez.dynamic.partition.pruning.max.data.size": "104857600", - "hive.tez.dynamic.partition.pruning.max.event.size": "1048576", - "hive.tez.input.format": "org.apache.hadoop.hive.ql.io.HiveInputFormat", - "hive.tez.java.opts": "-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps", - "hive.tez.log.level": "INFO", - "hive.tez.max.partition.factor": "2.0", - "hive.tez.min.partition.factor": "0.25", - "hive.tez.smb.number.waves": "0.5", - "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager", - "hive.txn.max.open.batch": "1000", - "hive.txn.timeout": "300", - "hive.user.install.directory": "/user/", - "hive.vectorized.execution.enabled": "true", - 
"hive.vectorized.execution.reduce.enabled": "false", - "hive.vectorized.groupby.checkinterval": "4096", - "hive.vectorized.groupby.flush.percent": "0.1", - "hive.vectorized.groupby.maxentries": "100000", - "hive.zookeeper.client.port": "2181", - "hive.zookeeper.namespace": "hive_zookeeper_namespace", - "hive.zookeeper.quorum": "%HOSTGROUP::host_group_1%:2181", - "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", - "javax.jdo.option.ConnectionURL": "jdbc:mysql://%HOSTGROUP::host_group_1%/hive?createDatabaseIfNotExist=true", - "javax.jdo.option.ConnectionUserName": "hive" - }, - "hivemetastore-site": { - "hive.metastore.metrics.enabled": "true", - "hive.service.metrics.file.location": "/var/log/hive/hivemetastore-report.json", - "hive.service.metrics.hadoop2.component": "hivemetastore", - "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2" - }, - "hiveserver2-interactive-site": { - "hive.async.log.enabled": "false", - "hive.metastore.metrics.enabled": "true", - "hive.service.metrics.file.location": "/var/log/hive/hiveserver2Interactive-report.json", - "hive.service.metrics.hadoop2.component": "hiveserver2", - "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2" - }, - "hiveserver2-site": { - "hive.metastore.metrics.enabled": "true", - "hive.security.authorization.enabled": "false", - "hive.service.metrics.file.location": "/var/log/hive/hiveserver2-report.json", - "hive.service.metrics.hadoop2.component": "hiveserver2", - "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2" - }, - "hst-agent-conf": { - "agent.loglevel": "INFO", - "agent.tmp_dir": "/var/lib/smartsense/hst-agent/data/tmp", - "bundle.logs_to_capture": "(.*).log$,(.*).out$", - "server.connection_retry_count": "100", - "server.connection_retry_interval": "10", - "upload.retry_count": "100", - "upload.retry_interval": "15" - }, - "hst-log4j": { - "hst-log4j-content": "# Copyright 2014 The Apache Software Foundation # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # \"License\"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Define some default values that can be overridden by system properties # Root logger option log4j.rootLogger=INFO,file log4j.appender.file=org.apache.log4j.RollingFileAppender log4j.appender.file.File={{hst_log_dir}}/${log.file.name} log4j.appender.file.MaxFileSize=30MB log4j.appender.file.MaxBackupIndex=10 log4j.appender.file.layout=org.apache.log4j.PatternLayout log4j.appender.file.layout.ConversionPattern=%d{DATE} %5p [%t] %c{1}:%L - %m%n # HST logger log4j.logger.com.hortonworks=INFO com.github.oxo42.stateless4j=WARN log4j.logger.com.sun.jersey=WARN log4j.logger.org.eclipse.jetty.server=INFO", - "hst_log_dir": "/var/log/hst" - }, - "hst-server-conf": { - "agent.request.processing.timeout": "7200", - "agent.request.syncup.interval": "180", - "client.threadpool.size.max": "40", - "customer.account.name": "unspecified", - "customer.notification.email": "unspecified", - "customer.smartsense.id": "unspecified", - "gateway.host": "embedded", - "gateway.port": "9451", - "gateway.registration.port": "9450", - "server.port": "9000", - "server.storage.dir": "/var/lib/smartsense/hst-server/data", - "server.tmp.dir": "/var/lib/smartsense/hst-server/tmp" - }, - "infra-solr-client-log4j": { - "infra_solr_client_log_dir": "/var/log/ambari-infra-solr-client" - }, - "infra-solr-env": { - "infra_solr_datadir": "/opt/ambari_infra_solr/data", - "infra_solr_jmx_port": "18886", - "infra_solr_kerberos_keytab": "/etc/security/keytabs/infra_solr.service.keytab", - "infra_solr_kerberos_name_rules": "DEFAULT", - "infra_solr_kerberos_principal": "infra-solr", - "infra_solr_keystore_location": "/etc/security/serverKeys/infra.solr.keyStore.jks", - "infra_solr_keystore_type": "jks", - "infra_solr_log_dir": "/var/log/ambari-infra-solr", - "infra_solr_maxmem": "2048", - "infra_solr_minmem": "1024", - "infra_solr_pid_dir": "/var/run/ambari-infra-solr", - "infra_solr_port": "8886", - "infra_solr_ssl_enabled": "false", - "infra_solr_truststore_location": "/etc/security/serverKeys/infra.solr.trustStore.jks", - "infra_solr_truststore_type": "jks", - "infra_solr_user": "infra-solr", - "infra_solr_web_kerberos_keytab": "/etc/security/keytabs/spnego.service.keytab", - "infra_solr_web_kerberos_principal": "HTTP/_HOST@EXAMPLE.COM", - "infra_solr_znode": "/infra-solr" - }, - "kafka-broker": { - "auto.create.topics.enable": "true", - "auto.leader.rebalance.enable": "true", - "compression.type": "producer", - "controlled.shutdown.enable": "true", - "controlled.shutdown.max.retries": "3", - "controlled.shutdown.retry.backoff.ms": "5000", - "controller.message.queue.size": "10", - "controller.socket.timeout.ms": "30000", - "default.replication.factor": "1", - "delete.topic.enable": "false", - "external.kafka.metrics.exclude.prefix": "kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec", - "external.kafka.metrics.include.prefix": "kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile,kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile,kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile,kafka.network.RequestMetrics.RequestsPerSec.request", - "fetch.purgatory.purge.interval.requests": "10000", - "kafka.ganglia.metrics.group": "kafka", - "kafka.ganglia.metrics.port": "8671", - "kafka.ganglia.metrics.reporter.enabled": "true", - "kafka.metrics.reporters": "org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter", - "kafka.timeline.metrics.host": "{{metric_collector_host}}", - 
"kafka.timeline.metrics.maxRowCacheSize": "10000", - "kafka.timeline.metrics.port": "{{metric_collector_port}}", - "kafka.timeline.metrics.protocol": "{{metric_collector_protocol}}", - "kafka.timeline.metrics.reporter.enabled": "true", - "kafka.timeline.metrics.reporter.sendInterval": "5900", - "kafka.timeline.metrics.truststore.path": "{{metric_truststore_path}}", - "kafka.timeline.metrics.truststore.type": "{{metric_truststore_type}}", - "leader.imbalance.check.interval.seconds": "300", - "leader.imbalance.per.broker.percentage": "10", - "listeners": "PLAINTEXT://localhost:6667", - "log.cleanup.interval.mins": "10", - "log.dirs": "/kafka-logs", - "log.index.interval.bytes": "4096", - "log.index.size.max.bytes": "10485760", - "log.retention.bytes": "-1", - "log.retention.hours": "168", - "log.roll.hours": "168", - "log.segment.bytes": "1073741824", - "message.max.bytes": "1000000", - "min.insync.replicas": "1", - "num.io.threads": "8", - "num.network.threads": "3", - "num.partitions": "1", - "num.recovery.threads.per.data.dir": "1", - "num.replica.fetchers": "1", - "offset.metadata.max.bytes": "4096", - "offsets.commit.required.acks": "-1", - "offsets.commit.timeout.ms": "5000", - "offsets.load.buffer.size": "5242880", - "offsets.retention.check.interval.ms": "600000", - "offsets.retention.minutes": "86400000", - "offsets.topic.compression.codec": "0", - "offsets.topic.num.partitions": "50", - "offsets.topic.replication.factor": "3", - "offsets.topic.segment.bytes": "104857600", - "port": "6667", - "producer.purgatory.purge.interval.requests": "10000", - "queued.max.requests": "500", - "replica.fetch.max.bytes": "1048576", - "replica.fetch.min.bytes": "1", - "replica.fetch.wait.max.ms": "500", - "replica.high.watermark.checkpoint.interval.ms": "5000", - "replica.lag.max.messages": "4000", - "replica.lag.time.max.ms": "10000", - "replica.socket.receive.buffer.bytes": "65536", - "replica.socket.timeout.ms": "30000", - "socket.receive.buffer.bytes": "102400", - "socket.request.max.bytes": "104857600", - "socket.send.buffer.bytes": "102400", - "zookeeper.connect": "%HOSTGROUP::host_group_1%:2181", - "zookeeper.connection.timeout.ms": "25000", - "zookeeper.session.timeout.ms": "30000", - "zookeeper.sync.time.ms": "2000" - }, - "kafka-env": { - "is_supported_kafka_ranger": "true", - "kafka_log_dir": "/var/log/kafka", - "kafka_pid_dir": "/var/run/kafka", - "kafka_user": "kafka", - "kafka_user_nofile_limit": "128000", - "kafka_user_nproc_limit": "65536" - }, - "knox-env": { - "knox_group": "knox", - "knox_pid_dir": "/var/run/knox", - "knox_user": "knox" - }, - "livy-conf": { - "livy.environment": "production", - "livy.impersonation.enabled": "true", - "livy.server.csrf_protection.enabled": "true", - "livy.server.port": "8998", - "livy.server.session.timeout": "3600000" - }, - "livy-env": { - "livy_group": "livy", - "livy_log_dir": "/var/log/livy", - "livy_pid_dir": "/var/run/livy", - "livy_user": "livy", - "spark_home": "/usr/hdp/current/spark-client" - }, - "logfeeder-env": { - "logfeeder_kerberos_keytab": "/etc/security/keytabs/logfeeder.service.keytab", - "logfeeder_kerberos_principal": "logfeeder", - "logfeeder_keystore_location": "/etc/security/serverKeys/logsearch.keyStore.jks", - "logfeeder_keystore_type": "jks", - "logfeeder_log_dir": "/var/log/ambari-logsearch-logfeeder", - "logfeeder_max_mem": "512m", - "logfeeder_pid_dir": "/var/run/ambari-logsearch-logfeeder", - "logfeeder_solr_audit_logs_enable": "true", - "logfeeder_solr_service_logs_enable": "true", - 
"logfeeder_truststore_location": "/etc/security/serverKeys/logsearch.trustStore.jks", - "logfeeder_truststore_type": "jks" - }, - "logfeeder-properties": { - "logfeeder.checkpoint.folder": "/etc/ambari-logsearch-logfeeder/conf/checkpoints", - "logfeeder.config.files": "{default_config_files}", - "logfeeder.log.filter.enable": "true", - "logfeeder.metrics.collector.hosts": "{metrics_collector_hosts}", - "logfeeder.solr.config.interval": "5" - }, - "logsearch-admin-json": { - "logsearch_admin_username": "ambari_logsearch_admin" - }, - "logsearch-audit_logs-solrconfig": { - "logsearch_audit_logs_max_retention": "7", - "logsearch_audit_logs_merge_factor": "5" - }, - "logsearch-env": { - "logsearch_app_max_memory": "1024", - "logsearch_debug_enabled": "false", - "logsearch_debug_port": "5005", - "logsearch_kerberos_keytab": "/etc/security/keytabs/logsearch.service.keytab", - "logsearch_kerberos_principal": "logsearch", - "logsearch_keystore_location": "/etc/security/serverKeys/logsearch.keyStore.jks", - "logsearch_keystore_type": "jks", - "logsearch_log_dir": "/var/log/ambari-logsearch-portal", - "logsearch_pid_dir": "/var/run/ambari-logsearch-portal", - "logsearch_solr_audit_logs_zk_node": "{infra_solr_znode}", - "logsearch_solr_audit_logs_zk_quorum": "{zookeeper_quorum}", - "logsearch_truststore_location": "/etc/security/serverKeys/logsearch.trustStore.jks", - "logsearch_truststore_type": "jks", - "logsearch_ui_port": "61888", - "logsearch_ui_protocol": "http", - "logsearch_user": "logsearch" - }, - "logsearch-properties": { - "logsearch.audit.logs.split.interval.mins": "15", - "logsearch.collection.audit.logs.numshards": "2", - "logsearch.collection.audit.logs.replication.factor": "1", - "logsearch.collection.service.logs.numshards": "2", - "logsearch.collection.service.logs.replication.factor": "1", - "logsearch.external.auth.enabled": "true", - "logsearch.external.auth.host_url": "{ambari_server_auth_host_url}", - "logsearch.external.auth.login_url": "/api/v1/users/$USERNAME/privileges?fields=*", - "logsearch.logfeeder.include.default.level": "FATAL,ERROR,WARN", - "logsearch.service.logs.fields": "logtime,level,event_count,ip,type,seq_num,path,file,line_number,host,log_message,id", - "logsearch.service.logs.split.interval.mins": "15", - "logsearch.solr.collection.audit.logs": "audit_logs", - "logsearch.solr.collection.service.logs": "hadoop_logs", - "logsearch.solr.metrics.collector.hosts": "{metrics_collector_hosts}" - }, - "logsearch-service_logs-solrconfig": { - "logsearch_service_logs_max_retention": "7", - "logsearch_service_logs_merge_factor": "5" - }, - "mahout-env": { - "mahout_user": "mahout" - }, - "mapred-env": { - "jobhistory_heapsize": "900", - "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce", - "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", - "mapred_user": "mapred", - "mapred_user_nofile_limit": "32768", - "mapred_user_nproc_limit": "65536" - }, - "mapred-site": { - "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "mapreduce.am.max-attempts": "2", - "mapreduce.application.classpath": 
"$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", - "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework", - "mapreduce.cluster.administrators": "hadoop", - "mapreduce.framework.name": "yarn", - "mapreduce.job.counters.max": "130", - "mapreduce.job.emit-timeline-data": "false", - "mapreduce.job.queuename": "default", - "mapreduce.job.reduce.slowstart.completedmaps": "0.05", - "mapreduce.jobhistory.address": "%HOSTGROUP::host_group_1%:10020", - "mapreduce.jobhistory.bind-host": "0.0.0.0", - "mapreduce.jobhistory.done-dir": "/mr-history/done", - "mapreduce.jobhistory.http.policy": "HTTP_ONLY", - "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", - "mapreduce.jobhistory.recovery.enable": "true", - "mapreduce.jobhistory.recovery.store.class": "org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService", - "mapreduce.jobhistory.recovery.store.leveldb.path": "/hadoop/mapreduce/jhs", - "mapreduce.jobhistory.webapp.address": "%HOSTGROUP::host_group_1%:19888", - "mapreduce.map.java.opts": "-Xmx1228m", - "mapreduce.map.log.level": "INFO", - "mapreduce.map.memory.mb": "1536", - "mapreduce.map.output.compress": "false", - "mapreduce.map.sort.spill.percent": "0.7", - "mapreduce.map.speculative": "false", - "mapreduce.output.fileoutputformat.compress": "false", - "mapreduce.output.fileoutputformat.compress.type": "BLOCK", - "mapreduce.reduce.input.buffer.percent": "0.0", - "mapreduce.reduce.java.opts": "-Xmx1638m", - "mapreduce.reduce.log.level": "INFO", - "mapreduce.reduce.memory.mb": "2048", - "mapreduce.reduce.shuffle.fetch.retry.enabled": "1", - "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000", - "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000", - "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", - "mapreduce.reduce.shuffle.merge.percent": "0.66", - "mapreduce.reduce.shuffle.parallelcopies": "30", - "mapreduce.reduce.speculative": "false", - "mapreduce.shuffle.port": "13562", - "mapreduce.task.io.sort.factor": "100", - "mapreduce.task.io.sort.mb": "859", - "mapreduce.task.timeout": "300000", - "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}", - "yarn.app.mapreduce.am.command-opts": "-Xmx819m -Dhdp.version=${hdp.version}", - "yarn.app.mapreduce.am.log.level": "INFO", - "yarn.app.mapreduce.am.resource.mb": "1024", - "yarn.app.mapreduce.am.staging-dir": "/user" - }, - "oozie-env": { - "oozie_admin_port": "11001", - "oozie_admin_users": "{oozie_user}, oozie-admin,falcon", - "oozie_data_dir": "/hadoop/oozie/data", - "oozie_database": "New Derby Database", - "oozie_heapsize": "2048m", - "oozie_log_dir": "/var/log/oozie", - "oozie_permsize": "256m", - "oozie_pid_dir": "/var/run/oozie", - "oozie_tmp_dir": "/var/tmp/oozie", - "oozie_user": "oozie", - "oozie_user_nofile_limit": "32000", - "oozie_user_nproc_limit": "16000" - }, - "oozie-site": { - "oozie.action.retry.interval": "30", - "oozie.authentication.simple.anonymous.allowed": "true", - "oozie.authentication.type": "simple", - "oozie.base.url": 
"http://%HOSTGROUP::host_group_1%:11000/oozie", - "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials", - "oozie.db.schema.name": "oozie", - "oozie.service.AuthorizationService.security.enabled": "true", - "oozie.service.ELService.ext.functions.coord-action-create": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now, today=org.apache.oozie.extensions.OozieELExtensions#ph2_today, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear, latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo, formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-action-create-inst": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst, today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek_inst, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek_inst, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst, latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo, formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-action-start": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now, today=org.apache.oozie.extensions.OozieELExtensions#ph2_today, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear, latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest, future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future, dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn, instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime, dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset, formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-job-submit-data": 
"now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo, today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo, dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo, instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap, formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo, dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-job-submit-instances": "now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo, today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo, formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo, latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo", - "oozie.service.ELService.ext.functions.coord-sla-create": "instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-sla-submit": "instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.workflow": "now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo, today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo, formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo, latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo", - "oozie.service.HadoopAccessorService.hadoop.configurations": "*={{hadoop_conf_dir}}", - "oozie.service.HadoopAccessorService.kerberos.enabled": "false", - "oozie.service.HadoopAccessorService.supported.filesystems": "*", - "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver", - "oozie.service.JPAService.jdbc.username": 
"oozie", - "oozie.service.ProxyUserService.proxyuser.falcon.groups": "*", - "oozie.service.ProxyUserService.proxyuser.falcon.hosts": "*", - "oozie.service.SparkConfigurationService.spark.configurations": "*=spark-conf", - "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler", - "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService" - }, - "ranger-hbase-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hbase/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hbase/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "true" - }, - "ranger-hbase-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "hbase", - "common.name.for.certificate": "", - "policy_user": "ambari-qa", - "ranger-hbase-plugin-enabled": "No" - }, - "ranger-hbase-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hbase-client/conf/ranger-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hbase-client/conf/ranger-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}" - }, - "ranger-hbase-security": { - "ranger.plugin.hbase.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.hbase.policy.pollIntervalMs": "30000", - "ranger.plugin.hbase.policy.rest.ssl.config.file": "/etc/hbase/conf/ranger-policymgr-ssl.xml", - "ranger.plugin.hbase.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.hbase.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.hbase.service.name": "{{repo_name}}", - "xasecure.hbase.update.xapolicies.on.grant.revoke": "true" - }, - "ranger-hdfs-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "false" - }, - "ranger-hdfs-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "hadoop", - "common.name.for.certificate": "", - "hadoop.rpc.protection": "authentication", - "policy_user": "ambari-qa", - "ranger-hdfs-plugin-enabled": "No" - }, - "ranger-hdfs-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": 
"jceks://file{{credential_file}}" - }, - "ranger-hdfs-security": { - "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", - "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", - "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.hdfs.service.name": "{{repo_name}}", - "xasecure.add-hadoop-authorization": "true" - }, - "ranger-hive-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hive/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hive/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "false" - }, - "ranger-hive-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "hive", - "common.name.for.certificate": "", - "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver", - "policy_user": "ambari-qa" - }, - "ranger-hive-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hive-server2/conf/ranger-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hive-server2/conf/ranger-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}" - }, - "ranger-hive-security": { - "ranger.plugin.hive.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.hive.policy.pollIntervalMs": "30000", - "ranger.plugin.hive.policy.rest.ssl.config.file": "/usr/hdp/current/{{ranger_hive_component}}/conf/conf.server/ranger-policymgr-ssl.xml", - "ranger.plugin.hive.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.hive.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.hive.service.name": "{{repo_name}}", - "xasecure.hive.update.xapolicies.on.grant.revoke": "true" - }, - "ranger-kafka-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/kafka/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/kafka/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "true" - }, - "ranger-kafka-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "kafka", - "common.name.for.certificate": "", - "hadoop.rpc.protection": "", - "policy_user": "ambari-qa", - "ranger-kafka-plugin-enabled": "No", - "zookeeper.connect": "localhost:2181" - }, - "ranger-kafka-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/kafka-broker/config/ranger-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file/{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": 
"/usr/hdp/current/kafka-broker/config/ranger-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file/{{credential_file}}" - }, - "ranger-kafka-security": { - "ranger.plugin.kafka.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.kafka.policy.pollIntervalMs": "30000", - "ranger.plugin.kafka.policy.rest.ssl.config.file": "/etc/kafka/conf/ranger-policymgr-ssl.xml", - "ranger.plugin.kafka.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.kafka.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.kafka.service.name": "{{repo_name}}" - }, - "ranger-knox-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/knox/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/knox/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "false" - }, - "ranger-knox-plugin-properties": { - "KNOX_HOME": "/usr/hdp/current/knox-server", - "REPOSITORY_CONFIG_USERNAME": "admin", - "common.name.for.certificate": "", - "policy_user": "ambari-qa", - "ranger-knox-plugin-enabled": "No" - }, - "ranger-knox-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/knox-server/conf/ranger-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/knox-server/conf/ranger-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}" - }, - "ranger-knox-security": { - "ranger.plugin.knox.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.knox.policy.pollIntervalMs": "30000", - "ranger.plugin.knox.policy.rest.ssl.config.file": "/usr/hdp/current/knox-server/conf/ranger-policymgr-ssl.xml", - "ranger.plugin.knox.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.knox.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminJersey2RESTClient", - "ranger.plugin.knox.service.name": "{{repo_name}}" - }, - "ranger-yarn-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/yarn/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/yarn/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "false" - }, - "ranger-yarn-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "yarn", - "common.name.for.certificate": "", - "hadoop.rpc.protection": "", - "policy_user": "ambari-qa", - "ranger-yarn-plugin-enabled": "No" - }, - "ranger-yarn-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": 
"/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}" - }, - "ranger-yarn-security": { - "ranger.plugin.yarn.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.yarn.policy.pollIntervalMs": "30000", - "ranger.plugin.yarn.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml", - "ranger.plugin.yarn.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.yarn.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.yarn.service.name": "{{repo_name}}" - }, - "spark-defaults": { - "spark.driver.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.eventLog.dir": "hdfs:///spark-history", - "spark.eventLog.enabled": "true", - "spark.executor.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.history.fs.logDirectory": "hdfs:///spark-history", - "spark.history.kerberos.keytab": "none", - "spark.history.kerberos.principal": "none", - "spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider", - "spark.history.ui.port": "18080", - "spark.yarn.containerLauncherMaxThreads": "25", - "spark.yarn.driver.memoryOverhead": "384", - "spark.yarn.executor.memoryOverhead": "384", - "spark.yarn.historyServer.address": "{{spark_history_server_host}}:{{spark_history_ui_port}}", - "spark.yarn.preserve.staging.files": "false", - "spark.yarn.queue": "default", - "spark.yarn.scheduler.heartbeat.interval-ms": "5000", - "spark.yarn.submit.file.replication": "3" - }, - "spark-env": { - "hive_kerberos_keytab": "{{hive_kerberos_keytab}}", - "hive_kerberos_principal": "{{hive_kerberos_principal}}", - "spark_daemon_memory": "1024", - "spark_group": "spark", - "spark_log_dir": "/var/log/spark", - "spark_pid_dir": "/var/run/spark", - "spark_thrift_cmd_opts": "", - "spark_user": "spark" - }, - "spark-hive-site-override": { - "hive.metastore.client.connect.retry.delay": "5", - "hive.metastore.client.socket.timeout": "1800", - "hive.server2.enable.doAs": "false", - "hive.server2.thrift.port": "10015", - "hive.server2.transport.mode": "binary" - }, - "spark-thrift-fairscheduler": { - "fairscheduler_content": " FAIR 1 2 " - }, - "spark-thrift-sparkconf": { - "spark.driver.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.dynamicAllocation.enabled": "true", - "spark.dynamicAllocation.initialExecutors": "0", - "spark.dynamicAllocation.maxExecutors": "10", - "spark.dynamicAllocation.minExecutors": "0", - "spark.eventLog.dir": "{{spark_history_dir}}", - "spark.eventLog.enabled": "true", - "spark.executor.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.executor.memory": "1g", - "spark.hadoop.cacheConf": "false", - "spark.history.fs.logDirectory": "{{spark_history_dir}}", - "spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider", - "spark.master": "{{spark_thrift_master}}", - "spark.scheduler.allocation.file": "{{spark_conf}}/spark-thrift-fairscheduler.xml", - "spark.scheduler.mode": "FAIR", - "spark.shuffle.service.enabled": "true", - "spark.yarn.am.memory": "512m", - "spark.yarn.queue": "default" - }, - "spark2-defaults": { - "spark.driver.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.eventLog.dir": "hdfs:///spark2-history/", - "spark.eventLog.enabled": "true", - "spark.executor.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.history.fs.logDirectory": "hdfs:///spark2-history/", - "spark.history.kerberos.keytab": "none", - 
"spark.history.kerberos.principal": "none", - "spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider", - "spark.history.ui.port": "18081", - "spark.yarn.historyServer.address": "{{spark_history_server_host}}:{{spark_history_ui_port}}", - "spark.yarn.queue": "default" - }, - "spark2-env": { - "hive_kerberos_keytab": "{{hive_kerberos_keytab}}", - "hive_kerberos_principal": "{{hive_kerberos_principal}}", - "spark_daemon_memory": "1024", - "spark_group": "spark", - "spark_log_dir": "/var/log/spark2", - "spark_pid_dir": "/var/run/spark2", - "spark_thrift_cmd_opts": "", - "spark_user": "spark" - }, - "spark2-hive-site-override": { - "hive.metastore.client.connect.retry.delay": "5", - "hive.metastore.client.socket.timeout": "1800", - "hive.server2.enable.doAs": "false", - "hive.server2.thrift.port": "10016", - "hive.server2.transport.mode": "binary" - }, - "spark2-thrift-fairscheduler": { - "fairscheduler_content": " FAIR 1 2 " - }, - "spark2-thrift-sparkconf": { - "spark.driver.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.dynamicAllocation.enabled": "true", - "spark.dynamicAllocation.initialExecutors": "0", - "spark.dynamicAllocation.maxExecutors": "10", - "spark.dynamicAllocation.minExecutors": "0", - "spark.eventLog.dir": "{{spark_history_dir}}", - "spark.eventLog.enabled": "true", - "spark.executor.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.hadoop.cacheConf": "false", - "spark.history.fs.logDirectory": "{{spark_history_dir}}", - "spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider", - "spark.master": "{{spark_thrift_master}}", - "spark.scheduler.allocation.file": "{{spark_conf}}/spark-thrift-fairscheduler.xml", - "spark.scheduler.mode": "FAIR", - "spark.shuffle.service.enabled": "true", - "spark.yarn.queue": "default" - }, - "sqoop-atlas-application.properties": { - "atlas.jaas.KafkaClient.option.renewTicket": "true", - "atlas.jaas.KafkaClient.option.useTicketCache": "true" - }, - "sqoop-env": { - "jdbc_drivers": "", - "sqoop_user": "sqoop" - }, - "ssl-client": { - "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", - "ssl.client.keystore.type": "jks", - "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", - "ssl.client.truststore.reload.interval": "10000", - "ssl.client.truststore.type": "jks" - }, - "ssl-server": { - "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", - "ssl.server.keystore.type": "jks", - "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", - "ssl.server.truststore.reload.interval": "10000", - "ssl.server.truststore.type": "jks" - }, - "storm-atlas-application.properties": { - "atlas.hook.storm.numRetries": "3" - }, - "storm-env": { - "jmxremote_port": "56431", - "nimbus_seeds_supported": "true", - "storm_log_dir": "/var/log/storm", - "storm_logs_supported": "true", - "storm_pid_dir": "/var/run/storm", - "storm_user": "storm", - "storm_user_nofile_limit": "128000", - "storm_user_nproc_limit": "65536" - }, - "storm-site": { - "_storm.min.ruid": "null", - "_storm.thrift.nonsecure.transport": "org.apache.storm.security.auth.SimpleTransportPlugin", - "_storm.thrift.secure.transport": "org.apache.storm.security.auth.kerberos.KerberosSaslTransportPlugin", - "client.jartransformer.class": "org.apache.storm.hack.StormShadeTransformer", - "dev.zookeeper.path": "/tmp/dev-storm-zookeeper", - "drpc.childopts": "-Xmx768m _JAAS_PLACEHOLDER", - "drpc.invocations.port": "3773", - "drpc.port": "3772", - "drpc.queue.size": "128", - 
"drpc.request.timeout.secs": "600", - "drpc.worker.threads": "64", - "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib", - "logviewer.appender.name": "A1", - "logviewer.childopts": "-Xmx128m _JAAS_PLACEHOLDER", - "logviewer.port": "8000", - "metrics.reporter.register": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter", - "nimbus.cleanup.inbox.freq.secs": "600", - "nimbus.file.copy.expiration.secs": "600", - "nimbus.inbox.jar.expiration.secs": "3600", - "nimbus.monitor.freq.secs": "10", - "nimbus.reassign": "true", - "nimbus.seeds": "['%HOSTGROUP::host_group_2%']", - "nimbus.supervisor.timeout.secs": "60", - "nimbus.task.launch.secs": "120", - "nimbus.task.timeout.secs": "30", - "nimbus.thrift.max_buffer_size": "1048576", - "nimbus.thrift.port": "6627", - "nimbus.thrift.threads": "196", - "nimbus.topology.validator": "org.apache.storm.nimbus.DefaultTopologyValidator", - "storm.cluster.mode": "distributed", - "storm.local.dir": "/hadoop/storm", - "storm.local.mode.zmq": "false", - "storm.log.dir": "{{log_dir}}", - "storm.messaging.netty.buffer_size": "5242880", - "storm.messaging.netty.client_worker_threads": "1", - "storm.messaging.netty.max_retries": "30", - "storm.messaging.netty.max_wait_ms": "1000", - "storm.messaging.netty.min_wait_ms": "100", - "storm.messaging.netty.server_worker_threads": "1", - "storm.messaging.transport": "org.apache.storm.messaging.netty.Context", - "storm.thrift.transport": "{{storm_thrift_transport}}", - "storm.topology.submission.notifier.plugin.class": "org.apache.atlas.storm.hook.StormAtlasHook", - "storm.zookeeper.connection.timeout": "30000", - "storm.zookeeper.port": "2181", - "storm.zookeeper.retry.interval": "1000", - "storm.zookeeper.retry.intervalceiling.millis": "30000", - "storm.zookeeper.retry.times": "5", - "storm.zookeeper.root": "/storm", - "storm.zookeeper.servers": "['%HOSTGROUP::host_group_1%']", - "storm.zookeeper.session.timeout": "30000", - "supervisor.heartbeat.frequency.secs": "5", - "supervisor.monitor.frequency.secs": "3", - "supervisor.slots.ports": "[6700, 6701]", - "supervisor.worker.start.timeout.secs": "120", - "supervisor.worker.timeout.secs": "30", - "task.heartbeat.frequency.secs": "3", - "task.refresh.poll.secs": "10", - "topology.acker.executors": "null", - "topology.builtin.metrics.bucket.size.secs": "60", - "topology.debug": "false", - "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy", - "topology.enable.message.timeouts": "true", - "topology.error.throttle.interval.secs": "10", - "topology.executor.receive.buffer.size": "1024", - "topology.executor.send.buffer.size": "1024", - "topology.fall.back.on.java.serialization": "true", - "topology.kryo.factory": "org.apache.storm.serialization.DefaultKryoFactory", - "topology.max.error.report.per.interval": "5", - "topology.max.replication.wait.time.sec": "{{actual_topology_max_replication_wait_time_sec}}", - "topology.max.replication.wait.time.sec.default": "60", - "topology.max.spout.pending": "1000", - "topology.max.task.parallelism": "null", - "topology.message.timeout.secs": "30", - "topology.min.replication.count": "{{actual_topology_min_replication_count}}", - "topology.min.replication.count.default": "1", - "topology.optimize": "true", - "topology.receiver.buffer.size": "8", - "topology.skip.missing.kryo.registrations": "false", - "topology.sleep.spout.wait.strategy.time.ms": "1", - "topology.spout.wait.strategy": "org.apache.storm.spout.SleepSpoutWaitStrategy", - 
"topology.state.synchronization.timeout.secs": "60", - "topology.stats.sample.rate": "0.05", - "topology.tick.tuple.freq.secs": "null", - "topology.transfer.buffer.size": "1024", - "topology.trident.batch.emit.interval.millis": "500", - "topology.tuple.serializer": "org.apache.storm.serialization.types.ListDelegateSerializer", - "topology.worker.childopts": "null", - "topology.worker.shared.thread.pool.size": "4", - "topology.workers": "1", - "transactional.zookeeper.port": "null", - "transactional.zookeeper.root": "/transactional", - "transactional.zookeeper.servers": "null", - "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER", - "ui.filter": "null", - "ui.port": "8744", - "worker.heartbeat.frequency.secs": "1", - "zmq.hwm": "0", - "zmq.linger.millis": "5000", - "zmq.threads": "1" - }, - "tez-env": { - "tez_user": "tez" - }, - "tez-interactive-site": { - "tez.am.resource.memory.mb": "1536", - "tez.dag.recovery.enabled": "false", - "tez.grouping.node.local.only": "true", - "tez.lib.uris": "/hdp/apps/${hdp.version}/tez_hive2/tez.tar.gz", - "tez.runtime.pipelined-shuffle.enabled": "false", - "tez.runtime.pipelined.sorter.lazy-allocate.memory": "true", - "tez.runtime.report.partition.stats": "true", - "tez.runtime.shuffle.fetch.buffer.percent": "0.6", - "tez.runtime.shuffle.fetch.verify-disk-checksum": "false", - "tez.runtime.shuffle.memory.limit.percent": "0.25", - "tez.session.am.dag.submit.timeout.secs": "3600" - }, - "tez-site": { - "tez.am.am-rm.heartbeat.interval-ms.max": "250", - "tez.am.container.idle.release-timeout-max.millis": "20000", - "tez.am.container.idle.release-timeout-min.millis": "10000", - "tez.am.container.reuse.enabled": "true", - "tez.am.container.reuse.locality.delay-allocation-millis": "250", - "tez.am.container.reuse.non-local-fallback.enabled": "false", - "tez.am.container.reuse.rack-fallback.enabled": "true", - "tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", - "tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "tez.am.log.level": "INFO", - "tez.am.max.app.attempts": "2", - "tez.am.maxtaskfailures.per.node": "10", - "tez.am.resource.memory.mb": "2048", - "tez.am.tez-ui.history-url.template": "__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__", - "tez.am.view-acls": "*", - "tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", - "tez.counters.max": "10000", - "tez.counters.max.groups": "3000", - "tez.generate.debug.artifacts": "false", - "tez.grouping.max-size": "1073741824", - "tez.grouping.min-size": "16777216", - "tez.grouping.split-waves": "1.7", - "tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSV15HistoryLoggingService", - "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz", - "tez.queue.name": "default", - "tez.runtime.compress": "true", - "tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec", - "tez.runtime.convert.user-payload.to.history-text": "false", - "tez.runtime.io.sort.mb": "270", - "tez.runtime.optimize.local.fetch": "true", - "tez.runtime.pipelined.sorter.sort.threads": "2", - "tez.runtime.shuffle.fetch.buffer.percent": "0.6", - "tez.runtime.shuffle.memory.limit.percent": "0.25", - "tez.runtime.sorter.class": "PIPELINED", - 
"tez.runtime.unordered.output.buffer.size-mb": "76", - "tez.session.am.dag.submit.timeout.secs": "600", - "tez.session.client.timeout.secs": "-1", - "tez.shuffle-vertex-manager.max-src-fraction": "0.4", - "tez.shuffle-vertex-manager.min-src-fraction": "0.2", - "tez.staging-dir": "/tmp/${user.name}/staging", - "tez.task.am.heartbeat.counter.interval-ms.max": "4000", - "tez.task.generate.counters.per.io": "true", - "tez.task.get-task.sleep.interval-ms.max": "200", - "tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", - "tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "tez.task.max-events-per-heartbeat": "500", - "tez.task.resource.memory.mb": "1024", - "tez.use.cluster.hadoop-libs": "false" - }, - "webhcat-site": { - "templeton.exec.timeout": "60000", - "templeton.hadoop": "/usr/hdp/${hdp.version}/hadoop/bin/hadoop", - "templeton.hadoop.conf.dir": "/etc/hadoop/conf", - "templeton.hadoop.queue.name": "default", - "templeton.hcat": "/usr/hdp/${hdp.version}/hive/bin/hcat", - "templeton.hcat.home": "hive.tar.gz/hive/hcatalog", - "templeton.hive.archive": "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz", - "templeton.hive.extra.files": "/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib", - "templeton.hive.home": "hive.tar.gz/hive", - "templeton.hive.path": "hive.tar.gz/hive/bin/hive", - "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://%HOSTGROUP::host_group_1%:9083,hive.metastore.sasl.enabled=false", - "templeton.jar": "/usr/hdp/${hdp.version}/hive/share/webhcat/svr/lib/hive-webhcat-*.jar", - "templeton.libjars": "/usr/hdp/${hdp.version}/zookeeper/zookeeper.jar,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar", - "templeton.override.enabled": "false", - "templeton.pig.archive": "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz", - "templeton.pig.path": "pig.tar.gz/pig/bin/pig", - "templeton.port": "50111", - "templeton.python": "${env.PYTHON_CMD}", - "templeton.sqoop.archive": "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz", - "templeton.sqoop.home": "sqoop.tar.gz/sqoop", - "templeton.sqoop.path": "sqoop.tar.gz/sqoop/bin/sqoop", - "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage", - "templeton.streaming.jar": "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar", - "templeton.zookeeper.hosts": "%HOSTGROUP::host_group_1%:2181", - "webhcat.proxyuser.root.groups": "*", - "webhcat.proxyuser.root.hosts": "vgt-imaster-0.novalocal" - }, - "yarn-env": { - "apptimelineserver_heapsize": "1024", - "is_supported_yarn_ranger": "true", - "min_user_id": "1000", - "nodemanager_heapsize": "1024", - "resourcemanager_heapsize": "1024", - "service_check.queue.name": "default", - "yarn_cgroups_enabled": "false", - "yarn_heapsize": "1024", - "yarn_log_dir_prefix": "/var/log/hadoop-yarn", - "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", - "yarn_user": "yarn", - "yarn_user_nofile_limit": "32768", - "yarn_user_nproc_limit": "65536" - }, - "yarn-site": { - "hadoop.registry.rm.enabled": "true", - "hadoop.registry.zk.quorum": "%HOSTGROUP::host_group_1%:2181", - "yarn.acl.enable": "false", - "yarn.admin.acl": "yarn", - "yarn.application.classpath": 
"$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*", - "yarn.client.nodemanager-connect.max-wait-ms": "60000", - "yarn.client.nodemanager-connect.retry-interval-ms": "10000", - "yarn.http.policy": "HTTP_ONLY", - "yarn.log-aggregation-enable": "true", - "yarn.log-aggregation.retain-seconds": "2592000", - "yarn.log.server.url": "http://%HOSTGROUP::host_group_1%:19888/jobhistory/logs", - "yarn.node-labels.enabled": "false", - "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500", - "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels", - "yarn.nodemanager.address": "0.0.0.0:45454", - "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", - "yarn.nodemanager.aux-services": "mapreduce_shuffle,spark_shuffle,spark2_shuffle", - "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler", - "yarn.nodemanager.aux-services.spark2_shuffle.class": "org.apache.spark.network.yarn.YarnShuffleService", - "yarn.nodemanager.aux-services.spark2_shuffle.classpath": "{{stack_root}}/${hdp.version}/spark2/aux/*", - "yarn.nodemanager.aux-services.spark_shuffle.class": "org.apache.spark.network.yarn.YarnShuffleService", - "yarn.nodemanager.aux-services.spark_shuffle.classpath": "{{stack_root}}/${hdp.version}/spark/aux/*", - "yarn.nodemanager.bind-host": "0.0.0.0", - "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", - "yarn.nodemanager.container-metrics.unregister-delay-ms": "60000", - "yarn.nodemanager.container-monitor.interval-ms": "3000", - "yarn.nodemanager.delete.debug-delay-sec": "0", - "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90", - "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000", - "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", - "yarn.nodemanager.health-checker.interval-ms": "135000", - "yarn.nodemanager.health-checker.script.timeout-ms": "60000", - "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn", - "yarn.nodemanager.linux-container-executor.cgroups.mount": "false", - "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false", - "yarn.nodemanager.linux-container-executor.group": "hadoop", - "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler", - "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", - "yarn.nodemanager.log-aggregation.compression-type": "gz", - "yarn.nodemanager.log-aggregation.debug-enabled": "false", - "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30", - "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "3600", - "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", - "yarn.nodemanager.log.retain-second": "604800", - "yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state", - "yarn.nodemanager.recovery.enabled": "true", - "yarn.nodemanager.remote-app-log-dir": "/app-logs", - "yarn.nodemanager.remote-app-log-dir-suffix": "logs", - "yarn.nodemanager.resource.cpu-vcores": "6", - "yarn.nodemanager.resource.memory-mb": "9216", - "yarn.nodemanager.resource.percentage-physical-cpu-limit": "80", - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.nodemanager.vmem-pmem-ratio": "2.1", - 
"yarn.resourcemanager.address": "%HOSTGROUP::host_group_1%:8050", - "yarn.resourcemanager.admin.address": "%HOSTGROUP::host_group_1%:8141", - "yarn.resourcemanager.am.max-attempts": "2", - "yarn.resourcemanager.bind-host": "0.0.0.0", - "yarn.resourcemanager.connect.max-wait.ms": "900000", - "yarn.resourcemanager.connect.retry-interval.ms": "30000", - "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500", - "yarn.resourcemanager.fs.state-store.uri": "", - "yarn.resourcemanager.ha.enabled": "false", - "yarn.resourcemanager.hostname": "%HOSTGROUP::host_group_1%", - "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", - "yarn.resourcemanager.recovery.enabled": "true", - "yarn.resourcemanager.resource-tracker.address": "%HOSTGROUP::host_group_1%:8025", - "yarn.resourcemanager.scheduler.address": "%HOSTGROUP::host_group_1%:8030", - "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", - "yarn.resourcemanager.scheduler.monitor.enable": "false", - "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}", - "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore", - "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10", - "yarn.resourcemanager.system-metrics-publisher.enabled": "true", - "yarn.resourcemanager.webapp.address": "%HOSTGROUP::host_group_1%:8088", - "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false", - "yarn.resourcemanager.webapp.https.address": "%HOSTGROUP::host_group_1%:8090", - "yarn.resourcemanager.work-preserving-recovery.enabled": "true", - "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000", - "yarn.resourcemanager.zk-acl": "world:anyone:rwcda", - "yarn.resourcemanager.zk-address": "%HOSTGROUP::host_group_1%:2181", - "yarn.resourcemanager.zk-num-retries": "1000", - "yarn.resourcemanager.zk-retry-interval-ms": "1000", - "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore", - "yarn.resourcemanager.zk-timeout-ms": "10000", - "yarn.scheduler.maximum-allocation-mb": "9216", - "yarn.scheduler.maximum-allocation-vcores": "6", - "yarn.scheduler.minimum-allocation-mb": "1024", - "yarn.scheduler.minimum-allocation-vcores": "1", - "yarn.timeline-service.address": "%HOSTGROUP::host_group_1%:10200", - "yarn.timeline-service.bind-host": "0.0.0.0", - "yarn.timeline-service.client.max-retries": "30", - "yarn.timeline-service.client.retry-interval-ms": "1000", - "yarn.timeline-service.enabled": "true", - "yarn.timeline-service.entity-group-fs-store.active-dir": "/ats/active/", - "yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds": "3600", - "yarn.timeline-service.entity-group-fs-store.done-dir": "/ats/done/", - "yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes": "org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl,org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin", - "yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath": "/usr/hdp/${hdp.version}/spark/hdpLib/*", - "yarn.timeline-service.entity-group-fs-store.retain-seconds": "604800", - "yarn.timeline-service.entity-group-fs-store.scan-interval-seconds": "60", - "yarn.timeline-service.entity-group-fs-store.summary-store": "org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore", - "yarn.timeline-service.generic-application-history.store-class": 
"org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore", - "yarn.timeline-service.http-authentication.proxyuser.root.groups": "*", - "yarn.timeline-service.http-authentication.proxyuser.root.hosts": "vgt-imaster-0.novalocal", - "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true", - "yarn.timeline-service.http-authentication.type": "simple", - "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline", - "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline", - "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600", - "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000", - "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000", - "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000", - "yarn.timeline-service.recovery.enabled": "true", - "yarn.timeline-service.state-store-class": "org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore", - "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore", - "yarn.timeline-service.ttl-enable": "true", - "yarn.timeline-service.ttl-ms": "2678400000", - "yarn.timeline-service.version": "1.5", - "yarn.timeline-service.webapp.address": "%HOSTGROUP::host_group_1%:8188", - "yarn.timeline-service.webapp.https.address": "%HOSTGROUP::host_group_1%:8190" - }, - "zeppelin-config": { - "zeppelin.anonymous.allowed": "true", - "zeppelin.interpreter.connect.timeout": "30000", - "zeppelin.interpreter.dir": "interpreter", - "zeppelin.interpreters": "org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.jdbc.JDBCInterpreter,org.apache.zeppelin.phoenix.PhoenixInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInterpreter,org.apache.zeppelin.livy.LivySparkRInterpreter,org.apache.zeppelin.livy.LivySparkSQLInterpreter", - "zeppelin.notebook.dir": "notebook", - "zeppelin.notebook.homescreen": "", - "zeppelin.notebook.homescreen.hide": "false", - "zeppelin.notebook.s3.bucket": "zeppelin", - "zeppelin.notebook.s3.user": "user", - "zeppelin.notebook.storage": "org.apache.zeppelin.notebook.repo.VFSNotebookRepo", - "zeppelin.server.addr": "0.0.0.0", - "zeppelin.server.allowed.origins": "*", - "zeppelin.server.port": "9995", - "zeppelin.ssl": "false", - "zeppelin.ssl.client.auth": "false", - "zeppelin.ssl.keystore.path": "conf/keystore", - "zeppelin.ssl.keystore.type": "JKS", - "zeppelin.ssl.truststore.path": "conf/truststore", - "zeppelin.ssl.truststore.type": "JKS", - "zeppelin.websocket.max.text.message.size": "1024000" - }, - "zeppelin-env": { - "log4j_properties_content": "log4j.rootLogger = INFO, dailyfile log4j.appender.stdout = org.apache.log4j.ConsoleAppender log4j.appender.stdout.layout = org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n log4j.appender.dailyfile.DatePattern=.yyyy-MM-dd log4j.appender.dailyfile.Threshold = INFO log4j.appender.dailyfile = org.apache.log4j.DailyRollingFileAppender log4j.appender.dailyfile.File = ${zeppelin.log.file} log4j.appender.dailyfile.layout = org.apache.log4j.PatternLayout 
log4j.appender.dailyfile.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n", - "shiro_ini_content": "[users] # List of users with their password allowed to access Zeppelin. # To use a different strategy (LDAP / Database / ...) check the shiro doc at http://shiro.apache.org/configuration.html#Configuration-INISections #admin = password1 #user1 = password2, role1, role2 #user2 = password3, role3 #user3 = password4, role2 # Sample LDAP configuration, for user Authentication, currently tested for single Realm [main] #activeDirectoryRealm = org.apache.zeppelin.server.ActiveDirectoryGroupRealm #activeDirectoryRealm.systemUsername = CN=Administrator,CN=Users,DC=HW,DC=EXAMPLE,DC=COM #activeDirectoryRealm.systemPassword = Password1! #activeDirectoryRealm.hadoopSecurityCredentialPath = jceks://user/zeppelin/zeppelin.jceks #activeDirectoryRealm.searchBase = CN=Users,DC=HW,DC=TEST,DC=COM #activeDirectoryRealm.url = ldap://ad-nano.test.example.com:389 #activeDirectoryRealm.groupRolesMap = \"\" #activeDirectoryRealm.authorizationCachingEnabled = true #ldapRealm = org.apache.shiro.realm.ldap.JndiLdapRealm #ldapRealm.userDnTemplate = uid={0},cn=users,cn=accounts,dc=example,dc=com #ldapRealm.contextFactory.url = ldap://ldaphost:389 #ldapRealm.contextFactory.authenticationMechanism = SIMPLE #sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager #securityManager.sessionManager = $sessionManager # 86,400,000 milliseconds = 24 hours #securityManager.sessionManager.globalSessionTimeout = 86400000 shiro.loginUrl = /api/login [urls] # anon means the access is anonymous. # authcBasic means Basic Auth Security # To enforce security, comment the line below and uncomment the next one /api/version = anon /** = anon #/** = authc", - "zeppelin.executor.instances": "2", - "zeppelin.executor.mem": "512m", - "zeppelin.server.kerberos.keytab": "", - "zeppelin.server.kerberos.principal": "", - "zeppelin.spark.jar.dir": "/apps/zeppelin", - "zeppelin_env_content": "# Spark master url. eg. spark://master_addr:7077. Leave empty if you want to use local mode export MASTER=yarn-client export SPARK_YARN_JAR={{spark_jar}} # Where log files are stored. PWD by default. export ZEPPELIN_LOG_DIR={{zeppelin_log_dir}} # The pid files are stored. /tmp by default. export ZEPPELIN_PID_DIR={{zeppelin_pid_dir}} export JAVA_HOME={{java64_home}} # Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS=\"-Dspark.executor.memory=8g -Dspark.cores.max=16\" export ZEPPELIN_JAVA_OPTS=\"-Dhdp.version={{full_stack_version}} -Dspark.executor.memory={{executor_mem}} -Dspark.executor.instances={{executor_instances}} -Dspark.yarn.queue={{spark_queue}}\" # Zeppelin jvm mem options Default -Xmx1024m -XX:MaxPermSize=512m # export ZEPPELIN_MEM # zeppelin interpreter process jvm mem options. Default = ZEPPELIN_MEM # export ZEPPELIN_INTP_MEM # zeppelin interpreter process jvm options. Default = ZEPPELIN_JAVA_OPTS # export ZEPPELIN_INTP_JAVA_OPTS # Where notebook saved # export ZEPPELIN_NOTEBOOK_DIR # Id of notebook to be displayed in homescreen. ex) 2A94M5J1Z # export ZEPPELIN_NOTEBOOK_HOMESCREEN # hide homescreen notebook from list when this value set to \"true\". default \"false\" # export ZEPPELIN_NOTEBOOK_HOMESCREEN_HIDE # Bucket where notebook saved # export ZEPPELIN_NOTEBOOK_S3_BUCKET # User in bucket where notebook saved. For example bucket/user/notebook/2A94M5J1Z/note.json # export ZEPPELIN_NOTEBOOK_S3_USER # A string representing this instance of zeppelin. 
$USER by default # export ZEPPELIN_IDENT_STRING # The scheduling priority for daemons. Defaults to 0. # export ZEPPELIN_NICENESS #### Spark interpreter configuration #### ## Use provided spark installation ## ## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit ## # (required) When it is defined, load it instead of Zeppelin embedded Spark libraries export SPARK_HOME={{spark_home}} # (optional) extra options to pass to spark submit. eg) \"--driver-memory 512M --executor-memory 1G\". # export SPARK_SUBMIT_OPTIONS ## Use embedded spark binaries ## ## without SPARK_HOME defined, Zeppelin still able to run spark interpreter process using embedded spark binaries. ## however, it is not encouraged when you can define SPARK_HOME ## # Options read in YARN client mode # yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR. export HADOOP_CONF_DIR=/etc/hadoop/conf # Pyspark (supported with Spark 1.2.1 and above) # To configure pyspark, you need to set spark distribution's path to 'spark.home' property in Interpreter setting screen in Zeppelin GUI # path to the python command. must be the same path on the driver(Zeppelin) and all workers. # export PYSPARK_PYTHON export PYTHONPATH=\"${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.8.2.1-src.zip\" export SPARK_YARN_USER_ENV=\"PYTHONPATH=${PYTHONPATH}\" ## Spark interpreter options ## ## # Use HiveContext instead of SQLContext if set true. true by default. # export ZEPPELIN_SPARK_USEHIVECONTEXT # Execute multiple SQL concurrently if set true. false by default. # export ZEPPELIN_SPARK_CONCURRENTSQL # Max number of SparkSQL result to display. 1000 by default. # export ZEPPELIN_SPARK_MAXRESULT", - "zeppelin_group": "zeppelin", - "zeppelin_log_dir": "/var/log/zeppelin", - "zeppelin_pid_dir": "/var/run/zeppelin", - "zeppelin_user": "zeppelin" - }, - "zoo.cfg": { - "autopurge.purgeInterval": "24", - "autopurge.snapRetainCount": "30", - "clientPort": "2181", - "dataDir": "/hadoop/zookeeper", - "initLimit": "10", - "syncLimit": "5", - "tickTime": "2000" - }, - "zookeeper-env": { - "zk_log_dir": "/var/log/zookeeper", - "zk_pid_dir": "/var/run/zookeeper", - "zk_user": "zookeeper" - } -} \ No newline at end of file diff --git a/sahara_plugin_ambari/plugins/ambari/resources/configs-2.6.json b/sahara_plugin_ambari/plugins/ambari/resources/configs-2.6.json deleted file mode 100644 index 72e846a..0000000 --- a/sahara_plugin_ambari/plugins/ambari/resources/configs-2.6.json +++ /dev/null @@ -1,2008 +0,0 @@ -{ - "accumulo-env": { - "accumulo_gc_heapsize": "256", - "accumulo_instance_name": "hdp-accumulo-instance", - "accumulo_log_dir": "/var/log/accumulo", - "accumulo_master_heapsize": "1024", - "accumulo_monitor_bind_all": "false", - "accumulo_monitor_heapsize": "1024", - "accumulo_other_heapsize": "1024", - "accumulo_pid_dir": "/var/run/accumulo", - "accumulo_tserver_heapsize": "1536", - "accumulo_user": "accumulo", - "server_content": "#! 
/usr/bin/env bash export HADOOP_PREFIX={{hadoop_prefix}} export HADOOP_CONF_DIR={{hadoop_conf_dir}} export JAVA_HOME={{java64_home}} export ZOOKEEPER_HOME={{zookeeper_home}} export ACCUMULO_PID_DIR={{pid_dir}} export ACCUMULO_LOG_DIR={{log_dir}} export ACCUMULO_CONF_DIR={{server_conf_dir}} export ACCUMULO_TSERVER_OPTS=\"-Xmx{{accumulo_tserver_heapsize}}m -Xms{{accumulo_tserver_heapsize}}m\" export ACCUMULO_MASTER_OPTS=\"-Xmx{{accumulo_master_heapsize}}m -Xms{{accumulo_master_heapsize}}m\" export ACCUMULO_MONITOR_OPTS=\"-Xmx{{accumulo_monitor_heapsize}}m -Xms{{accumulo_monitor_heapsize}}m\" export ACCUMULO_GC_OPTS=\"-Xmx{{accumulo_gc_heapsize}}m -Xms{{accumulo_gc_heapsize}}m\" export ACCUMULO_GENERAL_OPTS=\"-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -Djava.net.preferIPv4Stack=true ${ACCUMULO_GENERAL_OPTS}\" export ACCUMULO_OTHER_OPTS=\"-Xmx{{accumulo_other_heapsize}}m -Xms{{accumulo_other_heapsize}}m ${ACCUMULO_OTHER_OPTS}\" export ACCUMULO_MONITOR_BIND_ALL={{monitor_bind_str}} # what do when the JVM runs out of heap memory export ACCUMULO_KILL_CMD='kill -9 %p'" - }, - "accumulo-log4j": { - "audit_log_level": "OFF", - "debug_log_size": "512M", - "debug_num_logs": "10", - "info_log_size": "512M", - "info_num_logs": "10", - "monitor_forwarding_log_level": "WARN" - }, - "accumulo-site": { - "gc.port.client": "50092", - "general.classpaths": "$ACCUMULO_HOME/lib/accumulo-server.jar, $ACCUMULO_HOME/lib/accumulo-core.jar, $ACCUMULO_HOME/lib/accumulo-start.jar, $ACCUMULO_HOME/lib/accumulo-fate.jar, $ACCUMULO_HOME/lib/accumulo-proxy.jar, $ACCUMULO_HOME/lib/[^.].*.jar, $ZOOKEEPER_HOME/zookeeper[^.].*.jar, $HADOOP_CONF_DIR, /usr/hdp/current/hadoop-client/[^.].*.jar, /usr/hdp/current/hadoop-client/lib/(?!slf4j)[^.].*.jar, /usr/hdp/current/hadoop-hdfs-client/[^.].*.jar, /usr/hdp/current/hadoop-mapreduce-client/[^.].*.jar, /usr/hdp/current/hadoop-yarn-client/[^.].*.jar, /usr/hdp/current/hadoop-yarn-client/lib/jersey.*.jar, /usr/hdp/current/hive-client/lib/hive-accumulo-handler.jar", - "instance.volumes": "hdfs://%HOSTGROUP::host_group_1%:8020/apps/accumulo/data", - "instance.zookeeper.host": "%HOSTGROUP::host_group_1%:2181", - "instance.zookeeper.timeout": "30s", - "master.port.client": "9999", - "monitor.port.client": "50095", - "monitor.port.log4j": "4560", - "trace.port.client": "12234", - "trace.user": "trace", - "tserver.cache.data.size": "128M", - "tserver.cache.index.size": "256M", - "tserver.memory.maps.max": "1G", - "tserver.memory.maps.native.enabled": "true", - "tserver.port.client": "9997", - "tserver.sort.buffer.size": "200M", - "tserver.walog.max.size": "1G" - }, - "activity-conf": { - "activity.explorer.user": "activity_explorer", - "global.activity.analyzer.user": "activity_analyzer", - "global.activity.processing.parallelism": "8", - "global.activity.processor.pool.max.wait.seconds": "60", - "hdfs.activity.watcher.enabled": "true", - "mr_job.activity.watcher.enabled": "true", - "mr_job.max.job.size.mb.for.parallel.execution": "500", - "phoenix.sink.batch.size": "100", - "phoenix.sink.flush.interval.seconds": "30", - "tez_job.activity.watcher.enabled": "true", - "tez_job.tmp.dir": "/var/lib/smartsense/activity-analyzer/tez/tmp/", - "yarn_app.activity.watcher.enabled": "true" - }, - "activity-env": { - "activity-env-content": "#!/bin/bash # Copyright 2014 The Apache Software Foundation # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. 
See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # \"License\"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Enable verbose shell execution #set -xv ## Set HOME for various components export HADOOP_HOME=/usr/hdp/current/hadoop-client export HDFS_HOME=/usr/hdp/current/hadoop-hdfs-client export MAPREDUCE_HOME=/usr/hdp/current/hadoop-mapreduce-client export YARN_HOME=/usr/hdp/current/hadoop-yarn-client export HIVE_HOME=/usr/hdp/current/hive-client export HCAT_HOME=/usr/hdp/current/hive-webhcat export TEZ_HOME=/usr/hdp/current/tez-client export HBASE_HOME=/usr/hdp/current/hbase-client export PHOENIX_HOME=/usr/hdp/current/phoenix-client export ACTIVITY_ANALYZER_HOME=/usr/hdp/share/hst/activity-analyzer export AMS_COLLECTOR_HOME=/usr/lib/ambari-metrics-collector ## Set conf dir for various components export HADOOP_CONF_DIR=/etc/hadoop/conf/ export HIVE_CONF_DIR=/etc/hive/conf/ export HBASE_CONF_DIR=/etc/hbase/conf/ export TEZ_CONF_DIR=/etc/tez/conf/ export ACTIVITY_ANALYZER_CONF_DIR=/etc/smartsense-activity/conf/ export AMS_HBASE_CONF=/etc/ams-hbase/conf export DEBUG_ENABLED=false" - }, - "activity-log4j": { - "activity-log4j-content": "# Copyright 2014 The Apache Software Foundation # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # \"License\"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Define some default values that can be overridden by system properties # Root logger option log4j.rootLogger=INFO,file log4j.appender.file=org.apache.log4j.RollingFileAppender log4j.appender.file.File={{activity_log_dir}}/${log.file.name} log4j.appender.file.MaxFileSize=30MB log4j.appender.file.MaxBackupIndex=10 log4j.appender.file.layout=org.apache.log4j.PatternLayout log4j.appender.file.layout.ConversionPattern=%d{DATE} %5p [%t] %c{1}:%L - %m%n", - "activity_log_dir": "/var/log/smartsense-activity" - }, - "activity-zeppelin-env": { - "activity-zeppelin-env-content": "#!/bin/bash # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the \"License\"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # export JAVA_HOME={{java_home}} # Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS=\"-Dspark.executor.memory=8g -Dspark.cores.max=16\" export ZEPPELIN_JAVA_OPTS=\"-Dhdp.version={{hdp_version}} -Dlog.file.name=activity-explorer.log -DSmartSenseActivityExplorer\" # export ZEPPELIN_MEM # Zeppelin jvm mem options Default -Xmx1024m -XX:MaxPermSize=512m # export ZEPPELIN_INTP_MEM # zeppelin interpreter process jvm mem options. Default = ZEPPELIN_MEM # export ZEPPELIN_INTP_JAVA_OPTS # zeppelin interpreter process jvm options. Default = ZEPPELIN_JAVA_OPTS export ZEPPELIN_LOG_DIR={{activity_log_dir}} export ZEPPELIN_PID_DIR=/var/run/smartsense-activity-explorer export ZEPPELIN_WAR_TEMPDIR=/var/lib/smartsense/activity-explorer/webapp export ZEPPELIN_NOTEBOOK_DIR=/var/lib/smartsense/activity-explorer/notebook export ZEPPELIN_CLASSPATH=\"/etc/ams-hbase/conf:${ZEPPELIN_CLASSPATH}\" export CLASSPATH=${ZEPPELIN_CLASSPATH}" - }, - "activity-zeppelin-interpreter": { - "activity-zeppelin-interpreter-content": "{ \"interpreterSettings\": { \"2BJB693M8\": { \"id\": \"2BJB693M8\", \"name\": \"phoenix\", \"group\": \"phoenix\", \"properties\": { \"phoenix.jdbc.url\": \"{{activity_explorer_jdbc_url}}\", \"phoenix.user\": \"\", \"phoenix.password\": \"\", \"phoenix.max.result\": \"1000\", \"phoenix.driver.name\": \"org.apache.phoenix.jdbc.PhoenixDriver\" }, \"interpreterGroup\": [ { \"class\": \"org.apache.zeppelin.phoenix.PhoenixInterpreter\", \"name\": \"sql\" } ], \"dependencies\": [], \"option\": { \"remote\": true, \"perNoteSession\": false } } }, \"interpreterBindings\": { \"2BNVQJUBK\": [ \"2BJB693M8\" ], \"2BPD7951H\": [ \"2BJB693M8\" ], \"2BQH91X36\": [ \"2BJB693M8\" ], \"2BTCVPTMH\": [ \"2BJB693M8\" ] }, \"interpreterRepositories\": [ { \"id\": \"central\", \"type\": \"default\", \"url\": \"http://repo1.maven.org/maven2/\", \"releasePolicy\": { \"enabled\": true, \"updatePolicy\": \"daily\", \"checksumPolicy\": \"warn\" }, \"snapshotPolicy\": { \"enabled\": true, \"updatePolicy\": \"daily\", \"checksumPolicy\": \"warn\" }, \"mirroredRepositories\": [], \"repositoryManager\": false } ] }" - }, - "activity-zeppelin-shiro": { - "main.securityManager.sessionManager": "$sessionManager", - "main.sessionManager": "org.apache.shiro.web.session.mgt.DefaultWebSessionManager", - "securityManager.sessionManager.globalSessionTimeout": "86400000" - }, - "activity-zeppelin-site": { - "zeppelin.anonymous.allowed": "false", - "zeppelin.interpreter.connect.timeout": "30000", - "zeppelin.interpreter.dir": "/usr/hdp/share/hst/activity-explorer/interpreter", - "zeppelin.interpreters": "org.apache.zeppelin.phoenix.PhoenixInterpreter", - "zeppelin.notebook.dir": "/var/lib/smartsense/activity-explorer/notebook", - "zeppelin.notebook.homescreen.hide": "false", - "zeppelin.notebook.storage": "org.apache.zeppelin.notebook.repo.VFSNotebookRepo", - "zeppelin.server.addr": "0.0.0.0", - "zeppelin.server.allowed.origins": "*", - "zeppelin.server.context.path": "/", - "zeppelin.server.port": "9060", - "zeppelin.ssl": "false", - "zeppelin.ssl.client.auth": "false", - "zeppelin.ssl.keystore.path": 
"/var/lib/smartsense/activity-explorer/keystore", - "zeppelin.ssl.keystore.type": "JKS", - "zeppelin.ssl.truststore.path": "/var/lib/smartsense/activity-explorer/truststore", - "zeppelin.ssl.truststore.type": "JKS", - "zeppelin.war.tempdir": "/var/lib/smartsense/activity-explorer/webapp", - "zeppelin.websocket.max.text.message.size": "1024000" - }, - "ams-env": { - "ambari_metrics_user": "ams", - "metrics_collector_heapsize": "512", - "metrics_collector_log_dir": "/var/log/ambari-metrics-collector", - "metrics_collector_pid_dir": "/var/run/ambari-metrics-collector", - "metrics_monitor_log_dir": "/var/log/ambari-metrics-monitor", - "metrics_monitor_pid_dir": "/var/run/ambari-metrics-monitor" - }, - "ams-grafana-env": { - "metrics_grafana_data_dir": "/var/lib/ambari-metrics-grafana", - "metrics_grafana_log_dir": "/var/log/ambari-metrics-grafana", - "metrics_grafana_pid_dir": "/var/run/ambari-metrics-grafana", - "metrics_grafana_username": "admin" - }, - "ams-grafana-ini": { - "cert_file": "/etc/ambari-metrics-grafana/conf/ams-grafana.crt", - "cert_key": "/etc/ambari-metrics-grafana/conf/ams-grafana.key", - "port": "3000", - "protocol": "http" - }, - "ams-hbase-env": { - "hbase_classpath_additional": "", - "hbase_log_dir": "/var/log/ambari-metrics-collector", - "hbase_master_heapsize": "768", - "hbase_master_maxperm_size": "128", - "hbase_master_xmn_size": "256", - "hbase_pid_dir": "/var/run/ambari-metrics-collector/", - "hbase_regionserver_heapsize": "768", - "hbase_regionserver_shutdown_timeout": "30", - "hbase_regionserver_xmn_ratio": "0.2", - "max_open_files_limit": "32768", - "regionserver_xmn_size": "128" - }, - "ams-hbase-policy": { - "security.admin.protocol.acl": "*", - "security.client.protocol.acl": "*", - "security.masterregion.protocol.acl": "*" - }, - "ams-hbase-security-site": { - "ams.zookeeper.keytab": "", - "ams.zookeeper.principal": "", - "hadoop.security.authentication": "", - "hbase.coprocessor.master.classes": "", - "hbase.coprocessor.region.classes": "", - "hbase.master.kerberos.principal": "", - "hbase.master.keytab.file": "", - "hbase.myclient.keytab": "", - "hbase.myclient.principal": "", - "hbase.regionserver.kerberos.principal": "", - "hbase.regionserver.keytab.file": "", - "hbase.security.authentication": "", - "hbase.security.authorization": "", - "hbase.zookeeper.property.authProvider.1": "", - "hbase.zookeeper.property.jaasLoginRenew": "", - "hbase.zookeeper.property.kerberos.removeHostFromPrincipal": "", - "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal": "" - }, - "ams-hbase-site": { - "dfs.client.read.shortcircuit": "true", - "hbase.client.scanner.caching": "10000", - "hbase.client.scanner.timeout.period": "300000", - "hbase.cluster.distributed": "false", - "hbase.hregion.majorcompaction": "0", - "hbase.hregion.max.filesize": "4294967296", - "hbase.hregion.memstore.block.multiplier": "4", - "hbase.hregion.memstore.flush.size": "134217728", - "hbase.hstore.blockingStoreFiles": "200", - "hbase.hstore.flusher.count": "2", - "hbase.local.dir": "${hbase.tmp.dir}/local", - "hbase.master.info.bindAddress": "0.0.0.0", - "hbase.master.info.port": "61310", - "hbase.master.normalizer.class": "org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer", - "hbase.master.port": "61300", - "hbase.master.wait.on.regionservers.mintostart": "1", - "hbase.normalizer.enabled": "false", - "hbase.normalizer.period": "600000", - "hbase.regionserver.global.memstore.lowerLimit": "0.3", - "hbase.regionserver.global.memstore.upperLimit": "0.35", - 
"hbase.regionserver.info.port": "61330", - "hbase.regionserver.port": "61320", - "hbase.regionserver.thread.compaction.large": "2", - "hbase.regionserver.thread.compaction.small": "3", - "hbase.replication": "false", - "hbase.rootdir": "file:///var/lib/ambari-metrics-collector/hbase", - "hbase.rpc.timeout": "300000", - "hbase.snapshot.enabled": "false", - "hbase.superuser": "activity_explorer,activity_analyzer", - "hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp", - "hbase.zookeeper.leaderport": "61388", - "hbase.zookeeper.peerport": "61288", - "hbase.zookeeper.property.clientPort": "{{zookeeper_clientPort}}", - "hbase.zookeeper.property.dataDir": "${hbase.tmp.dir}/zookeeper", - "hbase.zookeeper.property.tickTime": "6000", - "hbase.zookeeper.quorum": "{{zookeeper_quorum_hosts}}", - "hfile.block.cache.size": "0.3", - "phoenix.coprocessor.maxMetaDataCacheSize": "20480000", - "phoenix.coprocessor.maxServerCacheTimeToLiveMs": "60000", - "phoenix.groupby.maxCacheSize": "307200000", - "phoenix.mutate.batchSize": "10000", - "phoenix.query.keepAliveMs": "300000", - "phoenix.query.maxGlobalMemoryPercentage": "15", - "phoenix.query.rowKeyOrderSaltedTable": "true", - "phoenix.query.spoolThresholdBytes": "20971520", - "phoenix.query.timeoutMs": "300000", - "phoenix.sequence.saltBuckets": "2", - "phoenix.spool.directory": "${hbase.tmp.dir}/phoenix-spool", - "zookeeper.session.timeout": "120000", - "zookeeper.session.timeout.localHBaseCluster": "120000", - "zookeeper.znode.parent": "/ams-hbase-unsecure" - }, - "ams-site": { - "phoenix.query.maxGlobalMemoryPercentage": "25", - "phoenix.spool.directory": "/tmp", - "timeline.metrics.aggregator.checkpoint.dir": "/var/lib/ambari-metrics-collector/checkpoint", - "timeline.metrics.aggregators.skip.blockcache.enabled": "false", - "timeline.metrics.cache.commit.interval": "3", - "timeline.metrics.cache.enabled": "true", - "timeline.metrics.cache.size": "150", - "timeline.metrics.cluster.aggregate.splitpoints": "mapred.ShuffleMetrics.ShuffleOutputsFailed", - "timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.daily.disabled": "false", - "timeline.metrics.cluster.aggregator.daily.interval": "86400", - "timeline.metrics.cluster.aggregator.daily.ttl": "63072000", - "timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.hourly.disabled": "false", - "timeline.metrics.cluster.aggregator.hourly.interval": "3600", - "timeline.metrics.cluster.aggregator.hourly.ttl": "31536000", - "timeline.metrics.cluster.aggregator.interpolation.enabled": "true", - "timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.minute.disabled": "false", - "timeline.metrics.cluster.aggregator.minute.interval": "300", - "timeline.metrics.cluster.aggregator.minute.ttl": "2592000", - "timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier": "2", - "timeline.metrics.cluster.aggregator.second.disabled": "false", - "timeline.metrics.cluster.aggregator.second.interval": "120", - "timeline.metrics.cluster.aggregator.second.timeslice.interval": "30", - "timeline.metrics.cluster.aggregator.second.ttl": "259200", - "timeline.metrics.daily.aggregator.minute.interval": "86400", - "timeline.metrics.hbase.compression.scheme": "SNAPPY", - "timeline.metrics.hbase.data.block.encoding": "FAST_DIFF", - "timeline.metrics.hbase.fifo.compaction.enabled": "true", - 
"timeline.metrics.hbase.init.check.enabled": "true", - "timeline.metrics.host.aggregate.splitpoints": "mapred.ShuffleMetrics.ShuffleOutputsFailed", - "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier": "2", - "timeline.metrics.host.aggregator.daily.disabled": "false", - "timeline.metrics.host.aggregator.daily.ttl": "31536000", - "timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier": "2", - "timeline.metrics.host.aggregator.hourly.disabled": "false", - "timeline.metrics.host.aggregator.hourly.interval": "3600", - "timeline.metrics.host.aggregator.hourly.ttl": "2592000", - "timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier": "2", - "timeline.metrics.host.aggregator.minute.disabled": "false", - "timeline.metrics.host.aggregator.minute.interval": "300", - "timeline.metrics.host.aggregator.minute.ttl": "604800", - "timeline.metrics.host.aggregator.ttl": "86400", - "timeline.metrics.service.checkpointDelay": "60", - "timeline.metrics.service.cluster.aggregator.appIds": "datanode,nodemanager,hbase", - "timeline.metrics.service.default.result.limit": "15840", - "timeline.metrics.service.handler.thread.count": "20", - "timeline.metrics.service.http.policy": "HTTP_ONLY", - "timeline.metrics.service.operation.mode": "embedded", - "timeline.metrics.service.resultset.fetchSize": "2000", - "timeline.metrics.service.rpc.address": "0.0.0.0:60200", - "timeline.metrics.service.use.groupBy.aggregators": "true", - "timeline.metrics.service.watcher.delay": "30", - "timeline.metrics.service.watcher.disabled": "false", - "timeline.metrics.service.watcher.initial.delay": "600", - "timeline.metrics.service.watcher.timeout": "30", - "timeline.metrics.service.webapp.address": "%HOSTGROUP::host_group_1%:6188", - "timeline.metrics.sink.collection.period": "10", - "timeline.metrics.sink.report.interval": "60" - }, - "ams-ssl-client": { - "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", - "ssl.client.truststore.type": "jks" - }, - "ams-ssl-server": { - "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", - "ssl.server.keystore.type": "jks", - "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", - "ssl.server.truststore.reload.interval": "10000", - "ssl.server.truststore.type": "jks" - }, - "anonymization-rules": { - "anonymization-rules-content": "{ \"rules\":[ { \"name\":\"ip_address\", \"ruleId\": \"Pattern\", \"path\":null, \"pattern\": \"([^a-z0-9\\\\.]|^)[0-9]{1,3}\\\\.[0-9]{1,3}\\\\.[0-9]{1,3}\\\\.[0-9]{1,3}([^a-z0-9\\\\.\\\\-]|(\\\\.[^0-9])|$)\", \"extract\": \"[ :\\\\/]?([0-9\\\\.]+)[ :\\\\/]?\", \"excludes\": [\"hdp-select*.*\", \"*version.txt\"], \"shared\":true }, { \"name\":\"domain\", \"ruleId\": \"Domain\", \"path\":null, \"pattern\": \"$DOMAIN_RULE$\", \"shared\":true }, { \"name\":\"delete_oozie_jdbc_password\", \"ruleId\": \"Property\", \"path\":\"oozie-site.xml\", \"property\": \"oozie.service.JPAService.jdbc.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_sqoop_metastore_password\", \"ruleId\": \"Property\", \"path\":\"sqoop-site.xml\", \"property\": \"sqoop.metastore.client.autoconnect.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_hive_metastore_password\", \"ruleId\": \"Property\", \"path\":\"hive-site.xml\", \"property\": \"javax.jdo.option.ConnectionPassword\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_s3_accesskey\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": 
\"fs.s3.awsAccessKeyId\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_s3_secret_accesskey\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": \"fs.s3.awsSecretAccessKey\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_s3n_accesskey\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": \"fs.s3n.awsAccessKeyId\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_s3n_secret_accesskey\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": \"fs.s3n.awsSecretAccessKey\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_azure_account_key\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": \"fs.azure.account.key.*\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"delete_ldap_password\", \"ruleId\": \"Property\", \"path\":\"core-site.xml\", \"property\": \"hadoop.security.group.mapping.ldap.bind.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_ssl_client_keystore_pwd\", \"ruleId\": \"Property\", \"path\":\"ssl-client.xml\", \"property\": \"ssl.client.keystore.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_ssl_client_truststore_pwd\", \"ruleId\": \"Property\", \"path\":\"ssl-client.xml\", \"property\": \"ssl.client.truststore.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_ssl_server_keystore_keypwd\", \"ruleId\": \"Property\", \"path\":\"ssl-server.xml\", \"property\": \"ssl.server.keystore.keypassword\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_ssl_server_keystore_pwd\", \"ruleId\": \"Property\", \"path\":\"ssl-server.xml\", \"property\": \"ssl.server.keystore.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_ssl_server_truststore_pwd\", \"ruleId\": \"Property\", \"path\":\"ssl-server.xml\", \"property\": \"ssl.server.truststore.password\", \"operation\":\"REPLACE\", \"value\":\"Hidden\" }, { \"name\":\"hide_oozie_pwd_in_java_process_info\", \"ruleId\": \"Pattern\", \"path\":\"java_process.txt\", \"pattern\": \"oozie.https.keystore.pass=([^ ]*)\", \"extract\": \"=([^ ]*)\", \"shared\":false }, { \"name\":\"hide_oozie_pwd_in_process_info\", \"ruleId\": \"Pattern\", \"path\":\"pid.txt\", \"pattern\": \"oozie.https.keystore.pass=([^ ]*)\", \"extract\": \"=([^ ]*)\", \"shared\":false }, { \"name\":\"hide_oozie_pwd_in_ambariagent_log\", \"ruleId\": \"Pattern\", \"path\":\"ambari-agent.log\", \"pattern\": \"oozie.https.keystore.pass=([^ ]*)\", \"extract\": \"=([^ ]*)\", \"shared\":false }, { \"name\":\"delete_oozie_https_keystore_pass\", \"ruleId\": \"Pattern\", \"path\":\"oozie-env.cmd\", \"pattern\":\"OOZIE_HTTPS_KEYSTORE_PASS=([^ ]*)\", \"extract\": \"=([^ ]*)\", \"shared\":false }, { \"name\":\"java_process_ganglia_password\", \"ruleId\": \"Pattern\", \"path\":\"java_process.txt\", \"pattern\":\"ganglia_password=([^ ]*)\", \"extract\": \"=([^ ]*)\", \"shared\":false }, { \"name\":\"hide_ssn_from_logs\", \"ruleId\": \"Pattern\", \"path\":\"*\\\\.log*\", \"pattern\": \"(^|[^0-9x])[0-9x]{3}-[0-9x]{2}-[0-9]{4}($|[^0-9x])\", \"extract\": \"(? 
7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:MetaspaceSize=1024m -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}", - "llap_log_level": "INFO", - "llap_queue_capacity": "0", - "num_llap_nodes": "1", - "num_retries_for_checking_llap_status": "10", - "slider_am_container_mb": "341" - }, - "hive-interactive-site": { - "hive.driver.parallel.compilation": "true", - "hive.exec.orc.split.strategy": "HYBRID", - "hive.execution.engine": "tez", - "hive.execution.mode": "llap", - "hive.llap.auto.allow.uber": "false", - "hive.llap.client.consistent.splits": "true", - "hive.llap.daemon.allow.permanent.fns": "false", - "hive.llap.daemon.num.executors": "1", - "hive.llap.daemon.queue.name": "default", - "hive.llap.daemon.rpc.port": "15001", - "hive.llap.daemon.service.hosts": "@llap0", - "hive.llap.daemon.task.scheduler.enable.preemption": "true", - "hive.llap.daemon.vcpus.per.instance": "${hive.llap.daemon.num.executors}", - "hive.llap.daemon.yarn.container.mb": "341", - "hive.llap.daemon.yarn.shuffle.port": "15551", - "hive.llap.execution.mode": "all", - "hive.llap.io.enabled": "true", - "hive.llap.io.memory.mode": "", - "hive.llap.io.memory.size": "0", - "hive.llap.io.threadpool.size": "2", - "hive.llap.io.use.lrfu": "true", - "hive.llap.management.rpc.port": "15004", - "hive.llap.object.cache.enabled": "true", - "hive.llap.task.scheduler.locality.delay": "-1", - "hive.llap.zk.sm.connectionString": "%HOSTGROUP::host_group_1%:2181", - "hive.mapjoin.hybridgrace.hashtable": "false", - "hive.metastore.event.listeners": "", - "hive.metastore.uris": "", - "hive.optimize.dynamic.partition.hashjoin": "true", - "hive.prewarm.enabled": "false", - "hive.server2.enable.doAs": "false", - "hive.server2.tez.default.queues": "default", - "hive.server2.tez.initialize.default.sessions": "true", - "hive.server2.tez.sessions.per.default.queue": "1", - "hive.server2.thrift.http.port": "10501", - "hive.server2.thrift.port": "10500", - "hive.server2.webui.port": "10502", - "hive.server2.webui.use.ssl": "false", - "hive.server2.zookeeper.namespace": "hiveserver2-hive2", - "hive.tez.bucket.pruning": "true", - "hive.tez.exec.print.summary": "true", - "hive.tez.input.generate.consistent.splits": "true", - "hive.vectorized.execution.mapjoin.minmax.enabled": "true", - "hive.vectorized.execution.mapjoin.native.enabled": "true", - "hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled": "true", - "hive.vectorized.execution.reduce.enabled": "true", - "llap.shuffle.connection-keep-alive.enable": "true", - "llap.shuffle.connection-keep-alive.timeout": "60" - }, - "hive-site": { - "ambari.hive.db.schema.name": "hive", - "atlas.hook.hive.maxThreads": "1", - "atlas.hook.hive.minThreads": "1", - "atlas.rest.address": "http://%HOSTGROUP::host_group_1%:21000", - "datanucleus.autoCreateSchema": "false", - "datanucleus.cache.level2.type": "none", - "datanucleus.fixedDatastore": "true", - "hive.auto.convert.join": "true", - "hive.auto.convert.join.noconditionaltask": "true", - "hive.auto.convert.join.noconditionaltask.size": "286331153", - "hive.auto.convert.sortmerge.join": "true", - "hive.auto.convert.sortmerge.join.to.mapjoin": "false", - "hive.cbo.enable": "true", - "hive.cli.print.header": "false", - "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore", - "hive.cluster.delegation.token.store.zookeeper.connectString": 
"%HOSTGROUP::host_group_1%:2181", - "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation", - "hive.compactor.abortedtxn.threshold": "1000", - "hive.compactor.check.interval": "300L", - "hive.compactor.delta.num.threshold": "10", - "hive.compactor.delta.pct.threshold": "0.1f", - "hive.compactor.initiator.on": "false", - "hive.compactor.worker.threads": "0", - "hive.compactor.worker.timeout": "86400L", - "hive.compute.query.using.stats": "true", - "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role", - "hive.convert.join.bucket.mapjoin.tez": "false", - "hive.default.fileformat": "TextFile", - "hive.default.fileformat.managed": "TextFile", - "hive.enforce.bucketing": "false", - "hive.enforce.sorting": "true", - "hive.enforce.sortmergebucketmapjoin": "true", - "hive.exec.compress.intermediate": "false", - "hive.exec.compress.output": "false", - "hive.exec.dynamic.partition": "true", - "hive.exec.dynamic.partition.mode": "strict", - "hive.exec.failure.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook", - "hive.exec.max.created.files": "100000", - "hive.exec.max.dynamic.partitions": "5000", - "hive.exec.max.dynamic.partitions.pernode": "2000", - "hive.exec.orc.compression.strategy": "SPEED", - "hive.exec.orc.default.compress": "ZLIB", - "hive.exec.orc.default.stripe.size": "67108864", - "hive.exec.orc.encoding.strategy": "SPEED", - "hive.exec.parallel": "false", - "hive.exec.parallel.thread.number": "8", - "hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook,org.apache.atlas.hive.hook.HiveHook", - "hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook", - "hive.exec.reducers.bytes.per.reducer": "67108864", - "hive.exec.reducers.max": "1009", - "hive.exec.scratchdir": "/tmp/hive", - "hive.exec.submit.local.task.via.child": "true", - "hive.exec.submitviachild": "false", - "hive.execution.engine": "tez", - "hive.fetch.task.aggr": "false", - "hive.fetch.task.conversion": "more", - "hive.fetch.task.conversion.threshold": "1073741824", - "hive.limit.optimize.enable": "true", - "hive.limit.pushdown.memory.usage": "0.04", - "hive.map.aggr": "true", - "hive.map.aggr.hash.force.flush.memory.threshold": "0.9", - "hive.map.aggr.hash.min.reduction": "0.5", - "hive.map.aggr.hash.percentmemory": "0.5", - "hive.mapjoin.bucket.cache.size": "10000", - "hive.mapjoin.optimized.hashtable": "true", - "hive.mapred.reduce.tasks.speculative.execution": "false", - "hive.merge.mapfiles": "true", - "hive.merge.mapredfiles": "false", - "hive.merge.orcfile.stripe.level": "true", - "hive.merge.rcfile.block.level": "true", - "hive.merge.size.per.task": "256000000", - "hive.merge.smallfiles.avgsize": "16000000", - "hive.merge.tezfiles": "false", - "hive.metastore.authorization.storage.checks": "false", - "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order", - "hive.metastore.client.connect.retry.delay": "5s", - "hive.metastore.client.socket.timeout": "1800s", - "hive.metastore.connect.retries": "24", - "hive.metastore.execute.setugi": "true", - "hive.metastore.failure.retries": "24", - "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab", - "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM", - "hive.metastore.pre.event.listeners": "org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener", - "hive.metastore.sasl.enabled": "false", - "hive.metastore.server.max.threads": "100000", - "hive.metastore.uris": 
"thrift://%HOSTGROUP::host_group_1%:9083", - "hive.metastore.warehouse.dir": "/apps/hive/warehouse", - "hive.optimize.bucketmapjoin": "true", - "hive.optimize.bucketmapjoin.sortedmerge": "false", - "hive.optimize.constant.propagation": "true", - "hive.optimize.index.filter": "true", - "hive.optimize.metadataonly": "true", - "hive.optimize.null.scan": "true", - "hive.optimize.reducededuplication": "true", - "hive.optimize.reducededuplication.min.reducer": "4", - "hive.optimize.sort.dynamic.partition": "false", - "hive.orc.compute.splits.num.threads": "10", - "hive.orc.splits.include.file.footer": "false", - "hive.prewarm.enabled": "false", - "hive.prewarm.numcontainers": "3", - "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator", - "hive.security.authorization.enabled": "false", - "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory", - "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator", - "hive.security.metastore.authorization.auth.reads": "true", - "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", - "hive.server2.allow.user.substitution": "true", - "hive.server2.authentication": "NONE", - "hive.server2.authentication.spnego.keytab": "HTTP/_HOST@EXAMPLE.COM", - "hive.server2.authentication.spnego.principal": "/etc/security/keytabs/spnego.service.keytab", - "hive.server2.enable.doAs": "true", - "hive.server2.logging.operation.enabled": "true", - "hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs", - "hive.server2.max.start.attempts": "5", - "hive.server2.support.dynamic.service.discovery": "true", - "hive.server2.table.type.mapping": "CLASSIC", - "hive.server2.tez.default.queues": "default", - "hive.server2.tez.initialize.default.sessions": "false", - "hive.server2.tez.sessions.per.default.queue": "1", - "hive.server2.thrift.http.path": "cliservice", - "hive.server2.thrift.http.port": "10001", - "hive.server2.thrift.max.worker.threads": "500", - "hive.server2.thrift.port": "10000", - "hive.server2.thrift.sasl.qop": "auth", - "hive.server2.transport.mode": "binary", - "hive.server2.use.SSL": "false", - "hive.server2.zookeeper.namespace": "hiveserver2", - "hive.smbjoin.cache.rows": "10000", - "hive.stats.autogather": "true", - "hive.stats.dbclass": "fs", - "hive.stats.fetch.column.stats": "true", - "hive.stats.fetch.partition.stats": "true", - "hive.support.concurrency": "false", - "hive.tez.auto.reducer.parallelism": "true", - "hive.tez.container.size": "1024", - "hive.tez.cpu.vcores": "-1", - "hive.tez.dynamic.partition.pruning": "true", - "hive.tez.dynamic.partition.pruning.max.data.size": "104857600", - "hive.tez.dynamic.partition.pruning.max.event.size": "1048576", - "hive.tez.input.format": "org.apache.hadoop.hive.ql.io.HiveInputFormat", - "hive.tez.java.opts": "-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps", - "hive.tez.log.level": "INFO", - "hive.tez.max.partition.factor": "2.0", - "hive.tez.min.partition.factor": "0.25", - "hive.tez.smb.number.waves": "0.5", - "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager", - "hive.txn.max.open.batch": "1000", - "hive.txn.timeout": "300", - "hive.user.install.directory": "/user/", - "hive.vectorized.execution.enabled": "true", - 
"hive.vectorized.execution.reduce.enabled": "false", - "hive.vectorized.groupby.checkinterval": "4096", - "hive.vectorized.groupby.flush.percent": "0.1", - "hive.vectorized.groupby.maxentries": "100000", - "hive.zookeeper.client.port": "2181", - "hive.zookeeper.namespace": "hive_zookeeper_namespace", - "hive.zookeeper.quorum": "%HOSTGROUP::host_group_1%:2181", - "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", - "javax.jdo.option.ConnectionURL": "jdbc:mysql://%HOSTGROUP::host_group_1%/hive?createDatabaseIfNotExist=true", - "javax.jdo.option.ConnectionUserName": "hive" - }, - "hivemetastore-site": { - "hive.metastore.metrics.enabled": "true", - "hive.service.metrics.file.location": "/var/log/hive/hivemetastore-report.json", - "hive.service.metrics.hadoop2.component": "hivemetastore", - "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2" - }, - "hiveserver2-interactive-site": { - "hive.async.log.enabled": "false", - "hive.metastore.metrics.enabled": "true", - "hive.service.metrics.file.location": "/var/log/hive/hiveserver2Interactive-report.json", - "hive.service.metrics.hadoop2.component": "hiveserver2", - "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2" - }, - "hiveserver2-site": { - "hive.metastore.metrics.enabled": "true", - "hive.security.authorization.enabled": "false", - "hive.service.metrics.file.location": "/var/log/hive/hiveserver2-report.json", - "hive.service.metrics.hadoop2.component": "hiveserver2", - "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2" - }, - "hst-agent-conf": { - "agent.loglevel": "INFO", - "agent.tmp_dir": "/var/lib/smartsense/hst-agent/data/tmp", - "bundle.logs_to_capture": "(.*).log$,(.*).out$", - "server.connection_retry_count": "100", - "server.connection_retry_interval": "10", - "upload.retry_count": "100", - "upload.retry_interval": "15" - }, - "hst-log4j": { - "hst-log4j-content": "# Copyright 2014 The Apache Software Foundation # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # \"License\"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Define some default values that can be overridden by system properties # Root logger option log4j.rootLogger=INFO,file log4j.appender.file=org.apache.log4j.RollingFileAppender log4j.appender.file.File={{hst_log_dir}}/${log.file.name} log4j.appender.file.MaxFileSize=30MB log4j.appender.file.MaxBackupIndex=10 log4j.appender.file.layout=org.apache.log4j.PatternLayout log4j.appender.file.layout.ConversionPattern=%d{DATE} %5p [%t] %c{1}:%L - %m%n # HST logger log4j.logger.com.hortonworks=INFO com.github.oxo42.stateless4j=WARN log4j.logger.com.sun.jersey=WARN log4j.logger.org.eclipse.jetty.server=INFO", - "hst_log_dir": "/var/log/hst" - }, - "hst-server-conf": { - "agent.request.processing.timeout": "7200", - "agent.request.syncup.interval": "180", - "client.threadpool.size.max": "40", - "customer.account.name": "unspecified", - "customer.notification.email": "unspecified", - "customer.smartsense.id": "unspecified", - "gateway.host": "embedded", - "gateway.port": "9451", - "gateway.registration.port": "9450", - "server.port": "9000", - "server.storage.dir": "/var/lib/smartsense/hst-server/data", - "server.tmp.dir": "/var/lib/smartsense/hst-server/tmp" - }, - "infra-solr-client-log4j": { - "infra_solr_client_log_dir": "/var/log/ambari-infra-solr-client" - }, - "infra-solr-env": { - "infra_solr_datadir": "/opt/ambari_infra_solr/data", - "infra_solr_jmx_port": "18886", - "infra_solr_kerberos_keytab": "/etc/security/keytabs/infra_solr.service.keytab", - "infra_solr_kerberos_name_rules": "DEFAULT", - "infra_solr_kerberos_principal": "infra-solr", - "infra_solr_keystore_location": "/etc/security/serverKeys/infra.solr.keyStore.jks", - "infra_solr_keystore_type": "jks", - "infra_solr_log_dir": "/var/log/ambari-infra-solr", - "infra_solr_maxmem": "2048", - "infra_solr_minmem": "1024", - "infra_solr_pid_dir": "/var/run/ambari-infra-solr", - "infra_solr_port": "8886", - "infra_solr_ssl_enabled": "false", - "infra_solr_truststore_location": "/etc/security/serverKeys/infra.solr.trustStore.jks", - "infra_solr_truststore_type": "jks", - "infra_solr_user": "infra-solr", - "infra_solr_web_kerberos_keytab": "/etc/security/keytabs/spnego.service.keytab", - "infra_solr_web_kerberos_principal": "HTTP/_HOST@EXAMPLE.COM", - "infra_solr_znode": "/infra-solr" - }, - "kafka-broker": { - "auto.create.topics.enable": "true", - "auto.leader.rebalance.enable": "true", - "compression.type": "producer", - "controlled.shutdown.enable": "true", - "controlled.shutdown.max.retries": "3", - "controlled.shutdown.retry.backoff.ms": "5000", - "controller.message.queue.size": "10", - "controller.socket.timeout.ms": "30000", - "default.replication.factor": "1", - "delete.topic.enable": "false", - "external.kafka.metrics.exclude.prefix": "kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec", - "external.kafka.metrics.include.prefix": "kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile,kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile,kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile,kafka.network.RequestMetrics.RequestsPerSec.request", - "fetch.purgatory.purge.interval.requests": "10000", - "kafka.ganglia.metrics.group": "kafka", - "kafka.ganglia.metrics.port": "8671", - "kafka.ganglia.metrics.reporter.enabled": "true", - "kafka.metrics.reporters": "org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter", - "kafka.timeline.metrics.host": "{{metric_collector_host}}", - 
"kafka.timeline.metrics.maxRowCacheSize": "10000", - "kafka.timeline.metrics.port": "{{metric_collector_port}}", - "kafka.timeline.metrics.protocol": "{{metric_collector_protocol}}", - "kafka.timeline.metrics.reporter.enabled": "true", - "kafka.timeline.metrics.reporter.sendInterval": "5900", - "kafka.timeline.metrics.truststore.path": "{{metric_truststore_path}}", - "kafka.timeline.metrics.truststore.type": "{{metric_truststore_type}}", - "leader.imbalance.check.interval.seconds": "300", - "leader.imbalance.per.broker.percentage": "10", - "listeners": "PLAINTEXT://localhost:6667", - "log.cleanup.interval.mins": "10", - "log.dirs": "/kafka-logs", - "log.index.interval.bytes": "4096", - "log.index.size.max.bytes": "10485760", - "log.retention.bytes": "-1", - "log.retention.hours": "168", - "log.roll.hours": "168", - "log.segment.bytes": "1073741824", - "message.max.bytes": "1000000", - "min.insync.replicas": "1", - "num.io.threads": "8", - "num.network.threads": "3", - "num.partitions": "1", - "num.recovery.threads.per.data.dir": "1", - "num.replica.fetchers": "1", - "offset.metadata.max.bytes": "4096", - "offsets.commit.required.acks": "-1", - "offsets.commit.timeout.ms": "5000", - "offsets.load.buffer.size": "5242880", - "offsets.retention.check.interval.ms": "600000", - "offsets.retention.minutes": "86400000", - "offsets.topic.compression.codec": "0", - "offsets.topic.num.partitions": "50", - "offsets.topic.replication.factor": "3", - "offsets.topic.segment.bytes": "104857600", - "port": "6667", - "producer.purgatory.purge.interval.requests": "10000", - "queued.max.requests": "500", - "replica.fetch.max.bytes": "1048576", - "replica.fetch.min.bytes": "1", - "replica.fetch.wait.max.ms": "500", - "replica.high.watermark.checkpoint.interval.ms": "5000", - "replica.lag.max.messages": "4000", - "replica.lag.time.max.ms": "10000", - "replica.socket.receive.buffer.bytes": "65536", - "replica.socket.timeout.ms": "30000", - "socket.receive.buffer.bytes": "102400", - "socket.request.max.bytes": "104857600", - "socket.send.buffer.bytes": "102400", - "zookeeper.connect": "%HOSTGROUP::host_group_1%:2181", - "zookeeper.connection.timeout.ms": "25000", - "zookeeper.session.timeout.ms": "30000", - "zookeeper.sync.time.ms": "2000" - }, - "kafka-env": { - "is_supported_kafka_ranger": "true", - "kafka_log_dir": "/var/log/kafka", - "kafka_pid_dir": "/var/run/kafka", - "kafka_user": "kafka", - "kafka_user_nofile_limit": "128000", - "kafka_user_nproc_limit": "65536" - }, - "knox-env": { - "knox_group": "knox", - "knox_pid_dir": "/var/run/knox", - "knox_user": "knox" - }, - "livy-conf": { - "livy.environment": "production", - "livy.impersonation.enabled": "true", - "livy.server.csrf_protection.enabled": "true", - "livy.server.port": "8998", - "livy.server.session.timeout": "3600000" - }, - "livy-env": { - "livy_group": "livy", - "livy_log_dir": "/var/log/livy", - "livy_pid_dir": "/var/run/livy", - "livy_user": "livy", - "spark_home": "/usr/hdp/current/spark-client" - }, - "logfeeder-env": { - "logfeeder_kerberos_keytab": "/etc/security/keytabs/logfeeder.service.keytab", - "logfeeder_kerberos_principal": "logfeeder", - "logfeeder_keystore_location": "/etc/security/serverKeys/logsearch.keyStore.jks", - "logfeeder_keystore_type": "jks", - "logfeeder_log_dir": "/var/log/ambari-logsearch-logfeeder", - "logfeeder_max_mem": "512m", - "logfeeder_pid_dir": "/var/run/ambari-logsearch-logfeeder", - "logfeeder_solr_audit_logs_enable": "true", - "logfeeder_solr_service_logs_enable": "true", - 
"logfeeder_truststore_location": "/etc/security/serverKeys/logsearch.trustStore.jks", - "logfeeder_truststore_type": "jks" - }, - "logfeeder-properties": { - "logfeeder.checkpoint.folder": "/etc/ambari-logsearch-logfeeder/conf/checkpoints", - "logfeeder.config.files": "{default_config_files}", - "logfeeder.log.filter.enable": "true", - "logfeeder.metrics.collector.hosts": "{metrics_collector_hosts}", - "logfeeder.solr.config.interval": "5" - }, - "logsearch-admin-json": { - "logsearch_admin_username": "ambari_logsearch_admin" - }, - "logsearch-audit_logs-solrconfig": { - "logsearch_audit_logs_max_retention": "7", - "logsearch_audit_logs_merge_factor": "5" - }, - "logsearch-env": { - "logsearch_app_max_memory": "1024", - "logsearch_debug_enabled": "false", - "logsearch_debug_port": "5005", - "logsearch_kerberos_keytab": "/etc/security/keytabs/logsearch.service.keytab", - "logsearch_kerberos_principal": "logsearch", - "logsearch_keystore_location": "/etc/security/serverKeys/logsearch.keyStore.jks", - "logsearch_keystore_type": "jks", - "logsearch_log_dir": "/var/log/ambari-logsearch-portal", - "logsearch_pid_dir": "/var/run/ambari-logsearch-portal", - "logsearch_solr_audit_logs_zk_node": "{infra_solr_znode}", - "logsearch_solr_audit_logs_zk_quorum": "{zookeeper_quorum}", - "logsearch_truststore_location": "/etc/security/serverKeys/logsearch.trustStore.jks", - "logsearch_truststore_type": "jks", - "logsearch_ui_port": "61888", - "logsearch_ui_protocol": "http", - "logsearch_user": "logsearch" - }, - "logsearch-properties": { - "logsearch.audit.logs.split.interval.mins": "15", - "logsearch.collection.audit.logs.numshards": "2", - "logsearch.collection.audit.logs.replication.factor": "1", - "logsearch.collection.service.logs.numshards": "2", - "logsearch.collection.service.logs.replication.factor": "1", - "logsearch.external.auth.enabled": "true", - "logsearch.external.auth.host_url": "{ambari_server_auth_host_url}", - "logsearch.external.auth.login_url": "/api/v1/users/$USERNAME/privileges?fields=*", - "logsearch.logfeeder.include.default.level": "FATAL,ERROR,WARN", - "logsearch.service.logs.fields": "logtime,level,event_count,ip,type,seq_num,path,file,line_number,host,log_message,id", - "logsearch.service.logs.split.interval.mins": "15", - "logsearch.solr.collection.audit.logs": "audit_logs", - "logsearch.solr.collection.service.logs": "hadoop_logs", - "logsearch.solr.metrics.collector.hosts": "{metrics_collector_hosts}" - }, - "logsearch-service_logs-solrconfig": { - "logsearch_service_logs_max_retention": "7", - "logsearch_service_logs_merge_factor": "5" - }, - "mahout-env": { - "mahout_user": "mahout" - }, - "mapred-env": { - "jobhistory_heapsize": "900", - "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce", - "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", - "mapred_user": "mapred", - "mapred_user_nofile_limit": "32768", - "mapred_user_nproc_limit": "65536" - }, - "mapred-site": { - "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "mapreduce.am.max-attempts": "2", - "mapreduce.application.classpath": 
"$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", - "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework", - "mapreduce.cluster.administrators": "hadoop", - "mapreduce.framework.name": "yarn", - "mapreduce.job.counters.max": "130", - "mapreduce.job.emit-timeline-data": "false", - "mapreduce.job.queuename": "default", - "mapreduce.job.reduce.slowstart.completedmaps": "0.05", - "mapreduce.jobhistory.address": "%HOSTGROUP::host_group_1%:10020", - "mapreduce.jobhistory.bind-host": "0.0.0.0", - "mapreduce.jobhistory.done-dir": "/mr-history/done", - "mapreduce.jobhistory.http.policy": "HTTP_ONLY", - "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", - "mapreduce.jobhistory.recovery.enable": "true", - "mapreduce.jobhistory.recovery.store.class": "org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService", - "mapreduce.jobhistory.recovery.store.leveldb.path": "/hadoop/mapreduce/jhs", - "mapreduce.jobhistory.webapp.address": "%HOSTGROUP::host_group_1%:19888", - "mapreduce.map.java.opts": "-Xmx1228m", - "mapreduce.map.log.level": "INFO", - "mapreduce.map.memory.mb": "1536", - "mapreduce.map.output.compress": "false", - "mapreduce.map.sort.spill.percent": "0.7", - "mapreduce.map.speculative": "false", - "mapreduce.output.fileoutputformat.compress": "false", - "mapreduce.output.fileoutputformat.compress.type": "BLOCK", - "mapreduce.reduce.input.buffer.percent": "0.0", - "mapreduce.reduce.java.opts": "-Xmx1638m", - "mapreduce.reduce.log.level": "INFO", - "mapreduce.reduce.memory.mb": "2048", - "mapreduce.reduce.shuffle.fetch.retry.enabled": "1", - "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000", - "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000", - "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", - "mapreduce.reduce.shuffle.merge.percent": "0.66", - "mapreduce.reduce.shuffle.parallelcopies": "30", - "mapreduce.reduce.speculative": "false", - "mapreduce.shuffle.port": "13562", - "mapreduce.task.io.sort.factor": "100", - "mapreduce.task.io.sort.mb": "859", - "mapreduce.task.timeout": "300000", - "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}", - "yarn.app.mapreduce.am.command-opts": "-Xmx819m -Dhdp.version=${hdp.version}", - "yarn.app.mapreduce.am.log.level": "INFO", - "yarn.app.mapreduce.am.resource.mb": "1024", - "yarn.app.mapreduce.am.staging-dir": "/user" - }, - "oozie-env": { - "oozie_admin_port": "11001", - "oozie_admin_users": "{oozie_user}, oozie-admin,falcon", - "oozie_data_dir": "/hadoop/oozie/data", - "oozie_database": "New Derby Database", - "oozie_heapsize": "2048m", - "oozie_log_dir": "/var/log/oozie", - "oozie_permsize": "256m", - "oozie_pid_dir": "/var/run/oozie", - "oozie_tmp_dir": "/var/tmp/oozie", - "oozie_user": "oozie", - "oozie_user_nofile_limit": "32000", - "oozie_user_nproc_limit": "16000" - }, - "oozie-site": { - "oozie.action.retry.interval": "30", - "oozie.authentication.simple.anonymous.allowed": "true", - "oozie.authentication.type": "simple", - "oozie.base.url": 
"http://%HOSTGROUP::host_group_1%:11000/oozie", - "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials", - "oozie.db.schema.name": "oozie", - "oozie.service.AuthorizationService.security.enabled": "true", - "oozie.service.ELService.ext.functions.coord-action-create": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now, today=org.apache.oozie.extensions.OozieELExtensions#ph2_today, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear, latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo, formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-action-create-inst": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst, today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek_inst, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek_inst, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst, latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo, formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-action-start": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now, today=org.apache.oozie.extensions.OozieELExtensions#ph2_today, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear, latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest, future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future, dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn, instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime, dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset, formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-job-submit-data": 
"now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo, today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo, dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo, instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap, formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo, dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-job-submit-instances": "now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo, today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo, currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo, lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo, formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo, latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo", - "oozie.service.ELService.ext.functions.coord-sla-create": "instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.coord-sla-submit": "instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed, user=org.apache.oozie.coord.CoordELFunctions#coord_user", - "oozie.service.ELService.ext.functions.workflow": "now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo, today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo, yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo, currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo, lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo, currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo, lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo, formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo, latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo", - "oozie.service.HadoopAccessorService.hadoop.configurations": "*={{hadoop_conf_dir}}", - "oozie.service.HadoopAccessorService.kerberos.enabled": "false", - "oozie.service.HadoopAccessorService.supported.filesystems": "*", - "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver", - "oozie.service.JPAService.jdbc.username": 
"oozie", - "oozie.service.ProxyUserService.proxyuser.falcon.groups": "*", - "oozie.service.ProxyUserService.proxyuser.falcon.hosts": "*", - "oozie.service.SparkConfigurationService.spark.configurations": "*=spark-conf", - "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler", - "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService" - }, - "ranger-hbase-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hbase/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hbase/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "true" - }, - "ranger-hbase-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "hbase", - "common.name.for.certificate": "", - "policy_user": "ambari-qa", - "ranger-hbase-plugin-enabled": "No" - }, - "ranger-hbase-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hbase-client/conf/ranger-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hbase-client/conf/ranger-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}" - }, - "ranger-hbase-security": { - "ranger.plugin.hbase.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.hbase.policy.pollIntervalMs": "30000", - "ranger.plugin.hbase.policy.rest.ssl.config.file": "/etc/hbase/conf/ranger-policymgr-ssl.xml", - "ranger.plugin.hbase.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.hbase.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.hbase.service.name": "{{repo_name}}", - "xasecure.hbase.update.xapolicies.on.grant.revoke": "true" - }, - "ranger-hdfs-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "false" - }, - "ranger-hdfs-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "hadoop", - "common.name.for.certificate": "", - "hadoop.rpc.protection": "authentication", - "policy_user": "ambari-qa", - "ranger-hdfs-plugin-enabled": "No" - }, - "ranger-hdfs-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": 
"jceks://file{{credential_file}}" - }, - "ranger-hdfs-security": { - "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", - "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", - "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.hdfs.service.name": "{{repo_name}}", - "xasecure.add-hadoop-authorization": "true" - }, - "ranger-hive-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hive/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hive/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "false" - }, - "ranger-hive-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "hive", - "common.name.for.certificate": "", - "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver", - "policy_user": "ambari-qa" - }, - "ranger-hive-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hive-server2/conf/ranger-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hive-server2/conf/ranger-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}" - }, - "ranger-hive-security": { - "ranger.plugin.hive.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.hive.policy.pollIntervalMs": "30000", - "ranger.plugin.hive.policy.rest.ssl.config.file": "/usr/hdp/current/{{ranger_hive_component}}/conf/conf.server/ranger-policymgr-ssl.xml", - "ranger.plugin.hive.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.hive.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.hive.service.name": "{{repo_name}}", - "xasecure.hive.update.xapolicies.on.grant.revoke": "true" - }, - "ranger-kafka-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/kafka/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/kafka/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "true" - }, - "ranger-kafka-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "kafka", - "common.name.for.certificate": "", - "hadoop.rpc.protection": "", - "policy_user": "ambari-qa", - "ranger-kafka-plugin-enabled": "No", - "zookeeper.connect": "localhost:2181" - }, - "ranger-kafka-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/kafka-broker/config/ranger-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file/{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": 
"/usr/hdp/current/kafka-broker/config/ranger-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file/{{credential_file}}" - }, - "ranger-kafka-security": { - "ranger.plugin.kafka.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.kafka.policy.pollIntervalMs": "30000", - "ranger.plugin.kafka.policy.rest.ssl.config.file": "/etc/kafka/conf/ranger-policymgr-ssl.xml", - "ranger.plugin.kafka.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.kafka.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.kafka.service.name": "{{repo_name}}" - }, - "ranger-knox-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/knox/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/knox/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "false" - }, - "ranger-knox-plugin-properties": { - "KNOX_HOME": "/usr/hdp/current/knox-server", - "REPOSITORY_CONFIG_USERNAME": "admin", - "common.name.for.certificate": "", - "policy_user": "ambari-qa", - "ranger-knox-plugin-enabled": "No" - }, - "ranger-knox-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/knox-server/conf/ranger-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/knox-server/conf/ranger-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}" - }, - "ranger-knox-security": { - "ranger.plugin.knox.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.knox.policy.pollIntervalMs": "30000", - "ranger.plugin.knox.policy.rest.ssl.config.file": "/usr/hdp/current/knox-server/conf/ranger-policymgr-ssl.xml", - "ranger.plugin.knox.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.knox.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminJersey2RESTClient", - "ranger.plugin.knox.service.name": "{{repo_name}}" - }, - "ranger-yarn-audit": { - "xasecure.audit.destination.hdfs": "true", - "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/yarn/audit/hdfs/spool", - "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit", - "xasecure.audit.destination.solr": "false", - "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/yarn/audit/solr/spool", - "xasecure.audit.destination.solr.urls": "", - "xasecure.audit.destination.solr.zookeepers": "NONE", - "xasecure.audit.is.enabled": "true", - "xasecure.audit.provider.summary.enabled": "false" - }, - "ranger-yarn-plugin-properties": { - "REPOSITORY_CONFIG_USERNAME": "yarn", - "common.name.for.certificate": "", - "hadoop.rpc.protection": "", - "policy_user": "ambari-qa", - "ranger-yarn-plugin-enabled": "No" - }, - "ranger-yarn-policymgr-ssl": { - "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks", - "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", - "xasecure.policymgr.clientssl.truststore": 
"/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks", - "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}" - }, - "ranger-yarn-security": { - "ranger.plugin.yarn.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", - "ranger.plugin.yarn.policy.pollIntervalMs": "30000", - "ranger.plugin.yarn.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml", - "ranger.plugin.yarn.policy.rest.url": "{{policymgr_mgr_url}}", - "ranger.plugin.yarn.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", - "ranger.plugin.yarn.service.name": "{{repo_name}}" - }, - "spark-defaults": { - "spark.driver.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.eventLog.dir": "hdfs:///spark-history", - "spark.eventLog.enabled": "true", - "spark.executor.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.history.fs.logDirectory": "hdfs:///spark-history", - "spark.history.kerberos.keytab": "none", - "spark.history.kerberos.principal": "none", - "spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider", - "spark.history.ui.port": "18080", - "spark.yarn.containerLauncherMaxThreads": "25", - "spark.yarn.driver.memoryOverhead": "384", - "spark.yarn.executor.memoryOverhead": "384", - "spark.yarn.historyServer.address": "{{spark_history_server_host}}:{{spark_history_ui_port}}", - "spark.yarn.preserve.staging.files": "false", - "spark.yarn.queue": "default", - "spark.yarn.scheduler.heartbeat.interval-ms": "5000", - "spark.yarn.submit.file.replication": "3" - }, - "spark-env": { - "hive_kerberos_keytab": "{{hive_kerberos_keytab}}", - "hive_kerberos_principal": "{{hive_kerberos_principal}}", - "spark_daemon_memory": "1024", - "spark_group": "spark", - "spark_log_dir": "/var/log/spark", - "spark_pid_dir": "/var/run/spark", - "spark_thrift_cmd_opts": "", - "spark_user": "spark" - }, - "spark-hive-site-override": { - "hive.metastore.client.connect.retry.delay": "5", - "hive.metastore.client.socket.timeout": "1800", - "hive.server2.enable.doAs": "false", - "hive.server2.thrift.port": "10015", - "hive.server2.transport.mode": "binary" - }, - "spark-thrift-fairscheduler": { - "fairscheduler_content": " FAIR 1 2 " - }, - "spark-thrift-sparkconf": { - "spark.driver.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.dynamicAllocation.enabled": "true", - "spark.dynamicAllocation.initialExecutors": "0", - "spark.dynamicAllocation.maxExecutors": "10", - "spark.dynamicAllocation.minExecutors": "0", - "spark.eventLog.dir": "{{spark_history_dir}}", - "spark.eventLog.enabled": "true", - "spark.executor.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.executor.memory": "1g", - "spark.hadoop.cacheConf": "false", - "spark.history.fs.logDirectory": "{{spark_history_dir}}", - "spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider", - "spark.master": "{{spark_thrift_master}}", - "spark.scheduler.allocation.file": "{{spark_conf}}/spark-thrift-fairscheduler.xml", - "spark.scheduler.mode": "FAIR", - "spark.shuffle.service.enabled": "true", - "spark.yarn.am.memory": "512m", - "spark.yarn.queue": "default" - }, - "spark2-defaults": { - "spark.driver.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.eventLog.dir": "hdfs:///spark2-history/", - "spark.eventLog.enabled": "true", - "spark.executor.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.history.fs.logDirectory": "hdfs:///spark2-history/", - "spark.history.kerberos.keytab": "none", - 
"spark.history.kerberos.principal": "none", - "spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider", - "spark.history.ui.port": "18081", - "spark.yarn.historyServer.address": "{{spark_history_server_host}}:{{spark_history_ui_port}}", - "spark.yarn.queue": "default" - }, - "spark2-env": { - "hive_kerberos_keytab": "{{hive_kerberos_keytab}}", - "hive_kerberos_principal": "{{hive_kerberos_principal}}", - "spark_daemon_memory": "1024", - "spark_group": "spark", - "spark_log_dir": "/var/log/spark2", - "spark_pid_dir": "/var/run/spark2", - "spark_thrift_cmd_opts": "", - "spark_user": "spark" - }, - "spark2-hive-site-override": { - "hive.metastore.client.connect.retry.delay": "5", - "hive.metastore.client.socket.timeout": "1800", - "hive.server2.enable.doAs": "false", - "hive.server2.thrift.port": "10016", - "hive.server2.transport.mode": "binary" - }, - "spark2-thrift-fairscheduler": { - "fairscheduler_content": " FAIR 1 2 " - }, - "spark2-thrift-sparkconf": { - "spark.driver.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.dynamicAllocation.enabled": "true", - "spark.dynamicAllocation.initialExecutors": "0", - "spark.dynamicAllocation.maxExecutors": "10", - "spark.dynamicAllocation.minExecutors": "0", - "spark.eventLog.dir": "{{spark_history_dir}}", - "spark.eventLog.enabled": "true", - "spark.executor.extraLibraryPath": "{{spark_hadoop_lib_native}}", - "spark.hadoop.cacheConf": "false", - "spark.history.fs.logDirectory": "{{spark_history_dir}}", - "spark.history.provider": "org.apache.spark.deploy.history.FsHistoryProvider", - "spark.master": "{{spark_thrift_master}}", - "spark.scheduler.allocation.file": "{{spark_conf}}/spark-thrift-fairscheduler.xml", - "spark.scheduler.mode": "FAIR", - "spark.shuffle.service.enabled": "true", - "spark.yarn.queue": "default" - }, - "sqoop-atlas-application.properties": { - "atlas.jaas.KafkaClient.option.renewTicket": "true", - "atlas.jaas.KafkaClient.option.useTicketCache": "true" - }, - "sqoop-env": { - "jdbc_drivers": "", - "sqoop_user": "sqoop" - }, - "ssl-client": { - "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", - "ssl.client.keystore.type": "jks", - "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", - "ssl.client.truststore.reload.interval": "10000", - "ssl.client.truststore.type": "jks" - }, - "ssl-server": { - "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", - "ssl.server.keystore.type": "jks", - "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", - "ssl.server.truststore.reload.interval": "10000", - "ssl.server.truststore.type": "jks" - }, - "storm-atlas-application.properties": { - "atlas.hook.storm.numRetries": "3" - }, - "storm-env": { - "jmxremote_port": "56431", - "nimbus_seeds_supported": "true", - "storm_log_dir": "/var/log/storm", - "storm_logs_supported": "true", - "storm_pid_dir": "/var/run/storm", - "storm_user": "storm", - "storm_user_nofile_limit": "128000", - "storm_user_nproc_limit": "65536" - }, - "storm-site": { - "_storm.min.ruid": "null", - "_storm.thrift.nonsecure.transport": "org.apache.storm.security.auth.SimpleTransportPlugin", - "_storm.thrift.secure.transport": "org.apache.storm.security.auth.kerberos.KerberosSaslTransportPlugin", - "client.jartransformer.class": "org.apache.storm.hack.StormShadeTransformer", - "dev.zookeeper.path": "/tmp/dev-storm-zookeeper", - "drpc.childopts": "-Xmx768m _JAAS_PLACEHOLDER", - "drpc.invocations.port": "3773", - "drpc.port": "3772", - "drpc.queue.size": "128", - 
"drpc.request.timeout.secs": "600", - "drpc.worker.threads": "64", - "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib", - "logviewer.appender.name": "A1", - "logviewer.childopts": "-Xmx128m _JAAS_PLACEHOLDER", - "logviewer.port": "8000", - "metrics.reporter.register": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter", - "nimbus.cleanup.inbox.freq.secs": "600", - "nimbus.file.copy.expiration.secs": "600", - "nimbus.inbox.jar.expiration.secs": "3600", - "nimbus.monitor.freq.secs": "10", - "nimbus.reassign": "true", - "nimbus.seeds": "['%HOSTGROUP::host_group_2%']", - "nimbus.supervisor.timeout.secs": "60", - "nimbus.task.launch.secs": "120", - "nimbus.task.timeout.secs": "30", - "nimbus.thrift.max_buffer_size": "1048576", - "nimbus.thrift.port": "6627", - "nimbus.thrift.threads": "196", - "nimbus.topology.validator": "org.apache.storm.nimbus.DefaultTopologyValidator", - "storm.cluster.mode": "distributed", - "storm.local.dir": "/hadoop/storm", - "storm.local.mode.zmq": "false", - "storm.log.dir": "{{log_dir}}", - "storm.messaging.netty.buffer_size": "5242880", - "storm.messaging.netty.client_worker_threads": "1", - "storm.messaging.netty.max_retries": "30", - "storm.messaging.netty.max_wait_ms": "1000", - "storm.messaging.netty.min_wait_ms": "100", - "storm.messaging.netty.server_worker_threads": "1", - "storm.messaging.transport": "org.apache.storm.messaging.netty.Context", - "storm.thrift.transport": "{{storm_thrift_transport}}", - "storm.topology.submission.notifier.plugin.class": "org.apache.atlas.storm.hook.StormAtlasHook", - "storm.zookeeper.connection.timeout": "30000", - "storm.zookeeper.port": "2181", - "storm.zookeeper.retry.interval": "1000", - "storm.zookeeper.retry.intervalceiling.millis": "30000", - "storm.zookeeper.retry.times": "5", - "storm.zookeeper.root": "/storm", - "storm.zookeeper.servers": "['%HOSTGROUP::host_group_1%']", - "storm.zookeeper.session.timeout": "30000", - "supervisor.heartbeat.frequency.secs": "5", - "supervisor.monitor.frequency.secs": "3", - "supervisor.slots.ports": "[6700, 6701]", - "supervisor.worker.start.timeout.secs": "120", - "supervisor.worker.timeout.secs": "30", - "task.heartbeat.frequency.secs": "3", - "task.refresh.poll.secs": "10", - "topology.acker.executors": "null", - "topology.builtin.metrics.bucket.size.secs": "60", - "topology.debug": "false", - "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy", - "topology.enable.message.timeouts": "true", - "topology.error.throttle.interval.secs": "10", - "topology.executor.receive.buffer.size": "1024", - "topology.executor.send.buffer.size": "1024", - "topology.fall.back.on.java.serialization": "true", - "topology.kryo.factory": "org.apache.storm.serialization.DefaultKryoFactory", - "topology.max.error.report.per.interval": "5", - "topology.max.replication.wait.time.sec": "{{actual_topology_max_replication_wait_time_sec}}", - "topology.max.replication.wait.time.sec.default": "60", - "topology.max.spout.pending": "1000", - "topology.max.task.parallelism": "null", - "topology.message.timeout.secs": "30", - "topology.min.replication.count": "{{actual_topology_min_replication_count}}", - "topology.min.replication.count.default": "1", - "topology.optimize": "true", - "topology.receiver.buffer.size": "8", - "topology.skip.missing.kryo.registrations": "false", - "topology.sleep.spout.wait.strategy.time.ms": "1", - "topology.spout.wait.strategy": "org.apache.storm.spout.SleepSpoutWaitStrategy", - 
"topology.state.synchronization.timeout.secs": "60", - "topology.stats.sample.rate": "0.05", - "topology.tick.tuple.freq.secs": "null", - "topology.transfer.buffer.size": "1024", - "topology.trident.batch.emit.interval.millis": "500", - "topology.tuple.serializer": "org.apache.storm.serialization.types.ListDelegateSerializer", - "topology.worker.childopts": "null", - "topology.worker.shared.thread.pool.size": "4", - "topology.workers": "1", - "transactional.zookeeper.port": "null", - "transactional.zookeeper.root": "/transactional", - "transactional.zookeeper.servers": "null", - "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER", - "ui.filter": "null", - "ui.port": "8744", - "worker.heartbeat.frequency.secs": "1", - "zmq.hwm": "0", - "zmq.linger.millis": "5000", - "zmq.threads": "1" - }, - "tez-env": { - "tez_user": "tez" - }, - "tez-interactive-site": { - "tez.am.resource.memory.mb": "1536", - "tez.dag.recovery.enabled": "false", - "tez.grouping.node.local.only": "true", - "tez.lib.uris": "/hdp/apps/${hdp.version}/tez_hive2/tez.tar.gz", - "tez.runtime.pipelined-shuffle.enabled": "false", - "tez.runtime.pipelined.sorter.lazy-allocate.memory": "true", - "tez.runtime.report.partition.stats": "true", - "tez.runtime.shuffle.fetch.buffer.percent": "0.6", - "tez.runtime.shuffle.fetch.verify-disk-checksum": "false", - "tez.runtime.shuffle.memory.limit.percent": "0.25", - "tez.session.am.dag.submit.timeout.secs": "3600" - }, - "tez-site": { - "tez.am.am-rm.heartbeat.interval-ms.max": "250", - "tez.am.container.idle.release-timeout-max.millis": "20000", - "tez.am.container.idle.release-timeout-min.millis": "10000", - "tez.am.container.reuse.enabled": "true", - "tez.am.container.reuse.locality.delay-allocation-millis": "250", - "tez.am.container.reuse.non-local-fallback.enabled": "false", - "tez.am.container.reuse.rack-fallback.enabled": "true", - "tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", - "tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "tez.am.log.level": "INFO", - "tez.am.max.app.attempts": "2", - "tez.am.maxtaskfailures.per.node": "10", - "tez.am.resource.memory.mb": "2048", - "tez.am.tez-ui.history-url.template": "__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__", - "tez.am.view-acls": "*", - "tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", - "tez.counters.max": "10000", - "tez.counters.max.groups": "3000", - "tez.generate.debug.artifacts": "false", - "tez.grouping.max-size": "1073741824", - "tez.grouping.min-size": "16777216", - "tez.grouping.split-waves": "1.7", - "tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSV15HistoryLoggingService", - "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz", - "tez.queue.name": "default", - "tez.runtime.compress": "true", - "tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec", - "tez.runtime.convert.user-payload.to.history-text": "false", - "tez.runtime.io.sort.mb": "270", - "tez.runtime.optimize.local.fetch": "true", - "tez.runtime.pipelined.sorter.sort.threads": "2", - "tez.runtime.shuffle.fetch.buffer.percent": "0.6", - "tez.runtime.shuffle.memory.limit.percent": "0.25", - "tez.runtime.sorter.class": "PIPELINED", - 
"tez.runtime.unordered.output.buffer.size-mb": "76", - "tez.session.am.dag.submit.timeout.secs": "600", - "tez.session.client.timeout.secs": "-1", - "tez.shuffle-vertex-manager.max-src-fraction": "0.4", - "tez.shuffle-vertex-manager.min-src-fraction": "0.2", - "tez.staging-dir": "/tmp/${user.name}/staging", - "tez.task.am.heartbeat.counter.interval-ms.max": "4000", - "tez.task.generate.counters.per.io": "true", - "tez.task.get-task.sleep.interval-ms.max": "200", - "tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", - "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", - "tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", - "tez.task.max-events-per-heartbeat": "500", - "tez.task.resource.memory.mb": "1024", - "tez.use.cluster.hadoop-libs": "false" - }, - "webhcat-site": { - "templeton.exec.timeout": "60000", - "templeton.hadoop": "/usr/hdp/${hdp.version}/hadoop/bin/hadoop", - "templeton.hadoop.conf.dir": "/etc/hadoop/conf", - "templeton.hadoop.queue.name": "default", - "templeton.hcat": "/usr/hdp/${hdp.version}/hive/bin/hcat", - "templeton.hcat.home": "hive.tar.gz/hive/hcatalog", - "templeton.hive.archive": "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz", - "templeton.hive.extra.files": "/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib", - "templeton.hive.home": "hive.tar.gz/hive", - "templeton.hive.path": "hive.tar.gz/hive/bin/hive", - "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://%HOSTGROUP::host_group_1%:9083,hive.metastore.sasl.enabled=false", - "templeton.jar": "/usr/hdp/${hdp.version}/hive/share/webhcat/svr/lib/hive-webhcat-*.jar", - "templeton.libjars": "/usr/hdp/${hdp.version}/zookeeper/zookeeper.jar,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar", - "templeton.override.enabled": "false", - "templeton.pig.archive": "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz", - "templeton.pig.path": "pig.tar.gz/pig/bin/pig", - "templeton.port": "50111", - "templeton.python": "${env.PYTHON_CMD}", - "templeton.sqoop.archive": "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz", - "templeton.sqoop.home": "sqoop.tar.gz/sqoop", - "templeton.sqoop.path": "sqoop.tar.gz/sqoop/bin/sqoop", - "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage", - "templeton.streaming.jar": "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar", - "templeton.zookeeper.hosts": "%HOSTGROUP::host_group_1%:2181", - "webhcat.proxyuser.root.groups": "*", - "webhcat.proxyuser.root.hosts": "vgt-imaster-0.novalocal" - }, - "yarn-env": { - "apptimelineserver_heapsize": "1024", - "is_supported_yarn_ranger": "true", - "min_user_id": "1000", - "nodemanager_heapsize": "1024", - "resourcemanager_heapsize": "1024", - "service_check.queue.name": "default", - "yarn_cgroups_enabled": "false", - "yarn_heapsize": "1024", - "yarn_log_dir_prefix": "/var/log/hadoop-yarn", - "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", - "yarn_user": "yarn", - "yarn_user_nofile_limit": "32768", - "yarn_user_nproc_limit": "65536" - }, - "yarn-site": { - "hadoop.registry.rm.enabled": "true", - "hadoop.registry.zk.quorum": "%HOSTGROUP::host_group_1%:2181", - "yarn.acl.enable": "false", - "yarn.admin.acl": "yarn", - "yarn.application.classpath": 
"$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*", - "yarn.client.nodemanager-connect.max-wait-ms": "60000", - "yarn.client.nodemanager-connect.retry-interval-ms": "10000", - "yarn.http.policy": "HTTP_ONLY", - "yarn.log-aggregation-enable": "true", - "yarn.log-aggregation.retain-seconds": "2592000", - "yarn.log.server.url": "http://%HOSTGROUP::host_group_1%:19888/jobhistory/logs", - "yarn.node-labels.enabled": "false", - "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500", - "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels", - "yarn.nodemanager.address": "0.0.0.0:45454", - "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", - "yarn.nodemanager.aux-services": "mapreduce_shuffle,spark_shuffle,spark2_shuffle", - "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler", - "yarn.nodemanager.aux-services.spark2_shuffle.class": "org.apache.spark.network.yarn.YarnShuffleService", - "yarn.nodemanager.aux-services.spark2_shuffle.classpath": "{{stack_root}}/${hdp.version}/spark2/aux/*", - "yarn.nodemanager.aux-services.spark_shuffle.class": "org.apache.spark.network.yarn.YarnShuffleService", - "yarn.nodemanager.aux-services.spark_shuffle.classpath": "{{stack_root}}/${hdp.version}/spark/aux/*", - "yarn.nodemanager.bind-host": "0.0.0.0", - "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", - "yarn.nodemanager.container-metrics.unregister-delay-ms": "60000", - "yarn.nodemanager.container-monitor.interval-ms": "3000", - "yarn.nodemanager.delete.debug-delay-sec": "0", - "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90", - "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000", - "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", - "yarn.nodemanager.health-checker.interval-ms": "135000", - "yarn.nodemanager.health-checker.script.timeout-ms": "60000", - "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn", - "yarn.nodemanager.linux-container-executor.cgroups.mount": "false", - "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false", - "yarn.nodemanager.linux-container-executor.group": "hadoop", - "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler", - "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", - "yarn.nodemanager.log-aggregation.compression-type": "gz", - "yarn.nodemanager.log-aggregation.debug-enabled": "false", - "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30", - "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "3600", - "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", - "yarn.nodemanager.log.retain-second": "604800", - "yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state", - "yarn.nodemanager.recovery.enabled": "true", - "yarn.nodemanager.remote-app-log-dir": "/app-logs", - "yarn.nodemanager.remote-app-log-dir-suffix": "logs", - "yarn.nodemanager.resource.cpu-vcores": "6", - "yarn.nodemanager.resource.memory-mb": "9216", - "yarn.nodemanager.resource.percentage-physical-cpu-limit": "80", - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.nodemanager.vmem-pmem-ratio": "2.1", - 
"yarn.resourcemanager.address": "%HOSTGROUP::host_group_1%:8050", - "yarn.resourcemanager.admin.address": "%HOSTGROUP::host_group_1%:8141", - "yarn.resourcemanager.am.max-attempts": "2", - "yarn.resourcemanager.bind-host": "0.0.0.0", - "yarn.resourcemanager.connect.max-wait.ms": "900000", - "yarn.resourcemanager.connect.retry-interval.ms": "30000", - "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500", - "yarn.resourcemanager.fs.state-store.uri": "", - "yarn.resourcemanager.ha.enabled": "false", - "yarn.resourcemanager.hostname": "%HOSTGROUP::host_group_1%", - "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", - "yarn.resourcemanager.recovery.enabled": "true", - "yarn.resourcemanager.resource-tracker.address": "%HOSTGROUP::host_group_1%:8025", - "yarn.resourcemanager.scheduler.address": "%HOSTGROUP::host_group_1%:8030", - "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", - "yarn.resourcemanager.scheduler.monitor.enable": "false", - "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}", - "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore", - "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10", - "yarn.resourcemanager.system-metrics-publisher.enabled": "true", - "yarn.resourcemanager.webapp.address": "%HOSTGROUP::host_group_1%:8088", - "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false", - "yarn.resourcemanager.webapp.https.address": "%HOSTGROUP::host_group_1%:8090", - "yarn.resourcemanager.work-preserving-recovery.enabled": "true", - "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000", - "yarn.resourcemanager.zk-acl": "world:anyone:rwcda", - "yarn.resourcemanager.zk-address": "%HOSTGROUP::host_group_1%:2181", - "yarn.resourcemanager.zk-num-retries": "1000", - "yarn.resourcemanager.zk-retry-interval-ms": "1000", - "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore", - "yarn.resourcemanager.zk-timeout-ms": "10000", - "yarn.scheduler.maximum-allocation-mb": "9216", - "yarn.scheduler.maximum-allocation-vcores": "6", - "yarn.scheduler.minimum-allocation-mb": "1024", - "yarn.scheduler.minimum-allocation-vcores": "1", - "yarn.timeline-service.address": "%HOSTGROUP::host_group_1%:10200", - "yarn.timeline-service.bind-host": "0.0.0.0", - "yarn.timeline-service.client.max-retries": "30", - "yarn.timeline-service.client.retry-interval-ms": "1000", - "yarn.timeline-service.enabled": "true", - "yarn.timeline-service.entity-group-fs-store.active-dir": "/ats/active/", - "yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds": "3600", - "yarn.timeline-service.entity-group-fs-store.done-dir": "/ats/done/", - "yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes": "org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl,org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin", - "yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath": "/usr/hdp/${hdp.version}/spark/hdpLib/*", - "yarn.timeline-service.entity-group-fs-store.retain-seconds": "604800", - "yarn.timeline-service.entity-group-fs-store.scan-interval-seconds": "60", - "yarn.timeline-service.entity-group-fs-store.summary-store": "org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore", - "yarn.timeline-service.generic-application-history.store-class": 
"org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore", - "yarn.timeline-service.http-authentication.proxyuser.root.groups": "*", - "yarn.timeline-service.http-authentication.proxyuser.root.hosts": "vgt-imaster-0.novalocal", - "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true", - "yarn.timeline-service.http-authentication.type": "simple", - "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline", - "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline", - "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600", - "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000", - "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000", - "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000", - "yarn.timeline-service.recovery.enabled": "true", - "yarn.timeline-service.state-store-class": "org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore", - "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore", - "yarn.timeline-service.ttl-enable": "true", - "yarn.timeline-service.ttl-ms": "2678400000", - "yarn.timeline-service.version": "1.5", - "yarn.timeline-service.webapp.address": "%HOSTGROUP::host_group_1%:8188", - "yarn.timeline-service.webapp.https.address": "%HOSTGROUP::host_group_1%:8190" - }, - "zeppelin-config": { - "zeppelin.anonymous.allowed": "true", - "zeppelin.interpreter.connect.timeout": "30000", - "zeppelin.interpreter.dir": "interpreter", - "zeppelin.interpreters": "org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.jdbc.JDBCInterpreter,org.apache.zeppelin.phoenix.PhoenixInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInterpreter,org.apache.zeppelin.livy.LivySparkRInterpreter,org.apache.zeppelin.livy.LivySparkSQLInterpreter", - "zeppelin.notebook.dir": "notebook", - "zeppelin.notebook.homescreen": "", - "zeppelin.notebook.homescreen.hide": "false", - "zeppelin.notebook.s3.bucket": "zeppelin", - "zeppelin.notebook.s3.user": "user", - "zeppelin.notebook.storage": "org.apache.zeppelin.notebook.repo.VFSNotebookRepo", - "zeppelin.server.addr": "0.0.0.0", - "zeppelin.server.allowed.origins": "*", - "zeppelin.server.port": "9995", - "zeppelin.ssl": "false", - "zeppelin.ssl.client.auth": "false", - "zeppelin.ssl.keystore.path": "conf/keystore", - "zeppelin.ssl.keystore.type": "JKS", - "zeppelin.ssl.truststore.path": "conf/truststore", - "zeppelin.ssl.truststore.type": "JKS", - "zeppelin.websocket.max.text.message.size": "1024000" - }, - "zeppelin-env": { - "log4j_properties_content": "log4j.rootLogger = INFO, dailyfile log4j.appender.stdout = org.apache.log4j.ConsoleAppender log4j.appender.stdout.layout = org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n log4j.appender.dailyfile.DatePattern=.yyyy-MM-dd log4j.appender.dailyfile.Threshold = INFO log4j.appender.dailyfile = org.apache.log4j.DailyRollingFileAppender log4j.appender.dailyfile.File = ${zeppelin.log.file} log4j.appender.dailyfile.layout = org.apache.log4j.PatternLayout 
log4j.appender.dailyfile.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n", - "shiro_ini_content": "[users] # List of users with their password allowed to access Zeppelin. # To use a different strategy (LDAP / Database / ...) check the shiro doc at http://shiro.apache.org/configuration.html#Configuration-INISections #admin = password1 #user1 = password2, role1, role2 #user2 = password3, role3 #user3 = password4, role2 # Sample LDAP configuration, for user Authentication, currently tested for single Realm [main] #activeDirectoryRealm = org.apache.zeppelin.server.ActiveDirectoryGroupRealm #activeDirectoryRealm.systemUsername = CN=Administrator,CN=Users,DC=HW,DC=EXAMPLE,DC=COM #activeDirectoryRealm.systemPassword = Password1! #activeDirectoryRealm.hadoopSecurityCredentialPath = jceks://user/zeppelin/zeppelin.jceks #activeDirectoryRealm.searchBase = CN=Users,DC=HW,DC=TEST,DC=COM #activeDirectoryRealm.url = ldap://ad-nano.test.example.com:389 #activeDirectoryRealm.groupRolesMap = \"\" #activeDirectoryRealm.authorizationCachingEnabled = true #ldapRealm = org.apache.shiro.realm.ldap.JndiLdapRealm #ldapRealm.userDnTemplate = uid={0},cn=users,cn=accounts,dc=example,dc=com #ldapRealm.contextFactory.url = ldap://ldaphost:389 #ldapRealm.contextFactory.authenticationMechanism = SIMPLE #sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager #securityManager.sessionManager = $sessionManager # 86,400,000 milliseconds = 24 hour #securityManager.sessionManager.globalSessionTimeout = 86400000 shiro.loginUrl = /api/login [urls] # anon means the access is anonymous. # authcBasic means Basic Auth Security # To enfore security, comment the line below and uncomment the next one /api/version = anon /** = anon #/** = authc", - "zeppelin.executor.instances": "2", - "zeppelin.executor.mem": "512m", - "zeppelin.server.kerberos.keytab": "", - "zeppelin.server.kerberos.principal": "", - "zeppelin.spark.jar.dir": "/apps/zeppelin", - "zeppelin_env_content": "# Spark master url. eg. spark://master_addr:7077. Leave empty if you want to use local mode export MASTER=yarn-client export SPARK_YARN_JAR={{spark_jar}} # Where log files are stored. PWD by default. export ZEPPELIN_LOG_DIR={{zeppelin_log_dir}} # The pid files are stored. /tmp by default. export ZEPPELIN_PID_DIR={{zeppelin_pid_dir}} export JAVA_HOME={{java64_home}} # Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS=\"-Dspark.executor.memory=8g -Dspark.cores.max=16\" export ZEPPELIN_JAVA_OPTS=\"-Dhdp.version={{full_stack_version}} -Dspark.executor.memory={{executor_mem}} -Dspark.executor.instances={{executor_instances}} -Dspark.yarn.queue={{spark_queue}}\" # Zeppelin jvm mem options Default -Xmx1024m -XX:MaxPermSize=512m # export ZEPPELIN_MEM # zeppelin interpreter process jvm mem options. Defualt = ZEPPELIN_MEM # export ZEPPELIN_INTP_MEM # zeppelin interpreter process jvm options. Default = ZEPPELIN_JAVA_OPTS # export ZEPPELIN_INTP_JAVA_OPTS # Where notebook saved # export ZEPPELIN_NOTEBOOK_DIR # Id of notebook to be displayed in homescreen. ex) 2A94M5J1Z # export ZEPPELIN_NOTEBOOK_HOMESCREEN # hide homescreen notebook from list when this value set to \"true\". default \"false\" # export ZEPPELIN_NOTEBOOK_HOMESCREEN_HIDE # Bucket where notebook saved # export ZEPPELIN_NOTEBOOK_S3_BUCKET # User in bucket where notebook saved. For example bucket/user/notebook/2A94M5J1Z/note.json # export ZEPPELIN_NOTEBOOK_S3_USER # A string representing this instance of zeppelin. 
$USER by default # export ZEPPELIN_IDENT_STRING # The scheduling priority for daemons. Defaults to 0. # export ZEPPELIN_NICENESS #### Spark interpreter configuration #### ## Use provided spark installation ## ## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit ## # (required) When it is defined, load it instead of Zeppelin embedded Spark libraries export SPARK_HOME={{spark_home}} # (optional) extra options to pass to spark submit. eg) \"--driver-memory 512M --executor-memory 1G\". # export SPARK_SUBMIT_OPTIONS ## Use embedded spark binaries ## ## without SPARK_HOME defined, Zeppelin still able to run spark interpreter process using embedded spark binaries. ## however, it is not encouraged when you can define SPARK_HOME ## # Options read in YARN client mode # yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR. export HADOOP_CONF_DIR=/etc/hadoop/conf # Pyspark (supported with Spark 1.2.1 and above) # To configure pyspark, you need to set spark distribution's path to 'spark.home' property in Interpreter setting screen in Zeppelin GUI # path to the python command. must be the same path on the driver(Zeppelin) and all workers. # export PYSPARK_PYTHON export PYTHONPATH=\"${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.8.2.1-src.zip\" export SPARK_YARN_USER_ENV=\"PYTHONPATH=${PYTHONPATH}\" ## Spark interpreter options ## ## # Use HiveContext instead of SQLContext if set true. true by default. # export ZEPPELIN_SPARK_USEHIVECONTEXT # Execute multiple SQL concurrently if set true. false by default. # export ZEPPELIN_SPARK_CONCURRENTSQL # Max number of SparkSQL result to display. 1000 by default. # export ZEPPELIN_SPARK_MAXRESULT",
-        "zeppelin_group": "zeppelin",
-        "zeppelin_log_dir": "/var/log/zeppelin",
-        "zeppelin_pid_dir": "/var/run/zeppelin",
-        "zeppelin_user": "zeppelin"
-    },
-    "zoo.cfg": {
-        "autopurge.purgeInterval": "24",
-        "autopurge.snapRetainCount": "30",
-        "clientPort": "2181",
-        "dataDir": "/hadoop/zookeeper",
-        "initLimit": "10",
-        "syncLimit": "5",
-        "tickTime": "2000"
-    },
-    "zookeeper-env": {
-        "zk_log_dir": "/var/log/zookeeper",
-        "zk_pid_dir": "/var/run/zookeeper",
-        "zk_user": "zookeeper"
-    }
-}
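For reference, each configs-*.json file removed above is a plain JSON mapping of Ambari config-type to property/value pairs, as the tail of the file shows. A minimal sketch of how such a file can be inspected (illustrative only; the path assumes a checkout from before this retirement, and the looked-up keys are taken from the content above):

    import json

    # Load one of the removed sample-config files (pre-retirement checkout).
    path = "sahara_plugin_ambari/plugins/ambari/resources/configs-2.6.json"
    with open(path) as fp:
        configs = json.load(fp)

    # Top-level keys are Ambari config-types, e.g. "zoo.cfg" or "zeppelin-env".
    print(configs["zoo.cfg"]["clientPort"])     # -> "2181"
    print(configs["zookeeper-env"]["zk_user"])  # -> "zookeeper"
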
diff --git a/sahara_plugin_ambari/plugins/ambari/resources/generate_config.py b/sahara_plugin_ambari/plugins/ambari/resources/generate_config.py
deleted file mode 100755
index c1ccc08..0000000
--- a/sahara_plugin_ambari/plugins/ambari/resources/generate_config.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2015 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import argparse
-import sys
-
-from oslo_serialization import jsonutils
-import requests
-
-
-def get_blueprint(ambari_address, username, password, cluster_name):
-    url = "http://%s:8080/api/v1/clusters/%s?format=blueprint" % (
-        ambari_address, cluster_name)
-    resp = requests.get(url, auth=(username, password))
-    resp.raise_for_status()
-    if resp.text:
-        return jsonutils.loads(resp.text)
-
-
-def generate_config(blueprint):
-    configs = {}
-    for entity in blueprint["configurations"]:
-        for cfg in entity:
-            p = entity[cfg]["properties"]
-            if not p:
-                continue
-            if "content" in p:
-                del p["content"]
-            for k, v in p.items():
-                p[k] = " ".join(v.split())
-            if p:
-                configs[cfg] = p
-    return configs
-
-
-def write_config(cfg, version):
-    with open("sahara/plugins/ambari/resources/configs-%s.json" % version,
-              "w") as fp:
-        jsonutils.dump(cfg, fp, indent=4, sort_keys=True,
-                       separators=(",", ": "))
-
-
-def main():
-    parser = argparse.ArgumentParser(
-        description="Ambari sample config generator")
-    parser.add_argument("--address", help="Ambari address",
-                        default="localhost")
-    parser.add_argument("--username", help="Ambari username",
-                        default="admin")
-    parser.add_argument("--password", help="Ambari password",
-                        default="admin")
-    parser.add_argument("--cluster-name", help="Name of cluster",
-                        default="cluster")
-    ns = parser.parse_args(sys.argv[1:])
-
-    bp = get_blueprint(ns.address,
-                       ns.username,
-                       ns.password,
-                       ns.cluster_name)
-    cfg = generate_config(bp)
-    write_config(cfg, bp["Blueprints"]["stack_version"])
-
-
-if __name__ == "__main__":
-    main()
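The generate_config.py helper removed above is what produced those JSON files: it fetched a blueprint export from a live Ambari and flattened it into the config-type/property shape. A self-contained sketch of essentially the same transformation on an in-memory blueprint, with invented sample data, to make the behaviour concrete (plain json stands in for oslo_serialization here):

    import json

    # Toy blueprint fragment in the shape get_blueprint() returned: a list
    # of one-key dicts, each wrapping a "properties" mapping.
    blueprint = {
        "configurations": [
            {"zoo.cfg": {"properties": {"clientPort": "2181",
                                        "tickTime": "  2000 "}}},
            {"zeppelin-env": {"properties": {"content": "<big template>",
                                             "zeppelin_user": "zeppelin"}}},
        ],
    }

    configs = {}
    for entity in blueprint["configurations"]:
        for cfg in entity:
            props = entity[cfg]["properties"]
            # Large "content" blobs (env files, log4j templates) are dropped
            # and whitespace runs collapsed, as in the script above.
            props.pop("content", None)
            configs[cfg] = {k: " ".join(v.split()) for k, v in props.items()}

    print(json.dumps(configs, indent=4, sort_keys=True))
    # -> {"zeppelin-env": {"zeppelin_user": "zeppelin"},
    #     "zoo.cfg": {"clientPort": "2181", "tickTime": "2000"}}
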
diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_ambari b/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_ambari
deleted file mode 100644
index a181823..0000000
--- a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_ambari
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/env bash
-
-if [ $test_only -eq 0 ]; then
-    chkconfig ambari-server off
-    chkconfig ambari-agent off
-else
-    exit 0
-fi
diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_certificate_check b/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_certificate_check
deleted file mode 100644
index 18b805d..0000000
--- a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_certificate_check
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-config=/etc/python/cert-verification.cfg
-check=$(cat $config | grep 'verify=disable' | wc -l)
-
-if [ $check -eq 0 ]; then
-    if [ $test_only -eq 0 ]; then
-        [ -e $config ] && sed -i "s%^\(verify=\s*\).*$%verify=disable%" $config
-    else
-        exit 0
-    fi
-fi
diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_firewall b/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_firewall
deleted file mode 100644
index e82c456..0000000
--- a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_firewall
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-check=$(systemctl --no-pager list-unit-files iptables.service | grep 'enabled' | wc -l)
-
-if [ $check -eq 1 ]; then
-    if [ $test_only -eq 0 ]; then
-        if type -p systemctl && [[ "$(systemctl --no-pager list-unit-files firewalld)" =~ 'enabled' ]]; then
-            systemctl disable firewalld
-        fi
-
-        if type -p service; then
-            service ip6tables save
-            service iptables save
-            chkconfig ip6tables off
-            chkconfig iptables off
-        fi
-    else
-        exit 0
-    fi
-fi
diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_selinux b/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_selinux
deleted file mode 100644
index b24dc12..0000000
--- a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/disable_selinux
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-check=$(cat /etc/selinux/config | grep 'SELINUX=disabled' | wc -l)
-
-if [ $check -eq 0 ]; then
-    if [ $test_only -eq 0 ]; then
-        config=/etc/selinux/config
-        [ -e $config ] && sed -i "s%^\(SELINUX=\s*\).*$%SELINUX=disabled%" $config
-    else
-        exit 0
-    fi
-fi
diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/setup_java_home b/sahara_plugin_ambari/plugins/ambari/resources/images/centos/setup_java_home
deleted file mode 100644
index 7bbe27a..0000000
--- a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/setup_java_home
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-JAVA_RC="/etc/profile.d/99-java.sh"
-JAVA_BIN_RC="/etc/profile.d/98-java-bin.sh"
-
-if [ ! -f $JAVA_RC ]; then
-    if [ $test_only -eq 0 ]; then
-        case "$java_distro" in
-            openjdk )
-                JRE_HOME="/usr/lib/jvm/java-openjdk/jre"
-                JDK_HOME="/usr/lib/jvm/java-openjdk"
-                ;;
-            oracle-java )
-                JRE_HOME="/usr/java/oracle-jdk"
-                JDK_HOME="/usr/java/oracle-jdk"
-                ;;
-        esac
-
-        echo "export JAVA_HOME=$JRE_HOME" >> $JAVA_RC
-        chmod +x $JAVA_RC
-
-        echo "export PATH=$JRE_HOME/bin:\$PATH" >> $JAVA_BIN_RC
-        echo "export PATH=$JDK_HOME/bin:\$PATH" >> $JAVA_BIN_RC
-        chmod +x $JAVA_BIN_RC
-
-        alternatives --install /usr/bin/java java $JRE_HOME/bin/java 200000
-        alternatives --install /usr/bin/javac javac $JDK_HOME/bin/javac 200000
-    else
-        exit 0
-    fi
-fi
diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/unlimited_security_artifacts b/sahara_plugin_ambari/plugins/ambari/resources/images/centos/unlimited_security_artifacts
deleted file mode 100644
index bd2e0a7..0000000
--- a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/unlimited_security_artifacts
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-if [ ! -d /tmp/UnlimitedPolicy/ ]; then
-    if [ $test_only -eq 0 ]; then
-        mkdir /tmp/UnlimitedPolicy/
-        curl -sS https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/local_policy.jar -o /tmp/UnlimitedPolicy/local_policy.jar
-        curl -sS https://tarballs.openstack.org/sahara-extra/dist/common-artifacts/US_export_policy.jar -o /tmp/UnlimitedPolicy/US_export_policy.jar
-    else
-        exit 0
-    fi
-fi
diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/wget_repo b/sahara_plugin_ambari/plugins/ambari/resources/images/centos/wget_repo
deleted file mode 100644
index 9033c84..0000000
--- a/sahara_plugin_ambari/plugins/ambari/resources/images/centos/wget_repo
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env bash
-
-if [ ! -f /etc/yum.repos.d/ambari.repo ]; then
-    if [ $test_only -eq 0 ]; then
-        wget http://public-repo-1.hortonworks.com/ambari/centos7/2.x/updates/$ambari_version/ambari.repo -O /etc/yum.repos.d/ambari.repo
-    else
-        exit 0
-    fi
-fi
diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/common/add_jar b/sahara_plugin_ambari/plugins/ambari/resources/images/common/add_jar
deleted file mode 100644
index 5727a97..0000000
--- a/sahara_plugin_ambari/plugins/ambari/resources/images/common/add_jar
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-
-hadoop="2.7.1"
-
-HDFS_LIB_DIR=${hdfs_lib_dir:-"/usr/share/hadoop/lib"}
-JAR_BUILD_DATE="2016-03-17"
-SWIFT_LIB_URI="https://tarballs.openstack.org/sahara-extra/dist/hadoop-openstack/master/hadoop-openstack-${hadoop}.jar"
-HADOOP_SWIFT_JAR_NAME=hadoop-openstack.jar
-
-if [ ! -f $HDFS_LIB_DIR/$HADOOP_SWIFT_JAR_NAME ]; then
-    if [ $test_only -eq 0 ]; then
-        if [ -z "${swift_url:-}" ]; then
-            curl -sS -o $HDFS_LIB_DIR/$HADOOP_SWIFT_JAR_NAME $SWIFT_LIB_URI
-        else
-            curl -sS -o $HDFS_LIB_DIR/$HADOOP_SWIFT_JAR_NAME $swift_url
-        fi
-
-        if [ $? -ne 0 ]; then
-            printf "Could not download Swift Hadoop FS implementation.\nAborting\n"
-            exit 1
-        fi
-
-        chmod 0644 $HDFS_LIB_DIR/$HADOOP_SWIFT_JAR_NAME
-    else
-        exit 0
-    fi
-fi
-
-
-
-
diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/common/fix_tls_ambari_agent b/sahara_plugin_ambari/plugins/ambari/resources/images/common/fix_tls_ambari_agent
deleted file mode 100644
index 2345e76..0000000
--- a/sahara_plugin_ambari/plugins/ambari/resources/images/common/fix_tls_ambari_agent
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-AMBARI_AGENT_INI="/etc/ambari-agent/conf/ambari-agent.ini"
-FORCE_HTTPS_CONF="force_https_protocol=PROTOCOL_TLSv1_2"
-
-if [ $test_only -eq 0 ]; then
-    if grep -q '\[security\]' ${AMBARI_AGENT_INI}; then
-        if ! grep -q "${FORCE_HTTPS_CONF}" ${AMBARI_AGENT_INI}; then
-            sed -i '/^\[security\]/a\'${FORCE_HTTPS_CONF} ${AMBARI_AGENT_INI}
-        fi
-    else
-        printf "[security]\n${FORCE_HTTPS_CONF}\n" >>${AMBARI_AGENT_INI}
-    fi
-else
-    grep -q "${FORCE_HTTPS_CONF}" ${AMBARI_AGENT_INI}
-    exit $?
-fi
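The fix_tls_ambari_agent script above pins the agent to TLSv1.2 by ensuring force_https_protocol=PROTOCOL_TLSv1_2 sits under the [security] section of ambari-agent.ini. A rough Python equivalent of the same edit, shown only to clarify the intent (the grep/sed shell version above is what the image build actually ran):

    import configparser

    AMBARI_AGENT_INI = "/etc/ambari-agent/conf/ambari-agent.ini"

    # Ensure [security] exists and carries the TLSv1.2 pin, then rewrite.
    cfg = configparser.ConfigParser()
    cfg.read(AMBARI_AGENT_INI)
    if not cfg.has_section("security"):
        cfg.add_section("security")
    cfg.set("security", "force_https_protocol", "PROTOCOL_TLSv1_2")
    with open(AMBARI_AGENT_INI, "w") as fp:
        cfg.write(fp)

Note that configparser rewrites the whole file and drops comments, which is one reason the original used a targeted sed append instead.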
-d $JAVA_LOCATION ]; then - if [ $test_only -eq 0 ]; then - echo "Begin: installation of Java" - mkdir -p $JAVA_LOCATION - - if [ -n "$JAVA_DOWNLOAD_URL" ]; then - JAVA_FILE=$(basename $JAVA_DOWNLOAD_URL) - wget --no-check-certificate --no-cookies -c \ - --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" \ - -O $JAVA_LOCATION/$JAVA_FILE $JAVA_DOWNLOAD_URL - elif [ -n "$JAVA_FILE" ]; then - install -D -g root -o root -m 0755 $(dirname $0)/$JAVA_FILE $JAVA_LOCATION - fi - - cd $JAVA_LOCATION - - echo "Decompressing Java archive" - printf "\n\n" | tar -zxf $JAVA_FILE - echo "Setting up $JAVA_NAME" - chown -R root:root $JAVA_LOCATION - JAVA_DIR=`ls -1 $JAVA_LOCATION | grep -v tar.gz` - ln -s $JAVA_LOCATION/$JAVA_DIR $JAVA_HOME - - setup-java-home $JAVA_HOME $JAVA_HOME - - rm $JAVA_FILE - - echo "End: installation of Java" - else - exit 0 - fi -fi diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/image.yaml b/sahara_plugin_ambari/plugins/ambari/resources/images/image.yaml deleted file mode 100644 index eb8df56..0000000 --- a/sahara_plugin_ambari/plugins/ambari/resources/images/image.yaml +++ /dev/null @@ -1,140 +0,0 @@ -arguments: - ambari_version: - description: The version of Ambari to install. Defaults to 2.6.2.0. - default: 2.6.2.0 - choices: - - 2.6.2.0 # HDP 2.6 / HDP 2.5 / HDP 2.4 - - 2.4.3.0 # HDP 2.5 / HDP 2.4 / HDP 2.3 - java_distro: - default: openjdk - description: The distribution of Java to install. Defaults to openjdk. - choices: - - openjdk - - oracle-java - hdfs_lib_dir: - default: /opt - description: The path to HDFS lib. Defaults to /opt. - required: False - swift_url: - default: https://tarballs.openstack.org/sahara-extra/dist/hadoop-openstack/master/hadoop-openstack-2.7.1.jar - description: Location of the swift jar file. 
- required: False - -validators: - - os_case: - - ubuntu: - - script: - apt_update: - inline: apt-get update - - argument_case: - argument_name: java_distro - cases: - openjdk: - - os_case: - - redhat: - - package: java-1.8.0-openjdk-devel - - ubuntu: - - argument_case: - argument_name: ambari_version - cases: - 2.6.2.0: - - package: openjdk-8-jdk - 2.4.3.0: - - package: openjdk-7-jdk - oracle-java: - - script: common/oracle_java - - argument_case: - argument_name: ambari_version - cases: - 2.6.2.0: - - os_case: - - redhat: - - package: libtirpc-devel - - ubuntu: - - package: libtirpc-dev - - os_case: - - redhat: - - script: centos/disable_selinux - - script: centos/disable_certificate_check - - script: - centos/setup_java_home: - env_vars: [java_distro] - - package: wget - - script: - centos/wget_repo: - env_vars: [ambari_version] - - package: redhat-lsb - - package: - - mariadb - - mariadb-libs - - mariadb-server - - mysql-connector-java - - package: ntp - - package: - - ambari-metrics-monitor - - ambari-server - - ambari-metrics-collector - - ambari-metrics-hadoop-sink - - package: nmap-ncat - - package: fuse-libs - - package: snappy-devel - - package: iptables-services - - ubuntu: - - script: - ubuntu/wget_repo: - env_vars: [ambari_version] - - script: - ubuntu/setup_java_home: - env_vars: [java_distro] - - package: - - ambari-metrics-assembly - - ambari-server - - ambari-logsearch-portal - - ambari-logsearch-logfeeder - - ambari-infra-solr-client - - ambari-infra-solr - - netcat - - iptables - - iptables-persistent - - package: fuse - - package: - - mysql-client - - mysql-server - - libmysql-java - - script: common/mysql_connector_java_link - - package: ambari-agent - - script: common/fix_tls_ambari_agent - - package: - - unzip - - zip - - curl - - tar - - rpcbind - - rng-tools - - os_case: - - redhat: - - script: centos/disable_ambari - - script: centos/disable_firewall - - script: - common/add_jar: - env_vars: [hdfs_lib_dir, swift_url] - - script: - centos/unlimited_security_artifacts: - env_vars: [unlimited_security_location] - - ubuntu: - - script: - common/add_jar: - env_vars: [hdfs_lib_dir, swift_url] - - os_case: - - redhat: - - package: - - krb5-server - - krb5-libs - - krb5-workstation - - ubuntu: - - package: - - krb5-admin-server - - libpam-krb5 - - krb5-user - - ldap-utils - diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/ubuntu/setup_java_home b/sahara_plugin_ambari/plugins/ambari/resources/images/ubuntu/setup_java_home deleted file mode 100644 index 6a525ee..0000000 --- a/sahara_plugin_ambari/plugins/ambari/resources/images/ubuntu/setup_java_home +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -JAVA_RC="/etc/profile.d/99-java.sh" -JAVA_BIN_RC="/etc/profile.d/98-java-bin.sh" - -if [ ! 
-f $JAVA_RC ]; then - if [ $test_only -eq 0 ]; then - case "$java_distro" in - openjdk ) - JDK_HOME=$(echo /usr/lib/jvm/java-?-openjdk-amd64) - JRE_HOME="$JDK_HOME/jre" - ;; - oracle-java ) - JRE_HOME="/usr/java/oracle-jdk" - JDK_HOME="/usr/java/oracle-jdk" - ;; - esac - - echo "export JAVA_HOME=$JRE_HOME" >> $JAVA_RC - chmod +x $JAVA_RC - - echo "export PATH=$JRE_HOME/bin:\$PATH" >> $JAVA_BIN_RC - echo "export PATH=$JDK_HOME/bin:\$PATH" >> $JAVA_BIN_RC - chmod +x $JAVA_BIN_RC - - update-alternatives --remove-all java - update-alternatives --remove-all javac - update-alternatives --install /usr/bin/java java $JRE_HOME/bin/java 200000 - update-alternatives --install /usr/bin/javac javac $JDK_HOME/bin/javac 200000 - else - exit 0 - fi -fi diff --git a/sahara_plugin_ambari/plugins/ambari/resources/images/ubuntu/wget_repo b/sahara_plugin_ambari/plugins/ambari/resources/images/ubuntu/wget_repo deleted file mode 100644 index 206b52a..0000000 --- a/sahara_plugin_ambari/plugins/ambari/resources/images/ubuntu/wget_repo +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash - -if [ ! -f /etc/apt/sources.list.d/ambari.list ]; then - if [ $test_only -eq 0 ]; then - wget http://public-repo-1.hortonworks.com/ambari/ubuntu14/2.x/updates/$ambari_version/ambari.list -O /etc/apt/sources.list.d/ambari.list && \ - apt-key adv --recv-keys --keyserver keyserver.ubuntu.com B9733A7A07513CAD && \ - apt-get update - else - exit 0 - fi -fi diff --git a/sahara_plugin_ambari/plugins/ambari/validation.py b/sahara_plugin_ambari/plugins/ambari/validation.py deleted file mode 100644 index a82c868..0000000 --- a/sahara_plugin_ambari/plugins/ambari/validation.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
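The validation module removed below enforced cluster topology rules before provisioning: exact or minimum instance counts per component, plus stricter quorum rules (at least 3, odd count) for ZooKeeper and JournalNodes when HA was enabled. A minimal, self-contained sketch of the count-check pattern it used; the cluster shape, count_instances helper, and exception class are simplified stand-ins, not the real sahara.plugins API:

    # Sketch of the component-count validation pattern, with a toy
    # cluster model (list of node-group dicts) replacing sahara's objects.
    class InvalidComponentCountException(Exception):
        def __init__(self, component, expected, actual):
            super().__init__(
                f"{component}: expected {expected} instance(s), got {actual}")

    def count_instances(cluster, process):
        # The real code walked cluster.node_groups via sahara.plugins.utils.
        return sum(ng["count"] for ng in cluster
                   if process in ng["processes"])

    def check_ambari(cluster):
        # Exactly one Ambari server and at least one ZooKeeper were required.
        am = count_instances(cluster, "AMBARI_SERVER")
        if am != 1:
            raise InvalidComponentCountException("AMBARI_SERVER", 1, am)
        zk = count_instances(cluster, "ZOOKEEPER_SERVER")
        if zk == 0:
            raise InvalidComponentCountException(
                "ZOOKEEPER_SERVER", "1 or more", zk)

    check_ambari([{"count": 1, "processes": ["AMBARI_SERVER"]},
                  {"count": 3, "processes": ["ZOOKEEPER_SERVER"]}])  # passes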
- - -from sahara.plugins import conductor -from sahara.plugins import context -from sahara.plugins import exceptions as ex -from sahara.plugins import utils -from sahara_plugin_ambari.i18n import _ -from sahara_plugin_ambari.plugins.ambari import common - - -def validate(cluster_id): - ctx = context.ctx() - cluster = conductor.cluster_get(ctx, cluster_id) - _check_ambari(cluster) - _check_hdfs(cluster) - _check_yarn(cluster) - _check_oozie(cluster) - _check_hive(cluster) - _check_hbase(cluster) - _check_spark(cluster) - _check_ranger(cluster) - _check_storm(cluster) - - -def _check_ambari(cluster): - am_count = utils.get_instances_count(cluster, common.AMBARI_SERVER) - zk_count = utils.get_instances_count(cluster, common.ZOOKEEPER_SERVER) - if am_count != 1: - raise ex.InvalidComponentCountException(common.AMBARI_SERVER, 1, - am_count) - if zk_count == 0: - raise ex.InvalidComponentCountException(common.ZOOKEEPER_SERVER, - _("1 or more"), zk_count) - - -def _check_hdfs(cluster): - nn_count = utils.get_instances_count(cluster, common.NAMENODE) - dn_count = utils.get_instances_count(cluster, common.DATANODE) - snn_count = utils.get_instances_count(cluster, common.SECONDARY_NAMENODE) - - if cluster.cluster_configs.get("general", {}).get(common.NAMENODE_HA): - _check_zk_ha(cluster) - _check_jn_ha(cluster) - - if nn_count != 2: - raise ex.InvalidComponentCountException(common.NAMENODE, 2, - nn_count) - else: - if nn_count != 1: - raise ex.InvalidComponentCountException(common.NAMENODE, 1, - nn_count) - - if snn_count != 1: - raise ex.InvalidComponentCountException(common.SECONDARY_NAMENODE, - 1, snn_count) - - if dn_count == 0: - raise ex.InvalidComponentCountException( - common.DATANODE, _("1 or more"), dn_count) - - -def _check_yarn(cluster): - rm_count = utils.get_instances_count(cluster, common.RESOURCEMANAGER) - nm_count = utils.get_instances_count(cluster, common.NODEMANAGER) - hs_count = utils.get_instances_count(cluster, common.HISTORYSERVER) - at_count = utils.get_instances_count(cluster, common.APP_TIMELINE_SERVER) - - if cluster.cluster_configs.get("general", {}).get( - common.RESOURCEMANAGER_HA): - _check_zk_ha(cluster) - - if rm_count != 2: - raise ex.InvalidComponentCountException(common.RESOURCEMANAGER, 2, - rm_count) - else: - if rm_count != 1: - raise ex.InvalidComponentCountException(common.RESOURCEMANAGER, 1, - rm_count) - - if hs_count != 1: - raise ex.InvalidComponentCountException(common.HISTORYSERVER, 1, - hs_count) - if at_count != 1: - raise ex.InvalidComponentCountException(common.APP_TIMELINE_SERVER, 1, - at_count) - if nm_count == 0: - raise ex.InvalidComponentCountException(common.NODEMANAGER, - _("1 or more"), nm_count) - - -def _check_zk_ha(cluster): - zk_count = utils.get_instances_count(cluster, common.ZOOKEEPER_SERVER) - if zk_count < 3: - raise ex.InvalidComponentCountException( - common.ZOOKEEPER_SERVER, - _("3 or more. Odd number"), - zk_count, _("At least 3 ZooKeepers are required for HA")) - if zk_count % 2 != 1: - raise ex.InvalidComponentCountException( - common.ZOOKEEPER_SERVER, - _("Odd number"), - zk_count, _("Odd number of ZooKeepers are required for HA")) - - -def _check_jn_ha(cluster): - jn_count = utils.get_instances_count(cluster, common.JOURNAL_NODE) - if jn_count < 3: - raise ex.InvalidComponentCountException( - common.JOURNAL_NODE, - _("3 or more. 
Odd number"), - jn_count, _("At least 3 JournalNodes are required for HA")) - if jn_count % 2 != 1: - raise ex.InvalidComponentCountException( - common.JOURNAL_NODE, - _("Odd number"), - jn_count, _("Odd number of JournalNodes are required for HA")) - - -def _check_oozie(cluster): - count = utils.get_instances_count(cluster, common.OOZIE_SERVER) - if count > 1: - raise ex.InvalidComponentCountException(common.OOZIE_SERVER, - _("0 or 1"), count) - - -def _check_hive(cluster): - hs_count = utils.get_instances_count(cluster, common.HIVE_SERVER) - hm_count = utils.get_instances_count(cluster, common.HIVE_METASTORE) - if hs_count > 1: - raise ex.InvalidComponentCountException(common.HIVE_SERVER, - _("0 or 1"), hs_count) - if hm_count > 1: - raise ex.InvalidComponentCountException(common.HIVE_METASTORE, - _("0 or 1"), hm_count) - if hs_count == 0 and hm_count == 1: - raise ex.RequiredServiceMissingException( - common.HIVE_SERVER, required_by=common.HIVE_METASTORE) - if hs_count == 1 and hm_count == 0: - raise ex.RequiredServiceMissingException( - common.HIVE_METASTORE, required_by=common.HIVE_SERVER) - - -def _check_hbase(cluster): - hm_count = utils.get_instances_count(cluster, common.HBASE_MASTER) - hr_count = utils.get_instances_count(cluster, common.HBASE_REGIONSERVER) - if hm_count > 1: - raise ex.InvalidComponentCountException(common.HBASE_MASTER, - _("0 or 1"), hm_count) - if hm_count == 1 and hr_count == 0: - raise ex.RequiredServiceMissingException( - common.HBASE_REGIONSERVER, required_by=common.HBASE_MASTER) - if hr_count > 0 and hm_count == 0: - raise ex.RequiredServiceMissingException( - common.HBASE_MASTER, required_by=common.HBASE_REGIONSERVER) - - -def _check_spark(cluster): - count = utils.get_instances_count(cluster, common.SPARK_JOBHISTORYSERVER) - if count > 1: - raise ex.InvalidComponentCountException(common.SPARK_JOBHISTORYSERVER, - _("0 or 1"), count) - - -def _check_ranger(cluster): - ra_count = utils.get_instances_count(cluster, common.RANGER_ADMIN) - ru_count = utils.get_instances_count(cluster, common.RANGER_USERSYNC) - if ra_count > 1: - raise ex.InvalidComponentCountException(common.RANGER_ADMIN, - _("0 or 1"), ra_count) - if ru_count > 1: - raise ex.InvalidComponentCountException(common.RANGER_USERSYNC, - _("0 or 1"), ru_count) - if ra_count == 1 and ru_count == 0: - raise ex.RequiredServiceMissingException( - common.RANGER_USERSYNC, required_by=common.RANGER_ADMIN) - if ra_count == 0 and ru_count == 1: - raise ex.RequiredServiceMissingException( - common.RANGER_ADMIN, required_by=common.RANGER_USERSYNC) - - -def _check_storm(cluster): - dr_count = utils.get_instances_count(cluster, common.DRPC_SERVER) - ni_count = utils.get_instances_count(cluster, common.NIMBUS) - su_count = utils.get_instances_count(cluster, common.STORM_UI_SERVER) - sv_count = utils.get_instances_count(cluster, common.SUPERVISOR) - if dr_count > 1: - raise ex.InvalidComponentCountException(common.DRPC_SERVER, - _("0 or 1"), dr_count) - if ni_count > 1: - raise ex.InvalidComponentCountException(common.NIMBUS, - _("0 or 1"), ni_count) - if su_count > 1: - raise ex.InvalidComponentCountException(common.STORM_UI_SERVER, - _("0 or 1"), su_count) - if dr_count == 0 and ni_count == 1: - raise ex.RequiredServiceMissingException( - common.DRPC_SERVER, required_by=common.NIMBUS) - if dr_count == 1 and ni_count == 0: - raise ex.RequiredServiceMissingException( - common.NIMBUS, required_by=common.DRPC_SERVER) - if su_count == 1 and (dr_count == 0 or ni_count == 0): - raise 
ex.RequiredServiceMissingException( - common.NIMBUS, required_by=common.STORM_UI_SERVER) - if dr_count == 1 and sv_count == 0: - raise ex.RequiredServiceMissingException( - common.SUPERVISOR, required_by=common.DRPC_SERVER) - if sv_count > 0 and dr_count == 0: - raise ex.RequiredServiceMissingException( - common.DRPC_SERVER, required_by=common.SUPERVISOR) diff --git a/sahara_plugin_ambari/tests/__init__.py b/sahara_plugin_ambari/tests/__init__.py deleted file mode 100644 index c791fac..0000000 --- a/sahara_plugin_ambari/tests/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) 2014 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from sahara_plugin_ambari.utils import patches -patches.patch_all() diff --git a/sahara_plugin_ambari/tests/unit/__init__.py b/sahara_plugin_ambari/tests/unit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/sahara_plugin_ambari/tests/unit/base.py b/sahara_plugin_ambari/tests/unit/base.py deleted file mode 100644 index 364f04a..0000000 --- a/sahara_plugin_ambari/tests/unit/base.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) 2013 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
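The test base classes removed below mainly do two things: install a fake request context for each test and let tests override oslo.config options with automatic cleanup. A minimal sketch of the override-and-restore pattern, using plain unittest and a dict standing in for oslo.config (illustrative only, not the removed implementation):

    # The key idea: addCleanup() restores the old value even if the test fails.
    import unittest

    CONF = {"connection": "mysql://prod"}  # stand-in for oslo.config

    class ConfigTestCase(unittest.TestCase):
        def override_config(self, name, value):
            old = CONF[name]
            CONF[name] = value
            # Registered cleanups run in reverse order after each test.
            self.addCleanup(CONF.__setitem__, name, old)

        def test_uses_sqlite(self):
            self.override_config("connection", "sqlite://")
            self.assertEqual(CONF["connection"], "sqlite://")

    if __name__ == "__main__":
        unittest.main()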
- -from oslotest import base - -from sahara.plugins import context -from sahara.plugins import db as db_api -from sahara.plugins import main -from sahara.plugins import utils - - -class SaharaTestCase(base.BaseTestCase): - - def setUp(self): - super(SaharaTestCase, self).setUp() - self.setup_context() - utils.rpc_setup('all-in-one') - - def setup_context(self, username="test_user", tenant_id="tenant_1", - auth_token="test_auth_token", tenant_name='test_tenant', - service_catalog=None, **kwargs): - self.addCleanup(context.set_ctx, - context.ctx() if context.has_ctx() else None) - - context.set_ctx(context.PluginsContext( - username=username, tenant_id=tenant_id, - auth_token=auth_token, service_catalog=service_catalog or {}, - tenant_name=tenant_name, **kwargs)) - - def override_config(self, name, override, group=None): - main.set_override(name, override, group) - self.addCleanup(main.clear_override, name, group) - - -class SaharaWithDbTestCase(SaharaTestCase): - def setUp(self): - super(SaharaWithDbTestCase, self).setUp() - - self.override_config('connection', "sqlite://", group='database') - db_api.setup_db() - self.addCleanup(db_api.drop_db) diff --git a/sahara_plugin_ambari/tests/unit/plugins/__init__.py b/sahara_plugin_ambari/tests/unit/plugins/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/__init__.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_client.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/test_client.py deleted file mode 100644 index e5d415c..0000000 --- a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_client.py +++ /dev/null @@ -1,372 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
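The client tests removed below pin the externally visible shape of AmbariClient: a base URL of http://<management_ip>:<port>/api/v1, HTTP basic auth defaulting to admin/admin, and an "X-Requested-By: sahara" header on every call. A rough sketch of such a wrapper using the requests library; MiniAmbariClient is a hypothetical stand-in, not the removed class:

    import requests

    class MiniAmbariClient:
        def __init__(self, ip, port="8080", username="admin", password="admin"):
            self.base_url = f"http://{ip}:{port}/api/v1"
            self.auth = (username, password)
            # Ambari rejects mutating requests without X-Requested-By, so the
            # wrapper sends it uniformly on every call.
            self.headers = {"X-Requested-By": "sahara"}

        def get(self, path):
            # verify=False mirrors the tests' expectation that TLS
            # verification is disabled for in-cluster management traffic.
            return requests.get(self.base_url + path, auth=self.auth,
                                headers=self.headers, verify=False)

    # Usage (needs a live Ambari server, so left commented here):
    # client = MiniAmbariClient("1.2.3.4")
    # client.get("/clusters")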
- - -from unittest import mock - -from oslo_serialization import jsonutils - -from sahara.plugins import exceptions as p_exc -from sahara_plugin_ambari.plugins.ambari import client as ambari_client -from sahara_plugin_ambari.tests.unit import base - - -class AmbariClientTestCase(base.SaharaTestCase): - def setUp(self): - super(AmbariClientTestCase, self).setUp() - - self.http_client = mock.Mock() - self.http_client.get = mock.Mock() - self.http_client.post = mock.Mock() - self.http_client.put = mock.Mock() - self.http_client.delete = mock.Mock() - - self.headers = {"X-Requested-By": "sahara"} - - self.remote = mock.Mock() - self.remote.get_http_client.return_value = self.http_client - - self.instance = mock.Mock() - self.instance.remote.return_value = self.remote - self.instance.management_ip = "1.2.3.4" - - self.good_pending_resp = mock.MagicMock() - self.good_pending_resp.status_code = 200 - self.good_pending_resp.text = ('{"Requests": ' - '{"id": 1, "status": "PENDING"}}') - - def test_init_client_default(self): - client = ambari_client.AmbariClient(self.instance) - self.assertEqual(self.http_client, client._http_client) - self.assertEqual("http://1.2.3.4:8080/api/v1", client._base_url) - self.assertEqual("admin", client._auth.username) - self.assertEqual("admin", client._auth.password) - self.remote.get_http_client.assert_called_with("8080") - - def test_init_client_manual(self): - client = ambari_client.AmbariClient(self.instance, port="1234", - username="user", password="pass") - self.assertEqual("http://1.2.3.4:1234/api/v1", client._base_url) - self.assertEqual("user", client._auth.username) - self.assertEqual("pass", client._auth.password) - self.remote.get_http_client.assert_called_with("1234") - - def test_close_http_session(self): - with ambari_client.AmbariClient(self.instance): - pass - self.remote.close_http_session.assert_called_with("8080") - - def test_get_method(self): - client = ambari_client.AmbariClient(self.instance) - client.get("http://spam") - self.http_client.get.assert_called_with( - "http://spam", verify=False, auth=client._auth, - headers=self.headers) - - def test_post_method(self): - client = ambari_client.AmbariClient(self.instance) - client.post("http://spam", data="data") - self.http_client.post.assert_called_with( - "http://spam", data="data", verify=False, auth=client._auth, - headers=self.headers) - - def test_put_method(self): - client = ambari_client.AmbariClient(self.instance) - client.put("http://spam", data="data") - self.http_client.put.assert_called_with( - "http://spam", data="data", verify=False, auth=client._auth, - headers=self.headers) - - def test_delete_method(self): - client = ambari_client.AmbariClient(self.instance) - client.delete("http://spam") - self.http_client.delete.assert_called_with( - "http://spam", verify=False, auth=client._auth, - headers=self.headers) - - def test_import_credential(self): - resp = mock.Mock() - resp.text = "" - resp.status_code = 200 - self.http_client.post.return_value = resp - client = ambari_client.AmbariClient(self.instance) - - client.import_credential("test", alias="credential", - data={"some": "data"}) - self.http_client.post.assert_called_once_with( - "http://1.2.3.4:8080/api/v1/clusters/test/credentials/credential", - verify=False, data=jsonutils.dumps({"some": "data"}), - auth=client._auth, headers=self.headers) - - def test_get_credential(self): - resp = mock.Mock() - resp.text = "" - resp.status_code = 200 - self.http_client.get.return_value = resp - client = 
ambari_client.AmbariClient(self.instance) - - client.get_credential("test", alias="credential") - self.http_client.get.assert_called_once_with( - "http://1.2.3.4:8080/api/v1/clusters/test/credentials/credential", - verify=False, auth=client._auth, headers=self.headers) - - resp.status_code = 404 - self.assertRaises(ambari_client.AmbariNotFound, - ambari_client.AmbariClient.check_response, - resp, True) - - @mock.patch("sahara_plugin_ambari.plugins.ambari.client." - "AmbariClient.check_response") - def test_get_alerts_data(self, mock_check_response): - cluster = mock.Mock() - cluster.name = "test_cluster" - - client = ambari_client.AmbariClient(self.instance) - - # check_response returning empty json - mock_check_response.return_value = {} - - res = client.get_alerts_data(cluster) - self.assertEqual(res, []) - - self.http_client.get.assert_called_once_with( - "http://1.2.3.4:8080/api/v1/clusters/test_cluster/alerts?fields=*", - verify=False, auth=client._auth, - headers=self.headers) - - mock_check_response.assert_called_once() - - # check_response returning json with items as key - mock_check_response.return_value = {'items': ['item1', 'item2']} - - res = client.get_alerts_data(cluster) - self.assertEqual(res, ['item1', 'item2']) - - self.http_client.get.assert_called_with( - "http://1.2.3.4:8080/api/v1/clusters/test_cluster/alerts?fields=*", - verify=False, auth=client._auth, - headers=self.headers) - - self.assertEqual(self.http_client.get.call_count, 2) - self.assertEqual(mock_check_response.call_count, 2) - - def test_check_response(self): - resp = mock.Mock() - resp.status_code = 404 - - self.assertRaises(ambari_client.AmbariNotFound, - ambari_client.AmbariClient.check_response, - resp, True) - - resp.status_code = 200 - resp.text = '{"json": "example"}' - resp.raise_for_status = mock.Mock() - - res = ambari_client.AmbariClient.check_response(resp) - - self.assertEqual(res, {"json": "example"}) - resp.raise_for_status.assert_called_once() - - def test_req_id(self): - resp = mock.Mock() - - resp.text = None - self.assertRaises(p_exc.HadoopProvisionError, - ambari_client.AmbariClient.req_id, resp) - - resp.text = '{"text" : "example"}' - self.assertRaises(p_exc.HadoopProvisionError, - ambari_client.AmbariClient.req_id, resp) - - resp.text = '{"Requests": {"example" : "text"}}' - self.assertRaises(p_exc.HadoopProvisionError, - ambari_client.AmbariClient.req_id, resp) - - resp.text = '{"Requests" : {"id" : "test_id"}}' - res = ambari_client.AmbariClient.req_id(resp) - self.assertEqual(res, "test_id") - - def test_get_registered_hosts(self): - client = ambari_client.AmbariClient(self.instance) - resp_data = """{ - "href" : "http://1.2.3.4:8080/api/v1/hosts", - "items" : [ - { - "href" : "http://1.2.3.4:8080/api/v1/hosts/host1", - "Hosts" : { - "host_name" : "host1" - } - }, - { - "href" : "http://1.2.3.4:8080/api/v1/hosts/host2", - "Hosts" : { - "host_name" : "host2" - } - }, - { - "href" : "http://1.2.3.4:8080/api/v1/hosts/host3", - "Hosts" : { - "host_name" : "host3" - } - } - ] - }""" - resp = mock.Mock() - resp.text = resp_data - resp.status_code = 200 - self.http_client.get.return_value = resp - hosts = client.get_registered_hosts() - self.http_client.get.assert_called_with( - "http://1.2.3.4:8080/api/v1/hosts", verify=False, - auth=client._auth, headers=self.headers) - self.assertEqual(3, len(hosts)) - self.assertEqual("host1", hosts[0]["Hosts"]["host_name"]) - self.assertEqual("host2", hosts[1]["Hosts"]["host_name"]) - self.assertEqual("host3", hosts[2]["Hosts"]["host_name"]) - - 
def test_update_user_password(self): - client = ambari_client.AmbariClient(self.instance) - resp = mock.Mock() - resp.text = "" - resp.status_code = 200 - self.http_client.put.return_value = resp - client.update_user_password("bart", "old_pw", "new_pw") - exp_req = jsonutils.dumps({ - "Users": { - "old_password": "old_pw", - "password": "new_pw" - } - }) - self.http_client.put.assert_called_with( - "http://1.2.3.4:8080/api/v1/users/bart", data=exp_req, - verify=False, auth=client._auth, headers=self.headers) - - def test_create_blueprint(self): - client = ambari_client.AmbariClient(self.instance) - resp = mock.Mock() - resp.text = "" - resp.status_code = 200 - self.http_client.post.return_value = resp - client.create_blueprint("cluster_name", {"some": "data"}) - self.http_client.post.assert_called_with( - "http://1.2.3.4:8080/api/v1/blueprints/cluster_name", - data=jsonutils.dumps({"some": "data"}), verify=False, - auth=client._auth, headers=self.headers) - - def test_create_cluster(self): - client = ambari_client.AmbariClient(self.instance) - resp = mock.Mock() - resp.text = """{ - "Requests": { - "id": 1, - "status": "InProgress" - } - }""" - resp.status_code = 200 - self.http_client.post.return_value = resp - req_info = client.create_cluster("cluster_name", {"some": "data"}) - self.assertEqual(1, req_info["id"]) - self.http_client.post.assert_called_with( - "http://1.2.3.4:8080/api/v1/clusters/cluster_name", - data=jsonutils.dumps({"some": "data"}), verify=False, - auth=client._auth, headers=self.headers) - - def test_add_host_to_cluster(self): - client = ambari_client.AmbariClient(self.instance) - resp = mock.Mock() - resp.text = "" - resp.status_code = 200 - self.http_client.post.return_value = resp - - instance = mock.MagicMock() - instance.fqdn.return_value = "i1" - instance.cluster.name = "cl" - - client.add_host_to_cluster(instance) - self.http_client.post.assert_called_with( - "http://1.2.3.4:8080/api/v1/clusters/cl/hosts/i1", - verify=False, auth=client._auth, headers=self.headers) - - def test_start_process_on_host(self): - client = ambari_client.AmbariClient(self.instance) - self.http_client.put.return_value = self.good_pending_resp - client.wait_ambari_request = mock.MagicMock() - - instance = mock.MagicMock() - instance.fqdn.return_value = "i1" - instance.cluster.name = "cl" - - client.start_service_on_host(instance, "HDFS", 'STATE') - self.http_client.put.assert_called_with( - "http://1.2.3.4:8080/api/v1/clusters/" - "cl/hosts/i1/host_components/HDFS", - data=jsonutils.dumps( - { - "HostRoles": {"state": "STATE"}, - "RequestInfo": { - "context": "Starting service HDFS, " - "moving to state STATE"} - }), - verify=False, auth=client._auth, headers=self.headers) - - def test_stop_process_on_host(self): - client = ambari_client.AmbariClient(self.instance) - check_mock = mock.MagicMock() - check_mock.status_code = 200 - check_mock.text = '{"HostRoles": {"state": "SOME_STATE"}}' - self.http_client.get.return_value = check_mock - self.http_client.put.return_value = self.good_pending_resp - client.wait_ambari_request = mock.MagicMock() - instance = mock.MagicMock() - instance.fqdn.return_value = "i1" - - client.stop_process_on_host("cluster_name", instance, "p1") - self.http_client.put.assert_called_with( - "http://1.2.3.4:8080/api/v1/clusters/" - "cluster_name/hosts/i1/host_components/p1", - data=jsonutils.dumps( - { - "HostRoles": {"state": "INSTALLED"}, - "RequestInfo": {"context": "Stopping p1"} - }), - verify=False, auth=client._auth, headers=self.headers) - - 
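The two tests that follow pin down the polling contract of wait_ambari_request: keep re-querying the request status until it reaches COMPLETED, and raise HadoopProvisionError on a failure state. A compressed, self-contained sketch of that loop under those assumptions (the names and the sleep hook are simplified stand-ins; the removed code slept between polls via the sahara context):

    import time

    class HadoopProvisionError(Exception):
        pass

    def wait_request(check_status, request_id, cluster_name, interval=0):
        while True:
            status = check_status(cluster_name, request_id)
            if status["request_status"] == "COMPLETED":
                return
            if status["request_status"] in ("ERROR", "FAILED", "ABORTED"):
                raise HadoopProvisionError(
                    "request %s finished as %s"
                    % (request_id, status["request_status"]))
            time.sleep(interval)  # the real code used context.sleep()

    # Mirrors test_wait_ambari_request: PENDING first, then COMPLETED.
    answers = iter([{"request_status": "PENDING"},
                    {"request_status": "COMPLETED"}])
    wait_request(lambda cluster, req: next(answers), "id1", "c1")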
@mock.patch("sahara_plugin_ambari.plugins.ambari.client.context") - def test_wait_ambari_request(self, mock_context): - client = ambari_client.AmbariClient(self.instance) - check_mock = mock.MagicMock() - d1 = {"request_context": "r1", "request_status": "PENDING", - "progress_percent": 42} - d2 = {"request_context": "r1", "request_status": "COMPLETED", - "progress_percent": 100} - check_mock.side_effect = [d1, d2] - client.check_request_status = check_mock - - client.wait_ambari_request("id1", "c1") - - check_mock.assert_has_calls([mock.call("c1", "id1"), - mock.call("c1", "id1")]) - - @mock.patch("sahara_plugin_ambari.plugins.ambari.client.context") - def test_wait_ambari_request_error(self, mock_context): - client = ambari_client.AmbariClient(self.instance) - check_mock = mock.MagicMock() - d1 = {"request_context": "r1", "request_status": "ERROR", - "progress_percent": 146} - check_mock.return_value = d1 - client.check_request_status = check_mock - - self.assertRaises(p_exc.HadoopProvisionError, - client.wait_ambari_request, "id1", "c1") diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_common.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/test_common.py deleted file mode 100644 index 99904ca..0000000 --- a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_common.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2017 EasyStack Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
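The tests removed below pin the mapping from Sahara's friendly process names to Ambari component identifiers. A trimmed sketch of that lookup, restricted to the entries these tests actually use (the real table covered many more services):

    # Friendly name -> Ambari component ids; unknown names contribute nothing.
    PROC_MAP = {
        "Ambari": ["METRICS_COLLECTOR"],
        "HiveServer": ["HIVE_SERVER", "MYSQL_SERVER", "WEBHCAT_SERVER"],
        "DataNode": ["DATANODE"],
        "Oozie": ["OOZIE_SERVER", "PIG"],
    }

    def get_ambari_proc_list(node_processes):
        procs = []
        for name in node_processes:
            procs.extend(PROC_MAP.get(name, []))
        return procs

    assert get_ambari_proc_list(["DataNode", "Oozie"]) == \
        ["DATANODE", "OOZIE_SERVER", "PIG"]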
- -from unittest import mock - -from sahara_plugin_ambari.plugins.ambari import common -from sahara_plugin_ambari.tests.unit import base - - -class AmbariCommonTestCase(base.SaharaTestCase): - def setUp(self): - super(AmbariCommonTestCase, self).setUp() - self.master_ng = mock.Mock() - self.master_ng.node_processes = ['Ambari', 'HiveServer'] - - self.worker_ng = mock.Mock() - self.worker_ng.node_processes = ['DataNode', 'Oozie'] - - self.cluster = mock.Mock() - self.cluster.node_groups = [self.master_ng, self.worker_ng] - - def test_get_ambari_proc_list(self): - procs = common.get_ambari_proc_list(self.master_ng) - expected = ['METRICS_COLLECTOR', 'HIVE_SERVER', - 'MYSQL_SERVER', 'WEBHCAT_SERVER'] - self.assertEqual(procs, expected) - - procs = common.get_ambari_proc_list(self.worker_ng) - expected = ['DATANODE', 'OOZIE_SERVER', 'PIG'] - self.assertEqual(procs, expected) - - @mock.patch('sahara.plugins.kerberos.is_kerberos_security_enabled') - def test_get_clients(self, kerberos): - kerberos.return_value = False - clients = common.get_clients(self.cluster) - expected = ['OOZIE_CLIENT', 'HIVE_CLIENT', 'HDFS_CLIENT', - 'TEZ_CLIENT', 'METRICS_MONITOR'] - for e in expected: - self.assertIn(e, clients) - - kerberos.return_value = True - clients = common.get_clients(self.cluster) - expected = ['OOZIE_CLIENT', 'HIVE_CLIENT', 'HDFS_CLIENT', - 'TEZ_CLIENT', 'METRICS_MONITOR', 'KERBEROS_CLIENT'] - for e in expected: - self.assertIn(e, clients) - - def test_instances_have_process(self): - instance1 = mock.Mock() - instance2 = mock.Mock() - instance1.node_group = self.master_ng - instance2.node_group = self.worker_ng - self.assertTrue(common.instances_have_process([instance1], "Ambari")) - self.assertTrue(common.instances_have_process([instance1, instance2], - "DataNode")) - self.assertFalse(common.instances_have_process([instance1, instance2], - "DRPC Server")) diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_configs.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/test_configs.py deleted file mode 100644 index 58ded60..0000000 --- a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_configs.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
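The expected fixtures in the tests removed below are mostly generated paths: each instance's storage paths are expanded into comma-separated per-service directories for hdfs-site and yarn-site. A small sketch of that expansion, assuming the same suffix convention the fixtures show:

    # ["/data1", "/data2"] + "hdfs/data" -> "/data1/hdfs/data,/data2/hdfs/data"
    def comma_paths(storage_paths, suffix):
        return ",".join("%s/%s" % (p, suffix) for p in storage_paths)

    paths = ["/data1", "/data2"]
    hdfs_site = {
        "dfs.datanode.data.dir": comma_paths(paths, "hdfs/data"),
        "dfs.namenode.name.dir": comma_paths(paths, "hdfs/namenode"),
    }
    assert hdfs_site["dfs.datanode.data.dir"] == \
        "/data1/hdfs/data,/data2/hdfs/data"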
- - -import collections -from unittest import mock - - -from sahara_plugin_ambari.plugins.ambari import configs -from sahara_plugin_ambari.tests.unit import base - - -class AmbariConfigsTestCase(base.SaharaTestCase): - def setUp(self): - super(AmbariConfigsTestCase, self).setUp() - configs.load_configs("2.3") - self.ng = mock.Mock() - self.ng.node_configs = {} - self.ng.cluster = mock.Mock() - self.ng.cluster.hadoop_version = "2.3" - self.instance = mock.Mock() - self.instance.node_group = self.ng - self.instance.storage_paths = mock.Mock() - self.instance.storage_paths.return_value = ["/data1", "/data2"] - - def assertConfigEqual(self, expected, actual): - self.assertEqual(len(expected), len(actual)) - cnt_ex = collections.Counter() - cnt_act = collections.Counter() - for i, ex in enumerate(expected): - for j, act in enumerate(actual): - if ex == act: - cnt_ex[i] += 1 - cnt_act[j] += 1 - self.assertEqual(len(expected), len(cnt_ex)) - self.assertEqual(len(actual), len(cnt_act)) - - def test_get_service_to_configs_map(self): - self.assertIsNone(configs.SERVICES_TO_CONFIGS_MAP) - configs_map = configs.get_service_to_configs_map() - configs_expected = { - 'ZooKeeper': ['zoo.cfg', 'zookeeper-env'], - 'Knox': ['knox-env', 'ranger-knox-plugin-properties', - 'gateway-site'], - 'YARN': ['yarn-site', 'mapred-env', 'yarn-env', - 'capacity-scheduler', 'mapred-site'], - 'general': ['cluster-env'], 'Flume': ['flume-env'], - 'Ambari': ['ams-hbase-policy', 'ams-site', 'ams-env', - 'ams-hbase-site', 'ams-hbase-env', - 'ams-hbase-security-site'], - 'HDFS': ['core-site', 'ranger-hdfs-plugin-properties', - 'hadoop-policy', 'hdfs-site', 'hadoop-env'], - 'Ranger': ['ranger-env', 'admin-properties', - 'usersync-properties', 'ranger-site'], - 'Spark': ['spark-defaults', 'spark-env'], - 'Hive': ['hive-env', 'hive-site', 'hiveserver2-site', - 'ranger-hive-plugin-properties'], - 'Storm': ['ranger-storm-plugin-properties', 'storm-site', - 'storm-env'], - 'Oozie': ['oozie-env', 'oozie-site', 'tez-site'], - 'HBase': ['ranger-hbase-plugin-properties', 'hbase-env', - 'hbase-site', 'hbase-policy'], - 'Sqoop': ['sqoop-env'], 'Kafka': ['kafka-broker', 'kafka-env'], - 'Falcon': ['falcon-startup.properties', - 'falcon-runtime.properties', 'falcon-env'] - } - for (key, item) in configs_map.items(): - item.sort() - for (key, item) in configs_expected.items(): - item.sort() - self.assertEqual(configs_map, configs_expected) - self.assertIsNotNone(configs.SERVICES_TO_CONFIGS_MAP) - - def test_get_instance_params_default(self): - instance_configs = configs.get_instance_params(self.instance) - expected = [ - { - "hdfs-site": { - "dfs.datanode.data.dir": - "/data1/hdfs/data,/data2/hdfs/data", - "dfs.journalnode.edits.dir": - "/data1/hdfs/journalnode,/data2/hdfs/journalnode", - "dfs.namenode.checkpoint.dir": - "/data1/hdfs/namesecondary,/data2/hdfs/namesecondary", - "dfs.namenode.name.dir": - "/data1/hdfs/namenode,/data2/hdfs/namenode" - } - }, - { - "yarn-site": { - "yarn.nodemanager.local-dirs": - "/data1/yarn/local,/data2/yarn/local", - "yarn.nodemanager.log-dirs": - "/data1/yarn/log,/data2/yarn/log", - "yarn.timeline-service.leveldb-timeline-store.path": - "/data1/yarn/timeline,/data2/yarn/timeline" - } - }, - { - "oozie-site": { - "oozie.service.AuthorizationService.security.enabled": - "false" - } - } - ] - self.assertConfigEqual(expected, instance_configs) - - def test_get_instance_params(self): - self.ng.node_configs = { - "YARN": { - "mapreduce.map.java.opts": "-Dk=v", - "yarn.scheduler.minimum-allocation-mb": "256" - } - } 
- instance_configs = configs.get_instance_params(self.instance) - expected = [ - { - "hdfs-site": { - "dfs.datanode.data.dir": - "/data1/hdfs/data,/data2/hdfs/data", - "dfs.journalnode.edits.dir": - "/data1/hdfs/journalnode,/data2/hdfs/journalnode", - "dfs.namenode.checkpoint.dir": - "/data1/hdfs/namesecondary,/data2/hdfs/namesecondary", - "dfs.namenode.name.dir": - "/data1/hdfs/namenode,/data2/hdfs/namenode" - } - }, - { - "mapred-site": { - "mapreduce.map.java.opts": "-Dk=v" - } - }, - { - "yarn-site": { - "yarn.nodemanager.local-dirs": - "/data1/yarn/local,/data2/yarn/local", - "yarn.nodemanager.log-dirs": - "/data1/yarn/log,/data2/yarn/log", - "yarn.scheduler.minimum-allocation-mb": "256", - "yarn.timeline-service.leveldb-timeline-store.path": - "/data1/yarn/timeline,/data2/yarn/timeline" - } - }, - { - "oozie-site": { - "oozie.service.AuthorizationService.security.enabled": - "false" - } - } - ] - self.assertConfigEqual(expected, instance_configs) diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_deploy.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/test_deploy.py deleted file mode 100644 index 9455538..0000000 --- a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_deploy.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) 2016 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from functools import wraps -from unittest import mock - -from oslo_serialization import jsonutils - - -def mock_event_wrapper(*args, **kwargs): - def decorator(f): - @wraps(f) - def decorated_function(*args, **kwargs): - return f(*args, **kwargs) - return decorated_function - return decorator - - -mock.patch('sahara.plugins.utils.event_wrapper', mock_event_wrapper).start() - -from sahara.plugins import utils as pu -from sahara_plugin_ambari.plugins.ambari import deploy -from sahara_plugin_ambari.tests.unit import base - - -class TestDeploy(base.SaharaTestCase): - @mock.patch('sahara.plugins.utils.add_provisioning_step') - @mock.patch('sahara.plugins.utils.check_cluster_exists') - @mock.patch('sahara.plugins.utils.get_instance') - @mock.patch('sahara_plugin_ambari.plugins.ambari.client.AmbariClient.get') - @mock.patch('sahara_plugin_ambari.plugins.ambari.client.' 
- 'AmbariClient.delete') - def test_cleanup_config_groups(self, client_delete, client_get, - get_instance, check_cluster_exists, - add_provisioning_step): - def response(data): - fake = mock.Mock() - fake.text = jsonutils.dumps(data) - fake.raise_for_status.return_value = True - return fake - - fake_config_groups = { - 'items': [ - {'ConfigGroup': {'id': "1"}}, - {'ConfigGroup': {'id': "2"}} - ] - } - - config_group1 = { - 'ConfigGroup': {'id': '1', 'group_name': "test:fakename"}} - config_group2 = { - 'ConfigGroup': {'id': '2', 'group_name': "test:toremove"}} - - pu.event_wrapper = mock_event_wrapper - fake_ambari = mock.Mock() - fake_ambari.management_ip = "127.0.0.1" - get_instance.return_value = fake_ambari - - inst1 = mock.Mock() - inst1.instance_name = "toremove" - - cl = mock.Mock(extra={'ambari_password': "SUPER_STRONG"}) - cl.name = "test" - - client_get.side_effect = [ - response(fake_config_groups), response(config_group1), - response(config_group2) - ] - client_delete.side_effect = [response({})] - - check_cluster_exists.return_value = True - - deploy.cleanup_config_groups(cl, [inst1]) - get_calls = [ - mock.call( - 'http://127.0.0.1:8080/api/v1/clusters/test/config_groups'), - mock.call( - 'http://127.0.0.1:8080/api/v1/clusters/test/config_groups/1'), - mock.call( - 'http://127.0.0.1:8080/api/v1/clusters/test/config_groups/2') - ] - - self.assertEqual(get_calls, client_get.call_args_list) - - delete_calls = [ - mock.call( - 'http://127.0.0.1:8080/api/v1/clusters/test/config_groups/2') - ] - - self.assertEqual(delete_calls, client_delete.call_args_list) diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_ha_helper.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/test_ha_helper.py deleted file mode 100644 index 20d70cf..0000000 --- a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_ha_helper.py +++ /dev/null @@ -1,263 +0,0 @@ -# Copyright (c) 2016 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -from unittest import mock - - -from sahara_plugin_ambari.plugins.ambari import ha_helper as ha -from sahara_plugin_ambari.tests.unit import base - - -class HAHelperTestCase(base.SaharaTestCase): - - def setUp(self): - super(HAHelperTestCase, self).setUp() - self.cluster = mock.MagicMock() - self.cluster.name = "clusterName" - for i in range(1, 4): - instance = mock.MagicMock() - instance.fqdn.return_value = "in{}".format(i) - instance.instance_name = "in{}name".format(i) - setattr(self, "in{}".format(i), instance) - self.bp = { - "host_groups": [ - { - "components": [ - {"name": "NAMENODE"} - ] - } - ], - "configurations": [ - {"hdfs-site": {}}, - {"yarn-site": {}}, - {"core-site": {}}, - {"hadoop-env": {}}, - {"zoo.cfg": {}} - ] - } - - @mock.patch("sahara_plugin_ambari.plugins.ambari.ha_helper." - "_set_high_zk_limits") - @mock.patch("sahara_plugin_ambari.plugins.ambari.ha_helper." 
- "_set_default_fs") - def test_update_bp_ha_common(self, mock__set_default_fs, - mock__set_high_zk_limits): - ha.update_bp_ha_common(self.cluster, copy.deepcopy(self.bp)) - self.assertTrue(mock__set_default_fs.called) - self.assertTrue(mock__set_high_zk_limits.called) - - @mock.patch("sahara_plugin_ambari.plugins.ambari.ha_helper." - "_configure_hdfs_site") - @mock.patch("sahara_plugin_ambari.plugins.ambari.ha_helper._set_zk_quorum") - @mock.patch("sahara_plugin_ambari.plugins.ambari.ha_helper." - "_add_zkfc_to_namenodes") - def test_update_bp_for_namenode_ha(self, mock__add_zkfc_to_namenodes, - mock__set_zk_quorum, - mock__configure_hdfs_site): - ha.update_bp_for_namenode_ha(self.cluster, copy.deepcopy(self.bp)) - self.assertTrue(mock__add_zkfc_to_namenodes.called) - self.assertTrue(mock__set_zk_quorum.called) - self.assertTrue(mock__configure_hdfs_site.called) - - @mock.patch("sahara_plugin_ambari.plugins.ambari.ha_helper." - "_set_default_fs") - @mock.patch("sahara_plugin_ambari.plugins.ambari.ha_helper._set_zk_quorum") - @mock.patch("sahara_plugin_ambari.plugins.ambari.ha_helper." - "_configure_yarn_site") - def test_update_bp_for_resourcemanager_ha(self, mock__configure_yarn_site, - mock__set_zk_quorum, - mock__set_default_fs): - ha.update_bp_for_resourcemanager_ha(self.cluster, - copy.deepcopy(self.bp)) - self.assertTrue(mock__configure_yarn_site.called) - self.assertTrue(mock__set_zk_quorum.called) - self.assertTrue(mock__set_default_fs.called) - - @mock.patch("sahara_plugin_ambari.plugins.ambari.ha_helper." - "_confgure_hbase_site") - def test_update_bp_for_hbase_ha(self, mock__confgure_hbase_site): - ha.update_bp_for_hbase_ha(self.cluster, copy.deepcopy(self.bp)) - self.assertTrue(mock__confgure_hbase_site.called) - - def test__add_zkfc_to_namenodes(self): - bp = ha._add_zkfc_to_namenodes(copy.deepcopy(self.bp)) - self.assertIn({"name": "ZKFC"}, bp["host_groups"][0]["components"]) - - @mock.patch("sahara.plugins.utils.get_instances") - def test__set_default_fs(self, mock_get_instances): - bp = ha._set_default_fs(self.cluster, copy.deepcopy(self.bp), - ha.p_common.NAMENODE_HA) - self.assertEqual("hdfs://hdfs-ha", - ha._find_core_site(bp)["fs.defaultFS"]) - - mock_get_instances.return_value = [self.in1] - bp = ha._set_default_fs(self.cluster, copy.deepcopy(self.bp), - ha.p_common.RESOURCEMANAGER_HA) - self.assertEqual("hdfs://{}:8020".format(self.in1.fqdn()), - ha._find_core_site(bp)["fs.defaultFS"]) - - @mock.patch("sahara.plugins.utils.get_instances") - def test__set_zk_quorum(self, mock_get_instances): - mock_get_instances.return_value = [self.in1, self.in2, self.in3] - bp = ha._set_zk_quorum(self.cluster, copy.deepcopy(self.bp), - ha.CORE_SITE) - self.assertEqual( - "{}:2181,{}:2181,{}:2181".format( - self.in1.fqdn(), self.in2.fqdn(), self.in3.fqdn()), - ha._find_core_site(bp)['ha.zookeeper.quorum']) - - bp = ha._set_zk_quorum(self.cluster, copy.deepcopy(self.bp), - ha.YARN_SITE) - self.assertEqual( - "{}:2181,{}:2181,{}:2181".format( - self.in1.fqdn(), self.in2.fqdn(), self.in3.fqdn()), - ha._find_yarn_site(bp)['hadoop.registry.zk.quorum']) - - def test__set_high_zk_limits(self): - bp = ha._set_high_zk_limits(copy.deepcopy(self.bp)) - self.assertEqual("10000", ha._find_zoo_cfg(bp)["tickTime"]) - - @mock.patch("sahara.plugins.utils.get_instances") - def test__set_primary_and_standby_namenode(self, mock_get_instances): - mock_get_instances.return_value = [self.in1, self.in2] - bp = ha._set_primary_and_standby_namenode(self.cluster, - copy.deepcopy(self.bp)) - self.assertEqual( - 
self.in1.fqdn(), - ha._find_hadoop_env(bp)['dfs_ha_initial_namenode_active']) - self.assertEqual( - self.in2.fqdn(), - ha._find_hadoop_env(bp)['dfs_ha_initial_namenode_standby']) - - @mock.patch("sahara.plugins.utils.get_instances") - def test__configure_hdfs_site(self, mock_get_instances): - mock_get_instances.return_value = [self.in1, self.in2] - bp = ha._configure_hdfs_site(self.cluster, copy.deepcopy(self.bp)) - - j_nodes = ";".join( - ["{}:8485".format(i.fqdn()) for i in mock_get_instances()]) - nn_id_concat = ",".join( - [i.instance_name for i in mock_get_instances()]) - result = { - "hdfs-site": { - "dfs.client.failover.proxy.provider.hdfs-ha": - "org.apache.hadoop.hdfs.server.namenode.ha." - "ConfiguredFailoverProxyProvider", - "dfs.ha.automatic-failover.enabled": "true", - "dfs.ha.fencing.methods": "shell(/bin/true)", - "dfs.nameservices": "hdfs-ha", - "dfs.namenode.shared.edits.dir": - "qjournal://{}/hdfs-ha".format(j_nodes), - "dfs.ha.namenodes.hdfs-ha": nn_id_concat, - "dfs.namenode.http-address": "{}:50070".format( - self.in1.fqdn()), - "dfs.namenode.https-address": "{}:50470".format( - self.in1.fqdn()), - } - } - prop = result["hdfs-site"] - for i in mock_get_instances(): - prop["dfs.namenode.http-address.hdfs-ha.%s" % i.instance_name] = ( - "%s:50070" % i.fqdn()) - prop["dfs.namenode.https-address.hdfs-ha.%s" % i.instance_name] = ( - "%s:50470" % i.fqdn()) - prop["dfs.namenode.rpc-address.hdfs-ha.%s" % i.instance_name] = ( - "%s:8020" % i.fqdn()) - self.assertEqual(result["hdfs-site"], ha._find_hdfs_site(bp)) - - @mock.patch("sahara.plugins.utils.get_instance") - @mock.patch("sahara.plugins.utils.get_instances") - def test__configure_yarn_site(self, mock_get_instances, mock_get_instance): - mock_get_instances.return_value = [self.in1, self.in2, self.in3] - mock_get_instance.return_value = self.in1 - bp = ha._configure_yarn_site(self.cluster, copy.deepcopy(self.bp)) - - zks = ",".join(["%s:2181" % i.fqdn() for i in mock_get_instances()]) - rm_ids = ",".join([i.instance_name for i in mock_get_instances()]) - result = { - "yarn-site": { - "hadoop.registry.rm.enabled": "false", - "yarn.resourcemanager.zk-address": zks, - "yarn.log.server.url": "{}:19888/jobhistory/logs/".format( - mock_get_instance().fqdn()), - "yarn.resourcemanager.address": "{}:8050".format( - mock_get_instances()[0].fqdn()), - "yarn.resourcemanager.admin.address": "{}:8141".format( - mock_get_instances()[0].fqdn()), - "yarn.resourcemanager.cluster-id": self.cluster.name, - "yarn.resourcemanager.ha.automatic-failover.zk-base-path": - "/yarn-leader-election", - "yarn.resourcemanager.ha.enabled": "true", - "yarn.resourcemanager.ha.rm-ids": rm_ids, - "yarn.resourcemanager.hostname": - mock_get_instances()[0].fqdn(), - "yarn.resourcemanager.recovery.enabled": "true", - "yarn.resourcemanager.resource-tracker.address": - "{}:8025".format(mock_get_instances()[0].fqdn()), - "yarn.resourcemanager.scheduler.address": "{}:8030".format( - mock_get_instances()[0].fqdn()), - "yarn.resourcemanager.store.class": - "org.apache.hadoop.yarn.server.resourcemanager.recovery." 
- "ZKRMStateStore", - "yarn.resourcemanager.webapp.address": "{}:8088".format( - mock_get_instances()[0].fqdn()), - "yarn.resourcemanager.webapp.https.address": "{}:8090".format( - mock_get_instances()[0].fqdn()), - "yarn.timeline-service.address": "{}:10200".format( - mock_get_instance().fqdn()), - "yarn.timeline-service.webapp.address": "{}:8188".format( - mock_get_instance().fqdn()), - "yarn.timeline-service.webapp.https.address": "{}:8190".format( - mock_get_instance().fqdn()) - } - } - props = result["yarn-site"] - for i in mock_get_instances(): - props["yarn.resourcemanager.hostname.{}".format( - i.instance_name)] = i.fqdn() - props["yarn.resourcemanager.webapp.address.{}".format( - i.instance_name)] = "{}:8088".format(i.fqdn()) - props["yarn.resourcemanager.webapp.https.address.{}".format( - i.instance_name)] = "{}:8090".format(i.fqdn()) - self.assertEqual(result["yarn-site"], ha._find_yarn_site(bp)) - - @mock.patch("sahara.plugins.utils.get_instances") - def test__confgure_hbase_site(self, mock_get_instances): - mock_get_instances.return_value = [self.in1, self.in2, self.in3] - bp = ha._confgure_hbase_site(self.cluster, copy.deepcopy(self.bp)) - - result = { - "hbase-site": { - "hbase.regionserver.global.memstore.lowerLimit": "0.38", - "hbase.regionserver.global.memstore.upperLimit": "0.4", - "hbase.regionserver.handler.count": "60", - "hbase.regionserver.info.port": "16030", - "hbase.regionserver.storefile.refresh.period": "20", - "hbase.rootdir": "hdfs://hdfs-ha/apps/hbase/data", - "hbase.security.authentication": "simple", - "hbase.security.authorization": "false", - "hbase.superuser": "hbase", - "hbase.tmp.dir": "/hadoop/hbase", - "hbase.zookeeper.property.clientPort": "2181", - "hbase.zookeeper.useMulti": "true", - "hfile.block.cache.size": "0.40", - "zookeeper.session.timeout": "30000", - "zookeeper.znode.parent": "/hbase-unsecure", - "hbase.zookeeper.quorum": - ",".join([i.fqdn() for i in mock_get_instances()]) - } - } - self.assertEqual(result["hbase-site"], ha._find_hbase_site(bp)) diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_health.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/test_health.py deleted file mode 100644 index 365a9a0..0000000 --- a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_health.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) 2016 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
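The health tests removed below encode a simple severity-aggregation rule: a CRITICAL alert on an important service turns cluster health RED, while any other non-OK alert only degrades it to YELLOW. A compact sketch of that rule; the IMPORTANT set here is an illustrative subset, not the plugin's full list:

    IMPORTANT = {"ZOOKEEPER", "HDFS", "YARN"}  # assumed subset for illustration

    def cluster_color(alerts):
        color = "GREEN"
        for alert in alerts:
            state = alert["Alert"]["state"]
            service = alert["Alert"]["service_name"]
            if state == "OK":
                continue
            if state == "CRITICAL" and service in IMPORTANT:
                return "RED"  # critical on a core service: fail immediately
            color = "YELLOW"  # anything else non-OK is only a warning
        return color

    # Matches the tests: critical Kafka is merely YELLOW, critical ZooKeeper RED.
    assert cluster_color([{"Alert": {"state": "CRITICAL",
                                     "service_name": "Kafka"}}]) == "YELLOW"
    assert cluster_color([{"Alert": {"state": "CRITICAL",
                                     "service_name": "ZOOKEEPER"}}]) == "RED"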
- -from unittest import mock - -import testtools - -from sahara.plugins import health_check_base -from sahara_plugin_ambari.plugins.ambari import health -from sahara_plugin_ambari.tests.unit import base - - -class TestAmbariHealthCheck(base.SaharaTestCase): - def _standard_negative_test(self, mockclient, return_value, col, count): - mockclient.return_value = return_value - pr = health.AlertsProvider(mock.Mock()) - service = return_value[0].get('Alert').get('service_name') - expected_exc = health_check_base.YellowHealthError - if col == 'RED': - expected_exc = health_check_base.RedHealthError - with testtools.ExpectedException(expected_exc): - try: - health.AmbariServiceHealthCheck( - mock.Mock(extra={}), pr, service).check_health() - except Exception as e: - self.assertEqual( - "Cluster health is %s. Reason: " - "Ambari Monitor has responded that cluster " - "has %s alert(s)" % (col, count), str(e)) - raise - - @mock.patch('sahara_plugin_ambari.plugins.ambari.client.AmbariClient.' - '__init__') - @mock.patch('sahara_plugin_ambari.plugins.ambari.client.AmbariClient.' - 'close') - @mock.patch('sahara_plugin_ambari.plugins.ambari.client.AmbariClient.' - 'get_alerts_data') - @mock.patch('sahara.plugins.utils.get_instance') - def test_check_health(self, get_instance, alerts_response, close, init): - init.return_value = None - alerts_response.return_value = [ - { - 'Alert': { - 'state': 'OK', - 'service_name': 'ZOOKEEPER' - } - } - ] - result = health.AmbariServiceHealthCheck( - mock.Mock(extra={}), health.AlertsProvider(mock.Mock()), - 'ZOOKEEPER').check_health() - - self.assertEqual('No alerts found', result) - - self._standard_negative_test(alerts_response, [ - { - 'Alert': { - 'state': 'WARNING', - 'service_name': 'ZOOKEEPER' - } - } - ], 'YELLOW', "1 warning") - - self._standard_negative_test(alerts_response, [ - { - 'Alert': { - 'state': 'CRITICAL', - 'service_name': 'ZOOKEEPER' - } - } - ], 'RED', "1 critical") - - # not important service: only contribute as yellow - self._standard_negative_test(alerts_response, [ - { - 'Alert': { - 'state': 'CRITICAL', - 'service_name': 'Kafka' - } - } - ], 'YELLOW', "1 warning") - - self._standard_negative_test(alerts_response, [ - { - 'Alert': { - 'state': 'CRITICAL', - 'service_name': 'ZOOKEEPER' - }, - }, - { - 'Alert': { - 'state': 'WARNING', - 'service_name': 'ZOOKEEPER' - } - } - ], 'RED', "1 critical and 1 warning") - - alerts_response.side_effect = [ValueError( - "OOUCH!")] - with testtools.ExpectedException(health_check_base.RedHealthError): - try: - health.AmbariHealthCheck( - mock.Mock(extra={}), health.AlertsProvider(mock.Mock()) - ).check_health() - except Exception as e: - self.assertEqual( - "Cluster health is RED. Reason: " - "Can't get response from Ambari Monitor: OOUCH!", - str(e)) - raise diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_open_ports.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/test_open_ports.py deleted file mode 100644 index a8c5071..0000000 --- a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_open_ports.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from unittest import mock - -from sahara_plugin_ambari.plugins.ambari import common as p_common -from sahara_plugin_ambari.plugins.ambari import plugin -from sahara_plugin_ambari.tests.unit import base - - -class GetPortsTestCase(base.SaharaTestCase): - def setUp(self): - super(GetPortsTestCase, self).setUp() - self.plugin = plugin.AmbariPluginProvider() - - def test_get_ambari_port(self): - ng = mock.Mock() - ng.node_processes = [p_common.AMBARI_SERVER] - ports = self.plugin.get_open_ports(ng) - self.assertEqual([8080], ports) diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_plugin.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/test_plugin.py deleted file mode 100644 index 76ddf8f..0000000 --- a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_plugin.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from sahara_plugin_ambari.plugins.ambari import plugin -from sahara_plugin_ambari.tests.unit import base as test_base - - -class TestPlugin(test_base.SaharaTestCase): - def setUp(self): - self.plugin = plugin.AmbariPluginProvider() - super(TestPlugin, self).setUp() - - def test_job_types(self): - self.assertEqual({ - '2.3': [ - 'Hive', 'Java', 'MapReduce', 'MapReduce.Streaming', - 'Pig', 'Shell', 'Spark'], - '2.4': [ - 'Hive', 'Java', 'MapReduce', 'MapReduce.Streaming', - 'Pig', 'Shell', 'Spark'], - '2.5': [ - 'Hive', 'Java', 'MapReduce', 'MapReduce.Streaming', - 'Pig', 'Shell', 'Spark'], - '2.6': [ - 'Hive', 'Java', 'MapReduce', 'MapReduce.Streaming', - 'Pig', 'Shell', 'Spark'], - }, self.plugin.get_edp_job_types()) - - self.assertEqual({ - '2.3': [ - 'Hive', 'Java', 'MapReduce', 'MapReduce.Streaming', - 'Pig', 'Shell', 'Spark'], - }, self.plugin.get_edp_job_types(versions=['2.3'])) - - self.assertEqual({ - '2.4': [ - 'Hive', 'Java', 'MapReduce', 'MapReduce.Streaming', - 'Pig', 'Shell', 'Spark'], - }, self.plugin.get_edp_job_types(versions=['2.4'])) diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_requests_helper.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/test_requests_helper.py deleted file mode 100644 index 803a606..0000000 --- a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_requests_helper.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2015 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_requests_helper.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/test_requests_helper.py
deleted file mode 100644
index 803a606..0000000
--- a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_requests_helper.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) 2015 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from unittest import mock
-
-from sahara_plugin_ambari.plugins.ambari import requests_helper
-from sahara_plugin_ambari.tests.unit import base
-
-
-class RequestsHelperTestCase(base.SaharaTestCase):
-
-    def setUp(self):
-        super(RequestsHelperTestCase, self).setUp()
-        self.i1 = mock.MagicMock()
-        self.i1.fqdn.return_value = "i1"
-
-        self.i2 = mock.MagicMock()
-        self.i2.fqdn.return_value = "i2"
-
-    def test_build_datanode_decommission_request(self):
-        c_name = "c1"
-        instances = [self.i1, self.i2]
-
-        res = requests_helper.build_datanode_decommission_request(c_name,
-                                                                  instances)
-        self.assertEqual("i1,i2",
-                         res["RequestInfo"]["parameters"]["excluded_hosts"])
-        self.assertEqual("c1",
-                         res["RequestInfo"]["operation_level"]["cluster_name"])
-
-    def test_build_nodemanager_decommission_request(self):
-        c_name = "c1"
-        instances = [self.i1, self.i2]
-
-        res = requests_helper.build_nodemanager_decommission_request(
-            c_name, instances)
-
-        self.assertEqual("i1,i2",
-                         res["RequestInfo"]["parameters"]["excluded_hosts"])
-        self.assertEqual("c1",
-                         res["RequestInfo"]["operation_level"]["cluster_name"])
-
-    def test_build_namenode_restart_request(self):
-        res = requests_helper.build_namenode_restart_request("c1", self.i1)
-
-        self.assertEqual("i1", res["Requests/resource_filters"][0]["hosts"])
-        self.assertEqual("c1",
-                         res["RequestInfo"]["operation_level"]["cluster_name"])
-
-    def test_build_resourcemanager_restart_request(self):
-        res = requests_helper.build_resourcemanager_restart_request("c1",
-                                                                    self.i1)
-
-        self.assertEqual("i1", res["Requests/resource_filters"][0]["hosts"])
-        self.assertEqual("c1",
-                         res["RequestInfo"]["operation_level"]["cluster_name"])
-
-    def test_build_stop_service_request(self):
-        res = requests_helper.build_stop_service_request("HDFS")
-        expected = {
-            "RequestInfo": {
-                "context": "Restart HDFS service (stopping)",
-            },
-            "Body": {
-                "ServiceInfo": {
-                    "state": "INSTALLED"
-                }
-            }
-        }
-        self.assertEqual(res, expected)
-
-    def test_build_start_service_request(self):
-        res = requests_helper.build_start_service_request("HDFS")
-        expected = {
-            "RequestInfo": {
-                "context": "Restart HDFS service (starting)",
-            },
-            "Body": {
-                "ServiceInfo": {
-                    "state": "STARTED"
-                }
-            }
-        }
-        self.assertEqual(res, expected)
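The assertions above pin down only part of the decommission payloads that requests_helper built. The sketch below reconstructs just the asserted shape; only RequestInfo/parameters/excluded_hosts and RequestInfo/operation_level/cluster_name are grounded in the tests, so everything else is deliberately omitted rather than guessed:

    # Reconstructed from the assertions in
    # test_build_datanode_decommission_request above; fields the tests do
    # not assert are left out.
    decommission_request = {
        "RequestInfo": {
            "parameters": {"excluded_hosts": "i1,i2"},
            "operation_level": {"cluster_name": "c1"},
        },
    }
    assert (decommission_request["RequestInfo"]["parameters"]
            ["excluded_hosts"] == "i1,i2")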
diff --git a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_validation.py b/sahara_plugin_ambari/tests/unit/plugins/ambari/test_validation.py
deleted file mode 100644
index 6730e2e..0000000
--- a/sahara_plugin_ambari/tests/unit/plugins/ambari/test_validation.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright (c) 2015 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from unittest import mock
-
-from sahara.plugins import exceptions
-from sahara_plugin_ambari.plugins.ambari import common as p_common
-from sahara_plugin_ambari.plugins.ambari import plugin
-from sahara_plugin_ambari.tests.unit import base
-
-
-def make_cluster(processes_map):
-    m = mock.Mock()
-    ngs = []
-    for count, processes in processes_map.items():
-        ng = mock.Mock()
-        ng.count = count
-        ng.node_processes = processes
-        ngs.append(ng)
-    m.node_groups = ngs
-    return m
-
-
-class AmbariValidationTestCase(base.SaharaTestCase):
-    def setUp(self):
-        super(AmbariValidationTestCase, self).setUp()
-        self.plugin = plugin.AmbariPluginProvider()
-
-    def test_cluster_with_ambari(self):
-        cluster = make_cluster({1: [p_common.AMBARI_SERVER,
-                                    p_common.ZOOKEEPER_SERVER,
-                                    p_common.NAMENODE,
-                                    p_common.DATANODE,
-                                    p_common.RESOURCEMANAGER,
-                                    p_common.NODEMANAGER,
-                                    p_common.HISTORYSERVER,
-                                    p_common.APP_TIMELINE_SERVER,
-                                    p_common.SECONDARY_NAMENODE]})
-        cluster.cluster_configs = {"general": {}}
-        with mock.patch(
-                "sahara_plugin_ambari.plugins.ambari.validation."
-                "conductor") as p:
-            p.cluster_get = mock.Mock()
-            p.cluster_get.return_value = cluster
-            self.assertIsNone(self.plugin.validate(cluster))
-
-    def test_cluster_without_ambari(self):
-        cluster = make_cluster({1: ["spam"]})
-        with mock.patch(
-                "sahara_plugin_ambari.plugins.ambari.validation."
-                "conductor") as p:
-            p.cluster_get = mock.Mock()
-            p.cluster_get.return_value = cluster
-            self.assertRaises(exceptions.InvalidComponentCountException,
-                              self.plugin.validate, cluster)
- """ - return eventlet.import_patched(module, **EVENTLET_MONKEY_PATCH_MODULES) - - -def patch_minidom_writexml(): - """Patch for xml.dom.minidom toprettyxml bug with whitespaces around text - - We apply the patch to avoid excess whitespaces in generated xml - configuration files that brakes Hadoop. - - (This patch will be applied for all Python versions < 2.7.3) - - Issue: http://bugs.python.org/issue4147 - Patch: http://hg.python.org/cpython/rev/cb6614e3438b/ - Description: http://ronrothman.com/public/leftbraned/xml-dom-minidom-\ - toprettyxml-and-silly-whitespace/#best-solution - """ - - import sys - if sys.version_info >= (2, 7, 3): - return - - import xml.dom.minidom as md - - def element_writexml(self, writer, indent="", addindent="", newl=""): - # indent = current indentation - # addindent = indentation to add to higher levels - # newl = newline string - writer.write(indent + "<" + self.tagName) - - attrs = self._get_attributes() - a_names = list(attrs.keys()) - a_names.sort() - - for a_name in a_names: - writer.write(" %s=\"" % a_name) - md._write_data(writer, attrs[a_name].value) - writer.write("\"") - if self.childNodes: - writer.write(">") - if (len(self.childNodes) == 1 - and self.childNodes[0].nodeType == md.Node.TEXT_NODE): - self.childNodes[0].writexml(writer, '', '', '') - else: - writer.write(newl) - for node in self.childNodes: - node.writexml(writer, indent + addindent, addindent, newl) - writer.write(indent) - writer.write("%s" % (self.tagName, newl)) - else: - writer.write("/>%s" % (newl)) - - md.Element.writexml = element_writexml - - def text_writexml(self, writer, indent="", addindent="", newl=""): - md._write_data(writer, "%s%s%s" % (indent, self.data, newl)) - - md.Text.writexml = text_writexml diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 4ba8f5c..0000000 --- a/setup.cfg +++ /dev/null @@ -1,43 +0,0 @@ -[metadata] -name = sahara-plugin-ambari -summary = Ambari Plugin for Sahara Project -description_file = README.rst -license = Apache Software License -python_requires = >=3.8 -classifiers = - Programming Language :: Python - Programming Language :: Python :: Implementation :: CPython - Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux -author = OpenStack -author_email = openstack-discuss@lists.openstack.org -home_page = https://docs.openstack.org/sahara/latest/ - -[files] -packages = - sahara_plugin_ambari - -[entry_points] -sahara.cluster.plugins = - ambari = sahara_plugin_ambari.plugins.ambari.plugin:AmbariPluginProvider - -[compile_catalog] -directory = sahara_plugin_ambari/locale -domain = sahara_plugin_ambari - -[update_catalog] -domain = sahara_plugin_ambari -output_dir = sahara_plugin_ambari/locale -input_file = sahara_plugin_ambari/locale/sahara_plugin_ambari.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = sahara_plugin_ambari/locale/sahara_plugin_ambari.pot diff --git a/setup.py b/setup.py deleted file mode 100644 index cd35c3c..0000000 --- a/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 4ba8f5c..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,43 +0,0 @@
-[metadata]
-name = sahara-plugin-ambari
-summary = Ambari Plugin for Sahara Project
-description_file = README.rst
-license = Apache Software License
-python_requires = >=3.8
-classifiers =
-    Programming Language :: Python
-    Programming Language :: Python :: Implementation :: CPython
-    Programming Language :: Python :: 3 :: Only
-    Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.8
-    Programming Language :: Python :: 3.9
-    Environment :: OpenStack
-    Intended Audience :: Information Technology
-    Intended Audience :: System Administrators
-    License :: OSI Approved :: Apache Software License
-    Operating System :: POSIX :: Linux
-author = OpenStack
-author_email = openstack-discuss@lists.openstack.org
-home_page = https://docs.openstack.org/sahara/latest/
-
-[files]
-packages =
-    sahara_plugin_ambari
-
-[entry_points]
-sahara.cluster.plugins =
-    ambari = sahara_plugin_ambari.plugins.ambari.plugin:AmbariPluginProvider
-
-[compile_catalog]
-directory = sahara_plugin_ambari/locale
-domain = sahara_plugin_ambari
-
-[update_catalog]
-domain = sahara_plugin_ambari
-output_dir = sahara_plugin_ambari/locale
-input_file = sahara_plugin_ambari/locale/sahara_plugin_ambari.pot
-
-[extract_messages]
-keywords = _ gettext ngettext l_ lazy_gettext
-mapping_file = babel.cfg
-output_file = sahara_plugin_ambari/locale/sahara_plugin_ambari.pot
diff --git a/setup.py b/setup.py
deleted file mode 100644
index cd35c3c..0000000
--- a/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import setuptools
-
-setuptools.setup(
-    setup_requires=['pbr>=2.0.0'],
-    pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index 8cab1d3..0000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-
-hacking>=3.0.1,<3.1.0 # Apache-2.0
-
-bandit>=1.1.0 # Apache-2.0
-bashate>=0.5.1 # Apache-2.0
-coverage!=4.4,>=4.0 # Apache-2.0
-doc8>=0.6.0 # Apache-2.0
-fixtures>=3.0.0 # Apache-2.0/BSD
-oslotest>=3.2.0 # Apache-2.0
-stestr>=1.0.0 # Apache-2.0
-pylint==1.4.5 # GPLv2
-testscenarios>=0.4 # Apache-2.0/BSD
-testtools>=2.4.0 # MIT
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 98a50de..0000000
--- a/tox.ini
+++ /dev/null
@@ -1,99 +0,0 @@
-[tox]
-envlist = py38,pep8
-minversion = 3.1.1
-skipsdist = True
-# this allows tox to infer the base python from the environment name
-# and override any basepython configured in this file
-ignore_basepython_conflict = true
-
-[testenv]
-basepython = python3
-usedevelop = True
-install_command = pip install {opts} {packages}
-setenv =
-    VIRTUAL_ENV={envdir}
-    DISCOVER_DIRECTORY=sahara_plugin_ambari/tests/unit
-deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-    -r{toxinidir}/requirements.txt
-    -r{toxinidir}/test-requirements.txt
-commands = stestr run {posargs}
-passenv = http_proxy
-          https_proxy
-          no_proxy
-
-[testenv:debug-py36]
-basepython = python3.6
-commands = oslo_debug_helper -t sahara_plugin_ambari/tests/unit {posargs}
-
-[testenv:debug-py37]
-basepython = python3.7
-commands = oslo_debug_helper -t sahara_plugin_ambari/tests/unit {posargs}
-
-[testenv:pep8]
-deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-    -r{toxinidir}/requirements.txt
-    -r{toxinidir}/test-requirements.txt
-    -r{toxinidir}/doc/requirements.txt
-commands =
-    flake8 {posargs}
-    doc8 doc/source
-
-[testenv:venv]
-commands = {posargs}
-
-[testenv:docs]
-deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-    -r{toxinidir}/doc/requirements.txt
-commands =
-    rm -rf doc/build/html
-    sphinx-build -W -b html doc/source doc/build/html
-allowlist_externals =
-    rm
-
-[testenv:pdf-docs]
-deps = {[testenv:docs]deps}
-commands =
-    rm -rf doc/build/pdf
-    sphinx-build -W -b latex doc/source doc/build/pdf
-    make -C doc/build/pdf
-allowlist_externals =
-    make
-    rm
-
-[testenv:releasenotes]
-deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-    -r{toxinidir}/doc/requirements.txt
-commands =
-    rm -rf releasenotes/build releasenotes/html
-    sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
-allowlist_externals = rm
-
-[testenv:debug]
-# It runs tests from the specified dir (default is sahara_plugin_ambari/tests)
-# in interactive mode, so you can use pdb to debug tests.
-# Example usage: tox -e debug -- -t sahara_plugin_ambari/tests/unit some.test.path
-# https://docs.openstack.org/oslotest/latest/features.html#debugging-with-oslo-debug-helper
-commands = oslo_debug_helper -t sahara_plugin_ambari/tests/unit {posargs}
-
-[flake8]
-show-source = true
-builtins = _
-exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools
-# [H904] Delay string interpolations at logging calls
-# [H106] Don't put vim configuration in source files
-# [H203] Use assertIs(Not)None to check for None.
-# [H204] Use assert(Not)Equal to check for equality
-# [H205] Use assert(Greater|Less)(Equal) for comparison
enable-extensions=H904,H106,H203,H204,H205
-# [E123] Closing bracket does not match indentation of opening bracket's line
-# [E226] Missing whitespace around arithmetic operator
-# [E402] Module level import not at top of file
-# [E731] Do not assign a lambda expression, use a def
-# [W503] Line break occurred before a binary operator
-# [W504] Line break occurred after a binary operator
-ignore=E123,E226,E402,E731,W503,W504