diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index d1aa06385e..0000000000 --- a/.coveragerc +++ /dev/null @@ -1,13 +0,0 @@ -[run] -branch = True -source = sahara -omit = - .tox/* - sahara/tests/* - -[paths] -source = sahara - -[report] -ignore_errors = True -precision = 3 \ No newline at end of file diff --git a/.gitignore b/.gitignore deleted file mode 100644 index f7c82e2e69..0000000000 --- a/.gitignore +++ /dev/null @@ -1,31 +0,0 @@ -*.egg-info -*.egg[s] -*.log -*.py[co] -*.un~ -.coverage -.testrepository -.tox -.stestr -.venv -.idea -AUTHORS -ChangeLog -build -cover -develop-eggs -dist -doc/build -doc/html -eggs -etc/sahara.conf -etc/sahara/*.conf -etc/sahara/*.topology -sdist -target -tools/lintstack.head.py -tools/pylint_exceptions -doc/source/sample.config - -# Files created by releasenotes build -releasenotes/build diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index d81c293e07..0000000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=./sahara/tests/unit -top_dir=./ diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index 925f7b53c4..0000000000 --- a/.zuul.yaml +++ /dev/null @@ -1,155 +0,0 @@ -- project: - queue: sahara - templates: - - openstack-python3-jobs - - periodic-stable-jobs - - publish-openstack-docs-pti - - check-requirements - - release-notes-jobs-python3 - check: - jobs: - - openstack-tox-pylint: - voting: false - - sahara-tests-scenario: - voting: false - - sahara-tests-scenario-v2: - voting: false - - sahara-tests-tempest: - voting: false - - sahara-tests-tempest-v2: - voting: false - - openstack-tox-py38: - voting: false - - openstack-tox-py311: - voting: false - - openstack-tox-cover: - voting: false - - openstack-ansible-deploy-aio_sahara_metal-ubuntu-focal: - voting: false - gate: - jobs: - - sahara-tests-scenario: - voting: false - - sahara-tests-scenario-v2: - voting: false - - sahara-tests-tempest: - voting: false - - sahara-tests-tempest-v2: - voting: false - - openstack-tox-py38: - voting: false - - openstack-tox-py311: - voting: false - # - sahara-grenade - # - openstack-ansible-deploy-aio_sahara_metal-ubuntu-focal - experimental: - jobs: - - sahara-buildimages-ambari - - sahara-buildimages-cloudera - - sahara-buildimages-mapr - - sahara-buildimages-spark - - sahara-tests-scenario-multinode-spark - -- job: - name: sahara-grenade - parent: grenade - required-projects: - - opendev.org/openstack/grenade - - opendev.org/openstack/sahara - - opendev.org/openstack/python-saharaclient - - opendev.org/openstack/heat - - opendev.org/openstack/heat-tempest-plugin - - opendev.org/openstack/python-heatclient - - opendev.org/openstack/sahara-tests - - opendev.org/openstack/sahara-plugin-ambari - - opendev.org/openstack/sahara-plugin-cdh - - opendev.org/openstack/sahara-plugin-mapr - - opendev.org/openstack/sahara-plugin-spark - - opendev.org/openstack/sahara-plugin-storm - - opendev.org/openstack/sahara-plugin-vanilla - vars: - grenade_localrc: - RUN_HEAT_INTEGRATION_TESTS: False - grenade_devstack_localrc: - shared: - IMAGE_URLS: http://tarballs.openstack.org/heat-test-image/fedora-heat-test-image.qcow2,https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img - devstack_local_conf: - test-config: - $TEMPEST_CONFIG: - data_processing: - test_image_name: xenial-server-cloudimg-amd64-disk1 - test_ssh_user: ubuntu - data-processing-feature-enabled: - s3: True - devstack_plugins: - sahara: https://opendev.org/openstack/sahara - heat: https://opendev.org/openstack/heat - 
devstack_services: - h-api: true - h-api-cfn: true - h-eng: true - heat: true - tls-proxy: false - tempest_plugins: - - sahara-tests - - heat-tempest-plugin - tempest_test_regex: ^(sahara_tempest_plugin.tests.) - tox_envlist: all - group-vars: - subnode: - devstack_services: - tls-proxy: false - irrelevant-files: - - ^(test-|)requirements.txt$ - - ^.*\.rst$ - - ^doc/.*$ - - ^sahara/locale/.*$ - - ^sahara/tests/unit/.*$ - - ^releasenotes/.*$ - - ^tools/.*$ - - ^tox.ini$ - -- job: - name: openstack-ansible-deploy-aio_sahara_metal-ubuntu-focal - parent: openstack-ansible-deploy-aio - nodeset: ubuntu-focal - -- job: - name: sahara-buildimages-base - nodeset: centos-8-stream - vars: - sahara_src_dir: src/opendev.org/openstack/sahara - run: playbooks/buildimages/run.yaml - timeout: 7200 - required-projects: - - openstack/sahara - - openstack/sahara-plugin-ambari - - openstack/sahara-plugin-cdh - - openstack/sahara-plugin-mapr - - openstack/sahara-plugin-spark - - openstack/sahara-plugin-storm - - openstack/sahara-plugin-vanilla - -- job: - name: sahara-buildimages-ambari - parent: sahara-buildimages-base - vars: - sahara_plugin: ambari - -- job: - name: sahara-buildimages-cloudera - parent: sahara-buildimages-base - vars: - sahara_plugin: cdh - -- job: - name: sahara-buildimages-mapr - parent: sahara-buildimages-base - vars: - sahara_plugin: mapr - -- job: - name: sahara-buildimages-spark - parent: sahara-buildimages-base - vars: - sahara_plugin: spark diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index b7df2c28ca..0000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,19 +0,0 @@ -The source repository for this project can be found at: - - https://opendev.org/openstack/sahara - -Pull requests submitted through GitHub are not monitored. - -To start contributing to OpenStack, follow the steps in the contribution guide -to set up and use Gerrit: - - https://docs.openstack.org/contributors/code-and-documentation/quick-start.html - -Bugs should be filed on Storyboard: - - https://storyboard.openstack.org/#!/project/openstack/sahara - -For more specific information about contributing to this repository, see the -sahara contributor guide: - - https://docs.openstack.org/sahara/latest/contributor/contributing.html diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index ade538fcf4..0000000000 --- a/HACKING.rst +++ /dev/null @@ -1,45 +0,0 @@ -Sahara Style Commandments -========================= - -- Step 1: Read the OpenStack Style Commandments - https://docs.openstack.org/hacking/latest/ -- Step 2: Read on - -Sahara Specific Commandments ----------------------------- - -Commit Messages ---------------- -Using a common format for commit messages will help keep our git history -readable. Follow these guidelines: - -- [S365] First, provide a brief summary of 50 characters or less. Summaries - of greater than 72 characters will be rejected by the gate. - -- [S364] The first line of the commit message should provide an accurate - description of the change, not just a reference to a bug or blueprint. - -Imports -------- -- [S366, S367] Organize your imports according to the ``Import order`` - -Dictionaries/Lists ------------------- - -- [S360] Ensure default arguments are not mutable. -- [S368] Must use a dict comprehension instead of a dict constructor with a - sequence of key-value pairs. 
For more information, please refer to - http://legacy.python.org/dev/peps/pep-0274/ - -Logs ----- - -- [S373] Don't translate logs - -- [S374] You used a deprecated log level - -Importing json --------------- - -- [S375] It's more preferable to use ``jsonutils`` from ``oslo_serialization`` - instead of ``json`` for operating with ``json`` objects. diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 67db858821..0000000000 --- a/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/README.rst b/README.rst index e90cb1f577..4ee2c5f138 100644 --- a/README.rst +++ b/README.rst @@ -1,34 +1,10 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. -.. 
image:: https://governance.openstack.org/tc/badges/sahara.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -.. Change things from this point on - -OpenStack Data Processing ("Sahara") project -============================================ - -Sahara at wiki.openstack.org: https://wiki.openstack.org/wiki/Sahara - -Storyboard project: https://storyboard.openstack.org/#!/project/935 - -Sahara docs site: https://docs.openstack.org/sahara/latest/ - -Roadmap: https://wiki.openstack.org/wiki/Sahara/Roadmap - -Quickstart guide: https://docs.openstack.org/sahara/latest/user/quickstart.html - -How to participate: https://docs.openstack.org/sahara/latest/contributor/how-to-participate.html - -Source: https://opendev.org/openstack/sahara - -Bugs and feature requests: https://storyboard.openstack.org/#!/project/935 - -Release notes: https://docs.openstack.org/releasenotes/sahara/ - -License -------- - -Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py deleted file mode 100644 index 1e2a0147d4..0000000000 --- a/api-ref/source/conf.py +++ /dev/null @@ -1,207 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# sahara documentation build configuration file, created Fri May 6 15:19:20 -# 2016. -# -# This file is execfile()d with the current directory set to -# its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import sys - -extensions = [ - 'os_api_ref', - 'openstackdocstheme' -] - -# openstackdocstheme options -repository_name = 'openstack/sahara' -use_storyboard = True - -html_theme = 'openstackdocs' -html_theme_options = { - "sidebar_dropdown": "api_ref", - "sidebar_mode": "toc", -} - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8' - -# The master toctree document. 
-master_doc = 'index' - -# General information about the project. -copyright = '2010-present, OpenStack Foundation' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# The reST default role (used for this markup: `text`) to use -# for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', 'title', 'Authors name', 'manual' - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. 
-# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'saharaoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'Sahara.tex', 'OpenStack Data Processing API Documentation', - 'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst deleted file mode 100644 index fa2a090c90..0000000000 --- a/api-ref/source/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -=================== -Data Processing API -=================== - -Contents: - -API content can be searched using the :ref:`search`. - -.. toctree:: - :maxdepth: 2 - - v1.1/index - v2/index diff --git a/api-ref/source/v1.1/cluster-templates.inc b/api-ref/source/v1.1/cluster-templates.inc deleted file mode 100644 index 5d699c121a..0000000000 --- a/api-ref/source/v1.1/cluster-templates.inc +++ /dev/null @@ -1,253 +0,0 @@ -.. -*- rst -*- - -================= -Cluster templates -================= - -A cluster template configures a Hadoop cluster. A cluster template -lists node groups with the number of instances in each group. You -can also define cluster-scoped configurations in a cluster -template. - - -Show cluster template details -============================= - -.. rest_method:: GET /v1.1/{project_id}/cluster-templates/{cluster_template_id} - -Shows details for a cluster template. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - cluster_template_id: url_cluster_template_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: cluster_template_description - - use_autoconfig: use_autoconfig - - cluster_configs: cluster_configs - - created_at: created_at - - default_image_id: default_image_id - - updated_at: updated_at - - plugin_name: plugin_name - - is_default: is_default - - is_protected: object_is_protected - - shares: object_shares - - domain_name: domain_name - - tenant_id: tenant_id - - node_groups: node_groups - - is_public: object_is_public - - hadoop_version: hadoop_version - - id: cluster_template_id - - name: cluster_template_name - - - -Response Example ----------------- - -.. literalinclude:: samples/cluster-templates/cluster-templates-list-response.json - :language: javascript - - - - -Update cluster templates -======================== - -.. rest_method:: PUT /v1.1/{project_id}/cluster-templates/{cluster_template_id} - -Updates a cluster template. - -Normal response codes:202 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: url_project_id - - cluster_template_id: cluster_template_id - -Request Example ---------------- - -.. literalinclude:: samples/cluster-templates/cluster-template-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: cluster_template_description - - use_autoconfig: use_autoconfig - - cluster_configs: cluster_configs - - created_at: created_at - - default_image_id: default_image_id - - updated_at: updated_at - - plugin_name: plugin_name - - is_default: is_default - - is_protected: object_is_protected - - shares: object_shares - - domain_name: domain_name - - tenant_id: tenant_id - - node_groups: node_groups - - is_public: object_is_public - - hadoop_version: hadoop_version - - id: cluster_template_id - - name: cluster_template_name - - - - - -Delete cluster template -======================= - -.. rest_method:: DELETE /v1.1/{project_id}/cluster-templates/{cluster_template_id} - -Deletes a cluster template. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - cluster_template_id: cluster_template_id - - - - - - -List cluster templates -====================== - -.. rest_method:: GET /v1.1/{project_id}/cluster-templates - -Lists available cluster templates. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - limit: limit - - marker: marker - - sort_by: sort_by_cluster_templates - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - description: cluster_template_description - - use_autoconfig: use_autoconfig - - cluster_configs: cluster_configs - - created_at: created_at - - default_image_id: default_image_id - - updated_at: updated_at - - plugin_name: plugin_name - - is_default: is_default - - is_protected: object_is_protected - - shares: object_shares - - domain_name: domain_name - - tenant_id: tenant_id - - node_groups: node_groups - - is_public: object_is_public - - hadoop_version: hadoop_version - - id: cluster_template_id - - name: cluster_template_name - - - -Response Example ----------------- -.. rest_method:: GET /v1.1/{project_id}/cluster-templates?limit=2 - -.. literalinclude:: samples/cluster-templates/cluster-templates-list-response.json - :language: javascript - - - - -Create cluster templates -======================== - -.. rest_method:: POST /v1.1/{project_id}/cluster-templates - -Creates a cluster template. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id - - -Request Example ---------------- - -.. literalinclude:: samples/cluster-templates/cluster-template-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - description: cluster_template_description - - use_autoconfig: use_autoconfig - - cluster_configs: cluster_configs - - created_at: created_at - - default_image_id: default_image_id - - updated_at: updated_at - - plugin_name: plugin_name - - is_default: is_default - - is_protected: object_is_protected - - shares: object_shares - - domain_name: domain_name - - tenant_id: tenant_id - - node_groups: node_groups - - is_public: object_is_public - - hadoop_version: hadoop_version - - id: cluster_template_id - - name: cluster_template_name - - - - diff --git a/api-ref/source/v1.1/clusters.inc b/api-ref/source/v1.1/clusters.inc deleted file mode 100644 index d36a06e03e..0000000000 --- a/api-ref/source/v1.1/clusters.inc +++ /dev/null @@ -1,335 +0,0 @@ -.. -*- rst -*- - -======== -Clusters -======== - -A cluster is a group of nodes with the same configuration. - - -List available clusters -======================= - -.. rest_method:: GET /v1.1/{project_id}/clusters - -Lists available clusters. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - limit: limit - - marker: marker - - sort_by: sort_by_clusters - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - count: count - - info: info - - cluster_template_id: cluster_template_id - - is_transient: is_transient - - provision_progress: provision_progress - - status: status - - neutron_management_network: neutron_management_network - - clusters: clusters - - management_public_key: management_public_key - - status_description: status_description - - trust_id: trust_id - - domain_name: domain_name - - - -Response Example ----------------- -.. rest_method:: GET /v1.1/{project_id}/clusters - -.. literalinclude:: samples/clusters/clusters-list-response.json - :language: javascript - - - - -Create cluster -============== - -.. rest_method:: POST /v1.1/{project_id}/clusters - -Creates a cluster. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - -Request Example ---------------- - -.. literalinclude:: samples/clusters/cluster-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - count: count - - info: info - - cluster_template_id: cluster_template_id - - is_transient: is_transient - - provision_progress: provision_progress - - status: status - - neutron_management_network: neutron_management_network - - management_public_key: management_public_key - - status_description: status_description - - trust_id: trust_id - - domain_name: domain_name - - - - - -Create multiple clusters -======================== - -.. rest_method:: POST /v1.1/{project_id}/clusters/multiple - -Creates multiple clusters. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - -Request Example ---------------- - -.. literalinclude:: samples/clusters/multiple-clusters-create-request.json - :language: javascript - - - - - - - -Show details of a cluster -========================= - -.. rest_method:: GET /v1.1/{project_id}/clusters/{cluster_id} - -Shows details for a cluster, by ID. - - -Normal response codes: 200 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: url_project_id - - cluster_id: url_cluster_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - count: count - - info: info - - cluster_template_id: cluster_template_id - - is_transient: is_transient - - provision_progress: provision_progress - - status: status - - neutron_management_network: neutron_management_network - - management_public_key: management_public_key - - status_description: status_description - - trust_id: trust_id - - domain_name: domain_name - - - -Response Example ----------------- - -.. literalinclude:: samples/clusters/cluster-show-response.json - :language: javascript - - - - -Delete a cluster -================ - -.. rest_method:: DELETE /v1.1/{project_id}/clusters/{cluster_id} - -Deletes a cluster. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - cluster_id: url_cluster_id - - - - - - -Scale cluster -============= - -.. rest_method:: PUT /v1.1/{project_id}/clusters/{cluster_id} - -Scales a cluster. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - cluster_id: cluster_id - -Request Example ---------------- - -.. literalinclude:: samples/clusters/cluster-scale-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - count: count - - info: info - - cluster_template_id: cluster_template_id - - is_transient: is_transient - - provision_progress: provision_progress - - status: status - - neutron_management_network: neutron_management_network - - management_public_key: management_public_key - - status_description: status_description - - trust_id: trust_id - - domain_name: domain_name - - - - -Update cluster -============== - -.. rest_method:: PATCH /v1.1/{project_id}/clusters/{cluster_id} - -Updates a cluster. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - cluster_id: url_cluster_id - -Request Example ---------------- - -.. literalinclude:: samples/clusters/cluster-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - count: count - - info: info - - cluster_template_id: cluster_template_id - - is_transient: is_transient - - provision_progress: provision_progress - - status: status - - neutron_management_network: neutron_management_network - - management_public_key: management_public_key - - status_description: status_description - - trust_id: trust_id - - domain_name: domain_name - - - - -Show progress -============= - -.. rest_method:: GET /v1.1/{project_id}/clusters/{cluster_id} - -Shows provisioning progress for a cluster. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - cluster_id: url_cluster_id - - - - -Response Example ----------------- - -.. literalinclude:: samples/event-log/cluster-progress-response.json - :language: javascript - - - diff --git a/api-ref/source/v1.1/data-sources.inc b/api-ref/source/v1.1/data-sources.inc deleted file mode 100644 index 9804dae274..0000000000 --- a/api-ref/source/v1.1/data-sources.inc +++ /dev/null @@ -1,212 +0,0 @@ -.. 
-*- rst -*- - -============ -Data sources -============ - -A data source object defines the location of input or output for -MapReduce jobs and might reference different types of storage. - -The Data Processing service does not validate data source -locations. - - -Show data source details -======================== - -.. rest_method:: GET /v1.1/{project_id}/data-sources/{data_source_id} - -Shows details for a data source. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - data_source_id: url_data_source_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: data_source_description - - url: url - - tenant_id: tenant_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - type: type - - id: data_source_id - - name: data_source_name - - - -Response Example ----------------- - -.. literalinclude:: samples/data-sources/data-source-show-response.json - :language: javascript - - - - -Delete data source -================== - -.. rest_method:: DELETE /v1.1/{project_id}/data-sources/{data_source_id} - -Deletes a data source. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - data_source_id: url_data_source_id - - - - - - -Update data source -================== - -.. rest_method:: PUT /v1.1/{project_id}/data-sources/{data_source_id} - -Updates a data source. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - data_source_id: url_data_source_id - -Request Example ---------------- - -.. literalinclude:: samples/data-sources/data-source-update-request.json - :language: javascript - - - - - - - -List data sources -================= - -.. rest_method:: GET /v1.1/{project_id}/data-sources - -Lists all data sources. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - limit: limit - - marker: marker - - sort_by: sort_by_data_sources - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - description: data_source_description - - url: url - - tenant_id: tenant_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - type: type - - id: data_source_id - - name: data_source_name - - - -Response Example ----------------- - -.. rest_method:: GET /v1.1/{project_id}/data-sourses?sort_by=-name - -.. literalinclude:: samples/data-sources/data-sources-list-response.json - :language: javascript - - - - -Create data source -================== - -.. rest_method:: POST /v1.1/{project_id}/data-sources - -Creates a data source. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - -Request Example ---------------- - -.. literalinclude:: samples/data-sources/data-source-register-hdfs-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - description: data_source_description - - url: url - - tenant_id: tenant_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - type: type - - id: data_source_id - - name: data_source_name - - - - diff --git a/api-ref/source/v1.1/event-log.inc b/api-ref/source/v1.1/event-log.inc deleted file mode 100644 index 88299a3d2c..0000000000 --- a/api-ref/source/v1.1/event-log.inc +++ /dev/null @@ -1,42 +0,0 @@ -.. -*- rst -*- - -========= -Event log -========= - -The event log feature provides information about cluster -provisioning. In the event of errors, the event log shows the -reason for the failure. - - -Show progress -============= - -.. rest_method:: GET /v1.1/{project_id}/clusters/{cluster_id} - -Shows provisioning progress of cluster. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - cluster_id: cluster_id - - - - -Response Example ----------------- - -.. literalinclude:: samples/event-log/cluster-progress-response.json - :language: javascript - - - diff --git a/api-ref/source/v1.1/image-registry.inc b/api-ref/source/v1.1/image-registry.inc deleted file mode 100644 index 8c5c2a26eb..0000000000 --- a/api-ref/source/v1.1/image-registry.inc +++ /dev/null @@ -1,249 +0,0 @@ -.. -*- rst -*- - -============== -Image registry -============== - -Use the image registry tool to manage images, add tags to and -remove tags from images, and define the user name for an instance -operating system. Each plugin lists required tags for an image. To -run remote operations, the Data Processing service requires a user -name with which to log in to the operating system for an instance. - - -Add tags to image -================= - -.. rest_method:: POST /v1.1/{project_id}/images/{image_id}/tag - -Adds tags to an image. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - tags: tags - - image_id: url_image_id - -Request Example ---------------- - -.. literalinclude:: samples/image-registry/image-tags-add-request.json - :language: javascript - - - - - - - -Show image details -================== - -.. rest_method:: GET /v1.1/{project_id}/images/{image_id} - -Shows details for an image. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - image_id: url_image_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status - - username: username - - updated: updated - - description: image_description - - created: created - - image: image - - tags: tags - - minDisk: minDisk - - name: image_name - - progress: progress - - minRam: minRam - - id: image_id - - metadata: metadata - - - -Response Example ----------------- - -.. literalinclude:: samples/image-registry/image-show-response.json - :language: javascript - - - - -Register image -============== - -.. rest_method:: POST /v1.1/{project_id}/images/{image_id} - -Registers an image in the registry. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - username: username - - description: image_description - - image_id: url_image_id - -Request Example ---------------- - -.. 
literalinclude:: samples/image-registry/image-register-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status - - username: username - - updated: updated - - description: image_description - - created: created - - image: image - - tags: tags - - minDisk: minDisk - - name: image_name - - progress: progress - - minRam: minRam - - id: image_id - - metadata: metadata - - - - - -Unregister image -================ - -.. rest_method:: DELETE /v1.1/{project_id}/images/{image_id} - -Removes an image from the registry. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - image_id: url_image_id - - - - - - -Remove tags from image -====================== - -.. rest_method:: POST /v1.1/{project_id}/images/{image_id}/untag - -Removes tags from an image. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - tags: tags - - image_id: url_image_id - -Request Example ---------------- - -.. literalinclude:: samples/image-registry/image-tags-delete-request.json - :language: javascript - - - - - - - -List images -=========== - -.. rest_method:: GET /v1.1/{project_id}/images - -Lists all images registered in the registry. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - tags: tags - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status - - username: username - - updated: updated - - description: image_description - - created: created - - image: image - - tags: tags - - minDisk: minDisk - - name: image_name - - images: images - - progress: progress - - minRam: minRam - - id: image_id - - metadata: metadata - - - -Response Example ----------------- - -.. literalinclude:: samples/image-registry/images-list-response.json - :language: javascript - - - diff --git a/api-ref/source/v1.1/index.rst b/api-ref/source/v1.1/index.rst deleted file mode 100644 index a733181352..0000000000 --- a/api-ref/source/v1.1/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -:tocdepth: 3 - ------------------------- -Data Processing API v1.1 ------------------------- - -.. rest_expand_all:: - -.. include:: cluster-templates.inc -.. include:: clusters.inc -.. include:: data-sources.inc -.. include:: event-log.inc -.. include:: image-registry.inc -.. include:: job-binaries.inc -.. include:: job-executions.inc -.. include:: job-types.inc -.. include:: job-binary-internals.inc -.. include:: jobs.inc -.. include:: node-group-templates.inc -.. include:: plugins.inc diff --git a/api-ref/source/v1.1/job-binaries.inc b/api-ref/source/v1.1/job-binaries.inc deleted file mode 100644 index 1ac9f8ee78..0000000000 --- a/api-ref/source/v1.1/job-binaries.inc +++ /dev/null @@ -1,266 +0,0 @@ -.. -*- rst -*- - -============ -Job binaries -============ - -Job binary objects represent data processing applications and -libraries that are stored in either the internal database or the -Object Storage service. - - -List job binaries -================= - -.. rest_method:: GET /v1.1/{project_id}/job-binaries - -Lists the available job binaries. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - limit: limit - - marker: marker - - sort_by: sort_by_job_binary - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - description: job_binary_description - - url: url - - tenant_id: tenant_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - binaries: binaries - - id: job_binary_id - - name: job_binary_name - - - -Response Example ----------------- -.. rest_method:: GET /v1.1/{project_id}/job-binaries?sort_by=created_at - -.. literalinclude:: samples/job-binaries/list-response.json - :language: javascript - - - - -Create job binary -================= - -.. rest_method:: POST /v1.1/{project_id}/job-binaries - -Creates a job binary. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - -Request Example ---------------- - -.. literalinclude:: samples/job-binaries/create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: job_binary_description - - url: url - - tenant_id: tenant_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - id: job_binary_id - - name: job_binary_name - - - - - -Show job binary details -======================= - -.. rest_method:: GET /v1.1/{project_id}/job-binaries/{job_binary_id} - -Shows details for a job binary. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_binary_id: url_job_binary_id - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: job_binary_description - - url: url - - tenant_id: tenant_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - id: job_binary_id - - name: job_binary_name - - - -Response Example ----------------- - -.. literalinclude:: samples/job-binaries/show-response.json - :language: javascript - - - - -Delete job binary -================= - -.. rest_method:: DELETE /v1.1/{project_id}/job-binaries/{job_binary_id} - -Deletes a job binary. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - - project_id: url_project_id - - job_binary_id: url_job_binary_id - - - - - - -Update job binary -================= - -.. rest_method:: PUT /v1.1/{project_id}/job-binaries/{job_binary_id} - -Updates a job binary. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - - project_id: url_project_id - - job_binary_id: url_job_binary_id - - -Request Example ---------------- - -.. literalinclude:: samples/job-binaries/update-request.json - :language: javascript - - - - - - - -Show job binary data -==================== - -.. rest_method:: GET /v1.1/{project_id}/job-binaries/{job_binary_id}/data - -Shows data for a job binary. - -The response body shows the job binary raw data and the response -headers show the data length. - -Example response: - -:: - - HTTP/1.1 200 OK - Connection: keep-alive - Content-Length: 161 - Content-Type: text/html; charset=utf-8 - Date: Sat, 28 Mar 2016 02:42:48 GMT - A = load '$INPUT' using PigStorage(':') as (fruit: chararray); - B = foreach A generate com.hadoopbook.pig.Trim(fruit); - store B into '$OUTPUT' USING PigStorage(); - - -Normal response codes: 200 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_binary_id: url_job_binary_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - Content-Length: Content-Length - - - -Response Example ----------------- - -.. literalinclude:: samples/job-binaries/show-data-response - :language: text - - - diff --git a/api-ref/source/v1.1/job-binary-internals.inc b/api-ref/source/v1.1/job-binary-internals.inc deleted file mode 100644 index 81b8f989a1..0000000000 --- a/api-ref/source/v1.1/job-binary-internals.inc +++ /dev/null @@ -1,258 +0,0 @@ -.. -*- rst -*- - -==================== -Job binary internals -==================== - -Job binary internal objects represent data processing applications -and libraries that are stored in the internal database. - - -Create job binary internal -========================== - -.. rest_method:: PUT /v1.1/{project_id}/job-binary-internals/{name} - -Creates a job binary internal. - -Job binary internals are objects that represent data processing -applications and libraries that are stored in the internal -database. - -Specify the file contents (raw data or script text) in the request -body. Specify the file name in the URI. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - name: url_job_binary_internals_name - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - name: job_binary_internals_name - - tenant_id: tenant_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - datasize: datasize - - id: job_binary_internals_id - - - - - -Show job binary internal data -============================= - -.. rest_method:: GET /v1.1/{project_id}/job-binary-internals/{job_binary_internals_id}/data - -Shows data for a job binary internal. - -The response body shows the job binary raw data and the response -headers show the data length. - -Example response: - -:: - - HTTP/1.1 200 OK - Connection: keep-alive - Content-Length: 161 - Content-Type: text/html; charset=utf-8 - Date: Sat, 28 Mar 2016 02:21:13 GMT - A = load '$INPUT' using PigStorage(':') as (fruit: chararray); - B = foreach A generate com.hadoopbook.pig.Trim(fruit); - store B into '$OUTPUT' USING PigStorage(); - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_binary_internals_id: url_job_binary_internals_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - Content-Length: Content-Length - - - -Response Example ----------------- - -.. literalinclude:: samples/job-binary-internals/show-data-response - :language: text - - - - -Show job binary internal details -================================ - -.. rest_method:: GET /v1.1/{project_id}/job-binary-internals/{job_binary_internals_id} - -Shows details for a job binary internal. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_binary_internals_id: url_job_binary_internals_id - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - name: job_binary_internals_name - - tenant_id: tenant_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - datasize: datasize - - id: job_binary_internals_id - - - -Response Example ----------------- - -.. literalinclude:: samples/job-binary-internals/show-response.json - :language: javascript - - - - -Delete job binary internal -========================== - -.. rest_method:: DELETE /v1.1/{project_id}/job-binary-internals/{job_binary_internals_id} - -Deletes a job binary internal. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_binary_internals_id: url_job_binary_internals_id - - - - - - -Update job binary internal -========================== - -.. rest_method:: PATCH /v1.1/{project_id}/job-binary-internals/{job_binary_internals_id} - -Updates a job binary internal. - -Normal respose codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_binary_internals_id: url_job_binary_internals_id - -Request Example ---------------- - -.. literalinclude:: samples/job-binary-internals/update-request.json - :language: javascript - - - - - - - -List job binary internals -========================= - -.. rest_method:: GET /v1.1/{project_id}/job-binary-internals - -Lists the available job binary internals. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - limit: limit - - marker: marker - - sort_by: sort_by_job_binary_internals - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - binaries: binaries - - name: job_binary_internals_name - - tenant_id: tenant_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - datasize: datasize - - id: job_binary_internals_id - - - -Response Example ----------------- -.. rest_method:: GET /v1.1/{project_id}/job-binary-internals - -.. literalinclude:: samples/job-binary-internals/list-response.json - :language: javascript - - - diff --git a/api-ref/source/v1.1/job-executions.inc b/api-ref/source/v1.1/job-executions.inc deleted file mode 100644 index 9d9f5628bd..0000000000 --- a/api-ref/source/v1.1/job-executions.inc +++ /dev/null @@ -1,325 +0,0 @@ -.. -*- rst -*- - -============== -Job executions -============== - -A job execution object represents a Hadoop job that runs on a -cluster. A job execution polls the status of a running job and -reports it to the user. Also a user can cancel a running job. - - -Refresh job execution status -============================ - -.. rest_method:: GET /v1.1/{project_id}/job-executions/{job_execution_id}/refresh-status - -Refreshes the status of and shows information for a job execution. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_execution_id: url_job_execution_id - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - info: info - - output_id: output_id - - start_time: start_time - - job_id: job_id - - updated_at: updated_at - - tenant_id: tenant_id - - created_at: created_at - - args: args - - data_source_urls: data_source_urls - - return_code: return_code - - oozie_job_id: oozie_job_id - - is_protected: is_protected_3 - - cluster_id: cluster_id - - end_time: end_time - - params: params - - is_public: job_execution_is_public - - input_id: input_id - - configs: configs - - job_execution: job_execution - - id: job_execution_id - - - -Response Example ----------------- - -.. literalinclude:: samples/job-executions/job-ex-response.json - :language: javascript - - - - -List job executions -=================== - -.. rest_method:: GET /v1.1/{project_id}/job-executions - -Lists available job executions. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - limit: limit - - marker: marker - - sort_by: sort_by_job_execution - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - info: info - - output_id: output_id - - start_time: start_time - - job_id: job_id - - updated_at: updated_at - - tenant_id: tenant_id - - created_at: created_at - - args: args - - data_source_urls: data_source_urls - - return_code: return_code - - oozie_job_id: oozie_job_id - - is_protected: is_protected_3 - - cluster_id: cluster_id - - end_time: end_time - - params: params - - is_public: job_execution_is_public - - input_id: input_id - - configs: configs - - job_execution: job_execution - - id: job_execution_id - - job_executions: job_executions - - - -Response Example ----------------- -.. rest_method:: /v1.1/{project_id}/job-executions - -.. literalinclude:: samples/job-executions/list-response.json - :language: javascript - - - - -Show job execution details -========================== - -.. rest_method:: GET /v1.1/{project_id}/job-executions/{job_execution_id} - -Shows details for a job execution, by ID. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_execution_id: url_job_execution_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - info: info - - output_id: output_id - - start_time: start_time - - job_id: job_id - - updated_at: updated_at - - tenant_id: tenant_id - - created_at: created_at - - args: args - - data_source_urls: data_source_urls - - return_code: return_code - - oozie_job_id: oozie_job_id - - is_protected: is_protected_3 - - cluster_id: cluster_id - - end_time: end_time - - params: params - - is_public: job_execution_is_public - - input_id: input_id - - configs: configs - - job_execution: job_execution - - id: job_execution_id - - - -Response Example ----------------- - -.. literalinclude:: samples/job-executions/job-ex-response.json - :language: javascript - - - - -Delete job execution -==================== - -.. rest_method:: DELETE /v1.1/{project_id}/job-executions/{job_execution_id} - -Deletes a job execution. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_execution_id: url_job_execution_id - - - - - - -Update job execution -==================== - -.. rest_method:: PATCH /v1.1/{project_id}/job-executions/{job_execution_id} - -Updates a job execution. 
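
A minimal usage sketch (an editorial addition, not part of the removed api-ref file): calling the job-execution update endpoint over HTTP. Only the PATCH path and the wrapped ``job_execution`` response object follow this section; the host, port, token, IDs, and the ``is_public`` payload field are illustrative assumptions.

.. code-block:: python

    # Hedged sketch: PATCH a Sahara v1.1 job execution.
    # Host, port, token and IDs below are placeholders, not real values.
    import requests

    SAHARA = "http://sahara.example.com:8386"           # assumed endpoint
    PROJECT_ID = "808d5032ea0446889097723bfc8e919d"     # example project UUID
    JOB_EX_ID = "11111111-2222-3333-4444-555555555555"  # placeholder ID

    resp = requests.patch(
        f"{SAHARA}/v1.1/{PROJECT_ID}/job-executions/{JOB_EX_ID}",
        headers={"X-Auth-Token": "TOKEN",               # placeholder token
                 "Content-Type": "application/json"},
        json={"is_public": True},                       # assumed update field
    )
    resp.raise_for_status()                             # expect 202 Accepted
    print(resp.json()["job_execution"]["id"])
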
- -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_execution_id: url_job_execution_id - -Request Example ---------------- - -.. literalinclude:: samples/job-executions/job-ex-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - info: info - - output_id: output_id - - start_time: start_time - - job_id: job_id - - updated_at: updated_at - - tenant_id: tenant_id - - created_at: created_at - - args: args - - data_source_urls: data_source_urls - - return_code: return_code - - oozie_job_id: oozie_job_id - - is_protected: is_protected_3 - - cluster_id: cluster_id - - end_time: end_time - - params: params - - is_public: job_execution_is_public - - input_id: input_id - - configs: configs - - job_execution: job_execution - - id: job_execution_id - - - - - -Cancel job execution -==================== - -.. rest_method:: GET /v1.1/{project_id}/job-executions/{job_execution_id}/cancel - -Cancels a job execution. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_execution_id: url_job_execution_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - info: info - - output_id: output_id - - start_time: start_time - - job_id: job_id - - updated_at: updated_at - - tenant_id: tenant_id - - created_at: created_at - - args: args - - data_source_urls: data_source_urls - - return_code: return_code - - oozie_job_id: oozie_job_id - - is_protected: is_protected_3 - - cluster_id: cluster_id - - end_time: end_time - - params: params - - is_public: job_execution_is_public - - input_id: input_id - - configs: configs - - job_execution: job_execution - - id: job_execution_id - - - -Response Example ----------------- - -.. literalinclude:: samples/job-executions/cancel-response.json - :language: javascript - - - diff --git a/api-ref/source/v1.1/job-types.inc b/api-ref/source/v1.1/job-types.inc deleted file mode 100644 index ab719dfdd7..0000000000 --- a/api-ref/source/v1.1/job-types.inc +++ /dev/null @@ -1,61 +0,0 @@ -.. -*- rst -*- - -========= -Job types -========= - -Each plugin that supports EDP also supports specific job types. -Different versions of a plugin might actually support different job -types. Configuration options vary by plugin, version, and job type. - -The job types provide information about which plugins support which -job types and how to configure the job types. - - -List job types -============== - -.. rest_method:: GET /v1.1/{project_id}/job-types - -Lists all job types. - -You can use query parameters to filter the response. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - plugin: plugin - - version: version - - type: type - - hints: hints - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - versions: versions - - title: title - - description: description_plugin - - job_types: job_types - - name: plugin_name - - - -Response Example ----------------- - -.. literalinclude:: samples/job-types/job-types-list-response.json - :language: javascript - - - diff --git a/api-ref/source/v1.1/jobs.inc b/api-ref/source/v1.1/jobs.inc deleted file mode 100644 index c4e0cb7a9f..0000000000 --- a/api-ref/source/v1.1/jobs.inc +++ /dev/null @@ -1,265 +0,0 @@ -.. 
-*- rst -*- - -==== -Jobs -==== - -A job object lists the binaries that a job needs to run. To run a -job, you must specify data sources and job parameters. - -You can run a job on an existing or new transient cluster. - - -Run job -======= - -.. rest_method:: POST /v1.1/{project_id}/jobs/{job_id}/execute - -Runs a job. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_id: url_job_id - -Request Example ---------------- - -.. literalinclude:: samples/jobs/job-execute-request.json - :language: javascript - - - - - - - -List jobs -========= - -.. rest_method:: GET /v1.1/{project_id}/jobs - -Lists all jobs. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - limit: limit - - marker: marker - - sort_by: sort_by_jobs - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - jobs: jobs - - description: job_description - - tenant_id: tenant_id - - created_at: created_at - - mains: mains - - updated_at: updated_at - - libs: libs - - is_protected: object_is_protected - - interface: interface - - is_public: object_is_public - - type: type - - id: job_id - - name: job_name - - markers: markers - - prev: prev - - next: next - - -Response Example ----------------- -..rest_method:: GET /v1.1/{project_id}/jobs?limit=2 - -.. literalinclude:: samples/jobs/jobs-list-response.json - :language: javascript - - - - -Create job -========== - -.. rest_method:: POST /v1.1/{project_id}/jobs - -Creates a job object. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - -Request Example ---------------- - -.. literalinclude:: samples/jobs/job-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: job_description - - tenant_id: tenant_id - - created_at: created_at - - mains: mains - - updated_at: updated_at - - libs: libs - - is_protected: object_is_protected - - interface: interface - - is_public: object_is_public - - type: type - - id: job_id - - name: job_name - - - - - -Show job details -================ - -.. rest_method:: GET /v1.1/{project_id}/jobs/{job_id} - -Shows details for a job. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_id: url_job_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: job_description - - tenant_id: tenant_id - - created_at: created_at - - mains: mains - - updated_at: updated_at - - libs: libs - - is_protected: object_is_protected - - interface: interface - - is_public: object_is_public - - type: type - - id: job_id - - name: job_name - - - -Response Example ----------------- - -.. literalinclude:: samples/jobs/job-show-response.json - :language: javascript - - - - -Remove job -========== - -.. rest_method:: DELETE /v1.1/{project_id}/jobs/{job_id} - -Removes a job. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_id: url_job_id - - - - - - -Update job object -================= - -.. rest_method:: PATCH /v1.1/{project_id}/jobs/{job_id} - -Updates a job object. - -Normal response codes:202 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: url_project_id - - job_id: url_job_id - -Request Example ---------------- - -.. literalinclude:: samples/jobs/job-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: job_description - - tenant_id: tenant_id - - created_at: created_at - - mains: mains - - updated_at: updated_at - - libs: libs - - is_protected: object_is_protected - - interface: interface - - is_public: object_is_public - - type: type - - id: job_id - - name: job_name - - - - diff --git a/api-ref/source/v1.1/node-group-templates.inc b/api-ref/source/v1.1/node-group-templates.inc deleted file mode 100644 index e61a61981d..0000000000 --- a/api-ref/source/v1.1/node-group-templates.inc +++ /dev/null @@ -1,269 +0,0 @@ -.. -*- rst -*- - -==================== -Node group templates -==================== - -A cluster is a group of nodes with the same configuration. A node -group template configures a node in the cluster. - -A template configures Hadoop processes and VM characteristics, such -as the number of reduced slots for task tracker, the number of -CPUs, and the amount of RAM. The template specifies the VM -characteristics through an OpenStack flavor. - - -List node group templates -========================= - -.. rest_method:: GET /v1.1/{project_id}/node-group-templates - -Lists available node group templates. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - limit: limit - - marker: marker - - sort_by: sort_by_node_group_templates - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - volume_local_to_instance: volume_local_to_instance - - availability_zone: availability_zone - - updated_at: updated_at - - use_autoconfig: use_autoconfig - - volumes_per_node: volumes_per_node - - id: node_group_template_id - - security_groups: security_groups - - shares: object_shares - - node_configs: node_configs - - auto_security_group: auto_security_group - - volumes_availability_zone: volumes_availability_zone - - description: node_group_template_description - - volume_mount_prefix: volume_mount_prefix - - plugin_name: plugin_name - - floating_ip_pool: floating_ip_pool - - is_default: is_default - - image_id: image_id - - volumes_size: volumes_size - - is_proxy_gateway: is_proxy_gateway - - is_public: object_is_public - - hadoop_version: hadoop_version - - name: node_group_template_name - - tenant_id: tenant_id - - created_at: created_at - - volume_type: volume_type - - is_protected: object_is_protected - - node_processes: node_processes - - flavor_id: flavor_id - - - -Response Example ----------------- -.. rest_method:: GET /v1.1/{project_id}/node-group-templates?limit=2&marker=38b4e146-1d39-4822-bad2-fef1bf304a52&sort_by=name - -.. literalinclude:: samples/node-group-templates/node-group-templates-list-response.json - :language: javascript - - - - -Create node group template -========================== - -.. rest_method:: POST /v1.1/{project_id}/node-group-templates - -Creates a node group template. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - -Request Example ---------------- - -.. 
literalinclude:: samples/node-group-templates/node-group-template-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_local_to_instance: volume_local_to_instance - - availability_zone: availability_zone - - updated_at: updated_at - - use_autoconfig: use_autoconfig - - volumes_per_node: volumes_per_node - - id: node_group_template_id - - security_groups: security_groups - - shares: object_shares - - node_configs: node_configs - - auto_security_group: auto_security_group - - volumes_availability_zone: volumes_availability_zone - - description: node_group_template_description - - volume_mount_prefix: volume_mount_prefix - - plugin_name: plugin_name - - floating_ip_pool: floating_ip_pool - - is_default: is_default - - image_id: image_id - - volumes_size: volumes_size - - is_proxy_gateway: is_proxy_gateway - - is_public: object_is_public - - hadoop_version: hadoop_version - - name: node_group_template_name - - tenant_id: tenant_id - - created_at: created_at - - volume_type: volume_type - - is_protected: object_is_protected - - node_processes: node_processes - - flavor_id: flavor_id - - - - - -Show node group template details -================================ - -.. rest_method:: GET /v1.1/{project_id}/node-group-templates/{node_group_template_id} - -Shows a node group template, by ID. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - node_group_template_id: url_node_group_template_id - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_local_to_instance: volume_local_to_instance - - availability_zone: availability_zone - - updated_at: updated_at - - use_autoconfig: use_autoconfig - - volumes_per_node: volumes_per_node - - id: node_group_template_id - - security_groups: security_groups - - shares: object_shares - - node_configs: node_configs - - auto_security_group: auto_security_group - - volumes_availability_zone: volumes_availability_zone - - description: node_group_template_description - - volume_mount_prefix: volume_mount_prefix - - plugin_name: plugin_name - - floating_ip_pool: floating_ip_pool - - is_default: is_default - - image_id: image_id - - volumes_size: volumes_size - - is_proxy_gateway: is_proxy_gateway - - is_public: object_is_public - - hadoop_version: hadoop_version - - name: node_group_template_name - - tenant_id: tenant_id - - created_at: created_at - - volume_type: volume_type - - is_protected: object_is_protected - - node_processes: node_processes - - flavor_id: flavor_id - - - -Response Example ----------------- - -.. literalinclude:: samples/node-group-templates/node-group-template-show-response.json - :language: javascript - - - - -Delete node group template -========================== - -.. rest_method:: DELETE /v1.1/{project_id}/node-group-templates/{node_group_template_id} - -Deletes a node group template. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - node_group_template_id: url_node_group_template_id - - - - - - -Update node group template -========================== - -.. rest_method:: PUT /v1.1/{project_id}/node-group-templates/{node_group_template_id} - -Updates a node group template. - -Normal respose codes:202 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: url_project_id - - node_group_template_id: url_node_group_template_id - -Request Example ---------------- - -.. literalinclude:: samples/node-group-templates/node-group-template-update-request.json - :language: javascript - - - - - - diff --git a/api-ref/source/v1.1/parameters.yaml b/api-ref/source/v1.1/parameters.yaml deleted file mode 100644 index fc28dc6ec0..0000000000 --- a/api-ref/source/v1.1/parameters.yaml +++ /dev/null @@ -1,1159 +0,0 @@ -# variables in header -Content-Length: - description: | - The length of the data, in bytes. - in: header - required: true - type: string - -# variables in path -hints: - description: | - Includes configuration hints in the response. - in: path - required: false - type: boolean -job_binary_id: - description: | - The UUID of the job binary. - in: path - required: true - type: string -limit: - description: | - Maximum number of objects in response data. - in: path - required: false - type: integer -marker: - description: | - ID of the last element on the list which - won't be in response. - in: path - required: false - type: string -plugin: - description: | - Filters the response by a plugin name. - in: path - required: false - type: string -sort_by_cluster_templates: - description: | - The field for sorting cluster templates. - this parameter accepts the following values: - ``name``, ``plugin_name``, ``hadoop_version``, - ``created_at``, ``updated_at``, ``id``. Also - this values can started with ``-`` prefix for - descending sort. For example: ``-name``. - in: path - required: false - type: string - -sort_by_clusters: - description: | - The field for sorting clusters. - this parameter accepts the following values: - ``name``, ``plugin_name``, ``hadoop_version``, - ``status``, ``id``. Also this values can - started with ``-`` prefix for descending sort. - For example: ``-name``. - in: path - required: false - type: string - -sort_by_data_sources: - description: | - The field for sorting data sources. - this parameter accepts the following values: - ``id``, ``name``, ``type``, ``created_at``, - ``updated_at``. Also this values can started - with ``-`` prefix for descending sort. - For example: ``-name``. - in: path - required: false - type: string - -sort_by_job_binary: - description: | - The field for sorting job binaries. - this parameter accepts the following values: - ``id``, ``name``, ``created_at``, ``updated_at``. - Also this values can started with ``-`` prefix - for descending sort. For example: ``-name``. - in: path - required: false - type: string - -sort_by_job_binary_internals: - description: | - The field for sorting job binary internals. - this parameter accepts the following values: - ``id``, ``name``, ``created_at``, ``updated_at``. - Also this values can started with ``-`` prefix - for descending sort. For example: ``-name``. - in: path - required: false - type: string - -sort_by_job_execution: - description: | - The field for sorting job executions. - this parameter accepts the following values: - ``id``, ``job_template``, ``cluster``, - ``status``. Also this values can started - with ``-`` prefix for descending sort. - For example: ``-cluster``. - in: path - required: false - type: string - -sort_by_jobs: - description: | - The field for sorting jobs. - this parameter accepts the following values: - ``id``, ``name``, ``type``, ``created_at``, - ``updated_at``. Also this values can started - with ``-`` prefix for descending sort. - For example: ``-name``. 
- in: path - required: false - type: string - -sort_by_node_group_templates: - description: | - The field for sorting node group templates. - this parameter accepts the following values: - ``name``, ``plugin_name``, ``hadoop_version``, - ``created_at``, ``updated_at``, ``id``. Also - this values can started with ``-`` prefix for - descending sort. For example: ``-name``. - in: path - required: false - type: string - -type_2: - description: | - Filters the response by a job type. - in: path - required: false - type: string -url_cluster_id: - description: | - The ID of the cluster - in: path - required: true - type: string -url_cluster_template_id: - description: | - The unique identifier of the cluster template. - in: path - required: true - type: string -url_data_source_id: - description: | - The UUID of the data source. - in: path - required: true - type: string -url_image_id: - description: | - The UUID of the image. - in: path - required: true - type: string -url_job_binary_id: - description: | - The UUID of the job binary. - in: path - required: true - type: string -url_job_binary_internals_id: - description: | - The UUID of the job binary internal. - in: path - required: true - type: string -url_job_binary_internals_name: - description: | - The name of the job binary internal. - in: path - required: true - type: string -url_job_execution_id: - description: | - The UUID of the job execution. - in: path - required: true - type: string -url_job_id: - description: | - The UUID of the job. - in: path - required: true - type: string -url_node_group_template_id: - description: | - The UUID of the node group template. - in: path - required: true - type: string -url_plugin_name: - description: | - Name of the plugin. - in: path - required: true - type: string -url_project_id: - description: | - UUID of the project. - in: path - required: true - type: string -version: - description: | - Filters the response by a plugin version. - in: path - required: true - type: string -version_1: - description: | - Version of the plugin. - in: path - required: false - type: string - - -# variables in body -args: - description: | - The list of arguments. - in: body - required: true - type: array -auto_security_group: - description: | - If set to ``True``, the cluster group is - automatically secured. - in: body - required: true - type: boolean -availability_zone: - description: | - The availability of the node in the cluster. - in: body - required: true - type: string -binaries: - description: | - The list of job binary internal objects. - in: body - required: true - type: array -cluster_configs: - description: | - A set of key and value pairs that contain the - cluster configuration. - in: body - required: true - type: object -cluster_id: - description: | - The UUID of the cluster. - in: body - required: true - type: string -cluster_template_description: - description: | - Description of the cluster template - in: body - required: false - type: string -cluster_template_id: - description: | - The UUID of the cluster template. - in: body - required: true - type: string -cluster_template_name: - description: | - The name of the cluster template. - in: body - required: true - type: string -clusters: - description: | - The list of clusters. - in: body - required: true - type: array -configs: - description: | - The mappings of the job tasks. - in: body - required: true - type: object -count: - description: | - The number of nodes in the cluster. 
- in: body - required: true - type: integer -created: - description: | - The date and time when the image was created. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. - in: body - required: true - type: string -created_at: - description: | - The date and time when the cluster was created. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -created_at_1: - description: | - The date and time when the object was created. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -created_at_2: - description: | - The date and time when the node was created in the cluster. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -created_at_3: - description: | - The date and time when the job execution object was created. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -data_source_description: - description: | - The description of the data source object. - in: body - required: true - type: string -data_source_id: - description: | - The UUID of the data source. - in: body - required: true - type: string -data_source_name: - description: | - The name of the data source. - in: body - required: true - type: string -data_source_urls: - description: | - The data source URLs. - in: body - required: true - type: object -datasize: - description: | - The size of the data stored in the internal - database. - in: body - required: true - type: integer -default_image_id: - description: | - The default ID of the image. - in: body - required: true - type: string -description: - description: | - The description of the cluster. - in: body - required: true - type: string -description_3: - description: | - The description of the node in the cluster. - in: body - required: true - type: string -description_7: - description: | - Description of the image. - in: body - required: false - type: string -description_plugin: - description: | - The full description of the plugin. - in: body - required: true - type: string -domain_name: - description: | - Domain name for internal and external hostname resolution. - Required if DNS service is enabled. - in: body - required: false - type: string -end_time: - description: | - The end date and time of the job execution. - - The date and time when the job completed execution. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. 
- in: body - required: true - type: string -flavor_id: - description: | - The ID of the flavor. - in: body - required: true - type: string -floating_ip_pool: - description: | - The UUID of the pool in the template. - in: body - required: true - type: string -hadoop_version: - description: | - The version of the Hadoop used in the cluster. - in: body - required: true - type: string -hadoop_version_1: - description: | - The version of the Hadoop. - in: body - required: true - type: string -id: - description: | - The UUID of the cluster. - in: body - required: true - type: string -id_1: - description: | - The ID of the object. - in: body - required: true - type: string -image: - description: | - A set of key and value pairs that contain image - properties. - in: body - required: true - type: object -image_description: - description: | - The description of the image. - in: body - required: true - type: string -image_id: - description: | - The UUID of the image. - in: body - required: true - type: string -image_name: - description: | - The name of the operating system image. - in: body - required: true - type: string -images: - description: | - The list of images and their properties. - in: body - required: true - type: array -info: - description: | - A set of key and value pairs that contain cluster - information. - in: body - required: true - type: object -info_1: - description: | - The report of the executed job objects. - in: body - required: true - type: object -input_id: - description: | - The UUID of the input. - in: body - required: true - type: string -interface: - description: | - The interfaces of the job object. - in: body - required: true - type: array -is_default: - description: | - If set to ``true``, the cluster is the default - cluster. - in: body - required: true - type: boolean -is_protected: - description: | - If set to ``true``, the cluster is protected. - in: body - required: true - type: boolean -is_protected_2: - description: | - If set to ``true``, the node is protected. - in: body - required: true - type: boolean -is_protected_3: - description: | - If set to ``true``, the job execution object is - protected. - in: body - required: true - type: boolean -is_proxy_gateway: - description: | - If set to ``true``, the node is the proxy - gateway. - in: body - required: true - type: boolean -is_public: - description: | - If set to ``true``, the cluster is public. - in: body - required: true - type: boolean -is_transient: - description: | - If set to ``true``, the cluster is transient. - in: body - required: true - type: boolean -job_binary_description: - description: | - The description of the job binary object. - in: body - required: true - type: string -job_binary_internals_id: - description: | - The UUID of the job binary internal. - in: body - required: true - type: string -job_binary_internals_name: - description: | - The name of the job binary internal. - in: body - required: true - type: string -job_binary_name: - description: | - The name of the object. - in: body - required: true - type: string -job_description: - description: | - The description of the job object. - in: body - required: true - type: string -job_execution: - description: | - A set of key and value pairs that contain the job - object. - in: body - required: true - type: object -job_execution_id: - description: | - The UUID of the job execution object. - in: body - required: true - type: string -job_execution_is_public: - description: | - If set to ``true``, the job execution object is - public. 
- in: body - required: true - type: boolean -job_executions: - description: | - The list of job execution objects. - in: body - required: true - type: array -job_id: - description: | - The UUID of the job object. - in: body - required: true - type: string -job_name: - description: | - The name of the job object. - in: body - required: true - type: string -job_types: - description: | - The list of plugins and their job types. - in: body - required: true - type: array -jobs: - description: | - The list of the jobs. - in: body - required: true - type: array -libs: - description: | - The list of the job object properties. - in: body - required: true - type: array -mains: - description: | - The list of the job object and their properties. - in: body - required: true - type: array -management_public_key: - description: | - The SSH key for the management network. - in: body - required: true - type: string -markers: - description: | - The markers of previous and following pages of data. - This field exists only if ``limit`` is passed to - request. - in: body - required: false - type: object -metadata: - description: | - A set of key and value pairs that contain image - metadata. - in: body - required: true - type: object -minDisk: - description: | - The minimum disk space, in GB. - in: body - required: true - type: integer -minRam: - description: | - The minimum amount of random access memory (RAM) - for the image, in GB. - in: body - required: true - type: integer -name: - description: | - The name of the cluster. - in: body - required: true - type: string -name_1: - description: | - The name of the object. - in: body - required: true - type: string -neutron_management_network: - description: | - The UUID of the neutron management network. - in: body - required: true - type: string -next: - description: | - The marker of next page of list data. - in: body - required: false - type: string -node_configs: - description: | - A set of key and value pairs that contain the - node configuration in the cluster. - in: body - required: true - type: object -node_group_template_description: - description: | - Description of the node group template - in: body - required: false - type: string -node_group_template_id: - description: | - The UUID of the node group template. - in: body - required: true - type: string -node_group_template_name: - description: | - The name of the node group template. - in: body - required: true - type: string -node_groups: - description: | - The detail properties of the node in key-value - pairs. - in: body - required: true - type: object -node_processes: - description: | - The list of the processes performed by the node. - in: body - required: true - type: array -object_is_protected: - description: | - If set to ``true``, the object is protected. - in: body - required: true - type: boolean -object_is_public: - description: | - If set to ``true``, the object is public. - in: body - required: true - type: boolean -object_shares: - description: | - The sharing of resources in the cluster. - in: body - required: true - type: string -oozie_job_id: - description: | - The UUID of the ``oozie_job``. - in: body - required: true - type: string -output_id: - description: | - The UUID of the output of job execution object. - in: body - required: true - type: string -params: - description: | - The mappings of values to the parameters. - in: body - required: true - type: object -plugin_name: - description: | - The name of the plugin. 
- in: body - required: true - type: string -plugins: - description: | - The list of plugins. - in: body - required: true - type: array -prev: - description: | - The marker of previous page. May be ``null`` if - previous page is first or if current page is first. - in: body - required: false - type: string -progress: - description: | - A progress indicator, as a percentage value, for - the amount of image content that has been processed. - in: body - required: true - type: integer -project_id: - description: | - The UUID of the project. - in: body - required: true - type: string -provision_progress: - description: | - A list of the cluster progresses. - in: body - required: true - type: array -return_code: - description: | - The code returned after job has executed. - in: body - required: true - type: string -security_groups: - description: | - The security groups of the node. - in: body - required: true - type: string -shares: - description: | - The shares of the cluster. - in: body - required: true - type: string -start_time: - description: | - The date and time when the job started. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -status: - description: | - The status of the cluster. - in: body - required: true - type: string -status_1: - description: | - The current status of the image. - in: body - required: true - type: string -status_description: - description: | - The description of the cluster status. - in: body - required: true - type: string -tags: - description: | - List of tags to add. - in: body - required: true - type: array -tags_1: - description: | - Lists images only with specific tag. Can be used - multiple times. - in: body - required: false - type: string -tags_2: - description: | - One or more image tags. - in: body - required: true - type: array -tags_3: - description: | - List of tags to remove. - in: body - required: true - type: array -tenant_id: - description: | - The UUID of the tenant. - in: body - required: true - type: string -title: - description: | - The title of the plugin. - in: body - required: true - type: string -trust_id: - description: | - The id of the trust. - in: body - required: true - type: integer -type: - description: | - The type of the data source object. - in: body - required: true - type: string -type_1: - description: | - The type of the job object. - in: body - required: true - type: string -updated: - description: | - The date and time when the image was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. - in: body - required: true - type: string -updated_at: - description: | - The date and time when the cluster was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -updated_at_1: - description: | - The date and time when the object was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. 
- - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -updated_at_2: - description: | - The date and time when the node was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -updated_at_3: - description: | - The date and time when the job execution object was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -url: - description: | - The url of the data source object. - in: body - required: true - type: string -url_1: - description: | - The url of the job binary object. - in: body - required: true - type: string -use_autoconfig: - description: | - If set to ``true``, the cluster is auto - configured. - in: body - required: true - type: boolean -use_autoconfig_1: - description: | - If set to ``true``, the node is auto configured. - in: body - required: true - type: boolean -username: - description: | - The name of the user for the image. - in: body - required: true - type: string -username_1: - description: | - The user name to log in to an instance operating - system for remote operations execution. - in: body - required: true - type: string -versions: - description: | - The list of plugin versions. - in: body - required: true - type: array -volume_local_to_instance: - description: | - If set to ``true``, the volume is local to the - instance. - in: body - required: true - type: boolean -volume_mount_prefix: - description: | - The mount point of the node. - in: body - required: true - type: string -volume_type: - description: | - The type of volume in a node. - in: body - required: true - type: string -volumes_availability_zone: - description: | - The availability zone of the volumes. - in: body - required: true - type: string -volumes_per_node: - description: | - The number of volumes for the node. - in: body - required: true - type: integer -volumes_size: - description: | - The size of the volumes in a node. - in: body - required: true - type: integer - diff --git a/api-ref/source/v1.1/plugins.inc b/api-ref/source/v1.1/plugins.inc deleted file mode 100644 index 0708b70fb7..0000000000 --- a/api-ref/source/v1.1/plugins.inc +++ /dev/null @@ -1,187 +0,0 @@ -.. -*- rst -*- - -======= -Plugins -======= - -A plugin object defines the Hadoop or Spark version that it can -install and which configurations can be set for the cluster. - - -Show plugin details -=================== - -.. rest_method:: GET /v1.1/{project_id}/plugins/{plugin_name} - -Shows details for a plugin. - - -Normal response codes: 200 -Error response codes: 400, 500 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - plugin_name: url_plugin_name - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - versions: versions - - title: title - - description: description_plugin - - name: plugin_name - - - -Response Example ----------------- - -.. literalinclude:: samples/plugins/plugin-show-response.json - :language: javascript - - - - -List plugins -============ - -.. rest_method:: GET /v1.1/{project_id}/plugins - -Lists all registered plugins. 
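
A minimal sketch (an editorial addition, not part of the removed file): listing the registered plugins with a plain HTTP client. Only the request path and the ``plugins``/``name``/``versions`` response fields follow this section; the host, port, and token are illustrative assumptions.

.. code-block:: python

    # Hedged sketch: list Sahara v1.1 plugins.
    # Host, port and token are placeholders, not real values.
    import requests

    SAHARA = "http://sahara.example.com:8386"        # assumed endpoint
    PROJECT_ID = "808d5032ea0446889097723bfc8e919d"  # example project UUID

    resp = requests.get(
        f"{SAHARA}/v1.1/{PROJECT_ID}/plugins",
        headers={"X-Auth-Token": "TOKEN"},           # placeholder token
    )
    resp.raise_for_status()                          # expect 200 OK
    for plugin in resp.json()["plugins"]:
        # each entry carries name, title, description and supported versions
        print(plugin["name"], plugin.get("versions", []))
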
- - -Normal response codes: 200 -Error response codes: 400, 500 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - title: title - - versions: versions - - plugins: plugins - - description: description_plugin - - name: plugin_name - - - -Response Example ----------------- - -.. literalinclude:: samples/plugins/plugins-list-response.json - :language: javascript - - - - -Show plugin version details -=========================== - -.. rest_method:: GET /v1.1/{project_id}/plugins/{plugin_name}/{version} - -Shows details for a plugin version. - - -Normal response codes: 200 -Error response codes: 400, 500 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - plugin_name: url_plugin_name - - version: version - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - versions: versions - - title: title - - description: description_plugin - - name: plugin_name - - - -Response Example ----------------- - -.. literalinclude:: samples/plugins/plugin-version-show-response.json - :language: javascript - - - - -Update plugin details -===================== - -.. rest_method:: PATCH /v1.1/{project_id}/plugins/{plugin_name} - -Updates details for a plugin. - - -Normal response codes: 202 -Error response codes: 400, 500 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: url_project_id - - plugin_name: url_plugin_name - - -Request Example ---------------- - -.. literalinclude:: samples/plugins/plugin-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - title: title - - versions: versions - - description: description_plugin - - name: plugin_name - - -Response Example ----------------- - -.. 
literalinclude:: samples/plugins/plugin-update-response.json - :language: javascript - - - - diff --git a/api-ref/source/v1.1/samples/cluster-templates/cluster-template-create-request.json b/api-ref/source/v1.1/samples/cluster-templates/cluster-template-create-request.json deleted file mode 100644 index e7d9027f6a..0000000000 --- a/api-ref/source/v1.1/samples/cluster-templates/cluster-template-create-request.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "plugin_name": "vanilla", - "hadoop_version": "2.7.1", - "node_groups": [ - { - "name": "worker", - "count": 3, - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251" - }, - { - "name": "master", - "count": 1, - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae" - } - ], - "name": "cluster-template" -} diff --git a/api-ref/source/v1.1/samples/cluster-templates/cluster-template-create-response.json b/api-ref/source/v1.1/samples/cluster-templates/cluster-template-create-response.json deleted file mode 100644 index 0e24853bfb..0000000000 --- a/api-ref/source/v1.1/samples/cluster-templates/cluster-template-create-response.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "cluster_template": { - "is_public": false, - "anti_affinity": [], - "name": "cluster-template", - "created_at": "2015-09-14T10:38:44", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "cluster_configs": {}, - "shares": null, - "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "default_image_id": null, - "is_default": false, - "updated_at": null, - "plugin_name": "vanilla", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "1751c04e-8f39-467e-a421-480961172d4b", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "3ee85068-c455-4391-9db2-b54a20b99df3", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "neutron_management_network": null, - "domain_name": null, - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "description": null, - "is_protected": false - } -} diff --git a/api-ref/source/v1.1/samples/cluster-templates/cluster-template-show-response.json b/api-ref/source/v1.1/samples/cluster-templates/cluster-template-show-response.json deleted file mode 100644 index 2c70d53e8a..0000000000 --- 
a/api-ref/source/v1.1/samples/cluster-templates/cluster-template-show-response.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "cluster_template": { - "is_public": false, - "anti_affinity": [], - "name": "cluster-template", - "created_at": "2015-09-14T10:38:44", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "cluster_configs": {}, - "shares": null, - "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "default_image_id": null, - "is_default": false, - "updated_at": null, - "plugin_name": "vanilla", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "1751c04e-8f39-467e-a421-480961172d4b", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "3ee85068-c455-4391-9db2-b54a20b99df3", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "domain_name": null, - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "description": null, - "is_protected": false - } -} diff --git a/api-ref/source/v1.1/samples/cluster-templates/cluster-template-update-request.json b/api-ref/source/v1.1/samples/cluster-templates/cluster-template-update-request.json deleted file mode 100644 index 885150e607..0000000000 --- a/api-ref/source/v1.1/samples/cluster-templates/cluster-template-update-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "description": "Updated template", - "plugin_name": "vanilla", - "hadoop_version": "2.7.1", - "name": "vanilla-updated", - "cluster_configs": { - "HDFS": { - "dfs.replication": 2 - } - } -} diff --git a/api-ref/source/v1.1/samples/cluster-templates/cluster-template-update-response.json b/api-ref/source/v1.1/samples/cluster-templates/cluster-template-update-response.json deleted file mode 100644 index bc0800b0e6..0000000000 --- a/api-ref/source/v1.1/samples/cluster-templates/cluster-template-update-response.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "cluster_template": { - "is_public": false, - "anti_affinity": [], - "name": "vanilla-updated", - "created_at": "2015-08-21T08:41:24", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "cluster_configs": { - "HDFS": { - "dfs.replication": 2 - } - }, - "shares": null, - "id": "84d47e85-6094-473f-bf6d-5a7e6e86564e", - "default_image_id": null, - "is_default": false, - "updated_at": 
"2015-09-14T10:45:57", - "plugin_name": "vanilla", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": {}, - "JobFlow": {}, - "MapReduce": {}, - "Hive": {}, - "Hadoop": {}, - "HDFS": {} - }, - "auto_security_group": true, - "availability_zone": "", - "count": 1, - "flavor_id": "3", - "id": "57b966ab-617e-4735-bf60-0cb991208a52", - "security_groups": [], - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-08-21T08:41:24", - "node_group_template_id": "a5533187-3f14-42c3-ba3a-196c13fe0fb5", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "all", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "datanode", - "historyserver", - "resourcemanager", - "nodemanager", - "oozie" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "neutron_management_network": null, - "domain_name": null, - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "description": "Updated template", - "is_protected": false - } -} diff --git a/api-ref/source/v1.1/samples/cluster-templates/cluster-templates-list-response.json b/api-ref/source/v1.1/samples/cluster-templates/cluster-templates-list-response.json deleted file mode 100644 index a5ebbf7b88..0000000000 --- a/api-ref/source/v1.1/samples/cluster-templates/cluster-templates-list-response.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "cluster_templates": [ - { - "is_public": false, - "anti_affinity": [], - "name": "cluster-template", - "created_at": "2015-09-14T10:38:44", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "cluster_configs": {}, - "shares": null, - "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "default_image_id": null, - "is_default": false, - "updated_at": null, - "plugin_name": "vanilla", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "1751c04e-8f39-467e-a421-480961172d4b", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "3ee85068-c455-4391-9db2-b54a20b99df3", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "domain_name": null, - 
"hadoop_version": "2.7.1", - "use_autoconfig": true, - "description": null, - "is_protected": false - }, - { - "is_public": true, - "anti_affinity": [], - "name": "asd", - "created_at": "2015-08-18T08:39:39", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "cluster_configs": { - "general": {} - }, - "shares": null, - "id": "5a9c787c-2078-4f7d-9a66-27759be9051b", - "default_image_id": null, - "is_default": false, - "updated_at": "2015-09-14T08:41:15", - "plugin_name": "vanilla", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": true, - "availability_zone": "", - "count": 1, - "flavor_id": "2", - "id": "a65864dd-3f99-4d29-a011-f7711cc23fa0", - "security_groups": [], - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-08-18T08:39:39", - "node_group_template_id": "42ce49de-1b8f-41d5-8f4a-244ec0826d92", - "updated_at": null, - "volumes_per_node": 1, - "is_proxy_gateway": false, - "name": "asd", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "jobtracker" - ], - "volumes_size": 10, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "neutron_management_network": null, - "domain_name": null, - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "description": "", - "is_protected": false - } - ], - "markers": { - "prev": null, - "next": "2c76e0d3-56cd-4d28-bb4f-4808e538c7b9" - } -} diff --git a/api-ref/source/v1.1/samples/clusters/cluster-create-request.json b/api-ref/source/v1.1/samples/clusters/cluster-create-request.json deleted file mode 100644 index c579a285cb..0000000000 --- a/api-ref/source/v1.1/samples/clusters/cluster-create-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "plugin_name": "vanilla", - "hadoop_version": "2.7.1", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "user_keypair_id": "test", - "name": "vanilla-cluster", - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd" -} diff --git a/api-ref/source/v1.1/samples/clusters/cluster-create-response.json b/api-ref/source/v1.1/samples/clusters/cluster-create-response.json deleted file mode 100644 index 992c22eaa0..0000000000 --- a/api-ref/source/v1.1/samples/clusters/cluster-create-response.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "cluster": { - "is_public": false, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "domain_name": null, - "status_description": "", - "plugin_name": "vanilla", - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "info": {}, - "user_keypair_id": "test", - "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", - "id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - 
"yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "provision_progress": [], - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "trust_id": null, - "description": null, - "created_at": "2015-09-14T10:57:11", - "is_protected": false, - "updated_at": "2015-09-14T10:57:12", - "is_transient": false, - "cluster_configs": { - "HDFS": { - "dfs.replication": 3 - } - }, - "anti_affinity": [], - "name": "vanilla-cluster", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "status": "Validating" - } -} diff --git a/api-ref/source/v1.1/samples/clusters/cluster-scale-request.json b/api-ref/source/v1.1/samples/clusters/cluster-scale-request.json deleted file mode 100644 index 8b61d5ea0f..0000000000 --- a/api-ref/source/v1.1/samples/clusters/cluster-scale-request.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "add_node_groups": [ - { - "count": 1, - "name": "b-worker", - "node_group_template_id": "bc270ffe-a086-4eeb-9baa-2f5a73504622" - } - ], - "resize_node_groups": [ - { - "count": 4, - "name": "worker" - } - ] -} diff --git a/api-ref/source/v1.1/samples/clusters/cluster-scale-response.json b/api-ref/source/v1.1/samples/clusters/cluster-scale-response.json deleted file mode 100644 
index fa33ae3639..0000000000 --- a/api-ref/source/v1.1/samples/clusters/cluster-scale-response.json +++ /dev/null @@ -1,370 +0,0 @@ -{ - "cluster": { - "info": { - "YARN": { - "Web UI": "http://172.18.168.115:8088", - "ResourceManager": "http://172.18.168.115:8032" - }, - "HDFS": { - "Web UI": "http://172.18.168.115:50070", - "NameNode": "hdfs://vanilla-cluster-master-0:9000" - }, - "MapReduce JobHistory Server": { - "Web UI": "http://172.18.168.115:19888" - }, - "JobFlow": { - "Oozie": "http://172.18.168.115:11000" - } - }, - "plugin_name": "vanilla", - "hadoop_version": "2.7.1", - "updated_at": "2015-09-14T11:01:15", - "name": "vanilla-cluster", - "id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", - "trust_id": null, - "status_description": "", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "is_protected": false, - "is_transient": false, - "provision_progress": [ - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Create Heat stack", - "step_type": "Engine: create cluster", - "updated_at": "2015-09-14T10:57:38", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:57:18", - "id": "0a6d95f9-30f4-4434-823a-a38a7999a5af" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 4, - "successful": true, - "step_name": "Configure instances", - "step_type": "Engine: create cluster", - "updated_at": "2015-09-14T10:58:22", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:58:16", - "id": "29f2b587-c34c-4871-9ed9-9235b411cd9a" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Start the following process(es): Oozie", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T11:01:15", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T11:00:27", - "id": "36f1efde-90f9-41c1-b409-aa1cf9623e3e" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 4, - "successful": true, - "step_name": "Configure instances", - "step_type": "Plugin: configure cluster", - "updated_at": "2015-09-14T10:59:21", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:58:22", - "id": "602bcc27-3a2d-42c8-8aca-ebc475319c72" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Configure topology data", - "step_type": "Plugin: configure cluster", - "updated_at": "2015-09-14T10:59:37", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:59:21", - "id": "7e291df1-2d32-410d-ae89-33ab6f83cf17" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 3, - "successful": true, - "step_name": "Start the following process(es): DataNodes, NodeManagers", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T11:00:11", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T11:00:01", - "id": 
"8ab7933c-ad61-4a4f-88db-23ce78ee10f6" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Await DataNodes start up", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T11:00:21", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T11:00:11", - "id": "9c8dc016-8c5b-4e80-9857-80c41f6bd971" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Start the following process(es): HistoryServer", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T11:00:27", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T11:00:21", - "id": "c6327532-222b-416c-858f-73dbb32b8e97" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 4, - "successful": true, - "step_name": "Wait for instance accessibility", - "step_type": "Engine: create cluster", - "updated_at": "2015-09-14T10:58:14", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:57:41", - "id": "d3eca726-8b44-473a-ac29-fba45a893725" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 0, - "successful": true, - "step_name": "Mount volumes to instances", - "step_type": "Engine: create cluster", - "updated_at": "2015-09-14T10:58:15", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:58:14", - "id": "d7a875ff-64bf-41aa-882d-b5061c8ee152" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Start the following process(es): ResourceManager", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T11:00:00", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:59:55", - "id": "ded7d227-10b8-4cb0-ab6c-25da1462bb7a" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Start the following process(es): NameNode", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T10:59:54", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:59:38", - "id": "e1701ff5-930a-4212-945a-43515dfe24d1" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 4, - "successful": true, - "step_name": "Assign IPs", - "step_type": "Engine: create cluster", - "updated_at": "2015-09-14T10:57:41", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:57:38", - "id": "eaf0ab1b-bf8f-48f0-8f2c-fa4f82f539b9" - } - ], - "status": "Active", - "description": null, - "use_autoconfig": true, - "shares": null, - "domain_name": null, - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "is_public": false, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "node_groups": [ - { - "volumes_per_node": 0, - "volume_type": null, - "updated_at": "2015-09-14T10:57:37", - "name": "b-worker", - "id": "b7a6dea4-c898-446b-8c67-4f378d4c06c4", - "node_group_template_id": "bc270ffe-a086-4eeb-9baa-2f5a73504622", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048, - "yarn.scheduler.maximum-allocation-mb": 2048 - }, - "MapReduce": { - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m", - "mapreduce.reduce.memory.mb": 512, - 
"mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "yarn.app.mapreduce.am.resource.mb": 256 - } - }, - "auto_security_group": false, - "volumes_availability_zone": null, - "use_autoconfig": true, - "security_groups": null, - "shares": null, - "node_processes": [ - "datanode", - "nodemanager" - ], - "availability_zone": null, - "flavor_id": "2", - "image_id": null, - "volume_local_to_instance": false, - "count": 1, - "volumes_size": 0, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "volume_mount_prefix": "/volumes/disk", - "instances": [], - "is_proxy_gateway": false, - "created_at": "2015-09-14T10:57:11" - }, - { - "volumes_per_node": 0, - "volume_type": null, - "updated_at": "2015-09-14T10:57:36", - "name": "master", - "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048, - "yarn.scheduler.maximum-allocation-mb": 2048 - }, - "MapReduce": { - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "yarn.app.mapreduce.am.resource.mb": 256 - } - }, - "auto_security_group": false, - "volumes_availability_zone": null, - "use_autoconfig": true, - "security_groups": null, - "shares": null, - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "availability_zone": null, - "flavor_id": "2", - "image_id": null, - "volume_local_to_instance": false, - "count": 1, - "volumes_size": 0, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "volume_mount_prefix": "/volumes/disk", - "instances": [ - { - "instance_id": "b9f16a07-88fc-423e-83a3-489598fe6737", - "internal_ip": "10.50.0.60", - "instance_name": "vanilla-cluster-master-0", - "updated_at": "2015-09-14T10:57:39", - "management_ip": "172.18.168.115", - "created_at": "2015-09-14T10:57:36", - "id": "4867d92e-cc7b-4cde-9a1a-149e91caa491" - } - ], - "is_proxy_gateway": false, - "created_at": "2015-09-14T10:57:11" - }, - { - "volumes_per_node": 0, - "volume_type": null, - "updated_at": "2015-09-14T10:57:37", - "name": "worker", - "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048, - "yarn.scheduler.maximum-allocation-mb": 2048 - }, - "MapReduce": { - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "yarn.app.mapreduce.am.resource.mb": 256 - } - }, - "auto_security_group": false, - "volumes_availability_zone": null, - "use_autoconfig": true, - "security_groups": null, - "shares": null, - "node_processes": [ - "datanode", - "nodemanager" - ], - "availability_zone": null, - "flavor_id": "2", - "image_id": null, - "volume_local_to_instance": false, - "count": 4, - "volumes_size": 0, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "volume_mount_prefix": "/volumes/disk", - "instances": [ - { - "instance_id": 
"0cf1ee81-aa72-48da-be2c-65bc2fa51f8f", - "internal_ip": "10.50.0.63", - "instance_name": "vanilla-cluster-worker-0", - "updated_at": "2015-09-14T10:57:39", - "management_ip": "172.18.168.118", - "created_at": "2015-09-14T10:57:37", - "id": "f3633b30-c1e4-4144-930b-ab5b780b87be" - }, - { - "instance_id": "4a937391-b594-4ad0-9a53-00a99a691383", - "internal_ip": "10.50.0.62", - "instance_name": "vanilla-cluster-worker-1", - "updated_at": "2015-09-14T10:57:40", - "management_ip": "172.18.168.117", - "created_at": "2015-09-14T10:57:37", - "id": "0d66fd93-f277-4a94-b46a-f5866aa0c38f" - }, - { - "instance_id": "839b1d56-6d0d-4aa4-9d05-30e029c276f8", - "internal_ip": "10.50.0.61", - "instance_name": "vanilla-cluster-worker-2", - "updated_at": "2015-09-14T10:57:40", - "management_ip": "172.18.168.116", - "created_at": "2015-09-14T10:57:37", - "id": "0982cefd-5c58-436e-8f1e-c1d0830f18a7" - } - ], - "is_proxy_gateway": false, - "created_at": "2015-09-14T10:57:11" - } - ], - "cluster_configs": { - "HDFS": { - "dfs.replication": 3 - } - }, - "user_keypair_id": "apavlov", - "anti_affinity": [], - "created_at": "2015-09-14T10:57:11" - } -} diff --git a/api-ref/source/v1.1/samples/clusters/cluster-show-response.json b/api-ref/source/v1.1/samples/clusters/cluster-show-response.json deleted file mode 100644 index 992c22eaa0..0000000000 --- a/api-ref/source/v1.1/samples/clusters/cluster-show-response.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "cluster": { - "is_public": false, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "domain_name": null, - "status_description": "", - "plugin_name": "vanilla", - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "info": {}, - "user_keypair_id": "test", - "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", - "id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - 
"historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "provision_progress": [], - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "trust_id": null, - "description": null, - "created_at": "2015-09-14T10:57:11", - "is_protected": false, - "updated_at": "2015-09-14T10:57:12", - "is_transient": false, - "cluster_configs": { - "HDFS": { - "dfs.replication": 3 - } - }, - "anti_affinity": [], - "name": "vanilla-cluster", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "status": "Validating" - } -} diff --git a/api-ref/source/v1.1/samples/clusters/cluster-update-request.json b/api-ref/source/v1.1/samples/clusters/cluster-update-request.json deleted file mode 100644 index ab01348afa..0000000000 --- a/api-ref/source/v1.1/samples/clusters/cluster-update-request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "name": "public-vanilla-cluster", - "is_public": true -} diff --git a/api-ref/source/v1.1/samples/clusters/cluster-update-response.json b/api-ref/source/v1.1/samples/clusters/cluster-update-response.json deleted file mode 100644 index 4dae13f4c9..0000000000 --- a/api-ref/source/v1.1/samples/clusters/cluster-update-response.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "cluster": { - "is_public": true, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "domain_name": null, - "status_description": "", - "plugin_name": "vanilla", - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "info": {}, - "user_keypair_id": "test", - "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", - "id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - 
"yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "provision_progress": [], - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "trust_id": null, - "description": null, - "created_at": "2015-09-14T10:57:11", - "is_protected": false, - "updated_at": "2015-09-14T10:57:12", - "is_transient": false, - "cluster_configs": { - "HDFS": { - "dfs.replication": 3 - } - }, - "anti_affinity": [], - "name": "public-vanilla-cluster", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "status": "Validating" - } -} diff --git a/api-ref/source/v1.1/samples/clusters/clusters-list-response.json b/api-ref/source/v1.1/samples/clusters/clusters-list-response.json deleted file mode 100644 index 13d9d0650c..0000000000 --- a/api-ref/source/v1.1/samples/clusters/clusters-list-response.json +++ /dev/null @@ -1,327 +0,0 @@ -{ - "clusters": [ - { - "is_public": false, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "domain_name": null, - "status_description": "", - "plugin_name": "vanilla", - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "info": { 
- "YARN": { - "Web UI": "http://172.18.168.115:8088", - "ResourceManager": "http://172.18.168.115:8032" - }, - "HDFS": { - "Web UI": "http://172.18.168.115:50070", - "NameNode": "hdfs://vanilla-cluster-master-0:9000" - }, - "JobFlow": { - "Oozie": "http://172.18.168.115:11000" - }, - "MapReduce JobHistory Server": { - "Web UI": "http://172.18.168.115:19888" - } - }, - "user_keypair_id": "apavlov", - "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", - "id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", - "security_groups": null, - "use_autoconfig": true, - "instances": [ - { - "created_at": "2015-09-14T10:57:36", - "id": "4867d92e-cc7b-4cde-9a1a-149e91caa491", - "management_ip": "172.18.168.115", - "updated_at": "2015-09-14T10:57:39", - "instance_id": "b9f16a07-88fc-423e-83a3-489598fe6737", - "internal_ip": "10.50.0.60", - "instance_name": "vanilla-cluster-master-0" - } - ], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": "2015-09-14T10:57:36", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", - "security_groups": null, - "use_autoconfig": true, - "instances": [ - { - "created_at": "2015-09-14T10:57:37", - "id": 
"f3633b30-c1e4-4144-930b-ab5b780b87be", - "management_ip": "172.18.168.118", - "updated_at": "2015-09-14T10:57:39", - "instance_id": "0cf1ee81-aa72-48da-be2c-65bc2fa51f8f", - "internal_ip": "10.50.0.63", - "instance_name": "vanilla-cluster-worker-0" - }, - { - "created_at": "2015-09-14T10:57:37", - "id": "0d66fd93-f277-4a94-b46a-f5866aa0c38f", - "management_ip": "172.18.168.117", - "updated_at": "2015-09-14T10:57:40", - "instance_id": "4a937391-b594-4ad0-9a53-00a99a691383", - "internal_ip": "10.50.0.62", - "instance_name": "vanilla-cluster-worker-1" - }, - { - "created_at": "2015-09-14T10:57:37", - "id": "0982cefd-5c58-436e-8f1e-c1d0830f18a7", - "management_ip": "172.18.168.116", - "updated_at": "2015-09-14T10:57:40", - "instance_id": "839b1d56-6d0d-4aa4-9d05-30e029c276f8", - "internal_ip": "10.50.0.61", - "instance_name": "vanilla-cluster-worker-2" - } - ], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": "2015-09-14T10:57:37", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "provision_progress": [ - { - "created_at": "2015-09-14T10:57:18", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "0a6d95f9-30f4-4434-823a-a38a7999a5af", - "step_type": "Engine: create cluster", - "step_name": "Create Heat stack", - "updated_at": "2015-09-14T10:57:38", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:58:16", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "29f2b587-c34c-4871-9ed9-9235b411cd9a", - "step_type": "Engine: create cluster", - "step_name": "Configure instances", - "updated_at": "2015-09-14T10:58:22", - "successful": true, - "total": 4, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T11:00:27", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "36f1efde-90f9-41c1-b409-aa1cf9623e3e", - "step_type": "Plugin: start cluster", - "step_name": "Start the following process(es): Oozie", - "updated_at": "2015-09-14T11:01:15", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:58:22", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "602bcc27-3a2d-42c8-8aca-ebc475319c72", - "step_type": "Plugin: configure cluster", - "step_name": "Configure instances", - "updated_at": "2015-09-14T10:59:21", - "successful": true, - "total": 4, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:59:21", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "7e291df1-2d32-410d-ae89-33ab6f83cf17", - "step_type": "Plugin: configure cluster", - "step_name": "Configure topology data", - "updated_at": "2015-09-14T10:59:37", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T11:00:01", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "8ab7933c-ad61-4a4f-88db-23ce78ee10f6", - "step_type": "Plugin: start cluster", - "step_name": "Start the following process(es): DataNodes, NodeManagers", - "updated_at": "2015-09-14T11:00:11", - "successful": true, - "total": 3, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - 
"created_at": "2015-09-14T11:00:11", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "9c8dc016-8c5b-4e80-9857-80c41f6bd971", - "step_type": "Plugin: start cluster", - "step_name": "Await DataNodes start up", - "updated_at": "2015-09-14T11:00:21", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T11:00:21", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "c6327532-222b-416c-858f-73dbb32b8e97", - "step_type": "Plugin: start cluster", - "step_name": "Start the following process(es): HistoryServer", - "updated_at": "2015-09-14T11:00:27", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:57:41", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "d3eca726-8b44-473a-ac29-fba45a893725", - "step_type": "Engine: create cluster", - "step_name": "Wait for instance accessibility", - "updated_at": "2015-09-14T10:58:14", - "successful": true, - "total": 4, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:58:14", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "d7a875ff-64bf-41aa-882d-b5061c8ee152", - "step_type": "Engine: create cluster", - "step_name": "Mount volumes to instances", - "updated_at": "2015-09-14T10:58:15", - "successful": true, - "total": 0, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:59:55", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "ded7d227-10b8-4cb0-ab6c-25da1462bb7a", - "step_type": "Plugin: start cluster", - "step_name": "Start the following process(es): ResourceManager", - "updated_at": "2015-09-14T11:00:00", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:59:38", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "e1701ff5-930a-4212-945a-43515dfe24d1", - "step_type": "Plugin: start cluster", - "step_name": "Start the following process(es): NameNode", - "updated_at": "2015-09-14T10:59:54", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:57:38", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "id": "eaf0ab1b-bf8f-48f0-8f2c-fa4f82f539b9", - "step_type": "Engine: create cluster", - "step_name": "Assign IPs", - "updated_at": "2015-09-14T10:57:41", - "successful": true, - "total": 4, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - } - ], - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "trust_id": null, - "description": null, - "created_at": "2015-09-14T10:57:11", - "is_protected": false, - "updated_at": "2015-09-14T11:01:15", - "is_transient": false, - "cluster_configs": { - "HDFS": { - "dfs.replication": 3 - } - }, - "anti_affinity": [], - "name": "vanilla-cluster", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "status": "Active" - } - ] -} diff --git a/api-ref/source/v1.1/samples/clusters/multiple-clusters-create-request.json b/api-ref/source/v1.1/samples/clusters/multiple-clusters-create-request.json deleted file mode 100644 index d5f0d29e87..0000000000 --- a/api-ref/source/v1.1/samples/clusters/multiple-clusters-create-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "plugin_name": "vanilla", - "hadoop_version": "2.6.0", - "cluster_template_id": "9951f86d-57ba-43d6-9cb0-14ed2ec7a6cf", - "default_image_id": "bc3c3d3c-2684-4bf8-a9fa-388fb71288a9", - 
"user_keypair_id": "test", - "name": "def-cluster", - "count": 2, - "cluster_configs": {}, - "neutron_management_network": "7e31648b-4b2e-4f32-9b0a-113581c27076" -} diff --git a/api-ref/source/v1.1/samples/clusters/multiple-clusters-create-response.json b/api-ref/source/v1.1/samples/clusters/multiple-clusters-create-response.json deleted file mode 100644 index 5b13bca55d..0000000000 --- a/api-ref/source/v1.1/samples/clusters/multiple-clusters-create-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "clusters": [ - "a007a3e7-658f-4568-b0f2-fe2fd5efc554", - "b012a6et-65hf-4566-b0f2-fe3fd7efc567" - ] -} diff --git a/api-ref/source/v1.1/samples/data-sources/data-source-register-hdfs-request.json b/api-ref/source/v1.1/samples/data-sources/data-source-register-hdfs-request.json deleted file mode 100644 index 9d9c9c945c..0000000000 --- a/api-ref/source/v1.1/samples/data-sources/data-source-register-hdfs-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "description": "This is hdfs input", - "url": "hdfs://test-master-node:8020/user/hadoop/input", - "type": "hdfs", - "name": "hdfs_input" -} diff --git a/api-ref/source/v1.1/samples/data-sources/data-source-register-hdfs-response.json b/api-ref/source/v1.1/samples/data-sources/data-source-register-hdfs-response.json deleted file mode 100644 index 6d637044f9..0000000000 --- a/api-ref/source/v1.1/samples/data-sources/data-source-register-hdfs-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "data_source": { - "is_public": false, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-03-26 11:09:36.148464", - "id": "d7fffe9c-3b42-46a9-8be8-e98f586fa7a9", - "updated_at": null, - "name": "hdfs_input", - "description": "This is hdfs input", - "url": "hdfs://test-master-node:8020/user/hadoop/input", - "type": "hdfs" - } -} diff --git a/api-ref/source/v1.1/samples/data-sources/data-source-register-swift-request.json b/api-ref/source/v1.1/samples/data-sources/data-source-register-swift-request.json deleted file mode 100644 index 30a1e535dd..0000000000 --- a/api-ref/source/v1.1/samples/data-sources/data-source-register-swift-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "description": "This is input", - "url": "swift://container/text", - "credentials": { - "password": "swordfish", - "user": "dev" - }, - "type": "swift", - "name": "swift_input" -} diff --git a/api-ref/source/v1.1/samples/data-sources/data-source-register-swift-response.json b/api-ref/source/v1.1/samples/data-sources/data-source-register-swift-response.json deleted file mode 100644 index 66a8c7bf0d..0000000000 --- a/api-ref/source/v1.1/samples/data-sources/data-source-register-swift-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "data_source": { - "is_public": false, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-03-26 11:18:10.691493", - "id": "953831f2-0852-49d8-ac71-af5805e25256", - "updated_at": null, - "name": "swift_input", - "description": "This is input", - "url": "swift://container/text", - "type": "swift" - } -} diff --git a/api-ref/source/v1.1/samples/data-sources/data-source-show-response.json b/api-ref/source/v1.1/samples/data-sources/data-source-show-response.json deleted file mode 100644 index 66a8c7bf0d..0000000000 --- a/api-ref/source/v1.1/samples/data-sources/data-source-show-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "data_source": { - "is_public": false, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-03-26 
11:18:10.691493", - "id": "953831f2-0852-49d8-ac71-af5805e25256", - "updated_at": null, - "name": "swift_input", - "description": "This is input", - "url": "swift://container/text", - "type": "swift" - } -} diff --git a/api-ref/source/v1.1/samples/data-sources/data-source-update-request.json b/api-ref/source/v1.1/samples/data-sources/data-source-update-request.json deleted file mode 100644 index 8397ae6545..0000000000 --- a/api-ref/source/v1.1/samples/data-sources/data-source-update-request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "description": "This is public input", - "is_protected": true -} diff --git a/api-ref/source/v1.1/samples/data-sources/data-source-update-response.json b/api-ref/source/v1.1/samples/data-sources/data-source-update-response.json deleted file mode 100644 index d874ed1cd1..0000000000 --- a/api-ref/source/v1.1/samples/data-sources/data-source-update-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "data_source": { - "is_public": true, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-09-15 12:32:24.847493", - "id": "953831f2-0852-49d8-ac71-af5805e25256", - "updated_at": "2015-09-15 12:34:42.597435", - "name": "swift_input", - "description": "This is public input", - "url": "swift://container/text", - "type": "swift" - } -} diff --git a/api-ref/source/v1.1/samples/data-sources/data-sources-list-response.json b/api-ref/source/v1.1/samples/data-sources/data-sources-list-response.json deleted file mode 100644 index 724542ec7c..0000000000 --- a/api-ref/source/v1.1/samples/data-sources/data-sources-list-response.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "data_sources": [ - { - "is_public": false, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-03-26 11:18:10", - "id": "953831f2-0852-49d8-ac71-af5805e25256", - "name": "swift_input", - "updated_at": null, - "description": "This is input", - "url": "swift://container/text", - "type": "swift" - }, - { - "is_public": false, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-03-26 11:09:36", - "id": "d7fffe9c-3b42-46a9-8be8-e98f586fa7a9", - "name": "hdfs_input", - "updated_at": null, - "description": "This is hdfs input", - "url": "hdfs://test-master-node:8020/user/hadoop/input", - "type": "hdfs" - } - ] -} diff --git a/api-ref/source/v1.1/samples/event-log/cluster-progress-response.json b/api-ref/source/v1.1/samples/event-log/cluster-progress-response.json deleted file mode 100644 index f1923093e3..0000000000 --- a/api-ref/source/v1.1/samples/event-log/cluster-progress-response.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "status": "Error", - "neutron_management_network": "7e31648b-4b2e-4f32-9b0a-113581c27076", - "is_transient": false, - "description": "", - "user_keypair_id": "vgridnev", - "updated_at": "2015-03-31 14:10:59", - "plugin_name": "spark", - "provision_progress": [ - { - "successful": false, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-03-31 14:10:20", - "step_type": "Engine: create cluster", - "updated_at": "2015-03-31 14:10:35", - "events": [ - { - "instance_name": "sample-worker-spark-004", - "successful": false, - "created_at": "2015-03-31 14:10:35", - "updated_at": null, - "event_info": "Node sample-worker-spark-004 has error status\nError ID: 3e238c82-d1f5-4560-8ed8-691e923e16a0", - "instance_id": "b5ba5ba8-e9c1-47f7-9355-3ce0ec0e449d", - "node_group_id": "145cf2fb-dcdf-42af-a4b9-a4047d2919d4", - "step_id": 
"3f243c67-2c27-47c7-a0c0-0834ad17f8b6", - "id": "34afcfc7-bdb0-43cb-b142-283d560dc6ad" - }, - { - "instance_name": "sample-worker-spark-001", - "successful": true, - "created_at": "2015-03-31 14:10:35", - "updated_at": null, - "event_info": null, - "instance_id": "c532ab71-38da-475a-95f8-f8eb93b8f1c2", - "node_group_id": "145cf2fb-dcdf-42af-a4b9-a4047d2919d4", - "step_id": "3f243c67-2c27-47c7-a0c0-0834ad17f8b6", - "id": "4ba50414-5216-4161-bc7a-12716122b99d" - } - ], - "cluster_id": "c26ec982-ba6b-4d75-818c-a50240164af0", - "step_name": "Wait for instances to become active", - "total": 5, - "id": "3f243c67-2c27-47c7-a0c0-0834ad17f8b6" - }, - { - "successful": true, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-03-31 14:10:12", - "step_type": "Engine: create cluster", - "updated_at": "2015-03-31 14:10:19", - "events": [], - "cluster_id": "c26ec982-ba6b-4d75-818c-a50240164af0", - "step_name": "Run instances", - "total": 5, - "id": "407ba50a-c799-46af-9dfb-6aa5f6ade426" - } - ], - "anti_affinity": [], - "node_groups": [], - "management_public_key": "Sahara", - "status_description": "Creating cluster failed for the following reason(s): Node sample-worker-spark-004 has error status\nError ID: 3e238c82-d1f5-4560-8ed8-691e923e16a0", - "hadoop_version": "1.0.0", - "id": "c26ec982-ba6b-4d75-1f8c-a50240164af0", - "trust_id": null, - "info": {}, - "cluster_template_id": "5a9a09a3-9349-43bd-9058-16c401fad2d5", - "name": "sample", - "cluster_configs": {}, - "created_at": "2015-03-31 14:10:07", - "default_image_id": "e6a6c5da-67be-4017-a7d2-81f466efe67e", - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda" -} diff --git a/api-ref/source/v1.1/samples/image-registry/image-register-request.json b/api-ref/source/v1.1/samples/image-registry/image-register-request.json deleted file mode 100644 index 7bd4d15efd..0000000000 --- a/api-ref/source/v1.1/samples/image-registry/image-register-request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "username": "ubuntu", - "description": "Ubuntu image for Hadoop 2.7.1" -} diff --git a/api-ref/source/v1.1/samples/image-registry/image-register-response.json b/api-ref/source/v1.1/samples/image-registry/image-register-response.json deleted file mode 100644 index 5851a58ec2..0000000000 --- a/api-ref/source/v1.1/samples/image-registry/image-register-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "image": { - "updated": "2015-03-24T10:05:10Z", - "metadata": { - "_sahara_description": "Ubuntu image for Hadoop 2.7.1", - "_sahara_username": "ubuntu", - "_sahara_tag_vanilla": true, - "_sahara_tag_2.7.1": true - }, - "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", - "minDisk": 0, - "status": "ACTIVE", - "tags": [ - "vanilla", - "2.7.1" - ], - "minRam": 0, - "progress": 100, - "username": "ubuntu", - "created": "2015-02-03T10:28:39Z", - "name": "sahara-vanilla-2.7.1-ubuntu-14.04", - "description": "Ubuntu image for Hadoop 2.7.1", - "OS-EXT-IMG-SIZE:size": 1101856768 - } -} diff --git a/api-ref/source/v1.1/samples/image-registry/image-show-response.json b/api-ref/source/v1.1/samples/image-registry/image-show-response.json deleted file mode 100644 index 0f09f23f56..0000000000 --- a/api-ref/source/v1.1/samples/image-registry/image-show-response.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "image": { - "updated": "2015-02-03T10:29:32Z", - "metadata": { - "_sahara_username": "ubuntu", - "_sahara_tag_vanilla": true, - "_sahara_tag_2.6.0": true - }, - "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", - "minDisk": 0, - "status": "ACTIVE", - "tags": [ - "vanilla", - "2.6.0" - ], - 
"minRam": 0, - "progress": 100, - "username": "ubuntu", - "created": "2015-02-03T10:28:39Z", - "name": "sahara-vanilla-2.6.0-ubuntu-14.04", - "description": null, - "OS-EXT-IMG-SIZE:size": 1101856768 - } -} diff --git a/api-ref/source/v1.1/samples/image-registry/image-tags-add-request.json b/api-ref/source/v1.1/samples/image-registry/image-tags-add-request.json deleted file mode 100644 index aa69662a6a..0000000000 --- a/api-ref/source/v1.1/samples/image-registry/image-tags-add-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "tags": [ - "vanilla", - "2.7.1", - "some_other_tag" - ] -} diff --git a/api-ref/source/v1.1/samples/image-registry/image-tags-add-response.json b/api-ref/source/v1.1/samples/image-registry/image-tags-add-response.json deleted file mode 100644 index 2c66b2930d..0000000000 --- a/api-ref/source/v1.1/samples/image-registry/image-tags-add-response.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "image": { - "updated": "2015-03-24T10:18:33Z", - "metadata": { - "_sahara_tag_vanilla": true, - "_sahara_description": "Ubuntu image for Hadoop 2.7.1", - "_sahara_username": "ubuntu", - "_sahara_tag_some_other_tag": true, - "_sahara_tag_2.7.1": true - }, - "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", - "minDisk": 0, - "status": "ACTIVE", - "tags": [ - "vanilla", - "some_other_tag", - "2.7.1" - ], - "minRam": 0, - "progress": 100, - "username": "ubuntu", - "created": "2015-02-03T10:28:39Z", - "name": "sahara-vanilla-2.6.0-ubuntu-14.04", - "description": "Ubuntu image for Hadoop 2.7.1", - "OS-EXT-IMG-SIZE:size": 1101856768 - } -} diff --git a/api-ref/source/v1.1/samples/image-registry/image-tags-delete-request.json b/api-ref/source/v1.1/samples/image-registry/image-tags-delete-request.json deleted file mode 100644 index 44e1cef468..0000000000 --- a/api-ref/source/v1.1/samples/image-registry/image-tags-delete-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "tags": [ - "some_other_tag" - ] -} diff --git a/api-ref/source/v1.1/samples/image-registry/image-tags-delete-response.json b/api-ref/source/v1.1/samples/image-registry/image-tags-delete-response.json deleted file mode 100644 index 44eb131390..0000000000 --- a/api-ref/source/v1.1/samples/image-registry/image-tags-delete-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "image": { - "updated": "2015-03-24T10:19:28Z", - "metadata": { - "_sahara_description": "Ubuntu image for Hadoop 2.7.1", - "_sahara_username": "ubuntu", - "_sahara_tag_vanilla": true, - "_sahara_tag_2.7.1": true - }, - "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", - "minDisk": 0, - "status": "ACTIVE", - "tags": [ - "vanilla", - "2.7.1" - ], - "minRam": 0, - "progress": 100, - "username": "ubuntu", - "created": "2015-02-03T10:28:39Z", - "name": "sahara-vanilla-2.7.1-ubuntu-14.04", - "description": "Ubuntu image for Hadoop 2.7.1", - "OS-EXT-IMG-SIZE:size": 1101856768 - } -} diff --git a/api-ref/source/v1.1/samples/image-registry/images-list-response.json b/api-ref/source/v1.1/samples/image-registry/images-list-response.json deleted file mode 100644 index d40f0c215f..0000000000 --- a/api-ref/source/v1.1/samples/image-registry/images-list-response.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "images": [ - { - "name": "ubuntu-vanilla-2.7.1", - "id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "created": "2015-08-06T08:17:14Z", - "metadata": { - "_sahara_tag_2.7.1": true, - "_sahara_username": "ubuntu", - "_sahara_tag_vanilla": true - }, - "username": "ubuntu", - "progress": 100, - "OS-EXT-IMG-SIZE:size": 998716928, - "status": "ACTIVE", - "minDisk": 0, - "tags": [ - "vanilla", - 
"2.7.1" - ], - "updated": "2015-09-04T09:35:09Z", - "minRam": 0, - "description": null - }, - { - "name": "cdh-latest", - "id": "ff74035b-9da7-4edf-981d-57f270ed337d", - "created": "2015-09-04T11:56:44Z", - "metadata": { - "_sahara_username": "ubuntu", - "_sahara_tag_5.4.0": true, - "_sahara_tag_cdh": true - }, - "username": "ubuntu", - "progress": 100, - "OS-EXT-IMG-SIZE:size": 3281453056, - "status": "ACTIVE", - "minDisk": 0, - "tags": [ - "5.4.0", - "cdh" - ], - "updated": "2015-09-04T12:46:42Z", - "minRam": 0, - "description": null - } - ] -} diff --git a/api-ref/source/v1.1/samples/job-binaries/create-request.json b/api-ref/source/v1.1/samples/job-binaries/create-request.json deleted file mode 100644 index f32e15b40f..0000000000 --- a/api-ref/source/v1.1/samples/job-binaries/create-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "url": "swift://container/jar-example.jar", - "name": "jar-example.jar", - "description": "This is a job binary", - "extra": { - "password": "swordfish", - "user": "admin" - } -} diff --git a/api-ref/source/v1.1/samples/job-binaries/create-response.json b/api-ref/source/v1.1/samples/job-binaries/create-response.json deleted file mode 100644 index feba49ef79..0000000000 --- a/api-ref/source/v1.1/samples/job-binaries/create-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "job_binary": { - "is_public": false, - "description": "This is a job binary", - "url": "swift://container/jar-example.jar", - "tenant_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 14:49:20.106452", - "id": "07f86352-ee8a-4b08-b737-d705ded5ff9c", - "updated_at": null, - "name": "jar-example.jar", - "is_protected": false - } -} diff --git a/api-ref/source/v1.1/samples/job-binaries/list-response.json b/api-ref/source/v1.1/samples/job-binaries/list-response.json deleted file mode 100644 index 3e5e5539a4..0000000000 --- a/api-ref/source/v1.1/samples/job-binaries/list-response.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "binaries": [ - { - "is_public": false, - "description": "", - "url": "internal-db://d2498cbf-4589-484a-a814-81436c18beb3", - "tenant_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 12:36:59.375060", - "updated_at": null, - "id": "84248975-3c82-4206-a58d-6e7fb3a563fd", - "name": "example.pig", - "is_protected": false - }, - { - "is_public": false, - "description": "", - "url": "internal-db://22f1d87a-23c8-483e-a0dd-cb4a16dde5f9", - "tenant_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 12:43:52.265899", - "updated_at": null, - "id": "508fc62d-1d58-4412-b603-bdab307bb926", - "name": "udf.jar", - "is_protected": false - }, - { - "is_public": false, - "description": "", - "url": "swift://container/jar-example.jar", - "tenant_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 14:25:04.970513", - "updated_at": null, - "id": "a716a9cd-9add-4b12-b1b6-cdb71aaef350", - "name": "jar-example.jar", - "is_protected": false - } - ] -} diff --git a/api-ref/source/v1.1/samples/job-binaries/show-data-response b/api-ref/source/v1.1/samples/job-binaries/show-data-response deleted file mode 100644 index 8765f0c6c5..0000000000 --- a/api-ref/source/v1.1/samples/job-binaries/show-data-response +++ /dev/null @@ -1,3 +0,0 @@ -A = load '$INPUT' using PigStorage(':') as (fruit: chararray); -B = foreach A generate com.hadoopbook.pig.Trim(fruit); -store B into '$OUTPUT' USING PigStorage(); \ No newline at end of file diff --git a/api-ref/source/v1.1/samples/job-binaries/show-response.json 
b/api-ref/source/v1.1/samples/job-binaries/show-response.json deleted file mode 100644 index a46f819067..0000000000 --- a/api-ref/source/v1.1/samples/job-binaries/show-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "job_binary": { - "is_public": false, - "description": "an example jar file", - "url": "swift://container/jar-example.jar", - "tenant_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 14:25:04.970513", - "updated_at": null, - "id": "a716a9cd-9add-4b12-b1b6-cdb71aaef350", - "name": "jar-example.jar", - "is_protected": false - } -} diff --git a/api-ref/source/v1.1/samples/job-binaries/update-request.json b/api-ref/source/v1.1/samples/job-binaries/update-request.json deleted file mode 100644 index 456b0b209c..0000000000 --- a/api-ref/source/v1.1/samples/job-binaries/update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "url": "swift://container/new-jar-example.jar", - "name": "new-jar-example.jar", - "description": "This is a new job binary" -} diff --git a/api-ref/source/v1.1/samples/job-binaries/update-response.json b/api-ref/source/v1.1/samples/job-binaries/update-response.json deleted file mode 100644 index 947049e6eb..0000000000 --- a/api-ref/source/v1.1/samples/job-binaries/update-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "job_binary": { - "is_public": false, - "description": "This is a new job binary", - "url": "swift://container/new-jar-example.jar", - "tenant_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2015-09-15 12:42:51.421542", - "updated_at": null, - "id": "b713d7ad-4add-4f12-g1b6-cdg71aaef350", - "name": "new-jar-example.jar", - "is_protected": false - } -} diff --git a/api-ref/source/v1.1/samples/job-binary-internals/create-response.json b/api-ref/source/v1.1/samples/job-binary-internals/create-response.json deleted file mode 100644 index a8d23d58ce..0000000000 --- a/api-ref/source/v1.1/samples/job-binary-internals/create-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "job_binary_internal": { - "is_public": false, - "name": "script.pig", - "tenant_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 13:17:35.994466", - "updated_at": null, - "datasize": 160, - "id": "4833dc4b-8682-4d5b-8a9f-2036b47a0996", - "is_protected": false - } -} diff --git a/api-ref/source/v1.1/samples/job-binary-internals/list-response.json b/api-ref/source/v1.1/samples/job-binary-internals/list-response.json deleted file mode 100644 index d5a2909ab3..0000000000 --- a/api-ref/source/v1.1/samples/job-binary-internals/list-response.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "binaries": [ - { - "is_public": false, - "name": "example.pig", - "tenant_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 12:36:59.329034", - "updated_at": null, - "datasize": 161, - "id": "d2498cbf-4589-484a-a814-81436c18beb3", - "is_protected": false - }, - { - "is_public": false, - "name": "udf.jar", - "tenant_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 12:43:52.008620", - "updated_at": null, - "datasize": 3745, - "id": "22f1d87a-23c8-483e-a0dd-cb4a16dde5f9", - "is_protected": false - } - ] -} diff --git a/api-ref/source/v1.1/samples/job-binary-internals/show-data-response b/api-ref/source/v1.1/samples/job-binary-internals/show-data-response deleted file mode 100644 index 12df7a847a..0000000000 --- a/api-ref/source/v1.1/samples/job-binary-internals/show-data-response +++ /dev/null @@ -1,3 +0,0 @@ -A = load '$INPUT' using PigStorage(':') as (fruit: chararray); -B = foreach A generate com.hadoopbook.pig.Trim(fruit); 
-store B into '$OUTPUT' USING PigStorage() \ No newline at end of file diff --git a/api-ref/source/v1.1/samples/job-binary-internals/show-response.json b/api-ref/source/v1.1/samples/job-binary-internals/show-response.json deleted file mode 100644 index a8d23d58ce..0000000000 --- a/api-ref/source/v1.1/samples/job-binary-internals/show-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "job_binary_internal": { - "is_public": false, - "name": "script.pig", - "tenant_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 13:17:35.994466", - "updated_at": null, - "datasize": 160, - "id": "4833dc4b-8682-4d5b-8a9f-2036b47a0996", - "is_protected": false - } -} diff --git a/api-ref/source/v1.1/samples/job-binary-internals/update-request.json b/api-ref/source/v1.1/samples/job-binary-internals/update-request.json deleted file mode 100644 index d6e2920525..0000000000 --- a/api-ref/source/v1.1/samples/job-binary-internals/update-request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "name": "public-jbi", - "is_public": true -} diff --git a/api-ref/source/v1.1/samples/job-binary-internals/update-response.json b/api-ref/source/v1.1/samples/job-binary-internals/update-response.json deleted file mode 100644 index 19fed48c5f..0000000000 --- a/api-ref/source/v1.1/samples/job-binary-internals/update-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "job_binary_internal": { - "is_public": true, - "name": "public-jbi", - "tenant_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2015-09-15 13:21:54.485912", - "updated_at": "2015-09-15 13:24:24.590124", - "datasize": 200, - "id": "2433dc4b-8682-4d5b-8a9f-2036d47a0996", - "is_protected": false - } -} diff --git a/api-ref/source/v1.1/samples/job-executions/cancel-response.json b/api-ref/source/v1.1/samples/job-executions/cancel-response.json deleted file mode 100644 index 251c746c6b..0000000000 --- a/api-ref/source/v1.1/samples/job-executions/cancel-response.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "job_execution": { - "job_configs": { - "configs": { - "mapred.reduce.tasks": "1", - "mapred.map.tasks": "1" - }, - "args": [ - "arg1", - "arg2" - ], - "params": { - "param2": "value2", - "param1": "value1" - } - }, - "is_protected": false, - "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", - "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", - "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", - "created_at": "2015-09-15T09:49:24", - "end_time": "2015-09-15T12:50:46", - "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", - "is_public": false, - "updated_at": "2015-09-15T09:50:46", - "return_code": null, - "data_source_urls": { - "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", - "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" - }, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "start_time": "2015-09-15T12:49:43", - "id": "20da9edb-12ce-4b45-a473-41baeefef997", - "oozie_job_id": "0000001-150915094349962-oozie-hado-W", - "info": { - "user": "hadoop", - "actions": [ - { - "name": ":start:", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": "job-node", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "errorCode": null, - "id": "0000001-150915094349962-oozie-hado-W@:start:", - "consoleUrl": "-", - "errorMessage": null, - "toString": "Action name[:start:] status[OK]", - "stats": null, - "type": ":START:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "job-node", - 
"trackerUri": "http://172.18.168.119:8032", - "externalStatus": "FAILED/KILLED", - "status": "ERROR", - "externalId": "job_1442310173665_0002", - "transition": "fail", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "JA018", - "id": "0000001-150915094349962-oozie-hado-W@job-node", - "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", - "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", - "toString": "Action name[job-node] status[ERROR]", - "stats": null, - "type": "pig", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "fail", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": null, - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "E0729", - "id": "0000001-150915094349962-oozie-hado-W@fail", - "consoleUrl": "-", - "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", - "toString": "Action name[fail] status[OK]", - "stats": null, - "type": ":KILL:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "externalChildIDs": null, - "cred": "null" - } - ], - "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", - "status": "KILLED", - "group": null, - "externalId": null, - "acl": null, - "run": 0, - "appName": "job-wf", - "parentId": null, - "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", - "id": "0000001-150915094349962-oozie-hado-W", - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", - "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" - } - } -} diff --git a/api-ref/source/v1.1/samples/job-executions/job-ex-response.json b/api-ref/source/v1.1/samples/job-executions/job-ex-response.json deleted file mode 100644 index 251c746c6b..0000000000 --- a/api-ref/source/v1.1/samples/job-executions/job-ex-response.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "job_execution": { - "job_configs": { - "configs": { - "mapred.reduce.tasks": "1", - "mapred.map.tasks": "1" - }, - "args": [ - "arg1", - "arg2" - ], - "params": { - "param2": "value2", - "param1": "value1" - } - }, - "is_protected": false, - "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", - "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", - "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", - "created_at": "2015-09-15T09:49:24", - "end_time": "2015-09-15T12:50:46", - "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", - "is_public": false, - "updated_at": "2015-09-15T09:50:46", - "return_code": null, - "data_source_urls": { - "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", - "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" - }, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - 
"start_time": "2015-09-15T12:49:43", - "id": "20da9edb-12ce-4b45-a473-41baeefef997", - "oozie_job_id": "0000001-150915094349962-oozie-hado-W", - "info": { - "user": "hadoop", - "actions": [ - { - "name": ":start:", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": "job-node", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "errorCode": null, - "id": "0000001-150915094349962-oozie-hado-W@:start:", - "consoleUrl": "-", - "errorMessage": null, - "toString": "Action name[:start:] status[OK]", - "stats": null, - "type": ":START:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "job-node", - "trackerUri": "http://172.18.168.119:8032", - "externalStatus": "FAILED/KILLED", - "status": "ERROR", - "externalId": "job_1442310173665_0002", - "transition": "fail", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "JA018", - "id": "0000001-150915094349962-oozie-hado-W@job-node", - "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", - "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", - "toString": "Action name[job-node] status[ERROR]", - "stats": null, - "type": "pig", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "fail", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": null, - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "E0729", - "id": "0000001-150915094349962-oozie-hado-W@fail", - "consoleUrl": "-", - "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", - "toString": "Action name[fail] status[OK]", - "stats": null, - "type": ":KILL:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "externalChildIDs": null, - "cred": "null" - } - ], - "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", - "status": "KILLED", - "group": null, - "externalId": null, - "acl": null, - "run": 0, - "appName": "job-wf", - "parentId": null, - "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", - "id": "0000001-150915094349962-oozie-hado-W", - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", - "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" - } - } -} diff --git a/api-ref/source/v1.1/samples/job-executions/job-ex-update-request.json b/api-ref/source/v1.1/samples/job-executions/job-ex-update-request.json deleted file mode 100644 index 647a4175b9..0000000000 --- a/api-ref/source/v1.1/samples/job-executions/job-ex-update-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "is_public": true -} diff --git 
a/api-ref/source/v1.1/samples/job-executions/job-ex-update-response.json b/api-ref/source/v1.1/samples/job-executions/job-ex-update-response.json deleted file mode 100644 index a5d2484792..0000000000 --- a/api-ref/source/v1.1/samples/job-executions/job-ex-update-response.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "job_execution": { - "job_configs": { - "configs": { - "mapred.reduce.tasks": "1", - "mapred.map.tasks": "1" - }, - "args": [ - "arg1", - "arg2" - ], - "params": { - "param2": "value2", - "param1": "value1" - } - }, - "is_protected": false, - "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", - "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", - "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", - "created_at": "2015-09-15T09:49:24", - "end_time": "2015-09-15T12:50:46", - "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", - "is_public": true, - "updated_at": "2015-09-15T09:50:46", - "return_code": null, - "data_source_urls": { - "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", - "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" - }, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "start_time": "2015-09-15T12:49:43", - "id": "20da9edb-12ce-4b45-a473-41baeefef997", - "oozie_job_id": "0000001-150915094349962-oozie-hado-W", - "info": { - "user": "hadoop", - "actions": [ - { - "name": ":start:", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": "job-node", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "errorCode": null, - "id": "0000001-150915094349962-oozie-hado-W@:start:", - "consoleUrl": "-", - "errorMessage": null, - "toString": "Action name[:start:] status[OK]", - "stats": null, - "type": ":START:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "job-node", - "trackerUri": "http://172.18.168.119:8032", - "externalStatus": "FAILED/KILLED", - "status": "ERROR", - "externalId": "job_1442310173665_0002", - "transition": "fail", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "JA018", - "id": "0000001-150915094349962-oozie-hado-W@job-node", - "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", - "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", - "toString": "Action name[job-node] status[ERROR]", - "stats": null, - "type": "pig", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "fail", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": null, - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "E0729", - "id": "0000001-150915094349962-oozie-hado-W@fail", - "consoleUrl": "-", - "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", - "toString": "Action name[fail] status[OK]", - "stats": null, - "type": ":KILL:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "externalChildIDs": null, - "cred": "null" - } - ], - "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", - "status": "KILLED", - "group": null, - "externalId": null, - "acl": null, - "run": 0, - "appName": "job-wf", - "parentId": null, - "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n 
hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", - "id": "0000001-150915094349962-oozie-hado-W", - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", - "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" - } - } -} diff --git a/api-ref/source/v1.1/samples/job-executions/list-response.json b/api-ref/source/v1.1/samples/job-executions/list-response.json deleted file mode 100644 index fb085254a3..0000000000 --- a/api-ref/source/v1.1/samples/job-executions/list-response.json +++ /dev/null @@ -1,122 +0,0 @@ -{ - "job_executions": [ - { - "job_configs": { - "configs": { - "mapred.reduce.tasks": "1", - "mapred.map.tasks": "1" - }, - "args": [ - "arg1", - "arg2" - ], - "params": { - "param2": "value2", - "param1": "value1" - } - }, - "is_protected": false, - "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", - "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", - "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", - "created_at": "2015-09-15T09:49:24", - "end_time": "2015-09-15T12:50:46", - "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", - "is_public": false, - "updated_at": "2015-09-15T09:50:46", - "return_code": null, - "data_source_urls": { - "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", - "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" - }, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "start_time": "2015-09-15T12:49:43", - "id": "20da9edb-12ce-4b45-a473-41baeefef997", - "oozie_job_id": "0000001-150915094349962-oozie-hado-W", - "info": { - "user": "hadoop", - "actions": [ - { - "name": ":start:", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": "job-node", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "errorCode": null, - "id": "0000001-150915094349962-oozie-hado-W@:start:", - "consoleUrl": "-", - "errorMessage": null, - "toString": "Action name[:start:] status[OK]", - "stats": null, - "type": ":START:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "job-node", - "trackerUri": "http://172.18.168.119:8032", - "externalStatus": "FAILED/KILLED", - "status": "ERROR", - "externalId": "job_1442310173665_0002", - "transition": "fail", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "JA018", - "id": "0000001-150915094349962-oozie-hado-W@job-node", - "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", - "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", - "toString": "Action name[job-node] status[ERROR]", - "stats": null, - "type": "pig", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "fail", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": null, - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "E0729", - "id": 
"0000001-150915094349962-oozie-hado-W@fail", - "consoleUrl": "-", - "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", - "toString": "Action name[fail] status[OK]", - "stats": null, - "type": ":KILL:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "externalChildIDs": null, - "cred": "null" - } - ], - "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", - "status": "KILLED", - "group": null, - "externalId": null, - "acl": null, - "run": 0, - "appName": "job-wf", - "parentId": null, - "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", - "id": "0000001-150915094349962-oozie-hado-W", - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", - "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" - } - } - ] -} diff --git a/api-ref/source/v1.1/samples/job-types/job-types-list-response.json b/api-ref/source/v1.1/samples/job-types/job-types-list-response.json deleted file mode 100644 index c321c4fbd3..0000000000 --- a/api-ref/source/v1.1/samples/job-types/job-types-list-response.json +++ /dev/null @@ -1,209 +0,0 @@ -{ - "job_types": [ - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "Hive" - }, - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. 
It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "Java" - }, - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "MapReduce" - }, - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "MapReduce.Streaming" - }, - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "Pig" - }, - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. 
It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "Shell" - }, - { - "plugins": [ - { - "description": "This plugin provides an ability to launch Spark on Hadoop CDH cluster without any management consoles.", - "versions": { - "1.0.0": {} - }, - "title": "Apache Spark", - "name": "spark" - } - ], - "name": "Spark" - } - ] -} diff --git a/api-ref/source/v1.1/samples/jobs/job-create-request.json b/api-ref/source/v1.1/samples/jobs/job-create-request.json deleted file mode 100644 index b8d1a8ed19..0000000000 --- a/api-ref/source/v1.1/samples/jobs/job-create-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "description": "This is pig job example", - "mains": [ - "90d9d5ec-11aa-48bd-bc8c-34936ce0db6e" - ], - "libs": [ - "320a2ca7-25fd-4b48-9bc3-4fb1b6c4ff27" - ], - "type": "Pig", - "name": "pig-job-example" -} diff --git a/api-ref/source/v1.1/samples/jobs/job-create-response.json b/api-ref/source/v1.1/samples/jobs/job-create-response.json deleted file mode 100644 index 1413d83d22..0000000000 --- a/api-ref/source/v1.1/samples/jobs/job-create-response.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "job": { - "is_public": false, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-03-27 08:48:38.630827", - "id": "71defc8f-d005-484f-9d86-1aedf644d1ef", - "name": "pig-job-example", - "description": "This is pig job example", - "interface": [], - "libs": [ - { - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:53", - "id": "320a2ca7-25fd-4b48-9bc3-4fb1b6c4ff27", - "name": "binary-job", - "updated_at": null, - "description": "", - "url": "internal-db://c6a925fa-ac1d-4b2e-b88a-7054e1927521" - } - ], - "type": "Pig", - "is_protected": false, - "mains": [ - { - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-03 10:47:51", - "id": "90d9d5ec-11aa-48bd-bc8c-34936ce0db6e", - "name": "pig", - "updated_at": null, - "description": "", - "url": "internal-db://872878f6-72ea-44db-8d1d-e6a6396d2df0" - } - ] - } -} diff --git a/api-ref/source/v1.1/samples/jobs/job-execute-request.json b/api-ref/source/v1.1/samples/jobs/job-execute-request.json deleted file mode 100644 index 588358c819..0000000000 --- a/api-ref/source/v1.1/samples/jobs/job-execute-request.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", - "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", - "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", - "job_configs": { - "configs": { - "mapred.map.tasks": "1", - "mapred.reduce.tasks": "1" - }, - "args": [ - "arg1", - "arg2" - ], - "params": { - "param2": "value2", - "param1": "value1" - } - } -} diff --git a/api-ref/source/v1.1/samples/jobs/job-execute-response.json b/api-ref/source/v1.1/samples/jobs/job-execute-response.json deleted file mode 100644 index 28e66fe908..0000000000 --- a/api-ref/source/v1.1/samples/jobs/job-execute-response.json +++ /dev/null @@ -1,30 +0,0 @@ 
-{ - "job_execution": { - "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", - "is_protected": false, - "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", - "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", - "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", - "created_at": "2015-09-15T09:49:24", - "is_public": false, - "id": "20da9edb-12ce-4b45-a473-41baeefef997", - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "job_configs": { - "configs": { - "mapred.reduce.tasks": "1", - "mapred.map.tasks": "1" - }, - "args": [ - "arg1", - "arg2" - ], - "params": { - "param2": "value2", - "param1": "value1" - } - }, - "info": { - "status": "PENDING" - } - } -} diff --git a/api-ref/source/v1.1/samples/jobs/job-show-response.json b/api-ref/source/v1.1/samples/jobs/job-show-response.json deleted file mode 100644 index 43653e971e..0000000000 --- a/api-ref/source/v1.1/samples/jobs/job-show-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "job": { - "is_public": false, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:48", - "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", - "name": "Edp-test-job", - "updated_at": null, - "description": "", - "interface": [], - "libs": [ - { - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:48", - "id": "0ff4ac10-94a4-4e25-9ac9-603afe27b100", - "name": "binary-job.jar", - "updated_at": null, - "description": "", - "url": "swift://Edp-test-c71e6bce.sahara/binary-job.jar" - } - ], - "type": "MapReduce", - "mains": [], - "is_protected": false - } -} diff --git a/api-ref/source/v1.1/samples/jobs/job-update-request.json b/api-ref/source/v1.1/samples/jobs/job-update-request.json deleted file mode 100644 index 810b8a60b1..0000000000 --- a/api-ref/source/v1.1/samples/jobs/job-update-request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "description": "This is public pig job example", - "name": "public-pig-job-example" -} diff --git a/api-ref/source/v1.1/samples/jobs/job-update-response.json b/api-ref/source/v1.1/samples/jobs/job-update-response.json deleted file mode 100644 index 0ee7e27343..0000000000 --- a/api-ref/source/v1.1/samples/jobs/job-update-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "job": { - "is_public": false, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:48", - "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", - "name": "public-pig-job-example", - "updated_at": null, - "description": "This is public pig job example", - "interface": [], - "libs": [ - { - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:48", - "id": "0ff4ac10-94a4-4e25-9ac9-603afe27b100", - "name": "binary-job.jar", - "updated_at": null, - "description": "", - "url": "swift://Edp-test-c71e6bce.sahara/binary-job.jar" - } - ], - "type": "MapReduce", - "mains": [], - "is_protected": false - } -} diff --git a/api-ref/source/v1.1/samples/jobs/jobs-list-response.json b/api-ref/source/v1.1/samples/jobs/jobs-list-response.json deleted file mode 100644 index e19b3d59c4..0000000000 --- a/api-ref/source/v1.1/samples/jobs/jobs-list-response.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "jobs": [ - { - "is_public": false, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:48", - "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", - "name": "Edp-test-job-3d60854e", - "updated_at": null, - "description": "", - "interface": [], - "libs": [ - { - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:48", - "id": 
"0ff4ac10-94a4-4e25-9ac9-603afe27b100", - "name": "binary-job-339c2d1a.jar", - "updated_at": null, - "description": "", - "url": "swift://Edp-test-c71e6bce.sahara/binary-job-339c2d1a.jar" - } - ], - "type": "MapReduce", - "mains": [], - "is_protected": false - }, - { - "is_public": false, - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:44", - "id": "4d1f3759-3497-4927-8352-910bacf24e62", - "name": "Edp-test-job-6b6953c8", - "updated_at": null, - "description": "", - "interface": [], - "libs": [ - { - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:44", - "id": "e0d47800-4ac1-4d63-a2e1-c92d669a44e2", - "name": "binary-job-6f21a2f8.jar", - "updated_at": null, - "description": "", - "url": "swift://Edp-test-b409ec68.sahara/binary-job-6f21a2f8.jar" - } - ], - "type": "Pig", - "mains": [ - { - "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:44", - "id": "e073e896-f123-4b76-995f-901d786262df", - "name": "binary-job-d4f8bd75.pig", - "updated_at": null, - "description": "", - "url": "swift://Edp-test-b409ec68.sahara/binary-job-d4f8bd75.pig" - } - ], - "is_protected": false - } - ], - "markers": { - "prev": null, - "next": "c53832da-6e7b-449e-a166-9f9ce1718d03" - } -} diff --git a/api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-request.json b/api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-request.json deleted file mode 100644 index 802fcfb307..0000000000 --- a/api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-request.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "plugin_name": "vanilla", - "hadoop_version": "2.7.1", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "name": "master", - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "flavor_id": "2" -} diff --git a/api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-response.json b/api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-response.json deleted file mode 100644 index c9f9851a41..0000000000 --- a/api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-response.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "node_group_template": { - "is_public": false, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "is_default": false, - "availability_zone": null, - "plugin_name": "vanilla", - "is_protected": false, - "flavor_id": "2", - "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:20:11", - "security_groups": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } -} diff --git a/api-ref/source/v1.1/samples/node-group-templates/node-group-template-show-response.json b/api-ref/source/v1.1/samples/node-group-templates/node-group-template-show-response.json deleted file mode 100644 index 78fa9f970c..0000000000 --- a/api-ref/source/v1.1/samples/node-group-templates/node-group-template-show-response.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "node_group_template": { - "is_public": false, 
- "image_id": null, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "is_default": false, - "availability_zone": null, - "plugin_name": "vanilla", - "flavor_id": "2", - "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "description": null, - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:20:11", - "is_protected": false, - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "security_groups": null, - "volume_type": null - } -} diff --git a/api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-request.json b/api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-request.json deleted file mode 100644 index 124ef61875..0000000000 --- a/api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "plugin_name": "vanilla", - "hadoop_version": "2.7.1", - "node_processes": [ - "datanode" - ], - "name": "new", - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "flavor_id": "2" -} diff --git a/api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-response.json b/api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-response.json deleted file mode 100644 index bbb3161f2e..0000000000 --- a/api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-response.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "node_group_template": { - "is_public": false, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "is_default": false, - "availability_zone": null, - "plugin_name": "vanilla", - "is_protected": false, - "flavor_id": "2", - "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:20:11", - "security_groups": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "new", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } -} diff --git a/api-ref/source/v1.1/samples/node-group-templates/node-group-templates-list-response.json b/api-ref/source/v1.1/samples/node-group-templates/node-group-templates-list-response.json deleted file mode 100644 index c41683e926..0000000000 --- a/api-ref/source/v1.1/samples/node-group-templates/node-group-templates-list-response.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "node_group_templates": [ - { - "is_public": false, - "image_id": null, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "is_default": false, - "availability_zone": null, - "plugin_name": "vanilla", - "flavor_id": "2", - "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "description": null, - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:20:11", - 
"is_protected": false, - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "security_groups": null, - "volume_type": null - }, - { - "is_public": false, - "image_id": null, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "is_default": false, - "availability_zone": null, - "plugin_name": "vanilla", - "flavor_id": "2", - "id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "description": null, - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:27:00", - "is_protected": false, - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "security_groups": null, - "volume_type": null - } - ], - "markers": { - "prev":"39dfc852-8588-4b61-8d2b-eb08a67ab240", - "next":"eaa0bd97-ab54-43df-83ab-77a9774d7358" - } -} diff --git a/api-ref/source/v1.1/samples/plugins/plugin-show-response.json b/api-ref/source/v1.1/samples/plugins/plugin-show-response.json deleted file mode 100644 index 00b948a0e6..0000000000 --- a/api-ref/source/v1.1/samples/plugins/plugin-show-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "plugin": { - "name": "vanilla", - "versions": [ - "1.2.1", - "2.4.1", - "2.6.0" - ], - "title": "Vanilla Apache Hadoop", - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component." - } -} diff --git a/api-ref/source/v1.1/samples/plugins/plugin-update-request.json b/api-ref/source/v1.1/samples/plugins/plugin-update-request.json deleted file mode 100644 index 97a17c38f2..0000000000 --- a/api-ref/source/v1.1/samples/plugins/plugin-update-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "plugin_labels": { - "enabled": { - "status": false - } - } -} diff --git a/api-ref/source/v1.1/samples/plugins/plugin-update-response.json b/api-ref/source/v1.1/samples/plugins/plugin-update-response.json deleted file mode 100644 index 7541ae939c..0000000000 --- a/api-ref/source/v1.1/samples/plugins/plugin-update-response.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "plugin": { - "plugin_labels": { - "hidden": { - "status": true, - "mutable": true, - "description": "Existence of plugin or its version is hidden, but still can be used for cluster creation by CLI and directly by client." - }, - "enabled": { - "status": false, - "mutable": true, - "description": "Plugin or its version is enabled and can be used by user." - } - }, - "description": "It's a fake plugin that aimed to work on the CirrOS images. It doesn't install Hadoop. It's needed to be able to test provisioning part of Sahara codebase itself.", - "versions": [ - "0.1" - ], - "tenant_id": "993f53c1f51845e48e013aeb632358d8", - "title": "Fake Plugin", - "version_labels": { - "0.1": { - "enabled": { - "status": true, - "mutable": true, - "description": "Plugin or its version is enabled and can be used by user." 
- } - } - }, - "name": "fake" - } -} diff --git a/api-ref/source/v1.1/samples/plugins/plugin-version-show-response.json b/api-ref/source/v1.1/samples/plugins/plugin-version-show-response.json deleted file mode 100644 index cb1c175a59..0000000000 --- a/api-ref/source/v1.1/samples/plugins/plugin-version-show-response.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "plugin": { - "name": "vanilla", - "versions": [ - "1.2.1", - "2.4.1", - "2.6.0" - ], - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "required_image_tags": [ - "vanilla", - "2.6.0" - ], - "node_processes": { - "JobFlow": [ - "oozie" - ], - "HDFS": [ - "namenode", - "datanode", - "secondarynamenode" - ], - "YARN": [ - "resourcemanager", - "nodemanager" - ], - "MapReduce": [ - "historyserver" - ], - "Hadoop": [], - "Hive": [ - "hiveserver" - ] - }, - "configs": [ - { - "default_value": "/tmp/hadoop-${user.name}", - "name": "hadoop.tmp.dir", - "priority": 2, - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "node", - "description": "A base for other temporary directories." - }, - { - "default_value": true, - "name": "hadoop.native.lib", - "priority": 2, - "config_type": "bool", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "node", - "description": "Should native hadoop libraries, if present, be used." - }, - { - "default_value": 1024, - "name": "NodeManager Heap Size", - "config_values": null, - "priority": 1, - "config_type": "int", - "applicable_target": "YARN", - "is_optional": false, - "scope": "node", - "description": null - }, - { - "default_value": true, - "name": "Enable Swift", - "config_values": null, - "priority": 1, - "config_type": "bool", - "applicable_target": "general", - "is_optional": false, - "scope": "cluster", - "description": null - }, - { - "default_value": true, - "name": "Enable MySQL", - "config_values": null, - "priority": 1, - "config_type": "bool", - "applicable_target": "general", - "is_optional": true, - "scope": "cluster", - "description": null - } - ], - "title": "Vanilla Apache Hadoop" - } -} diff --git a/api-ref/source/v1.1/samples/plugins/plugins-list-response.json b/api-ref/source/v1.1/samples/plugins/plugins-list-response.json deleted file mode 100644 index d92d85c114..0000000000 --- a/api-ref/source/v1.1/samples/plugins/plugins-list-response.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "plugins": [ - { - "name": "vanilla", - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. 
It can also deploy the Oozie component.", - "versions": [ - "1.2.1", - "2.4.1", - "2.6.0" - ], - "title": "Vanilla Apache Hadoop" - }, - { - "name": "hdp", - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": [ - "1.3.2", - "2.0.6" - ], - "title": "Hortonworks Data Platform" - }, - { - "name": "spark", - "description": "This plugin provides an ability to launch Spark on Hadoop CDH cluster without any management consoles.", - "versions": [ - "1.0.0", - "0.9.1" - ], - "title": "Apache Spark" - }, - { - "name": "cdh", - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": [ - "5", - "5.3.0" - ], - "title": "Cloudera Plugin" - } - ] -} diff --git a/api-ref/source/v2/cluster-templates.inc b/api-ref/source/v2/cluster-templates.inc deleted file mode 100644 index 575af2a737..0000000000 --- a/api-ref/source/v2/cluster-templates.inc +++ /dev/null @@ -1,241 +0,0 @@ -.. -*- rst -*- - -================= -Cluster templates -================= - -A cluster template configures a cluster. A cluster template -lists node groups with the number of instances in each group. You -can also define cluster-scoped configurations in a cluster -template. - - -Show cluster template details -============================= - -.. rest_method:: GET /v2/cluster-templates/{cluster_template_id} - -Shows details for a cluster template. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - cluster_template_id: url_cluster_template_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: cluster_template_description - - use_autoconfig: use_autoconfig - - cluster_configs: cluster_configs - - created_at: created_at - - default_image_id: default_image_id - - updated_at: updated_at - - plugin_name: plugin_name - - is_default: is_default - - is_protected: object_is_protected - - shares: object_shares - - domain_name: domain_name - - project_id: project_id - - node_groups: node_groups - - is_public: object_is_public - - plugin_version: plugin_version - - id: cluster_template_id - - name: cluster_template_name - - - -Response Example ----------------- - -.. literalinclude:: samples/cluster-templates/cluster-templates-list-response.json - :language: javascript - - - - -Update cluster templates -======================== - -.. rest_method:: PATCH /v2/cluster-templates/{cluster_template_id} - -Updates a cluster template. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - cluster_template_id: cluster_template_id - -Request Example ---------------- - -.. literalinclude:: samples/cluster-templates/cluster-template-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - description: cluster_template_description - - use_autoconfig: use_autoconfig - - cluster_configs: cluster_configs - - created_at: created_at - - default_image_id: default_image_id - - updated_at: updated_at - - plugin_name: plugin_name - - is_default: is_default - - is_protected: object_is_protected - - shares: object_shares - - domain_name: domain_name - - project_id: project_id - - node_groups: node_groups - - is_public: object_is_public - - plugin_version: plugin_version - - id: cluster_template_id - - name: cluster_template_name - - - - - -Delete cluster template -======================= - -.. rest_method:: DELETE /v2/cluster-templates/{cluster_template_id} - -Deletes a cluster template. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - cluster_template_id: cluster_template_id - - - - - - -List cluster templates -====================== - -.. rest_method:: GET /v2/cluster-templates - -Lists available cluster templates. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - limit: limit - - marker: marker - - sort_by: sort_by_cluster_templates - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - description: cluster_template_description - - use_autoconfig: use_autoconfig - - cluster_configs: cluster_configs - - created_at: created_at - - default_image_id: default_image_id - - updated_at: updated_at - - plugin_name: plugin_name - - is_default: is_default - - is_protected: object_is_protected - - shares: object_shares - - domain_name: domain_name - - project_id: project_id - - node_groups: node_groups - - is_public: object_is_public - - plugin_version: plugin_version - - id: cluster_template_id - - name: cluster_template_name - - - -Response Example ----------------- -.. rest_method:: GET /v2/cluster-templates?limit=2 - -.. literalinclude:: samples/cluster-templates/cluster-templates-list-response.json - :language: javascript - - - - -Create cluster templates -======================== - -.. rest_method:: POST /v2/cluster-templates - -Creates a cluster template. - -Normal response codes:202 - - -Request Example ---------------- - -.. literalinclude:: samples/cluster-templates/cluster-template-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: cluster_template_description - - use_autoconfig: use_autoconfig - - cluster_configs: cluster_configs - - created_at: created_at - - default_image_id: default_image_id - - updated_at: updated_at - - plugin_name: plugin_name - - is_default: is_default - - is_protected: object_is_protected - - shares: object_shares - - domain_name: domain_name - - project_id: project_id - - node_groups: node_groups - - is_public: object_is_public - - plugin_version: plugin_version - - id: cluster_template_id - - name: cluster_template_name - - - - diff --git a/api-ref/source/v2/clusters.inc b/api-ref/source/v2/clusters.inc deleted file mode 100644 index 6fad3aa4a4..0000000000 --- a/api-ref/source/v2/clusters.inc +++ /dev/null @@ -1,293 +0,0 @@ -.. -*- rst -*- - -======== -Clusters -======== - -A cluster is a group of nodes with the same configuration. - - -List available clusters -======================= - -.. rest_method:: GET /v2/clusters - -Lists available clusters. - - -Normal response codes: 200 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - limit: limit - - marker: marker - - sort_by: sort_by_clusters - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - count: count - - info: info - - cluster_template_id: cluster_template_id - - is_transient: is_transient - - provision_progress: provision_progress - - status: status - - neutron_management_network: neutron_management_network - - clusters: clusters - - management_public_key: management_public_key - - status_description: status_description - - trust_id: trust_id - - domain_name: domain_name - - - -Response Example ----------------- -.. rest_method:: GET /v2/clusters - -.. literalinclude:: samples/clusters/clusters-list-response.json - :language: javascript - - - - -Create cluster -============== - -.. rest_method:: POST /v2/clusters - -Creates a cluster. - -Normal response codes: 202 - - - -Request Example ---------------- - -.. literalinclude:: samples/clusters/cluster-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - count: count - - info: info - - cluster_template_id: cluster_template_id - - is_transient: is_transient - - provision_progress: provision_progress - - status: status - - neutron_management_network: neutron_management_network - - management_public_key: management_public_key - - status_description: status_description - - trust_id: trust_id - - domain_name: domain_name - - - - - -Show details of a cluster -========================= - -.. rest_method:: GET /v2/clusters/{cluster_id} - -Shows details for a cluster, by ID. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - cluster_id: url_cluster_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - count: count - - info: info - - cluster_template_id: cluster_template_id - - is_transient: is_transient - - provision_progress: provision_progress - - status: status - - neutron_management_network: neutron_management_network - - management_public_key: management_public_key - - status_description: status_description - - trust_id: trust_id - - domain_name: domain_name - - - -Response Example ----------------- - -.. literalinclude:: samples/clusters/cluster-show-response.json - :language: javascript - - - - -Delete a cluster -================ - -.. rest_method:: DELETE /v2/clusters/{cluster_id} - -Deletes a cluster. - -Normal response codes: 204 or 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - cluster_id: url_cluster_id - - force: force - - - - - - -Scale cluster -============= - -.. rest_method:: PUT /v2/clusters/{cluster_id} - -Scales a cluster. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - cluster_id: cluster_id - -Request Example ---------------- - -.. literalinclude:: samples/clusters/cluster-scale-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - count: count - - info: info - - cluster_template_id: cluster_template_id - - is_transient: is_transient - - provision_progress: provision_progress - - status: status - - neutron_management_network: neutron_management_network - - management_public_key: management_public_key - - status_description: status_description - - trust_id: trust_id - - domain_name: domain_name - - - - -Update cluster -============== - -.. 
rest_method:: PATCH /v2/clusters/{cluster_id} - -Updates a cluster. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - cluster_id: url_cluster_id - -Request Example ---------------- - -.. literalinclude:: samples/clusters/cluster-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - count: count - - info: info - - cluster_template_id: cluster_template_id - - is_transient: is_transient - - provision_progress: provision_progress - - status: status - - neutron_management_network: neutron_management_network - - management_public_key: management_public_key - - status_description: status_description - - trust_id: trust_id - - domain_name: domain_name - - - - -Show progress -============= - -.. rest_method:: GET /v2/clusters/{cluster_id} - -Shows provisioning progress for a cluster. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - cluster_id: url_cluster_id - - - - -Response Example ----------------- - -.. literalinclude:: samples/event-log/cluster-progress-response.json - :language: javascript - - - diff --git a/api-ref/source/v2/data-sources.inc b/api-ref/source/v2/data-sources.inc deleted file mode 100644 index f4e98ae7bf..0000000000 --- a/api-ref/source/v2/data-sources.inc +++ /dev/null @@ -1,202 +0,0 @@ -.. -*- rst -*- - -============ -Data sources -============ - -A data source object defines the location of input or output for -MapReduce jobs and might reference different types of storage. - -The Data Processing service does not validate data source -locations. - - -Show data source details -======================== - -.. rest_method:: GET /v2/data-sources/{data_source_id} - -Shows details for a data source. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - data_source_id: url_data_source_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: data_source_description - - url: url - - project_id: project_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - type: type - - id: data_source_id - - name: data_source_name - - - -Response Example ----------------- - -.. literalinclude:: samples/data-sources/data-source-show-response.json - :language: javascript - - - - -Delete data source -================== - -.. rest_method:: DELETE /v2/data-sources/{data_source_id} - -Deletes a data source. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - data_source_id: url_data_source_id - - - - - - -Update data source -================== - -.. rest_method:: PATCH /v2/data-sources/{data_source_id} - -Updates a data source. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - data_source_id: url_data_source_id - -Request Example ---------------- - -.. literalinclude:: samples/data-sources/data-source-update-request.json - :language: javascript - - - - - - - -List data sources -================= - -.. rest_method:: GET /v2/data-sources - -Lists all data sources. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - limit: limit - - marker: marker - - sort_by: sort_by_data_sources - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - description: data_source_description - - url: url - - project_id: project_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - type: type - - id: data_source_id - - name: data_source_name - - - -Response Example ----------------- - -.. rest_method:: GET /v2/data-sourses?sort_by=-name - -.. literalinclude:: samples/data-sources/data-sources-list-response.json - :language: javascript - - - - -Create data source -================== - -.. rest_method:: POST /v2/data-sources - -Creates a data source. - -Normal response codes:202 - - - -Request Example ---------------- - -.. literalinclude:: samples/data-sources/data-source-register-hdfs-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: data_source_description - - url: url - - project_id: project_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - type: type - - id: data_source_id - - name: data_source_name - - - - - diff --git a/api-ref/source/v2/event-log.inc b/api-ref/source/v2/event-log.inc deleted file mode 100644 index 11de7c1fc9..0000000000 --- a/api-ref/source/v2/event-log.inc +++ /dev/null @@ -1,42 +0,0 @@ -.. -*- rst -*- - -========= -Event log -========= - -The event log feature provides information about cluster -provisioning. In the event of errors, the event log shows the -reason for the failure. - - -Show progress -============= - -.. rest_method:: GET /v2/clusters/{cluster_id} - -Shows provisioning progress of cluster. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - cluster_id: cluster_id - - - - -Response Example ----------------- - -.. literalinclude:: samples/event-log/cluster-progress-response.json - :language: javascript - - - - diff --git a/api-ref/source/v2/image-registry.inc b/api-ref/source/v2/image-registry.inc deleted file mode 100644 index 89acb09dde..0000000000 --- a/api-ref/source/v2/image-registry.inc +++ /dev/null @@ -1,244 +0,0 @@ -.. -*- rst -*- - -============== -Image registry -============== - -Use the image registry tool to manage images, add tags to and -remove tags from images, and define the user name for an instance -operating system. Each plugin lists required tags for an image. To -run remote operations, the Data Processing service requires a user -name with which to log in to the operating system for an instance. - - -Add tags to image -================= - -.. rest_method:: PUT /v2/images/{image_id}/tags - -Adds tags to an image. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tags: tags - - image_id: url_image_id - -Request Example ---------------- - -.. literalinclude:: samples/image-registry/image-tags-add-request.json - :language: javascript - - - - - - - -Show image details -================== - -.. rest_method:: GET /v2/images/{image_id} - -Shows details for an image. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - image_id: url_image_id - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - status: status - - username: username - - updated: updated - - description: image_description - - created: created - - image: image - - tags: tags - - minDisk: minDisk - - name: image_name - - progress: progress - - minRam: minRam - - id: image_id - - metadata: metadata - - - -Response Example ----------------- - -.. literalinclude:: samples/image-registry/image-show-response.json - :language: javascript - - - - -Register image -============== - -.. rest_method:: POST /v2/images/{image_id} - -Registers an image in the registry. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - username: username - - description: image_description - - image_id: url_image_id - -Request Example ---------------- - -.. literalinclude:: samples/image-registry/image-register-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status - - username: username - - updated: updated - - description: image_description - - created: created - - image: image - - tags: tags - - minDisk: minDisk - - name: image_name - - progress: progress - - minRam: minRam - - id: image_id - - metadata: metadata - - - - - -Unregister image -================ - -.. rest_method:: DELETE /v2/images/{image_id} - -Removes an image from the registry. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - image_id: url_image_id - - - - - - -Remove tags from image -====================== - -.. rest_method:: DELETE /v2/images/{image_id}/tag - -Removes tags from an image. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tags: tags - - image_id: url_image_id - -Request Example ---------------- - -.. literalinclude:: samples/image-registry/image-tags-delete-request.json - :language: javascript - - - - - - - -List images -=========== - -.. rest_method:: GET /v2/images - -Lists all images registered in the registry. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tags: tags - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status - - username: username - - updated: updated - - description: image_description - - created: created - - image: image - - tags: tags - - minDisk: minDisk - - name: image_name - - images: images - - progress: progress - - minRam: minRam - - id: image_id - - metadata: metadata - - - -Response Example ----------------- - -.. literalinclude:: samples/image-registry/images-list-response.json - :language: javascript - - - - diff --git a/api-ref/source/v2/index.rst b/api-ref/source/v2/index.rst deleted file mode 100644 index 3f346bf0dd..0000000000 --- a/api-ref/source/v2/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -:tocdepth: 3 - ----------------------- -Data Processing API v2 ----------------------- - -.. rest_expand_all:: - -.. include:: cluster-templates.inc -.. include:: clusters.inc -.. include:: data-sources.inc -.. include:: event-log.inc -.. include:: image-registry.inc -.. include:: job-binaries.inc -.. include:: job-templates.inc -.. include:: job-types.inc -.. include:: jobs.inc -.. include:: node-group-templates.inc -.. include:: plugins.inc - diff --git a/api-ref/source/v2/job-binaries.inc b/api-ref/source/v2/job-binaries.inc deleted file mode 100644 index 9093b17a5e..0000000000 --- a/api-ref/source/v2/job-binaries.inc +++ /dev/null @@ -1,256 +0,0 @@ -.. 
-*- rst -*- - -============ -Job binaries -============ - -Job binary objects represent data processing applications and -libraries that are stored in Object Storage service(S3 or Swift) or -in Manila Shares. - - -List job binaries -================= - -.. rest_method:: GET /v2/job-binaries - -Lists the available job binaries. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - limit: limit - - marker: marker - - sort_by: sort_by_job_binary - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - description: job_binary_description - - url: url - - project_id: project_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - binaries: binaries - - id: job_binary_id - - name: job_binary_name - - - -Response Example ----------------- -.. rest_method:: GET /v2/job-binaries?sort_by=created_at - -.. literalinclude:: samples/job-binaries/list-response.json - :language: javascript - - - - -Create job binary -================= - -.. rest_method:: POST /v2/job-binaries - -Creates a job binary. - -Normal response codes:202 - - - - -Request Example ---------------- - -.. literalinclude:: samples/job-binaries/create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: job_binary_description - - url: url - - project_id: project_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - id: job_binary_id - - name: job_binary_name - - - - - -Show job binary details -======================= - -.. rest_method:: GET /v2/job-binaries/{job_binary_id} - -Shows details for a job binary. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - job_binary_id: url_job_binary_id - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: job_binary_description - - url: url - - project_id: project_id - - created_at: created_at - - updated_at: updated_at - - is_protected: object_is_protected - - is_public: object_is_public - - id: job_binary_id - - name: job_binary_name - - - -Response Example ----------------- - -.. literalinclude:: samples/job-binaries/show-response.json - :language: javascript - - - - -Delete job binary -================= - -.. rest_method:: DELETE /v2/job-binaries/{job_binary_id} - -Deletes a job binary. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - - job_binary_id: url_job_binary_id - - - - - - -Update job binary -================= - -.. rest_method:: PATCH /v2/job-binaries/{job_binary_id} - -Updates a job binary. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - - job_binary_id: url_job_binary_id - - -Request Example ---------------- - -.. literalinclude:: samples/job-binaries/update-request.json - :language: javascript - - - - - - - -Show job binary data -==================== - -.. rest_method:: GET /v2/job-binaries/{job_binary_id}/data - -Shows data for a job binary. - -The response body shows the job binary raw data and the response -headers show the data length. 
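Because this endpoint returns the binary's raw bytes rather than a JSON document, a client simply issues an authenticated GET and reads the body directly; the ``Content-Length`` header reports the payload size. As a minimal illustrative sketch (not part of the original reference), the call could be made with plain ``requests``; the service URL, token, and job binary ID below are placeholders, not values taken from this document:

.. code-block:: python

    # Sketch only: fetch the raw data of a job binary via the v2 API.
    # The endpoint, token, and ID are illustrative placeholders.
    import requests

    SAHARA_ENDPOINT = "http://controller:8386/v2"          # assumed service URL
    TOKEN = "<keystone-token>"                              # assumed auth token
    JOB_BINARY_ID = "84248975-3c82-4206-a58d-6e7fb3a563fd"  # assumed UUID

    resp = requests.get(
        f"{SAHARA_ENDPOINT}/job-binaries/{JOB_BINARY_ID}/data",
        headers={"X-Auth-Token": TOKEN},
    )
    resp.raise_for_status()

    # The body is the script or library itself; Content-Length gives its size.
    print("size:", resp.headers.get("Content-Length"))
    with open("job-binary.out", "wb") as fh:
        fh.write(resp.content)

The raw HTTP exchange produced by such a request looks like the example that follows.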
- -Example response: - -:: - - HTTP/1.1 200 OK - Connection: keep-alive - Content-Length: 161 - Content-Type: text/html; charset=utf-8 - Date: Sat, 28 Mar 2016 02:42:48 GMT - A = load '$INPUT' using PigStorage(':') as (fruit: chararray); - B = foreach A generate com.hadoopbook.pig.Trim(fruit); - store B into '$OUTPUT' USING PigStorage(); - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - job_binary_id: url_job_binary_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - Content-Length: Content-Length - - - -Response Example ----------------- - -.. literalinclude:: samples/job-binaries/show-data-response - :language: text - - - - diff --git a/api-ref/source/v2/job-templates.inc b/api-ref/source/v2/job-templates.inc deleted file mode 100644 index 9d94645854..0000000000 --- a/api-ref/source/v2/job-templates.inc +++ /dev/null @@ -1,257 +0,0 @@ -.. -*- rst -*- - -============= -Job templates -============= - -A job templates object lists the binaries that a job needs to run. -To run a job, you must specify data sources and job parameters. - -You can run a job on an existing or new transient cluster. - - -List job templates -================== - -.. rest_method:: GET /v2/job-templates - -Lists all job templates. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - limit: limit - - marker: marker - - sort_by: sort_by_job_templates - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - job_templates: job_templates - - description: job_description - - project_id: project_id - - created_at: created_at - - mains: mains - - updated_at: updated_at - - libs: libs - - is_protected: object_is_protected - - interface: interface - - is_public: object_is_public - - type: type - - id: job_template_id - - name: job_template_name - - markers: markers - - prev: prev - - next: next - - -Response Example ----------------- -..rest_method:: GET /v2/job-templates?limit=2 - -.. literalinclude:: samples/job-templates/job-templates-list-response.json - :language: javascript - - - - -Create job template -=================== - -.. rest_method:: POST /v2/job-templates - -Creates a job object. - -Normal response codes:202 - - - - -Request Example ---------------- - -.. literalinclude:: samples/job-templates/job-template-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: job_description - - project_id: project_id - - created_at: created_at - - mains: mains - - updated_at: updated_at - - libs: libs - - is_protected: object_is_protected - - interface: interface - - is_public: object_is_public - - type: type - - id: job_template_id - - name: job_template_name - - - - - -Show job template details -========================= - -.. rest_method:: GET /v2/job-templates/{job_template_id} - -Shows details for a job template. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - job_template_id: url_job_template_id - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - description: job_description - - project_id: project_id - - created_at: created_at - - mains: mains - - updated_at: updated_at - - libs: libs - - is_protected: object_is_protected - - interface: interface - - is_public: object_is_public - - type: type - - id: job_template_id - - name: job_template_name - - - -Response Example ----------------- - -.. literalinclude:: samples/job-templates/job-template-show-response.json - :language: javascript - - - - -Remove job template -=================== - -.. rest_method:: DELETE /v2/job-templates/{job_template_id} - -Removes a job. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - job_template_id: url_job_template_id - - - - - - -Update job template object -========================== - -.. rest_method:: PATCH /v2/job-templates/{job_template_id} - -Updates a job template object. - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - job_template_id: url_job_template_id - -Request Example ---------------- - -.. literalinclude:: samples/job-templates/job-template-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: job_description - - project_id: project_id - - created_at: created_at - - mains: mains - - updated_at: updated_at - - libs: libs - - is_protected: object_is_protected - - interface: interface - - is_public: object_is_public - - type: type - - id: job_template_id - - name: job_template_name - - - - - -Get job template config hints -============================= - -.. rest_method:: GET /v2/job-templates/config-hints/{job_type} - -Get job template config hints - -Normal response codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - job_type: url_job_type - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - job_config: job_config - - args: args - - configs: configs - - - - - diff --git a/api-ref/source/v2/job-types.inc b/api-ref/source/v2/job-types.inc deleted file mode 100644 index ff7dc9b50d..0000000000 --- a/api-ref/source/v2/job-types.inc +++ /dev/null @@ -1,61 +0,0 @@ -.. -*- rst -*- - -========= -Job types -========= - -Each plugin that supports EDP also supports specific job types. -Different versions of a plugin might actually support different job -types. Configuration options vary by plugin, version, and job type. - -The job types provide information about which plugins support which -job types and how to configure the job types. - - -List job types -============== - -.. rest_method:: GET /v2/job-types - -Lists all job types. - -You can use query parameters to filter the response. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - plugin: plugin - - version: version - - type: type - - hints: hints - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - versions: versions - - title: title - - description: description_plugin - - job_types: job_types - - name: plugin_name - - - -Response Example ----------------- - -.. literalinclude:: samples/job-types/job-types-list-response.json - :language: javascript - - - - diff --git a/api-ref/source/v2/jobs.inc b/api-ref/source/v2/jobs.inc deleted file mode 100644 index 7e5b1024cc..0000000000 --- a/api-ref/source/v2/jobs.inc +++ /dev/null @@ -1,262 +0,0 @@ -.. 
-*- rst -*- - -==== -Jobs -==== - -A job object represents a job that runs on a cluster. -A job polls the status of a running job and reports it to the user. - - -Execute Job -=========== - -.. rest_method:: POST /v2/jobs - -Executes a job. - - -Normal response codes: 200 - -Request Example ----------------- -.. rest_method:: /v2/jobs - -.. literalinclude:: samples/jobs/job-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - info: info - - output_id: output_id - - start_time: start_time - - job_template_id: job_template_id - - updated_at: updated_at - - project_id: project_id - - created_at: created_at - - args: args - - data_source_urls: data_source_urls - - return_code: return_code - - oozie_job_id: oozie_job_id - - is_protected: is_protected_3 - - cluster_id: cluster_id - - end_time: end_time - - params: params - - is_public: job_is_public - - input_id: input_id - - configs: configs - - job: job - - id: job_id - - - -Response Example ----------------- - -.. literalinclude:: samples/jobs/job-response.json - :language: javascript - - - - -List jobs -========= - -.. rest_method:: GET /v2/jobs - -Lists available jobs. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - limit: limit - - marker: marker - - sort_by: sort_by_job - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - info: info - - output_id: output_id - - start_time: start_time - - job_template_id: job_template_id - - updated_at: updated_at - - project_id: project_id - - created_at: created_at - - args: args - - data_source_urls: data_source_urls - - return_code: return_code - - oozie_job_id: oozie_job_id - - is_protected: is_protected_3 - - cluster_id: cluster_id - - end_time: end_time - - params: params - - is_public: job_is_public - - input_id: input_id - - configs: configs - - job: job - - id: job_id - - jobs: jobs - - - -Response Example ----------------- -.. rest_method:: /v2/jobs - -.. literalinclude:: samples/jobs/list-response.json - :language: javascript - - - - -Show job -======== - -.. rest_method:: GET /v2/jobs/{job_id} - -Shows details for a job, by ID. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - job_id: url_job_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - info: info - - output_id: output_id - - start_time: start_time - - job_template_id: job_template_id - - updated_at: updated_at - - project_id: project_id - - created_at: created_at - - args: args - - data_source_urls: data_source_urls - - return_code: return_code - - oozie_job_id: oozie_job_id - - is_protected: is_protected_3 - - cluster_id: cluster_id - - end_time: end_time - - params: params - - is_public: job_is_public - - input_id: input_id - - configs: configs - - job: job - - id: job_id - - - -Response Example ----------------- - -.. literalinclude:: samples/jobs/job-response.json - :language: javascript - - - - -Delete job -========== - -.. rest_method:: DELETE /v2/jobs/{job_id} - -Deletes a job. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - job_id: url_job_id - - - - - - -Update job -========== - -.. rest_method:: PATCH /v2/jobs/{job_id} - -Updates a job. - -Normal response codes:202 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - job_id: url_job_id - -Request Example ---------------- - -.. literalinclude:: samples/jobs/job-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - info: info - - output_id: output_id - - start_time: start_time - - job_template_id: job_template_id - - updated_at: updated_at - - project_id: project_id - - created_at: created_at - - args: args - - data_source_urls: data_source_urls - - return_code: return_code - - oozie_job_id: oozie_job_id - - is_protected: is_protected_3 - - cluster_id: cluster_id - - end_time: end_time - - params: params - - is_public: job_is_public - - input_id: input_id - - configs: configs - - job: job - - id: job_id - - - - - diff --git a/api-ref/source/v2/node-group-templates.inc b/api-ref/source/v2/node-group-templates.inc deleted file mode 100644 index c91e4e36ae..0000000000 --- a/api-ref/source/v2/node-group-templates.inc +++ /dev/null @@ -1,289 +0,0 @@ -.. -*- rst -*- - -==================== -Node group templates -==================== - -A cluster is a group of nodes with the same configuration. A node -group template configures a node in the cluster. - -A template configures Hadoop processes and VM characteristics, such -as the number of reduced slots for task tracker, the number of -CPUs, and the amount of RAM. The template specifies the VM -characteristics through an OpenStack flavor. - - -List node group templates -========================= - -.. rest_method:: GET /v2/node-group-templates - -Lists available node group templates. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - limit: limit - - marker: marker - - sort_by: sort_by_node_group_templates - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - markers: markers - - prev: prev - - next: next - - volume_local_to_instance: volume_local_to_instance - - availability_zone: availability_zone - - updated_at: updated_at - - use_autoconfig: use_autoconfig - - volumes_per_node: volumes_per_node - - id: node_group_template_id - - security_groups: security_groups - - shares: object_shares - - node_configs: node_configs - - auto_security_group: auto_security_group - - volumes_availability_zone: volumes_availability_zone - - description: node_group_template_description - - volume_mount_prefix: volume_mount_prefix - - plugin_name: plugin_name - - floating_ip_pool: floating_ip_pool - - is_default: is_default - - image_id: image_id - - volumes_size: volumes_size - - is_proxy_gateway: is_proxy_gateway - - is_public: object_is_public - - plugin_version: plugin_version - - name: node_group_template_name - - project_id: project_id - - created_at: created_at - - volume_type: volume_type - - is_protected: object_is_protected - - node_processes: node_processes - - flavor_id: flavor_id - - - -Response Example ----------------- -.. rest_method:: GET /v2/node-group-templates?limit=2&marker=38b4e146-1d39-4822-bad2-fef1bf304a52&sort_by=name - -.. literalinclude:: samples/node-group-templates/node-group-templates-list-response.json - :language: javascript - - - - -Create node group template -========================== - -.. rest_method:: POST /v2/node-group-templates - -Creates a node group template. - -Normal response codes: 202 - - - - -Request Example ---------------- - -.. 
literalinclude:: samples/node-group-templates/node-group-template-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_local_to_instance: volume_local_to_instance - - availability_zone: availability_zone - - updated_at: updated_at - - use_autoconfig: use_autoconfig - - volumes_per_node: volumes_per_node - - id: node_group_template_id - - security_groups: security_groups - - shares: object_shares - - node_configs: node_configs - - auto_security_group: auto_security_group - - volumes_availability_zone: volumes_availability_zone - - description: node_group_template_description - - volume_mount_prefix: volume_mount_prefix - - plugin_name: plugin_name - - floating_ip_pool: floating_ip_pool - - is_default: is_default - - image_id: image_id - - volumes_size: volumes_size - - is_proxy_gateway: is_proxy_gateway - - is_public: object_is_public - - plugin_version: plugin_version - - name: node_group_template_name - - project_id: project_id - - created_at: created_at - - volume_type: volume_type - - is_protected: object_is_protected - - node_processes: node_processes - - flavor_id: flavor_id - - - - - -Show node group template details -================================ - -.. rest_method:: GET /v2/node-group-templates/{node_group_template_id} - -Shows a node group template, by ID. - - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - node_group_template_id: url_node_group_template_id - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_local_to_instance: volume_local_to_instance - - availability_zone: availability_zone - - updated_at: updated_at - - use_autoconfig: use_autoconfig - - volumes_per_node: volumes_per_node - - id: node_group_template_id - - security_groups: security_groups - - shares: object_shares - - node_configs: node_configs - - auto_security_group: auto_security_group - - volumes_availability_zone: volumes_availability_zone - - description: node_group_template_description - - volume_mount_prefix: volume_mount_prefix - - plugin_name: plugin_name - - floating_ip_pool: floating_ip_pool - - is_default: is_default - - image_id: image_id - - volumes_size: volumes_size - - is_proxy_gateway: is_proxy_gateway - - is_public: object_is_public - - plugin_version: plugin_version - - name: node_group_template_name - - project_id: project_id - - created_at: created_at - - volume_type: volume_type - - is_protected: object_is_protected - - node_processes: node_processes - - flavor_id: flavor_id - - - -Response Example ----------------- - -.. literalinclude:: samples/node-group-templates/node-group-template-show-response.json - :language: javascript - - - - -Delete node group template -========================== - -.. rest_method:: DELETE /v2/node-group-templates/{node_group_template_id} - -Deletes a node group template. - -Normal response codes:204 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - node_group_template_id: url_node_group_template_id - - - - - - -Update node group template -========================== - -.. rest_method:: PATCH /v2/node-group-templates/{node_group_template_id} - -Updates a node group template. - -Normal respose codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - node_group_template_id: url_node_group_template_id - -Request Example ---------------- - -.. 
literalinclude:: samples/node-group-templates/node-group-template-update-request.json - :language: javascript - - - - - - - -Export node group template -========================== - -.. rest_method:: GET /v2/node-group-templates/{node_group_template_id}/export - -Exports a node group template. - -Normal respose codes:202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - node_group_template_id: url_node_group_template_id - -Request Example ---------------- - -.. literalinclude:: samples/node-group-templates/node-group-template-update-request.json - :language: javascript - - - - - - - diff --git a/api-ref/source/v2/parameters.yaml b/api-ref/source/v2/parameters.yaml deleted file mode 100644 index 52d568afc1..0000000000 --- a/api-ref/source/v2/parameters.yaml +++ /dev/null @@ -1,1183 +0,0 @@ -# variables in header -Content-Length: - description: | - The length of the data, in bytes. - in: header - required: true - type: string - -# variables in path -hints: - description: | - Includes configuration hints in the response. - in: path - required: false - type: boolean -job_binary_id: - description: | - The UUID of the job binary. - in: path - required: true - type: string -limit: - description: | - Maximum number of objects in response data. - in: path - required: false - type: integer -marker: - description: | - ID of the last element on the list which - won't be in response. - in: path - required: false - type: string -plugin: - description: | - Filters the response by a plugin name. - in: path - required: false - type: string -sort_by_cluster_templates: - description: | - The field for sorting cluster templates. - this parameter accepts the following values: - ``name``, ``plugin_name``, ``plugin_version``, - ``created_at``, ``updated_at``, ``id``. Also - this values can started with ``-`` prefix for - descending sort. For example: ``-name``. - in: path - required: false - type: string - -sort_by_clusters: - description: | - The field for sorting clusters. - this parameter accepts the following values: - ``name``, ``plugin_name``, ``plugin_version``, - ``status``, ``id``. Also this values can - started with ``-`` prefix for descending sort. - For example: ``-name``. - in: path - required: false - type: string - -sort_by_data_sources: - description: | - The field for sorting data sources. - this parameter accepts the following values: - ``id``, ``name``, ``type``, ``created_at``, - ``updated_at``. Also this values can started - with ``-`` prefix for descending sort. - For example: ``-name``. - in: path - required: false - type: string - -sort_by_job: - description: | - The field for sorting job executions. - this parameter accepts the following values: - ``id``, ``job_template``, ``cluster``, - ``status``. Also this values can started - with ``-`` prefix for descending sort. - For example: ``-cluster``. - in: path - required: false - type: string - -sort_by_job_binary: - description: | - The field for sorting job binaries. - this parameter accepts the following values: - ``id``, ``name``, ``created_at``, ``updated_at``. - Also this values can started with ``-`` prefix - for descending sort. For example: ``-name``. - in: path - required: false - type: string - -sort_by_job_binary_internals: - description: | - The field for sorting job binary internals. - this parameter accepts the following values: - ``id``, ``name``, ``created_at``, ``updated_at``. - Also this values can started with ``-`` prefix - for descending sort. For example: ``-name``. 
- in: path - required: false - type: string - -sort_by_job_templates: - description: | - The field for sorting jobs. - this parameter accepts the following values: - ``id``, ``name``, ``type``, ``created_at``, - ``updated_at``. Also this values can started - with ``-`` prefix for descending sort. - For example: ``-name``. - in: path - required: false - type: string - -sort_by_node_group_templates: - description: | - The field for sorting node group templates. - this parameter accepts the following values: - ``name``, ``plugin_name``, ``plugin_version``, - ``created_at``, ``updated_at``, ``id``. Also - this values can started with ``-`` prefix for - descending sort. For example: ``-name``. - in: path - required: false - type: string - -type_2: - description: | - Filters the response by a job type. - in: path - required: false - type: string -url_cluster_id: - description: | - The ID of the cluster - in: path - required: true - type: string -url_cluster_template_id: - description: | - The unique identifier of the cluster template. - in: path - required: true - type: string -url_data_source_id: - description: | - The UUID of the data source. - in: path - required: true - type: string -url_image_id: - description: | - The UUID of the image. - in: path - required: true - type: string -url_job_binary_id: - description: | - The UUID of the job binary. - in: path - required: true - type: string -url_job_binary_internals_id: - description: | - The UUID of the job binary internal. - in: path - required: true - type: string -url_job_binary_internals_name: - description: | - The name of the job binary internal. - in: path - required: true - type: string -url_job_id: - description: | - The UUID of the job. - in: path - required: true - type: string -url_job_template_id: - description: | - The UUID of the template job. - in: path - required: true - type: string -url_job_type: - description: | - The job type. - in: path - required: true - type: string -url_node_group_template_id: - description: | - The UUID of the node group template. - in: path - required: true - type: string -url_plugin_name: - description: | - Name of the plugin. - in: path - required: true - type: string -url_project_id: - description: | - UUID of the project. - in: path - required: true - type: string -version: - description: | - Filters the response by a plugin version. - in: path - required: true - type: string -version_1: - description: | - Version of the plugin. - in: path - required: false - type: string - - -# variables in body -args: - description: | - The list of arguments. - in: body - required: true - type: array -auto_security_group: - description: | - If set to ``True``, the cluster group is - automatically secured. - in: body - required: true - type: boolean -availability_zone: - description: | - The availability of the node in the cluster. - in: body - required: true - type: string -binaries: - description: | - The list of job binary internal objects. - in: body - required: true - type: array -cluster_configs: - description: | - A set of key and value pairs that contain the - cluster configuration. - in: body - required: true - type: object -cluster_id: - description: | - The UUID of the cluster. - in: body - required: true - type: string -cluster_template_description: - description: | - Description of the cluster template - in: body - required: false - type: string -cluster_template_id: - description: | - The UUID of the cluster template. 
- in: body - required: true - type: string -cluster_template_name: - description: | - The name of the cluster template. - in: body - required: true - type: string -clusters: - description: | - The list of clusters. - in: body - required: true - type: array -configs: - description: | - The mappings of the job tasks. - in: body - required: true - type: object -count: - description: | - The number of nodes in the cluster. - in: body - required: true - type: integer -created: - description: | - The date and time when the image was created. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. - in: body - required: true - type: string -created_at: - description: | - The date and time when the cluster was created. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -created_at_1: - description: | - The date and time when the object was created. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -created_at_2: - description: | - The date and time when the node was created in the cluster. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -created_at_3: - description: | - The date and time when the job execution object was created. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -data_source_description: - description: | - The description of the data source object. - in: body - required: true - type: string -data_source_id: - description: | - The UUID of the data source. - in: body - required: true - type: string -data_source_name: - description: | - The name of the data source. - in: body - required: true - type: string -data_source_urls: - description: | - The data source URLs. - in: body - required: true - type: object -datasize: - description: | - The size of the data stored in the internal - database. - in: body - required: true - type: integer -default_image_id: - description: | - The default ID of the image. - in: body - required: true - type: string -description: - description: | - The description of the cluster. - in: body - required: true - type: string -description_3: - description: | - The description of the node in the cluster. - in: body - required: true - type: string -description_7: - description: | - Description of the image. - in: body - required: false - type: string -description_plugin: - description: | - The full description of the plugin. - in: body - required: true - type: string -domain_name: - description: | - Domain name for internal and external hostname resolution. - Required if DNS service is enabled. 
- in: body - required: false - type: string -end_time: - description: | - The end date and time of the job execution. - - The date and time when the job completed execution. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -flavor_id: - description: | - The ID of the flavor. - in: body - required: true - type: string -floating_ip_pool: - description: | - The UUID of the pool in the template. - in: body - required: true - type: string -force: - description: | - If set to ``true``, Sahara will force cluster deletion. - in: body - required: false - type: boolean -id: - description: | - The UUID of the cluster. - in: body - required: true - type: string -id_1: - description: | - The ID of the object. - in: body - required: true - type: string -image: - description: | - A set of key and value pairs that contain image - properties. - in: body - required: true - type: object -image_description: - description: | - The description of the image. - in: body - required: true - type: string -image_id: - description: | - The UUID of the image. - in: body - required: true - type: string -image_name: - description: | - The name of the operating system image. - in: body - required: true - type: string -images: - description: | - The list of images and their properties. - in: body - required: true - type: array -info: - description: | - A set of key and value pairs that contain cluster - information. - in: body - required: true - type: object -info_1: - description: | - The report of the executed job objects. - in: body - required: true - type: object -input_id: - description: | - The UUID of the input. - in: body - required: true - type: string -interface: - description: | - The interfaces of the job object. - in: body - required: true - type: array -is_default: - description: | - If set to ``true``, the cluster is the default - cluster. - in: body - required: true - type: boolean -is_protected: - description: | - If set to ``true``, the cluster is protected. - in: body - required: true - type: boolean -is_protected_2: - description: | - If set to ``true``, the node is protected. - in: body - required: true - type: boolean -is_protected_3: - description: | - If set to ``true``, the job execution object is - protected. - in: body - required: true - type: boolean -is_proxy_gateway: - description: | - If set to ``true``, the node is the proxy - gateway. - in: body - required: true - type: boolean -is_public: - description: | - If set to ``true``, the cluster is public. - in: body - required: true - type: boolean -is_transient: - description: | - If set to ``true``, the cluster is transient. - in: body - required: true - type: boolean -job: - description: | - A set of key and value pairs that contain the job - object. - in: body - required: true - type: object -job_binary_description: - description: | - The description of the job binary object. - in: body - required: true - type: string -job_binary_internals_id: - description: | - The UUID of the job binary internal. - in: body - required: true - type: string -job_binary_internals_name: - description: | - The name of the job binary internal. - in: body - required: true - type: string -job_binary_name: - description: | - The name of the object. - in: body - required: true - type: string -job_config: - description: | - The job configuration. 
- in: body - required: true - type: string -job_description: - description: | - The description of the job object. - in: body - required: true - type: string -job_id: - description: | - The UUID of the job object. - in: body - required: true - type: string -job_is_public: - description: | - If set to ``true``, the job object is - public. - in: body - required: true - type: boolean -job_name: - description: | - The name of the job object. - in: body - required: true - type: string -job_template_id: - description: | - The UUID of the job template object. - in: body - required: true - type: string -job_template_name: - description: | - The name of the job template object. - in: body - required: true - type: string -job_templates: - description: | - The list of the job templates. - in: body - required: true - type: array -job_types: - description: | - The list of plugins and their job types. - in: body - required: true - type: array -jobs: - description: | - The list of job objects. - in: body - required: true - type: array -libs: - description: | - The list of the job object properties. - in: body - required: true - type: array -mains: - description: | - The list of the job object and their properties. - in: body - required: true - type: array -management_public_key: - description: | - The SSH key for the management network. - in: body - required: true - type: string -markers: - description: | - The markers of previous and following pages of data. - This field exists only if ``limit`` is passed to - request. - in: body - required: false - type: object -metadata: - description: | - A set of key and value pairs that contain image - metadata. - in: body - required: true - type: object -minDisk: - description: | - The minimum disk space, in GB. - in: body - required: true - type: integer -minRam: - description: | - The minimum amount of random access memory (RAM) - for the image, in GB. - in: body - required: true - type: integer -name: - description: | - The name of the cluster. - in: body - required: true - type: string -name_1: - description: | - The name of the object. - in: body - required: true - type: string -neutron_management_network: - description: | - The UUID of the neutron management network. - in: body - required: true - type: string -next: - description: | - The marker of next page of list data. - in: body - required: false - type: string -node_configs: - description: | - A set of key and value pairs that contain the - node configuration in the cluster. - in: body - required: true - type: object -node_group_template_description: - description: | - Description of the node group template - in: body - required: false - type: string -node_group_template_id: - description: | - The UUID of the node group template. - in: body - required: true - type: string -node_group_template_name: - description: | - The name of the node group template. - in: body - required: true - type: string -node_groups: - description: | - The detail properties of the node in key-value - pairs. - in: body - required: true - type: object -node_processes: - description: | - The list of the processes performed by the node. - in: body - required: true - type: array -object_is_protected: - description: | - If set to ``true``, the object is protected. - in: body - required: true - type: boolean -object_is_public: - description: | - If set to ``true``, the object is public. - in: body - required: true - type: boolean -object_shares: - description: | - The sharing of resources in the cluster. 
- in: body - required: true - type: string -oozie_job_id: - description: | - The UUID of the ``oozie_job``. - in: body - required: true - type: string -output_id: - description: | - The UUID of the output of job execution object. - in: body - required: true - type: string -params: - description: | - The mappings of values to the parameters. - in: body - required: true - type: object -plugin_name: - description: | - The name of the plugin. - in: body - required: true - type: string -plugin_version: - description: | - The version of the Plugin used in the cluster. - in: body - required: true - type: string -plugin_version_1: - description: | - The version of the Plugin. - in: body - required: true - type: string -plugins: - description: | - The list of plugins. - in: body - required: true - type: array -prev: - description: | - The marker of previous page. May be ``null`` if - previous page is first or if current page is first. - in: body - required: false - type: string -progress: - description: | - A progress indicator, as a percentage value, for - the amount of image content that has been processed. - in: body - required: true - type: integer -project_id: - description: | - The UUID of the project. - in: body - required: true - type: string -provision_progress: - description: | - A list of the cluster progresses. - in: body - required: true - type: array -return_code: - description: | - The code returned after job has executed. - in: body - required: true - type: string -security_groups: - description: | - The security groups of the node. - in: body - required: true - type: string -shares: - description: | - The shares of the cluster. - in: body - required: true - type: string -start_time: - description: | - The date and time when the job started. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -status: - description: | - The status of the cluster. - in: body - required: true - type: string -status_1: - description: | - The current status of the image. - in: body - required: true - type: string -status_description: - description: | - The description of the cluster status. - in: body - required: true - type: string -tags: - description: | - List of tags to add. - in: body - required: true - type: array -tags_1: - description: | - Lists images only with specific tag. Can be used - multiple times. - in: body - required: false - type: string -tags_2: - description: | - One or more image tags. - in: body - required: true - type: array -tags_3: - description: | - List of tags to remove. - in: body - required: true - type: array -tenant_id: - description: | - The UUID of the tenant. - in: body - required: true - type: string -title: - description: | - The title of the plugin. - in: body - required: true - type: string -trust_id: - description: | - The id of the trust. - in: body - required: true - type: integer -type: - description: | - The type of the data source object. - in: body - required: true - type: string -type_1: - description: | - The type of the job object. - in: body - required: true - type: string -updated: - description: | - The date and time when the image was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. 
- - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. - in: body - required: true - type: string -updated_at: - description: | - The date and time when the cluster was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -updated_at_1: - description: | - The date and time when the object was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -updated_at_2: - description: | - The date and time when the node was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -updated_at_3: - description: | - The date and time when the job execution object was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - The ``±hh:mm`` value, if included, returns the time zone as an - offset from UTC. - - For example, ``2015-08-27T09:49:58-05:00``. - in: body - required: true - type: string -url: - description: | - The url of the data source object. - in: body - required: true - type: string -url_1: - description: | - The url of the job binary object. - in: body - required: true - type: string -use_autoconfig: - description: | - If set to ``true``, the cluster is auto - configured. - in: body - required: true - type: boolean -use_autoconfig_1: - description: | - If set to ``true``, the node is auto configured. - in: body - required: true - type: boolean -username: - description: | - The name of the user for the image. - in: body - required: true - type: string -username_1: - description: | - The user name to log in to an instance operating - system for remote operations execution. - in: body - required: true - type: string -versions: - description: | - The list of plugin versions. - in: body - required: true - type: array -volume_local_to_instance: - description: | - If set to ``true``, the volume is local to the - instance. - in: body - required: true - type: boolean -volume_mount_prefix: - description: | - The mount point of the node. - in: body - required: true - type: string -volume_type: - description: | - The type of volume in a node. - in: body - required: true - type: string -volumes_availability_zone: - description: | - The availability zone of the volumes. - in: body - required: true - type: string -volumes_per_node: - description: | - The number of volumes for the node. - in: body - required: true - type: integer -volumes_size: - description: | - The size of the volumes in a node. - in: body - required: true - type: integer - diff --git a/api-ref/source/v2/plugins.inc b/api-ref/source/v2/plugins.inc deleted file mode 100644 index 6ece649d9c..0000000000 --- a/api-ref/source/v2/plugins.inc +++ /dev/null @@ -1,179 +0,0 @@ -.. -*- rst -*- - -======= -Plugins -======= - -A plugin object defines the Hadoop or Spark version that it can -install and which configurations can be set for the cluster. - - -Show plugin details -=================== - -.. 
rest_method:: GET /v2/plugins/{plugin_name} - -Shows details for a plugin. - - -Normal response codes: 200 -Error response codes: 400, 500 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - plugin_name: url_plugin_name - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - versions: versions - - title: title - - description: description_plugin - - name: plugin_name - - - -Response Example ----------------- - -.. literalinclude:: samples/plugins/plugin-show-response.json - :language: javascript - - - - -List plugins -============ - -.. rest_method:: GET /v2/plugins - -Lists all registered plugins. - - -Normal response codes: 200 -Error response codes: 400, 500 - - - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - title: title - - versions: versions - - plugins: plugins - - description: description_plugin - - name: plugin_name - - - -Response Example ----------------- - -.. literalinclude:: samples/plugins/plugins-list-response.json - :language: javascript - - - - -Show plugin version details -=========================== - -.. rest_method:: GET /v2/plugins/{plugin_name}/{version} - -Shows details for a plugin version. - - -Normal response codes: 200 -Error response codes: 400, 500 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - plugin_name: url_plugin_name - - version: version - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - versions: versions - - title: title - - description: description_plugin - - name: plugin_name - - - -Response Example ----------------- - -.. literalinclude:: samples/plugins/plugin-version-show-response.json - :language: javascript - - - - -Update plugin details -===================== - -.. rest_method:: PATCH /v2/plugins/{plugin_name} - -Updates details for a plugin. - - -Normal response codes: 202 -Error response codes: 400, 500 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - plugin_name: url_plugin_name - - -Request Example ---------------- - -.. literalinclude:: samples/plugins/plugin-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - title: title - - versions: versions - - description: description_plugin - - name: plugin_name - - -Response Example ----------------- - -.. 
literalinclude:: samples/plugins/plugin-update-response.json - :language: javascript - - - - - diff --git a/api-ref/source/v2/samples/cluster-templates/cluster-template-create-request.json b/api-ref/source/v2/samples/cluster-templates/cluster-template-create-request.json deleted file mode 100644 index cc6869790a..0000000000 --- a/api-ref/source/v2/samples/cluster-templates/cluster-template-create-request.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "plugin_name": "vanilla", - "plugin_version": "2.7.1", - "node_groups": [ - { - "name": "worker", - "count": 3, - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251" - }, - { - "name": "master", - "count": 1, - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae" - } - ], - "name": "cluster-template" -} diff --git a/api-ref/source/v2/samples/cluster-templates/cluster-template-create-response.json b/api-ref/source/v2/samples/cluster-templates/cluster-template-create-response.json deleted file mode 100644 index 423d09eafe..0000000000 --- a/api-ref/source/v2/samples/cluster-templates/cluster-template-create-response.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "cluster_template": { - "is_public": false, - "anti_affinity": [], - "name": "cluster-template", - "created_at": "2015-09-14T10:38:44", - "project_id": "808d5032ea0446889097723bfc8e919d", - "cluster_configs": {}, - "shares": null, - "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "default_image_id": null, - "is_default": false, - "updated_at": null, - "plugin_name": "vanilla", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "1751c04e-8f39-467e-a421-480961172d4b", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "3ee85068-c455-4391-9db2-b54a20b99df3", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "neutron_management_network": null, - "domain_name": null, - "plugin_version": "2.7.1", - "use_autoconfig": true, - "description": null, - "is_protected": false - } -} diff --git a/api-ref/source/v2/samples/cluster-templates/cluster-template-show-response.json b/api-ref/source/v2/samples/cluster-templates/cluster-template-show-response.json deleted file mode 100644 index 286571b9a9..0000000000 --- a/api-ref/source/v2/samples/cluster-templates/cluster-template-show-response.json +++ 
/dev/null @@ -1,82 +0,0 @@ -{ - "cluster_template": { - "is_public": false, - "anti_affinity": [], - "name": "cluster-template", - "created_at": "2015-09-14T10:38:44", - "project_id": "808d5032ea0446889097723bfc8e919d", - "cluster_configs": {}, - "shares": null, - "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "default_image_id": null, - "is_default": false, - "updated_at": null, - "plugin_name": "vanilla", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "1751c04e-8f39-467e-a421-480961172d4b", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "3ee85068-c455-4391-9db2-b54a20b99df3", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "domain_name": null, - "plugin_version": "2.7.1", - "use_autoconfig": true, - "description": null, - "is_protected": false - } -} diff --git a/api-ref/source/v2/samples/cluster-templates/cluster-template-update-request.json b/api-ref/source/v2/samples/cluster-templates/cluster-template-update-request.json deleted file mode 100644 index f4583b4143..0000000000 --- a/api-ref/source/v2/samples/cluster-templates/cluster-template-update-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "description": "Updated template", - "plugin_name": "vanilla", - "plugin_version": "2.7.1", - "name": "vanilla-updated", - "cluster_configs": { - "HDFS": { - "dfs.replication": 2 - } - } -} diff --git a/api-ref/source/v2/samples/cluster-templates/cluster-template-update-response.json b/api-ref/source/v2/samples/cluster-templates/cluster-template-update-response.json deleted file mode 100644 index 6d3e365df7..0000000000 --- a/api-ref/source/v2/samples/cluster-templates/cluster-template-update-response.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "cluster_template": { - "is_public": false, - "anti_affinity": [], - "name": "vanilla-updated", - "created_at": "2015-08-21T08:41:24", - "project_id": "808d5032ea0446889097723bfc8e919d", - "cluster_configs": { - "HDFS": { - "dfs.replication": 2 - } - }, - "shares": null, - "id": "84d47e85-6094-473f-bf6d-5a7e6e86564e", - "default_image_id": null, - "is_default": false, - "updated_at": "2015-09-14T10:45:57", - "plugin_name": "vanilla", - "node_groups": [ - { - "image_id": null, - 
"shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": {}, - "JobFlow": {}, - "MapReduce": {}, - "Hive": {}, - "Hadoop": {}, - "HDFS": {} - }, - "auto_security_group": true, - "availability_zone": "", - "count": 1, - "flavor_id": "3", - "id": "57b966ab-617e-4735-bf60-0cb991208a52", - "security_groups": [], - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-08-21T08:41:24", - "node_group_template_id": "a5533187-3f14-42c3-ba3a-196c13fe0fb5", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "all", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "datanode", - "historyserver", - "resourcemanager", - "nodemanager", - "oozie" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "neutron_management_network": null, - "domain_name": null, - "plugin_version": "2.7.1", - "use_autoconfig": true, - "description": "Updated template", - "is_protected": false - } -} diff --git a/api-ref/source/v2/samples/cluster-templates/cluster-templates-list-response.json b/api-ref/source/v2/samples/cluster-templates/cluster-templates-list-response.json deleted file mode 100644 index a8f9f559c8..0000000000 --- a/api-ref/source/v2/samples/cluster-templates/cluster-templates-list-response.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "cluster_templates": [ - { - "is_public": false, - "anti_affinity": [], - "name": "cluster-template", - "created_at": "2015-09-14T10:38:44", - "project_id": "808d5032ea0446889097723bfc8e919d", - "cluster_configs": {}, - "shares": null, - "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "default_image_id": null, - "is_default": false, - "updated_at": null, - "plugin_name": "vanilla", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "1751c04e-8f39-467e-a421-480961172d4b", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "3ee85068-c455-4391-9db2-b54a20b99df3", - "security_groups": null, - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:38:44", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "domain_name": null, - "plugin_version": "2.7.1", - "use_autoconfig": true, - "description": null, - "is_protected": false - }, - 
{ - "is_public": true, - "anti_affinity": [], - "name": "asd", - "created_at": "2015-08-18T08:39:39", - "project_id": "808d5032ea0446889097723bfc8e919d", - "cluster_configs": { - "general": {} - }, - "shares": null, - "id": "5a9c787c-2078-4f7d-9a66-27759be9051b", - "default_image_id": null, - "is_default": false, - "updated_at": "2015-09-14T08:41:15", - "plugin_name": "vanilla", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": true, - "availability_zone": "", - "count": 1, - "flavor_id": "2", - "id": "a65864dd-3f99-4d29-a011-f7711cc23fa0", - "security_groups": [], - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-08-18T08:39:39", - "node_group_template_id": "42ce49de-1b8f-41d5-8f4a-244ec0826d92", - "updated_at": null, - "volumes_per_node": 1, - "is_proxy_gateway": false, - "name": "asd", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "jobtracker" - ], - "volumes_size": 10, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "neutron_management_network": null, - "domain_name": null, - "plugin_version": "2.7.1", - "use_autoconfig": true, - "description": "", - "is_protected": false - } - ], - "markers": { - "prev": null, - "next": "2c76e0d3-56cd-4d28-bb4f-4808e538c7b9" - } -} diff --git a/api-ref/source/v2/samples/clusters/cluster-create-request.json b/api-ref/source/v2/samples/clusters/cluster-create-request.json deleted file mode 100644 index c2db34de03..0000000000 --- a/api-ref/source/v2/samples/clusters/cluster-create-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "plugin_name": "vanilla", - "plugin_version": "2.7.1", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "user_keypair_id": "test", - "name": "vanilla-cluster", - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd" -} diff --git a/api-ref/source/v2/samples/clusters/cluster-create-response.json b/api-ref/source/v2/samples/clusters/cluster-create-response.json deleted file mode 100644 index 47b3a911ea..0000000000 --- a/api-ref/source/v2/samples/clusters/cluster-create-response.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "cluster": { - "is_public": false, - "project_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "domain_name": null, - "status_description": "", - "plugin_name": "vanilla", - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "info": {}, - "user_keypair_id": "test", - "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", - "id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - 
"yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "provision_progress": [], - "plugin_version": "2.7.1", - "use_autoconfig": true, - "trust_id": null, - "description": null, - "created_at": "2015-09-14T10:57:11", - "is_protected": false, - "updated_at": "2015-09-14T10:57:12", - "is_transient": false, - "cluster_configs": { - "HDFS": { - "dfs.replication": 3 - } - }, - "anti_affinity": [], - "name": "vanilla-cluster", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "status": "Validating" - } -} diff --git a/api-ref/source/v2/samples/clusters/cluster-scale-request.json b/api-ref/source/v2/samples/clusters/cluster-scale-request.json deleted file mode 100644 index 8b61d5ea0f..0000000000 --- a/api-ref/source/v2/samples/clusters/cluster-scale-request.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "add_node_groups": [ - { - "count": 1, - "name": "b-worker", - "node_group_template_id": "bc270ffe-a086-4eeb-9baa-2f5a73504622" - } - ], - "resize_node_groups": [ - { - "count": 4, - "name": "worker" - } - ] -} diff --git a/api-ref/source/v2/samples/clusters/cluster-scale-response.json b/api-ref/source/v2/samples/clusters/cluster-scale-response.json deleted file mode 100644 index 57bc007bf6..0000000000 --- a/api-ref/source/v2/samples/clusters/cluster-scale-response.json +++ /dev/null @@ -1,370 
+0,0 @@ -{ - "cluster": { - "info": { - "YARN": { - "Web UI": "http://172.18.168.115:8088", - "ResourceManager": "http://172.18.168.115:8032" - }, - "HDFS": { - "Web UI": "http://172.18.168.115:50070", - "NameNode": "hdfs://vanilla-cluster-master-0:9000" - }, - "MapReduce JobHistory Server": { - "Web UI": "http://172.18.168.115:19888" - }, - "JobFlow": { - "Oozie": "http://172.18.168.115:11000" - } - }, - "plugin_name": "vanilla", - "plugin_version": "2.7.1", - "updated_at": "2015-09-14T11:01:15", - "name": "vanilla-cluster", - "id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", - "trust_id": null, - "status_description": "", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "is_protected": false, - "is_transient": false, - "provision_progress": [ - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Create Heat stack", - "step_type": "Engine: create cluster", - "updated_at": "2015-09-14T10:57:38", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:57:18", - "id": "0a6d95f9-30f4-4434-823a-a38a7999a5af" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 4, - "successful": true, - "step_name": "Configure instances", - "step_type": "Engine: create cluster", - "updated_at": "2015-09-14T10:58:22", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:58:16", - "id": "29f2b587-c34c-4871-9ed9-9235b411cd9a" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Start the following process(es): Oozie", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T11:01:15", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T11:00:27", - "id": "36f1efde-90f9-41c1-b409-aa1cf9623e3e" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 4, - "successful": true, - "step_name": "Configure instances", - "step_type": "Plugin: configure cluster", - "updated_at": "2015-09-14T10:59:21", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:58:22", - "id": "602bcc27-3a2d-42c8-8aca-ebc475319c72" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Configure topology data", - "step_type": "Plugin: configure cluster", - "updated_at": "2015-09-14T10:59:37", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:59:21", - "id": "7e291df1-2d32-410d-ae89-33ab6f83cf17" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 3, - "successful": true, - "step_name": "Start the following process(es): DataNodes, NodeManagers", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T11:00:11", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T11:00:01", - "id": "8ab7933c-ad61-4a4f-88db-23ce78ee10f6" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - 
"successful": true, - "step_name": "Await DataNodes start up", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T11:00:21", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T11:00:11", - "id": "9c8dc016-8c5b-4e80-9857-80c41f6bd971" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Start the following process(es): HistoryServer", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T11:00:27", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T11:00:21", - "id": "c6327532-222b-416c-858f-73dbb32b8e97" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 4, - "successful": true, - "step_name": "Wait for instance accessibility", - "step_type": "Engine: create cluster", - "updated_at": "2015-09-14T10:58:14", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:57:41", - "id": "d3eca726-8b44-473a-ac29-fba45a893725" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 0, - "successful": true, - "step_name": "Mount volumes to instances", - "step_type": "Engine: create cluster", - "updated_at": "2015-09-14T10:58:15", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:58:14", - "id": "d7a875ff-64bf-41aa-882d-b5061c8ee152" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Start the following process(es): ResourceManager", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T11:00:00", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:59:55", - "id": "ded7d227-10b8-4cb0-ab6c-25da1462bb7a" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 1, - "successful": true, - "step_name": "Start the following process(es): NameNode", - "step_type": "Plugin: start cluster", - "updated_at": "2015-09-14T10:59:54", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:59:38", - "id": "e1701ff5-930a-4212-945a-43515dfe24d1" - }, - { - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "total": 4, - "successful": true, - "step_name": "Assign IPs", - "step_type": "Engine: create cluster", - "updated_at": "2015-09-14T10:57:41", - "project_id": "808d5032ea0446889097723bfc8e919d", - "created_at": "2015-09-14T10:57:38", - "id": "eaf0ab1b-bf8f-48f0-8f2c-fa4f82f539b9" - } - ], - "status": "Active", - "description": null, - "use_autoconfig": true, - "shares": null, - "domain_name": null, - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "is_public": false, - "project_id": "808d5032ea0446889097723bfc8e919d", - "node_groups": [ - { - "volumes_per_node": 0, - "volume_type": null, - "updated_at": "2015-09-14T10:57:37", - "name": "b-worker", - "id": "b7a6dea4-c898-446b-8c67-4f378d4c06c4", - "node_group_template_id": "bc270ffe-a086-4eeb-9baa-2f5a73504622", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048, - "yarn.scheduler.maximum-allocation-mb": 2048 - }, - "MapReduce": { - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "yarn.app.mapreduce.am.resource.mb": 256 
- } - }, - "auto_security_group": false, - "volumes_availability_zone": null, - "use_autoconfig": true, - "security_groups": null, - "shares": null, - "node_processes": [ - "datanode", - "nodemanager" - ], - "availability_zone": null, - "flavor_id": "2", - "image_id": null, - "volume_local_to_instance": false, - "count": 1, - "volumes_size": 0, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "volume_mount_prefix": "/volumes/disk", - "instances": [], - "is_proxy_gateway": false, - "created_at": "2015-09-14T10:57:11" - }, - { - "volumes_per_node": 0, - "volume_type": null, - "updated_at": "2015-09-14T10:57:36", - "name": "master", - "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048, - "yarn.scheduler.maximum-allocation-mb": 2048 - }, - "MapReduce": { - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "yarn.app.mapreduce.am.resource.mb": 256 - } - }, - "auto_security_group": false, - "volumes_availability_zone": null, - "use_autoconfig": true, - "security_groups": null, - "shares": null, - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "availability_zone": null, - "flavor_id": "2", - "image_id": null, - "volume_local_to_instance": false, - "count": 1, - "volumes_size": 0, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "volume_mount_prefix": "/volumes/disk", - "instances": [ - { - "instance_id": "b9f16a07-88fc-423e-83a3-489598fe6737", - "internal_ip": "10.50.0.60", - "instance_name": "vanilla-cluster-master-0", - "updated_at": "2015-09-14T10:57:39", - "management_ip": "172.18.168.115", - "created_at": "2015-09-14T10:57:36", - "id": "4867d92e-cc7b-4cde-9a1a-149e91caa491" - } - ], - "is_proxy_gateway": false, - "created_at": "2015-09-14T10:57:11" - }, - { - "volumes_per_node": 0, - "volume_type": null, - "updated_at": "2015-09-14T10:57:37", - "name": "worker", - "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048, - "yarn.scheduler.maximum-allocation-mb": 2048 - }, - "MapReduce": { - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "yarn.app.mapreduce.am.resource.mb": 256 - } - }, - "auto_security_group": false, - "volumes_availability_zone": null, - "use_autoconfig": true, - "security_groups": null, - "shares": null, - "node_processes": [ - "datanode", - "nodemanager" - ], - "availability_zone": null, - "flavor_id": "2", - "image_id": null, - "volume_local_to_instance": false, - "count": 4, - "volumes_size": 0, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "volume_mount_prefix": "/volumes/disk", - "instances": [ - { - "instance_id": "0cf1ee81-aa72-48da-be2c-65bc2fa51f8f", - "internal_ip": "10.50.0.63", - "instance_name": "vanilla-cluster-worker-0", - 
"updated_at": "2015-09-14T10:57:39", - "management_ip": "172.18.168.118", - "created_at": "2015-09-14T10:57:37", - "id": "f3633b30-c1e4-4144-930b-ab5b780b87be" - }, - { - "instance_id": "4a937391-b594-4ad0-9a53-00a99a691383", - "internal_ip": "10.50.0.62", - "instance_name": "vanilla-cluster-worker-1", - "updated_at": "2015-09-14T10:57:40", - "management_ip": "172.18.168.117", - "created_at": "2015-09-14T10:57:37", - "id": "0d66fd93-f277-4a94-b46a-f5866aa0c38f" - }, - { - "instance_id": "839b1d56-6d0d-4aa4-9d05-30e029c276f8", - "internal_ip": "10.50.0.61", - "instance_name": "vanilla-cluster-worker-2", - "updated_at": "2015-09-14T10:57:40", - "management_ip": "172.18.168.116", - "created_at": "2015-09-14T10:57:37", - "id": "0982cefd-5c58-436e-8f1e-c1d0830f18a7" - } - ], - "is_proxy_gateway": false, - "created_at": "2015-09-14T10:57:11" - } - ], - "cluster_configs": { - "HDFS": { - "dfs.replication": 3 - } - }, - "user_keypair_id": "apavlov", - "anti_affinity": [], - "created_at": "2015-09-14T10:57:11" - } -} diff --git a/api-ref/source/v2/samples/clusters/cluster-show-response.json b/api-ref/source/v2/samples/clusters/cluster-show-response.json deleted file mode 100644 index 47b3a911ea..0000000000 --- a/api-ref/source/v2/samples/clusters/cluster-show-response.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "cluster": { - "is_public": false, - "project_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "domain_name": null, - "status_description": "", - "plugin_name": "vanilla", - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "info": {}, - "user_keypair_id": "test", - "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", - "id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - 
"shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "provision_progress": [], - "plugin_version": "2.7.1", - "use_autoconfig": true, - "trust_id": null, - "description": null, - "created_at": "2015-09-14T10:57:11", - "is_protected": false, - "updated_at": "2015-09-14T10:57:12", - "is_transient": false, - "cluster_configs": { - "HDFS": { - "dfs.replication": 3 - } - }, - "anti_affinity": [], - "name": "vanilla-cluster", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "status": "Validating" - } -} diff --git a/api-ref/source/v2/samples/clusters/cluster-update-request.json b/api-ref/source/v2/samples/clusters/cluster-update-request.json deleted file mode 100644 index ab01348afa..0000000000 --- a/api-ref/source/v2/samples/clusters/cluster-update-request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "name": "public-vanilla-cluster", - "is_public": true -} diff --git a/api-ref/source/v2/samples/clusters/cluster-update-response.json b/api-ref/source/v2/samples/clusters/cluster-update-response.json deleted file mode 100644 index f78247fa69..0000000000 --- a/api-ref/source/v2/samples/clusters/cluster-update-response.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "cluster": { - "is_public": true, - "project_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "domain_name": null, - "status_description": "", - "plugin_name": "vanilla", - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "info": {}, - "user_keypair_id": "test", - "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", - "id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - 
"yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", - "security_groups": null, - "use_autoconfig": true, - "instances": [], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": "2015-09-14T10:57:12", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "provision_progress": [], - "plugin_version": "2.7.1", - "use_autoconfig": true, - "trust_id": null, - "description": null, - "created_at": "2015-09-14T10:57:11", - "is_protected": false, - "updated_at": "2015-09-14T10:57:12", - "is_transient": false, - "cluster_configs": { - "HDFS": { - "dfs.replication": 3 - } - }, - "anti_affinity": [], - "name": "public-vanilla-cluster", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "status": "Validating" - } -} diff --git a/api-ref/source/v2/samples/clusters/clusters-list-response.json b/api-ref/source/v2/samples/clusters/clusters-list-response.json deleted file mode 100644 index e8d6e9b3c2..0000000000 --- a/api-ref/source/v2/samples/clusters/clusters-list-response.json +++ /dev/null @@ -1,327 +0,0 @@ -{ - "clusters": [ - { - "is_public": false, - "project_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "domain_name": null, - "status_description": "", - "plugin_name": "vanilla", - "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", - "info": { - "YARN": { - "Web UI": "http://172.18.168.115:8088", - "ResourceManager": "http://172.18.168.115:8032" - }, - "HDFS": { - "Web UI": 
"http://172.18.168.115:50070", - "NameNode": "hdfs://vanilla-cluster-master-0:9000" - }, - "JobFlow": { - "Oozie": "http://172.18.168.115:11000" - }, - "MapReduce JobHistory Server": { - "Web UI": "http://172.18.168.115:19888" - } - }, - "user_keypair_id": "apavlov", - "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", - "id": "e172d86c-906d-418e-a29c-6189f53bfa42", - "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", - "node_groups": [ - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 1, - "flavor_id": "2", - "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", - "security_groups": null, - "use_autoconfig": true, - "instances": [ - { - "created_at": "2015-09-14T10:57:36", - "id": "4867d92e-cc7b-4cde-9a1a-149e91caa491", - "management_ip": "172.18.168.115", - "updated_at": "2015-09-14T10:57:39", - "instance_id": "b9f16a07-88fc-423e-83a3-489598fe6737", - "internal_ip": "10.50.0.60", - "instance_name": "vanilla-cluster-master-0" - } - ], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "updated_at": "2015-09-14T10:57:36", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - }, - { - "image_id": null, - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": { - "YARN": { - "yarn.nodemanager.vmem-check-enabled": "false", - "yarn.scheduler.maximum-allocation-mb": 2048, - "yarn.scheduler.minimum-allocation-mb": 256, - "yarn.nodemanager.resource.memory-mb": 2048 - }, - "MapReduce": { - "yarn.app.mapreduce.am.resource.mb": 256, - "mapreduce.task.io.sort.mb": 102, - "mapreduce.reduce.java.opts": "-Xmx409m", - "mapreduce.reduce.memory.mb": 512, - "mapreduce.map.memory.mb": 256, - "yarn.app.mapreduce.am.command-opts": "-Xmx204m", - "mapreduce.map.java.opts": "-Xmx204m" - } - }, - "auto_security_group": false, - "availability_zone": null, - "count": 3, - "flavor_id": "2", - "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", - "security_groups": null, - "use_autoconfig": true, - "instances": [ - { - "created_at": "2015-09-14T10:57:37", - "id": "f3633b30-c1e4-4144-930b-ab5b780b87be", - "management_ip": "172.18.168.118", - "updated_at": "2015-09-14T10:57:39", - "instance_id": 
"0cf1ee81-aa72-48da-be2c-65bc2fa51f8f", - "internal_ip": "10.50.0.63", - "instance_name": "vanilla-cluster-worker-0" - }, - { - "created_at": "2015-09-14T10:57:37", - "id": "0d66fd93-f277-4a94-b46a-f5866aa0c38f", - "management_ip": "172.18.168.117", - "updated_at": "2015-09-14T10:57:40", - "instance_id": "4a937391-b594-4ad0-9a53-00a99a691383", - "internal_ip": "10.50.0.62", - "instance_name": "vanilla-cluster-worker-1" - }, - { - "created_at": "2015-09-14T10:57:37", - "id": "0982cefd-5c58-436e-8f1e-c1d0830f18a7", - "management_ip": "172.18.168.116", - "updated_at": "2015-09-14T10:57:40", - "instance_id": "839b1d56-6d0d-4aa4-9d05-30e029c276f8", - "internal_ip": "10.50.0.61", - "instance_name": "vanilla-cluster-worker-2" - } - ], - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:57:11", - "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "updated_at": "2015-09-14T10:57:37", - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } - ], - "provision_progress": [ - { - "created_at": "2015-09-14T10:57:18", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "0a6d95f9-30f4-4434-823a-a38a7999a5af", - "step_type": "Engine: create cluster", - "step_name": "Create Heat stack", - "updated_at": "2015-09-14T10:57:38", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:58:16", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "29f2b587-c34c-4871-9ed9-9235b411cd9a", - "step_type": "Engine: create cluster", - "step_name": "Configure instances", - "updated_at": "2015-09-14T10:58:22", - "successful": true, - "total": 4, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T11:00:27", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "36f1efde-90f9-41c1-b409-aa1cf9623e3e", - "step_type": "Plugin: start cluster", - "step_name": "Start the following process(es): Oozie", - "updated_at": "2015-09-14T11:01:15", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:58:22", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "602bcc27-3a2d-42c8-8aca-ebc475319c72", - "step_type": "Plugin: configure cluster", - "step_name": "Configure instances", - "updated_at": "2015-09-14T10:59:21", - "successful": true, - "total": 4, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:59:21", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "7e291df1-2d32-410d-ae89-33ab6f83cf17", - "step_type": "Plugin: configure cluster", - "step_name": "Configure topology data", - "updated_at": "2015-09-14T10:59:37", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T11:00:01", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "8ab7933c-ad61-4a4f-88db-23ce78ee10f6", - "step_type": "Plugin: start cluster", - "step_name": "Start the following process(es): DataNodes, NodeManagers", - "updated_at": "2015-09-14T11:00:11", - "successful": true, - "total": 3, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T11:00:11", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": 
"9c8dc016-8c5b-4e80-9857-80c41f6bd971", - "step_type": "Plugin: start cluster", - "step_name": "Await DataNodes start up", - "updated_at": "2015-09-14T11:00:21", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T11:00:21", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "c6327532-222b-416c-858f-73dbb32b8e97", - "step_type": "Plugin: start cluster", - "step_name": "Start the following process(es): HistoryServer", - "updated_at": "2015-09-14T11:00:27", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:57:41", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "d3eca726-8b44-473a-ac29-fba45a893725", - "step_type": "Engine: create cluster", - "step_name": "Wait for instance accessibility", - "updated_at": "2015-09-14T10:58:14", - "successful": true, - "total": 4, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:58:14", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "d7a875ff-64bf-41aa-882d-b5061c8ee152", - "step_type": "Engine: create cluster", - "step_name": "Mount volumes to instances", - "updated_at": "2015-09-14T10:58:15", - "successful": true, - "total": 0, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:59:55", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "ded7d227-10b8-4cb0-ab6c-25da1462bb7a", - "step_type": "Plugin: start cluster", - "step_name": "Start the following process(es): ResourceManager", - "updated_at": "2015-09-14T11:00:00", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:59:38", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "e1701ff5-930a-4212-945a-43515dfe24d1", - "step_type": "Plugin: start cluster", - "step_name": "Start the following process(es): NameNode", - "updated_at": "2015-09-14T10:59:54", - "successful": true, - "total": 1, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - }, - { - "created_at": "2015-09-14T10:57:38", - "project_id": "808d5032ea0446889097723bfc8e919d", - "id": "eaf0ab1b-bf8f-48f0-8f2c-fa4f82f539b9", - "step_type": "Engine: create cluster", - "step_name": "Assign IPs", - "updated_at": "2015-09-14T10:57:41", - "successful": true, - "total": 4, - "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" - } - ], - "plugin_version": "2.7.1", - "use_autoconfig": true, - "trust_id": null, - "description": null, - "created_at": "2015-09-14T10:57:11", - "is_protected": false, - "updated_at": "2015-09-14T11:01:15", - "is_transient": false, - "cluster_configs": { - "HDFS": { - "dfs.replication": 3 - } - }, - "anti_affinity": [], - "name": "vanilla-cluster", - "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "status": "Active" - } - ] -} diff --git a/api-ref/source/v2/samples/clusters/multiple-clusters-create-request.json b/api-ref/source/v2/samples/clusters/multiple-clusters-create-request.json deleted file mode 100644 index 3bd5dca7f1..0000000000 --- a/api-ref/source/v2/samples/clusters/multiple-clusters-create-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "plugin_name": "vanilla", - "plugin_version": "2.6.0", - "cluster_template_id": "9951f86d-57ba-43d6-9cb0-14ed2ec7a6cf", - "default_image_id": "bc3c3d3c-2684-4bf8-a9fa-388fb71288a9", - "user_keypair_id": "test", - "name": "def-cluster", - "count": 2, - "cluster_configs": {}, - 
"neutron_management_network": "7e31648b-4b2e-4f32-9b0a-113581c27076" -} diff --git a/api-ref/source/v2/samples/clusters/multiple-clusters-create-response.json b/api-ref/source/v2/samples/clusters/multiple-clusters-create-response.json deleted file mode 100644 index 5b13bca55d..0000000000 --- a/api-ref/source/v2/samples/clusters/multiple-clusters-create-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "clusters": [ - "a007a3e7-658f-4568-b0f2-fe2fd5efc554", - "b012a6et-65hf-4566-b0f2-fe3fd7efc567" - ] -} diff --git a/api-ref/source/v2/samples/data-sources/data-source-register-hdfs-request.json b/api-ref/source/v2/samples/data-sources/data-source-register-hdfs-request.json deleted file mode 100644 index 9d9c9c945c..0000000000 --- a/api-ref/source/v2/samples/data-sources/data-source-register-hdfs-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "description": "This is hdfs input", - "url": "hdfs://test-master-node:8020/user/hadoop/input", - "type": "hdfs", - "name": "hdfs_input" -} diff --git a/api-ref/source/v2/samples/data-sources/data-source-register-hdfs-response.json b/api-ref/source/v2/samples/data-sources/data-source-register-hdfs-response.json deleted file mode 100644 index 45cda02bad..0000000000 --- a/api-ref/source/v2/samples/data-sources/data-source-register-hdfs-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "data_source": { - "is_public": false, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-03-26 11:09:36.148464", - "id": "d7fffe9c-3b42-46a9-8be8-e98f586fa7a9", - "updated_at": null, - "name": "hdfs_input", - "description": "This is hdfs input", - "url": "hdfs://test-master-node:8020/user/hadoop/input", - "type": "hdfs" - } -} diff --git a/api-ref/source/v2/samples/data-sources/data-source-register-swift-request.json b/api-ref/source/v2/samples/data-sources/data-source-register-swift-request.json deleted file mode 100644 index 30a1e535dd..0000000000 --- a/api-ref/source/v2/samples/data-sources/data-source-register-swift-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "description": "This is input", - "url": "swift://container/text", - "credentials": { - "password": "swordfish", - "user": "dev" - }, - "type": "swift", - "name": "swift_input" -} diff --git a/api-ref/source/v2/samples/data-sources/data-source-register-swift-response.json b/api-ref/source/v2/samples/data-sources/data-source-register-swift-response.json deleted file mode 100644 index 7579ae9f68..0000000000 --- a/api-ref/source/v2/samples/data-sources/data-source-register-swift-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "data_source": { - "is_public": false, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-03-26 11:18:10.691493", - "id": "953831f2-0852-49d8-ac71-af5805e25256", - "updated_at": null, - "name": "swift_input", - "description": "This is input", - "url": "swift://container/text", - "type": "swift" - } -} diff --git a/api-ref/source/v2/samples/data-sources/data-source-show-response.json b/api-ref/source/v2/samples/data-sources/data-source-show-response.json deleted file mode 100644 index 7579ae9f68..0000000000 --- a/api-ref/source/v2/samples/data-sources/data-source-show-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "data_source": { - "is_public": false, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-03-26 11:18:10.691493", - "id": "953831f2-0852-49d8-ac71-af5805e25256", - "updated_at": null, - "name": "swift_input", - "description": "This is 
input", - "url": "swift://container/text", - "type": "swift" - } -} diff --git a/api-ref/source/v2/samples/data-sources/data-source-update-request.json b/api-ref/source/v2/samples/data-sources/data-source-update-request.json deleted file mode 100644 index 8397ae6545..0000000000 --- a/api-ref/source/v2/samples/data-sources/data-source-update-request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "description": "This is public input", - "is_protected": true -} diff --git a/api-ref/source/v2/samples/data-sources/data-source-update-response.json b/api-ref/source/v2/samples/data-sources/data-source-update-response.json deleted file mode 100644 index ce6ad9206b..0000000000 --- a/api-ref/source/v2/samples/data-sources/data-source-update-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "data_source": { - "is_public": true, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-09-15 12:32:24.847493", - "id": "953831f2-0852-49d8-ac71-af5805e25256", - "updated_at": "2015-09-15 12:34:42.597435", - "name": "swift_input", - "description": "This is public input", - "url": "swift://container/text", - "type": "swift" - } -} diff --git a/api-ref/source/v2/samples/data-sources/data-sources-list-response.json b/api-ref/source/v2/samples/data-sources/data-sources-list-response.json deleted file mode 100644 index 9ed7f0980b..0000000000 --- a/api-ref/source/v2/samples/data-sources/data-sources-list-response.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "data_sources": [ - { - "is_public": false, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-03-26 11:18:10", - "id": "953831f2-0852-49d8-ac71-af5805e25256", - "name": "swift_input", - "updated_at": null, - "description": "This is input", - "url": "swift://container/text", - "type": "swift" - }, - { - "is_public": false, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "is_protected": false, - "created_at": "2015-03-26 11:09:36", - "id": "d7fffe9c-3b42-46a9-8be8-e98f586fa7a9", - "name": "hdfs_input", - "updated_at": null, - "description": "This is hdfs input", - "url": "hdfs://test-master-node:8020/user/hadoop/input", - "type": "hdfs" - } - ] -} diff --git a/api-ref/source/v2/samples/event-log/cluster-progress-response.json b/api-ref/source/v2/samples/event-log/cluster-progress-response.json deleted file mode 100644 index 7f4f6c3668..0000000000 --- a/api-ref/source/v2/samples/event-log/cluster-progress-response.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "status": "Error", - "neutron_management_network": "7e31648b-4b2e-4f32-9b0a-113581c27076", - "is_transient": false, - "description": "", - "user_keypair_id": "vgridnev", - "updated_at": "2015-03-31 14:10:59", - "plugin_name": "spark", - "provision_progress": [ - { - "successful": false, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-03-31 14:10:20", - "step_type": "Engine: create cluster", - "updated_at": "2015-03-31 14:10:35", - "events": [ - { - "instance_name": "sample-worker-spark-004", - "successful": false, - "created_at": "2015-03-31 14:10:35", - "updated_at": null, - "event_info": "Node sample-worker-spark-004 has error status\nError ID: 3e238c82-d1f5-4560-8ed8-691e923e16a0", - "instance_id": "b5ba5ba8-e9c1-47f7-9355-3ce0ec0e449d", - "node_group_id": "145cf2fb-dcdf-42af-a4b9-a4047d2919d4", - "step_id": "3f243c67-2c27-47c7-a0c0-0834ad17f8b6", - "id": "34afcfc7-bdb0-43cb-b142-283d560dc6ad" - }, - { - "instance_name": "sample-worker-spark-001", - "successful": true, - "created_at": "2015-03-31 
14:10:35", - "updated_at": null, - "event_info": null, - "instance_id": "c532ab71-38da-475a-95f8-f8eb93b8f1c2", - "node_group_id": "145cf2fb-dcdf-42af-a4b9-a4047d2919d4", - "step_id": "3f243c67-2c27-47c7-a0c0-0834ad17f8b6", - "id": "4ba50414-5216-4161-bc7a-12716122b99d" - } - ], - "cluster_id": "c26ec982-ba6b-4d75-818c-a50240164af0", - "step_name": "Wait for instances to become active", - "total": 5, - "id": "3f243c67-2c27-47c7-a0c0-0834ad17f8b6" - }, - { - "successful": true, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-03-31 14:10:12", - "step_type": "Engine: create cluster", - "updated_at": "2015-03-31 14:10:19", - "events": [], - "cluster_id": "c26ec982-ba6b-4d75-818c-a50240164af0", - "step_name": "Run instances", - "total": 5, - "id": "407ba50a-c799-46af-9dfb-6aa5f6ade426" - } - ], - "anti_affinity": [], - "node_groups": [], - "management_public_key": "Sahara", - "status_description": "Creating cluster failed for the following reason(s): Node sample-worker-spark-004 has error status\nError ID: 3e238c82-d1f5-4560-8ed8-691e923e16a0", - "plugin_version": "1.0.0", - "id": "c26ec982-ba6b-4d75-1f8c-a50240164af0", - "trust_id": null, - "info": {}, - "cluster_template_id": "5a9a09a3-9349-43bd-9058-16c401fad2d5", - "name": "sample", - "cluster_configs": {}, - "created_at": "2015-03-31 14:10:07", - "default_image_id": "e6a6c5da-67be-4017-a7d2-81f466efe67e", - "project_id": "9cd1314a0a31493282b6712b76a8fcda" -} diff --git a/api-ref/source/v2/samples/image-registry/image-register-request.json b/api-ref/source/v2/samples/image-registry/image-register-request.json deleted file mode 100644 index 7bd4d15efd..0000000000 --- a/api-ref/source/v2/samples/image-registry/image-register-request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "username": "ubuntu", - "description": "Ubuntu image for Hadoop 2.7.1" -} diff --git a/api-ref/source/v2/samples/image-registry/image-register-response.json b/api-ref/source/v2/samples/image-registry/image-register-response.json deleted file mode 100644 index 5851a58ec2..0000000000 --- a/api-ref/source/v2/samples/image-registry/image-register-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "image": { - "updated": "2015-03-24T10:05:10Z", - "metadata": { - "_sahara_description": "Ubuntu image for Hadoop 2.7.1", - "_sahara_username": "ubuntu", - "_sahara_tag_vanilla": true, - "_sahara_tag_2.7.1": true - }, - "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", - "minDisk": 0, - "status": "ACTIVE", - "tags": [ - "vanilla", - "2.7.1" - ], - "minRam": 0, - "progress": 100, - "username": "ubuntu", - "created": "2015-02-03T10:28:39Z", - "name": "sahara-vanilla-2.7.1-ubuntu-14.04", - "description": "Ubuntu image for Hadoop 2.7.1", - "OS-EXT-IMG-SIZE:size": 1101856768 - } -} diff --git a/api-ref/source/v2/samples/image-registry/image-show-response.json b/api-ref/source/v2/samples/image-registry/image-show-response.json deleted file mode 100644 index 0f09f23f56..0000000000 --- a/api-ref/source/v2/samples/image-registry/image-show-response.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "image": { - "updated": "2015-02-03T10:29:32Z", - "metadata": { - "_sahara_username": "ubuntu", - "_sahara_tag_vanilla": true, - "_sahara_tag_2.6.0": true - }, - "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", - "minDisk": 0, - "status": "ACTIVE", - "tags": [ - "vanilla", - "2.6.0" - ], - "minRam": 0, - "progress": 100, - "username": "ubuntu", - "created": "2015-02-03T10:28:39Z", - "name": "sahara-vanilla-2.6.0-ubuntu-14.04", - "description": null, - "OS-EXT-IMG-SIZE:size": 1101856768 - } -} 
diff --git a/api-ref/source/v2/samples/image-registry/image-tags-add-request.json b/api-ref/source/v2/samples/image-registry/image-tags-add-request.json deleted file mode 100644 index aa69662a6a..0000000000 --- a/api-ref/source/v2/samples/image-registry/image-tags-add-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "tags": [ - "vanilla", - "2.7.1", - "some_other_tag" - ] -} diff --git a/api-ref/source/v2/samples/image-registry/image-tags-add-response.json b/api-ref/source/v2/samples/image-registry/image-tags-add-response.json deleted file mode 100644 index 2c66b2930d..0000000000 --- a/api-ref/source/v2/samples/image-registry/image-tags-add-response.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "image": { - "updated": "2015-03-24T10:18:33Z", - "metadata": { - "_sahara_tag_vanilla": true, - "_sahara_description": "Ubuntu image for Hadoop 2.7.1", - "_sahara_username": "ubuntu", - "_sahara_tag_some_other_tag": true, - "_sahara_tag_2.7.1": true - }, - "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", - "minDisk": 0, - "status": "ACTIVE", - "tags": [ - "vanilla", - "some_other_tag", - "2.7.1" - ], - "minRam": 0, - "progress": 100, - "username": "ubuntu", - "created": "2015-02-03T10:28:39Z", - "name": "sahara-vanilla-2.6.0-ubuntu-14.04", - "description": "Ubuntu image for Hadoop 2.7.1", - "OS-EXT-IMG-SIZE:size": 1101856768 - } -} diff --git a/api-ref/source/v2/samples/image-registry/image-tags-delete-request.json b/api-ref/source/v2/samples/image-registry/image-tags-delete-request.json deleted file mode 100644 index 44e1cef468..0000000000 --- a/api-ref/source/v2/samples/image-registry/image-tags-delete-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "tags": [ - "some_other_tag" - ] -} diff --git a/api-ref/source/v2/samples/image-registry/image-tags-delete-response.json b/api-ref/source/v2/samples/image-registry/image-tags-delete-response.json deleted file mode 100644 index 44eb131390..0000000000 --- a/api-ref/source/v2/samples/image-registry/image-tags-delete-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "image": { - "updated": "2015-03-24T10:19:28Z", - "metadata": { - "_sahara_description": "Ubuntu image for Hadoop 2.7.1", - "_sahara_username": "ubuntu", - "_sahara_tag_vanilla": true, - "_sahara_tag_2.7.1": true - }, - "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", - "minDisk": 0, - "status": "ACTIVE", - "tags": [ - "vanilla", - "2.7.1" - ], - "minRam": 0, - "progress": 100, - "username": "ubuntu", - "created": "2015-02-03T10:28:39Z", - "name": "sahara-vanilla-2.7.1-ubuntu-14.04", - "description": "Ubuntu image for Hadoop 2.7.1", - "OS-EXT-IMG-SIZE:size": 1101856768 - } -} diff --git a/api-ref/source/v2/samples/image-registry/images-list-response.json b/api-ref/source/v2/samples/image-registry/images-list-response.json deleted file mode 100644 index d40f0c215f..0000000000 --- a/api-ref/source/v2/samples/image-registry/images-list-response.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "images": [ - { - "name": "ubuntu-vanilla-2.7.1", - "id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", - "created": "2015-08-06T08:17:14Z", - "metadata": { - "_sahara_tag_2.7.1": true, - "_sahara_username": "ubuntu", - "_sahara_tag_vanilla": true - }, - "username": "ubuntu", - "progress": 100, - "OS-EXT-IMG-SIZE:size": 998716928, - "status": "ACTIVE", - "minDisk": 0, - "tags": [ - "vanilla", - "2.7.1" - ], - "updated": "2015-09-04T09:35:09Z", - "minRam": 0, - "description": null - }, - { - "name": "cdh-latest", - "id": "ff74035b-9da7-4edf-981d-57f270ed337d", - "created": "2015-09-04T11:56:44Z", - "metadata": { - "_sahara_username": 
"ubuntu", - "_sahara_tag_5.4.0": true, - "_sahara_tag_cdh": true - }, - "username": "ubuntu", - "progress": 100, - "OS-EXT-IMG-SIZE:size": 3281453056, - "status": "ACTIVE", - "minDisk": 0, - "tags": [ - "5.4.0", - "cdh" - ], - "updated": "2015-09-04T12:46:42Z", - "minRam": 0, - "description": null - } - ] -} diff --git a/api-ref/source/v2/samples/job-binaries/create-request.json b/api-ref/source/v2/samples/job-binaries/create-request.json deleted file mode 100644 index f32e15b40f..0000000000 --- a/api-ref/source/v2/samples/job-binaries/create-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "url": "swift://container/jar-example.jar", - "name": "jar-example.jar", - "description": "This is a job binary", - "extra": { - "password": "swordfish", - "user": "admin" - } -} diff --git a/api-ref/source/v2/samples/job-binaries/create-response.json b/api-ref/source/v2/samples/job-binaries/create-response.json deleted file mode 100644 index 2d6aed83ea..0000000000 --- a/api-ref/source/v2/samples/job-binaries/create-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "job_binary": { - "is_public": false, - "description": "This is a job binary", - "url": "swift://container/jar-example.jar", - "project_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 14:49:20.106452", - "id": "07f86352-ee8a-4b08-b737-d705ded5ff9c", - "updated_at": null, - "name": "jar-example.jar", - "is_protected": false - } -} diff --git a/api-ref/source/v2/samples/job-binaries/list-response.json b/api-ref/source/v2/samples/job-binaries/list-response.json deleted file mode 100644 index f77380eb58..0000000000 --- a/api-ref/source/v2/samples/job-binaries/list-response.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "binaries": [ - { - "is_public": false, - "description": "", - "url": "internal-db://d2498cbf-4589-484a-a814-81436c18beb3", - "project_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 12:36:59.375060", - "updated_at": null, - "id": "84248975-3c82-4206-a58d-6e7fb3a563fd", - "name": "example.pig", - "is_protected": false - }, - { - "is_public": false, - "description": "", - "url": "internal-db://22f1d87a-23c8-483e-a0dd-cb4a16dde5f9", - "project_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 12:43:52.265899", - "updated_at": null, - "id": "508fc62d-1d58-4412-b603-bdab307bb926", - "name": "udf.jar", - "is_protected": false - }, - { - "is_public": false, - "description": "", - "url": "swift://container/jar-example.jar", - "project_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 14:25:04.970513", - "updated_at": null, - "id": "a716a9cd-9add-4b12-b1b6-cdb71aaef350", - "name": "jar-example.jar", - "is_protected": false - } - ] -} diff --git a/api-ref/source/v2/samples/job-binaries/show-data-response b/api-ref/source/v2/samples/job-binaries/show-data-response deleted file mode 100644 index 8765f0c6c5..0000000000 --- a/api-ref/source/v2/samples/job-binaries/show-data-response +++ /dev/null @@ -1,3 +0,0 @@ -A = load '$INPUT' using PigStorage(':') as (fruit: chararray); -B = foreach A generate com.hadoopbook.pig.Trim(fruit); -store B into '$OUTPUT' USING PigStorage(); \ No newline at end of file diff --git a/api-ref/source/v2/samples/job-binaries/show-response.json b/api-ref/source/v2/samples/job-binaries/show-response.json deleted file mode 100644 index 36e12c85e4..0000000000 --- a/api-ref/source/v2/samples/job-binaries/show-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "job_binary": { - "is_public": false, - "description": "an example jar file", - "url": 
"swift://container/jar-example.jar", - "project_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2013-10-15 14:25:04.970513", - "updated_at": null, - "id": "a716a9cd-9add-4b12-b1b6-cdb71aaef350", - "name": "jar-example.jar", - "is_protected": false - } -} diff --git a/api-ref/source/v2/samples/job-binaries/update-request.json b/api-ref/source/v2/samples/job-binaries/update-request.json deleted file mode 100644 index 456b0b209c..0000000000 --- a/api-ref/source/v2/samples/job-binaries/update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "url": "swift://container/new-jar-example.jar", - "name": "new-jar-example.jar", - "description": "This is a new job binary" -} diff --git a/api-ref/source/v2/samples/job-binaries/update-response.json b/api-ref/source/v2/samples/job-binaries/update-response.json deleted file mode 100644 index 6dcbfa8c54..0000000000 --- a/api-ref/source/v2/samples/job-binaries/update-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "job_binary": { - "is_public": false, - "description": "This is a new job binary", - "url": "swift://container/new-jar-example.jar", - "project_id": "11587919cc534bcbb1027a161c82cf58", - "created_at": "2015-09-15 12:42:51.421542", - "updated_at": null, - "id": "b713d7ad-4add-4f12-g1b6-cdg71aaef350", - "name": "new-jar-example.jar", - "is_protected": false - } -} diff --git a/api-ref/source/v2/samples/job-templates/job-template-create-request.json b/api-ref/source/v2/samples/job-templates/job-template-create-request.json deleted file mode 100644 index b8d1a8ed19..0000000000 --- a/api-ref/source/v2/samples/job-templates/job-template-create-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "description": "This is pig job example", - "mains": [ - "90d9d5ec-11aa-48bd-bc8c-34936ce0db6e" - ], - "libs": [ - "320a2ca7-25fd-4b48-9bc3-4fb1b6c4ff27" - ], - "type": "Pig", - "name": "pig-job-example" -} diff --git a/api-ref/source/v2/samples/job-templates/job-template-create-response.json b/api-ref/source/v2/samples/job-templates/job-template-create-response.json deleted file mode 100644 index c7d15f4fec..0000000000 --- a/api-ref/source/v2/samples/job-templates/job-template-create-response.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "job_template": { - "is_public": false, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-03-27 08:48:38.630827", - "id": "71defc8f-d005-484f-9d86-1aedf644d1ef", - "name": "pig-job-example", - "description": "This is pig job example", - "interface": [], - "libs": [ - { - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:53", - "id": "320a2ca7-25fd-4b48-9bc3-4fb1b6c4ff27", - "name": "binary-job", - "updated_at": null, - "description": "", - "url": "internal-db://c6a925fa-ac1d-4b2e-b88a-7054e1927521" - } - ], - "type": "Pig", - "is_protected": false, - "mains": [ - { - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-03 10:47:51", - "id": "90d9d5ec-11aa-48bd-bc8c-34936ce0db6e", - "name": "pig", - "updated_at": null, - "description": "", - "url": "internal-db://872878f6-72ea-44db-8d1d-e6a6396d2df0" - } - ] - } -} diff --git a/api-ref/source/v2/samples/job-templates/job-template-show-response.json b/api-ref/source/v2/samples/job-templates/job-template-show-response.json deleted file mode 100644 index dafbdc474c..0000000000 --- a/api-ref/source/v2/samples/job-templates/job-template-show-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "job_template": { - "is_public": false, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": 
"2015-02-10 14:25:48", - "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", - "name": "Edp-test-job", - "updated_at": null, - "description": "", - "interface": [], - "libs": [ - { - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:48", - "id": "0ff4ac10-94a4-4e25-9ac9-603afe27b100", - "name": "binary-job.jar", - "updated_at": null, - "description": "", - "url": "swift://Edp-test-c71e6bce.sahara/binary-job.jar" - } - ], - "type": "MapReduce", - "mains": [], - "is_protected": false - } -} diff --git a/api-ref/source/v2/samples/job-templates/job-template-update-request.json b/api-ref/source/v2/samples/job-templates/job-template-update-request.json deleted file mode 100644 index 810b8a60b1..0000000000 --- a/api-ref/source/v2/samples/job-templates/job-template-update-request.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "description": "This is public pig job example", - "name": "public-pig-job-example" -} diff --git a/api-ref/source/v2/samples/job-templates/job-template-update-response.json b/api-ref/source/v2/samples/job-templates/job-template-update-response.json deleted file mode 100644 index 5d4970457c..0000000000 --- a/api-ref/source/v2/samples/job-templates/job-template-update-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "job_template": { - "is_public": false, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:48", - "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", - "name": "public-pig-job-example", - "updated_at": null, - "description": "This is public pig job example", - "interface": [], - "libs": [ - { - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:48", - "id": "0ff4ac10-94a4-4e25-9ac9-603afe27b100", - "name": "binary-job.jar", - "updated_at": null, - "description": "", - "url": "swift://Edp-test-c71e6bce.sahara/binary-job.jar" - } - ], - "type": "MapReduce", - "mains": [], - "is_protected": false - } -} diff --git a/api-ref/source/v2/samples/job-templates/job-templates-list-response.json b/api-ref/source/v2/samples/job-templates/job-templates-list-response.json deleted file mode 100644 index d7250dab47..0000000000 --- a/api-ref/source/v2/samples/job-templates/job-templates-list-response.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "job_templates": [ - { - "is_public": false, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:48", - "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", - "name": "Edp-test-job-3d60854e", - "updated_at": null, - "description": "", - "interface": [], - "libs": [ - { - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:48", - "id": "0ff4ac10-94a4-4e25-9ac9-603afe27b100", - "name": "binary-job-339c2d1a.jar", - "updated_at": null, - "description": "", - "url": "swift://Edp-test-c71e6bce.sahara/binary-job-339c2d1a.jar" - } - ], - "type": "MapReduce", - "mains": [], - "is_protected": false - }, - { - "is_public": false, - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:44", - "id": "4d1f3759-3497-4927-8352-910bacf24e62", - "name": "Edp-test-job-6b6953c8", - "updated_at": null, - "description": "", - "interface": [], - "libs": [ - { - "project_id": "9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:44", - "id": "e0d47800-4ac1-4d63-a2e1-c92d669a44e2", - "name": "binary-job-6f21a2f8.jar", - "updated_at": null, - "description": "", - "url": "swift://Edp-test-b409ec68.sahara/binary-job-6f21a2f8.jar" - } - ], - "type": "Pig", - "mains": [ - { - "project_id": 
"9cd1314a0a31493282b6712b76a8fcda", - "created_at": "2015-02-10 14:25:44", - "id": "e073e896-f123-4b76-995f-901d786262df", - "name": "binary-job-d4f8bd75.pig", - "updated_at": null, - "description": "", - "url": "swift://Edp-test-b409ec68.sahara/binary-job-d4f8bd75.pig" - } - ], - "is_protected": false - } - ], - "markers": { - "prev": null, - "next": "c53832da-6e7b-449e-a166-9f9ce1718d03" - } -} diff --git a/api-ref/source/v2/samples/job-types/job-types-list-response.json b/api-ref/source/v2/samples/job-types/job-types-list-response.json deleted file mode 100644 index c321c4fbd3..0000000000 --- a/api-ref/source/v2/samples/job-types/job-types-list-response.json +++ /dev/null @@ -1,209 +0,0 @@ -{ - "job_types": [ - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "Hive" - }, - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "Java" - }, - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "MapReduce" - }, - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. 
It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "MapReduce.Streaming" - }, - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "Pig" - }, - { - "plugins": [ - { - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "versions": { - "1.2.1": {}, - "2.6.0": {} - }, - "title": "Vanilla Apache Hadoop", - "name": "vanilla" - }, - { - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": { - "1.3.2": {}, - "2.0.6": {} - }, - "title": "Hortonworks Data Platform", - "name": "hdp" - }, - { - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": { - "5": {}, - "5.3.0": {} - }, - "title": "Cloudera Plugin", - "name": "cdh" - } - ], - "name": "Shell" - }, - { - "plugins": [ - { - "description": "This plugin provides an ability to launch Spark on Hadoop CDH cluster without any management consoles.", - "versions": { - "1.0.0": {} - }, - "title": "Apache Spark", - "name": "spark" - } - ], - "name": "Spark" - } - ] -} diff --git a/api-ref/source/v2/samples/jobs/cancel-response.json b/api-ref/source/v2/samples/jobs/cancel-response.json deleted file mode 100644 index 61b7e3547b..0000000000 --- a/api-ref/source/v2/samples/jobs/cancel-response.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "job": { - "job_configs": { - "configs": { - "mapred.reduce.tasks": "1", - "mapred.map.tasks": "1" - }, - "args": [ - "arg1", - "arg2" - ], - "params": { - "param2": "value2", - "param1": "value1" - } - }, - "is_protected": false, - "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", - "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", - "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", - "created_at": "2015-09-15T09:49:24", - "end_time": "2015-09-15T12:50:46", - "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", - "is_public": false, - "updated_at": "2015-09-15T09:50:46", - 
"return_code": null, - "data_source_urls": { - "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", - "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" - }, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "start_time": "2015-09-15T12:49:43", - "id": "20da9edb-12ce-4b45-a473-41baeefef997", - "oozie_job_id": "0000001-150915094349962-oozie-hado-W", - "info": { - "user": "hadoop", - "actions": [ - { - "name": ":start:", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": "job-node", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "errorCode": null, - "id": "0000001-150915094349962-oozie-hado-W@:start:", - "consoleUrl": "-", - "errorMessage": null, - "toString": "Action name[:start:] status[OK]", - "stats": null, - "type": ":START:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "job-node", - "trackerUri": "http://172.18.168.119:8032", - "externalStatus": "FAILED/KILLED", - "status": "ERROR", - "externalId": "job_1442310173665_0002", - "transition": "fail", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "JA018", - "id": "0000001-150915094349962-oozie-hado-W@job-node", - "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", - "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", - "toString": "Action name[job-node] status[ERROR]", - "stats": null, - "type": "pig", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "fail", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": null, - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "E0729", - "id": "0000001-150915094349962-oozie-hado-W@fail", - "consoleUrl": "-", - "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", - "toString": "Action name[fail] status[OK]", - "stats": null, - "type": ":KILL:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "externalChildIDs": null, - "cred": "null" - } - ], - "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", - "status": "KILLED", - "group": null, - "externalId": null, - "acl": null, - "run": 0, - "appName": "job-wf", - "parentId": null, - "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", - "id": "0000001-150915094349962-oozie-hado-W", - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", - "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" - } - } -} diff --git a/api-ref/source/v2/samples/jobs/job-request.json b/api-ref/source/v2/samples/jobs/job-request.json deleted file mode 100644 index eabb89075d..0000000000 --- 
a/api-ref/source/v2/samples/jobs/job-request.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", - "job_template_id": "548ea8d4-a5sd-33a4-bt22-asf4n87a8e2dh", - "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", - "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", - "job_configs": { - "configs": { - "mapred.map.tasks": "1", - "mapred.reduce.tasks": "1" - }, - "args": [ - "arg1", - "arg2" - ], - "params": { - "param2": "value2", - "param1": "value1" - } - } -} diff --git a/api-ref/source/v2/samples/jobs/job-response.json b/api-ref/source/v2/samples/jobs/job-response.json deleted file mode 100644 index d461f6752f..0000000000 --- a/api-ref/source/v2/samples/jobs/job-response.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "job": { - "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", - "is_protected": false, - "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", - "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", - "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", - "created_at": "2015-09-15T09:49:24", - "is_public": false, - "id": "20da9edb-12ce-4b45-a473-41baeefef997", - "project_id": "808d5032ea0446889097723bfc8e919d", - "job_configs": { - "configs": { - "mapred.reduce.tasks": "1", - "mapred.map.tasks": "1" - }, - "args": [ - "arg1", - "arg2" - ], - "params": { - "param2": "value2", - "param1": "value1" - } - }, - "info": { - "status": "PENDING" - } - } -} diff --git a/api-ref/source/v2/samples/jobs/job-update-request.json b/api-ref/source/v2/samples/jobs/job-update-request.json deleted file mode 100644 index 647a4175b9..0000000000 --- a/api-ref/source/v2/samples/jobs/job-update-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "is_public": true -} diff --git a/api-ref/source/v2/samples/jobs/job-update-response.json b/api-ref/source/v2/samples/jobs/job-update-response.json deleted file mode 100644 index 3121f0a53e..0000000000 --- a/api-ref/source/v2/samples/jobs/job-update-response.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "job: { - "job_configs": { - "configs": { - "mapred.reduce.tasks": "1", - "mapred.map.tasks": "1" - }, - "args": [ - "arg1", - "arg2" - ], - "params": { - "param2": "value2", - "param1": "value1" - } - }, - "is_protected": false, - "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", - "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", - "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", - "created_at": "2015-09-15T09:49:24", - "end_time": "2015-09-15T12:50:46", - "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", - "is_public": true, - "updated_at": "2015-09-15T09:50:46", - "return_code": null, - "data_source_urls": { - "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", - "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" - }, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "start_time": "2015-09-15T12:49:43", - "id": "20da9edb-12ce-4b45-a473-41baeefef997", - "oozie_job_id": "0000001-150915094349962-oozie-hado-W", - "info": { - "user": "hadoop", - "actions": [ - { - "name": ":start:", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": "job-node", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "errorCode": null, - "id": "0000001-150915094349962-oozie-hado-W@:start:", - "consoleUrl": "-", - "errorMessage": null, - "toString": "Action name[:start:] status[OK]", - "stats": null, - "type": ":START:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": 
"job-node", - "trackerUri": "http://172.18.168.119:8032", - "externalStatus": "FAILED/KILLED", - "status": "ERROR", - "externalId": "job_1442310173665_0002", - "transition": "fail", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "JA018", - "id": "0000001-150915094349962-oozie-hado-W@job-node", - "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", - "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", - "toString": "Action name[job-node] status[ERROR]", - "stats": null, - "type": "pig", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "fail", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": null, - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "E0729", - "id": "0000001-150915094349962-oozie-hado-W@fail", - "consoleUrl": "-", - "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", - "toString": "Action name[fail] status[OK]", - "stats": null, - "type": ":KILL:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "externalChildIDs": null, - "cred": "null" - } - ], - "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", - "status": "KILLED", - "group": null, - "externalId": null, - "acl": null, - "run": 0, - "appName": "job-wf", - "parentId": null, - "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", - "id": "0000001-150915094349962-oozie-hado-W", - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", - "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" - } - } -} diff --git a/api-ref/source/v2/samples/jobs/list-response.json b/api-ref/source/v2/samples/jobs/list-response.json deleted file mode 100644 index 118645fb25..0000000000 --- a/api-ref/source/v2/samples/jobs/list-response.json +++ /dev/null @@ -1,122 +0,0 @@ -{ - "jobs": [ - { - "job_configs": { - "configs": { - "mapred.reduce.tasks": "1", - "mapred.map.tasks": "1" - }, - "args": [ - "arg1", - "arg2" - ], - "params": { - "param2": "value2", - "param1": "value1" - } - }, - "is_protected": false, - "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", - "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", - "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", - "created_at": "2015-09-15T09:49:24", - "end_time": "2015-09-15T12:50:46", - "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", - "is_public": false, - "updated_at": "2015-09-15T09:50:46", - "return_code": null, - "data_source_urls": { - "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", - "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" - }, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "start_time": "2015-09-15T12:49:43", - "id": 
"20da9edb-12ce-4b45-a473-41baeefef997", - "oozie_job_id": "0000001-150915094349962-oozie-hado-W", - "info": { - "user": "hadoop", - "actions": [ - { - "name": ":start:", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": "job-node", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "errorCode": null, - "id": "0000001-150915094349962-oozie-hado-W@:start:", - "consoleUrl": "-", - "errorMessage": null, - "toString": "Action name[:start:] status[OK]", - "stats": null, - "type": ":START:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "job-node", - "trackerUri": "http://172.18.168.119:8032", - "externalStatus": "FAILED/KILLED", - "status": "ERROR", - "externalId": "job_1442310173665_0002", - "transition": "fail", - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "JA018", - "id": "0000001-150915094349962-oozie-hado-W@job-node", - "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", - "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", - "toString": "Action name[job-node] status[ERROR]", - "stats": null, - "type": "pig", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "externalChildIDs": null, - "cred": "null" - }, - { - "name": "fail", - "trackerUri": "-", - "externalStatus": "OK", - "status": "OK", - "externalId": "-", - "transition": null, - "data": null, - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "errorCode": "E0729", - "id": "0000001-150915094349962-oozie-hado-W@fail", - "consoleUrl": "-", - "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", - "toString": "Action name[fail] status[OK]", - "stats": null, - "type": ":KILL:", - "retries": 0, - "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "externalChildIDs": null, - "cred": "null" - } - ], - "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", - "status": "KILLED", - "group": null, - "externalId": null, - "acl": null, - "run": 0, - "appName": "job-wf", - "parentId": null, - "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", - "id": "0000001-150915094349962-oozie-hado-W", - "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", - "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", - "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", - "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", - "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" - } - } - ] -} diff --git a/api-ref/source/v2/samples/node-group-templates/node-group-template-create-request.json b/api-ref/source/v2/samples/node-group-templates/node-group-template-create-request.json deleted file mode 100644 index 96c40f097c..0000000000 --- a/api-ref/source/v2/samples/node-group-templates/node-group-template-create-request.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "plugin_name": "vanilla", - "plugin_version": "2.7.1", - "node_processes": [ 
- "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "name": "master", - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "flavor_id": "2" -} diff --git a/api-ref/source/v2/samples/node-group-templates/node-group-template-create-response.json b/api-ref/source/v2/samples/node-group-templates/node-group-template-create-response.json deleted file mode 100644 index c9b8e96312..0000000000 --- a/api-ref/source/v2/samples/node-group-templates/node-group-template-create-response.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "node_group_template": { - "is_public": false, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "is_default": false, - "availability_zone": null, - "plugin_name": "vanilla", - "is_protected": false, - "flavor_id": "2", - "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "plugin_version": "2.7.1", - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:20:11", - "security_groups": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } -} diff --git a/api-ref/source/v2/samples/node-group-templates/node-group-template-show-response.json b/api-ref/source/v2/samples/node-group-templates/node-group-template-show-response.json deleted file mode 100644 index d948694db9..0000000000 --- a/api-ref/source/v2/samples/node-group-templates/node-group-template-show-response.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "node_group_template": { - "is_public": false, - "image_id": null, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "is_default": false, - "availability_zone": null, - "plugin_name": "vanilla", - "flavor_id": "2", - "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "description": null, - "plugin_version": "2.7.1", - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:20:11", - "is_protected": false, - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "security_groups": null, - "volume_type": null - } -} diff --git a/api-ref/source/v2/samples/node-group-templates/node-group-template-update-request.json b/api-ref/source/v2/samples/node-group-templates/node-group-template-update-request.json deleted file mode 100644 index 27428f7aee..0000000000 --- a/api-ref/source/v2/samples/node-group-templates/node-group-template-update-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "plugin_name": "vanilla", - "plugin_version": "2.7.1", - "node_processes": [ - "datanode" - ], - "name": "new", - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "flavor_id": "2" -} diff --git a/api-ref/source/v2/samples/node-group-templates/node-group-template-update-response.json b/api-ref/source/v2/samples/node-group-templates/node-group-template-update-response.json deleted file mode 100644 index 84a549ac44..0000000000 --- 
a/api-ref/source/v2/samples/node-group-templates/node-group-template-update-response.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "node_group_template": { - "is_public": false, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "is_default": false, - "availability_zone": null, - "plugin_name": "vanilla", - "is_protected": false, - "flavor_id": "2", - "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "plugin_version": "2.7.1", - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:20:11", - "security_groups": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "new", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "volume_type": null - } -} diff --git a/api-ref/source/v2/samples/node-group-templates/node-group-templates-list-response.json b/api-ref/source/v2/samples/node-group-templates/node-group-templates-list-response.json deleted file mode 100644 index bccd121c6e..0000000000 --- a/api-ref/source/v2/samples/node-group-templates/node-group-templates-list-response.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "node_group_templates": [ - { - "is_public": false, - "image_id": null, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "is_default": false, - "availability_zone": null, - "plugin_name": "vanilla", - "flavor_id": "2", - "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", - "description": null, - "plugin_version": "2.7.1", - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:20:11", - "is_protected": false, - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "master", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "namenode", - "resourcemanager", - "oozie", - "historyserver" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "security_groups": null, - "volume_type": null - }, - { - "is_public": false, - "image_id": null, - "tenant_id": "808d5032ea0446889097723bfc8e919d", - "shares": null, - "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", - "node_configs": {}, - "auto_security_group": false, - "is_default": false, - "availability_zone": null, - "plugin_name": "vanilla", - "flavor_id": "2", - "id": "846edb31-add5-46e6-a4ee-a4c339f99251", - "description": null, - "hadoop_version": "2.7.1", - "use_autoconfig": true, - "volumes_availability_zone": null, - "created_at": "2015-09-14T10:27:00", - "is_protected": false, - "updated_at": null, - "volumes_per_node": 0, - "is_proxy_gateway": false, - "name": "worker", - "volume_mount_prefix": "/volumes/disk", - "node_processes": [ - "datanode", - "nodemanager" - ], - "volumes_size": 0, - "volume_local_to_instance": false, - "security_groups": null, - "volume_type": null - } - ], - "markers": { - "prev":"39dfc852-8588-4b61-8d2b-eb08a67ab240", - "next":"eaa0bd97-ab54-43df-83ab-77a9774d7358" - } -} diff --git a/api-ref/source/v2/samples/plugins/plugin-show-response.json b/api-ref/source/v2/samples/plugins/plugin-show-response.json deleted file mode 100644 index 00b948a0e6..0000000000 --- a/api-ref/source/v2/samples/plugins/plugin-show-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "plugin": { - "name": "vanilla", - "versions": [ - "1.2.1", - 
"2.4.1", - "2.6.0" - ], - "title": "Vanilla Apache Hadoop", - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component." - } -} diff --git a/api-ref/source/v2/samples/plugins/plugin-update-request.json b/api-ref/source/v2/samples/plugins/plugin-update-request.json deleted file mode 100644 index 97a17c38f2..0000000000 --- a/api-ref/source/v2/samples/plugins/plugin-update-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "plugin_labels": { - "enabled": { - "status": false - } - } -} diff --git a/api-ref/source/v2/samples/plugins/plugin-update-response.json b/api-ref/source/v2/samples/plugins/plugin-update-response.json deleted file mode 100644 index 7541ae939c..0000000000 --- a/api-ref/source/v2/samples/plugins/plugin-update-response.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "plugin": { - "plugin_labels": { - "hidden": { - "status": true, - "mutable": true, - "description": "Existence of plugin or its version is hidden, but still can be used for cluster creation by CLI and directly by client." - }, - "enabled": { - "status": false, - "mutable": true, - "description": "Plugin or its version is enabled and can be used by user." - } - }, - "description": "It's a fake plugin that aimed to work on the CirrOS images. It doesn't install Hadoop. It's needed to be able to test provisioning part of Sahara codebase itself.", - "versions": [ - "0.1" - ], - "tenant_id": "993f53c1f51845e48e013aeb632358d8", - "title": "Fake Plugin", - "version_labels": { - "0.1": { - "enabled": { - "status": true, - "mutable": true, - "description": "Plugin or its version is enabled and can be used by user." - } - } - }, - "name": "fake" - } -} diff --git a/api-ref/source/v2/samples/plugins/plugin-version-show-response.json b/api-ref/source/v2/samples/plugins/plugin-version-show-response.json deleted file mode 100644 index cb1c175a59..0000000000 --- a/api-ref/source/v2/samples/plugins/plugin-version-show-response.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "plugin": { - "name": "vanilla", - "versions": [ - "1.2.1", - "2.4.1", - "2.6.0" - ], - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "required_image_tags": [ - "vanilla", - "2.6.0" - ], - "node_processes": { - "JobFlow": [ - "oozie" - ], - "HDFS": [ - "namenode", - "datanode", - "secondarynamenode" - ], - "YARN": [ - "resourcemanager", - "nodemanager" - ], - "MapReduce": [ - "historyserver" - ], - "Hadoop": [], - "Hive": [ - "hiveserver" - ] - }, - "configs": [ - { - "default_value": "/tmp/hadoop-${user.name}", - "name": "hadoop.tmp.dir", - "priority": 2, - "config_type": "string", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "node", - "description": "A base for other temporary directories." - }, - { - "default_value": true, - "name": "hadoop.native.lib", - "priority": 2, - "config_type": "bool", - "applicable_target": "HDFS", - "is_optional": true, - "scope": "node", - "description": "Should native hadoop libraries, if present, be used." 
- }, - { - "default_value": 1024, - "name": "NodeManager Heap Size", - "config_values": null, - "priority": 1, - "config_type": "int", - "applicable_target": "YARN", - "is_optional": false, - "scope": "node", - "description": null - }, - { - "default_value": true, - "name": "Enable Swift", - "config_values": null, - "priority": 1, - "config_type": "bool", - "applicable_target": "general", - "is_optional": false, - "scope": "cluster", - "description": null - }, - { - "default_value": true, - "name": "Enable MySQL", - "config_values": null, - "priority": 1, - "config_type": "bool", - "applicable_target": "general", - "is_optional": true, - "scope": "cluster", - "description": null - } - ], - "title": "Vanilla Apache Hadoop" - } -} diff --git a/api-ref/source/v2/samples/plugins/plugins-list-response.json b/api-ref/source/v2/samples/plugins/plugins-list-response.json deleted file mode 100644 index d92d85c114..0000000000 --- a/api-ref/source/v2/samples/plugins/plugins-list-response.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "plugins": [ - { - "name": "vanilla", - "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", - "versions": [ - "1.2.1", - "2.4.1", - "2.6.0" - ], - "title": "Vanilla Apache Hadoop" - }, - { - "name": "hdp", - "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", - "versions": [ - "1.3.2", - "2.0.6" - ], - "title": "Hortonworks Data Platform" - }, - { - "name": "spark", - "description": "This plugin provides an ability to launch Spark on Hadoop CDH cluster without any management consoles.", - "versions": [ - "1.0.0", - "0.9.1" - ], - "title": "Apache Spark" - }, - { - "name": "cdh", - "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", - "versions": [ - "5", - "5.3.0" - ], - "title": "Cloudera Plugin" - } - ] -} diff --git a/bandit.yaml b/bandit.yaml deleted file mode 100644 index 09e5cd50dd..0000000000 --- a/bandit.yaml +++ /dev/null @@ -1,133 +0,0 @@ -# optional: after how many files to update progress -#show_progress_every: 100 - -# optional: plugins directory name -#plugins_dir: 'plugins' - -# optional: plugins discovery name pattern -plugin_name_pattern: '*.py' - -# optional: terminal escape sequences to display colors -#output_colors: -# DEFAULT: '\033[0m' -# HEADER: '\033[95m' -# LOW: '\033[94m' -# WARN: '\033[93m' -# ERROR: '\033[91m' - -# optional: log format string -#log_format: "[%(module)s]\t%(levelname)s\t%(message)s" - -# globs of files which should be analyzed -include: - - '*.py' - - '*.pyw' - -# a list of strings, which if found in the path will cause files to be excluded -# for example /tests/ - to remove all files in tests directory -exclude_dirs: - -profiles: - sahara_default: - include: - - hardcoded_password_string - - hardcoded_password_funcarg - # - hardcoded_password_default - - blacklist_calls - - blacklist_imports - - subprocess_popen_with_shell_equals_true - - subprocess_without_shell_equals_true - - any_other_function_with_shell_equals_true - - start_process_with_a_shell - - start_process_with_no_shell - - hardcoded_sql_expressions - - jinja2_autoescape_false - - use_of_mako_templates - -blacklist_calls: - bad_name_sets: - - pickle: - qualnames: [pickle.loads, pickle.load, pickle.Unpickler, - cPickle.loads, cPickle.load, 
cPickle.Unpickler] - message: "Pickle library appears to be in use, possible security issue." - - marshal: - qualnames: [marshal.load, marshal.loads] - message: "Deserialization with the marshal module is possibly dangerous." - - md5: - qualnames: [hashlib.md5] - message: "Use of insecure MD5 hash function." - - mktemp_q: - qualnames: [tempfile.mktemp] - message: "Use of insecure and deprecated function (mktemp)." - - eval: - qualnames: [eval] - message: "Use of possibly insecure function - consider using safer ast.literal_eval." - - mark_safe: - qualnames: [mark_safe] - message: "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed." - - httpsconnection: - qualnames: [httplib.HTTPSConnection] - message: "Use of HTTPSConnection does not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033" - - yaml_load: - qualnames: [yaml.load] - message: "Use of unsafe yaml load. Allows instantiation of arbitrary objects. Consider yaml.safe_load()." - - urllib_urlopen: - qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener, urllib.FancyURLopener, urllib2.urlopen, urllib2.Request] - message: "Audit url open for permitted schemes. Allowing use of file:/ or custom schemes is often unexpected." - -shell_injection: - # Start a process using the subprocess module, or one of its wrappers. - subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, - subprocess.check_output, utils.execute, utils.execute_with_timeout] - # Start a process with a function vulnerable to shell injection. - shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, - popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3, - popen2.Popen4, commands.getoutput, commands.getstatusoutput] - # Start a process with a function that is not vulnerable to shell injection. - no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv,os.execve, - os.execvp, os.execvpe, os.spawnl, os.spawnle, os.spawnlp, - os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp, os.spawnvpe, - os.startfile] - -blacklist_imports: - bad_import_sets: - - telnet: - imports: [telnetlib] - level: ERROR - message: "Telnet is considered insecure. Use SSH or some other encrypted protocol." - - info_libs: - imports: [pickle, cPickle, subprocess, Crypto] - level: LOW - message: "Consider possible security implications associated with {module} module." 
- -hardcoded_tmp_directory: - tmp_dirs: [/tmp, /var/tmp, /dev/shm] - -hardcoded_password: - word_list: "wordlist/default-passwords" - -ssl_with_bad_version: - bad_protocol_versions: - - 'PROTOCOL_SSLv2' - - 'SSLv2_METHOD' - - 'SSLv23_METHOD' - - 'PROTOCOL_SSLv3' # strict option - - 'PROTOCOL_TLSv1' # strict option - - 'SSLv3_METHOD' # strict option - - 'TLSv1_METHOD' # strict option - -password_config_option_not_marked_secret: - function_names: - - oslo.config.cfg.StrOpt - - oslo_config.cfg.StrOpt - -execute_with_run_as_root_equals_true: - function_names: - - ceilometer.utils.execute - - cinder.utils.execute - - neutron.agent.linux.utils.execute - - nova.utils.execute - - nova.utils.trycmd - -try_except_pass: - check_typed_exception: True diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 95d8031819..0000000000 --- a/bindep.txt +++ /dev/null @@ -1,30 +0,0 @@ -# This file contains runtime (non-python) dependencies -# More info at: https://docs.openstack.org/infra/bindep/readme.html - -libssl-dev [platform:dpkg] -openssl-devel [platform:rpm] - -# updates of the localized release notes require msgmerge -gettext - -# Define the basic (test) requirements extracted from bindata-fallback.txt -# - mysqladmin and psql -mariadb [platform:rpm] -mariadb-devel [platform:rpm] -dev-db/mariadb [platform:gentoo] -mysql-client [platform:dpkg !platform:debian] -mysql-server [platform:dpkg !platform:debian] -mariadb-server [platform:debian] -postgresql -postgresql-client [platform:dpkg] -libpq-dev [platform:dpkg] -postgresql-server [platform:rpm] -postgresql-devel [platform:rpm] - -# The Python binding for libguestfs are used by the sahara-image-pack -# command. -python3-guestfs [platform:dpkg] -libguestfs-xfs [platform:dpkg] -python3-libguestfs [platform:rpm] -libguestfs-xfs [platform:redhat] -xfsprogs [platform:suse] diff --git a/devstack/README.rst b/devstack/README.rst deleted file mode 100644 index 464d5c428f..0000000000 --- a/devstack/README.rst +++ /dev/null @@ -1,22 +0,0 @@ -==================== -Enabling in Devstack -==================== - -1. Download DevStack - -2. Add this repo as an external repository in ``local.conf`` - -.. sourcecode:: bash - - [[local|localrc]] - enable_plugin sahara https://opendev.org/openstack/sahara - enable_plugin heat https://opendev.org/openstack/heat - -Optionally, a git refspec may be provided as follows: - -.. sourcecode:: bash - - [[local|localrc]] - enable_plugin sahara https://opendev.org/openstack/sahara - -3. run ``stack.sh`` diff --git a/devstack/exercise.sh b/devstack/exercise.sh deleted file mode 100644 index e100169608..0000000000 --- a/devstack/exercise.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash - -# Sanity check that Sahara started if enabled - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -. $TOP_DIR/functions - -# Import configuration -. $TOP_DIR/openrc - -# Import exercise configuration -. 
$TOP_DIR/exerciserc - -is_service_enabled sahara || exit 55 - -if is_ssl_enabled_service "sahara" ||\ - is_ssl_enabled_service "sahara-api" ||\ - is_service_enabled tls-proxy; then - SAHARA_SERVICE_PROTOCOL="https" -fi - -SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -$CURL_GET $SAHARA_SERVICE_PROTOCOL://$SERVICE_HOST:8386/ 2>/dev/null \ - | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/devstack/files/apache-sahara-api.template b/devstack/files/apache-sahara-api.template deleted file mode 100644 index 591739155c..0000000000 --- a/devstack/files/apache-sahara-api.template +++ /dev/null @@ -1,27 +0,0 @@ -Listen %PUBLICPORT% - - - WSGIDaemonProcess sahara-api processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup sahara-api - WSGIScriptAlias / %SAHARA_BIN_DIR%/sahara-wsgi-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - AllowEncodedSlashes On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/%APACHE_NAME%/sahara-api.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - diff --git a/devstack/plugin.sh b/devstack/plugin.sh deleted file mode 100755 index 44b5ba4763..0000000000 --- a/devstack/plugin.sh +++ /dev/null @@ -1,373 +0,0 @@ -#!/bin/bash -# -# lib/sahara - -# Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# install_sahara -# install_python_saharaclient -# configure_sahara -# start_sahara -# stop_sahara -# cleanup_sahara - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set -o xtrace - - -# Functions -# --------- - -# create_sahara_accounts() - Set up common required sahara accounts -# -# Tenant User Roles -# ------------------------------ -# service sahara admin -function create_sahara_accounts { - - create_service_user "sahara" - - get_or_create_service "sahara" "data-processing" "Sahara Data Processing" - get_or_create_endpoint "data-processing" \ - "$REGION_NAME" \ - "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT" \ - "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT" \ - "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT" -} - -# cleanup_sahara() - Remove residual data files, anything left over from -# previous runs that would need to clean up. 
-function cleanup_sahara { - - # Cleanup auth cache dir - if [ "$SAHARA_USE_MOD_WSGI" == "True" ]; then - sudo rm -f $(apache_site_config_for sahara-api) - fi -} - -function configure_sahara_apache_wsgi { - - local sahara_apache_conf=$(apache_site_config_for sahara-api) - local sahara_ssl="" - local sahara_certfile="" - local sahara_keyfile="" - local venv_path="" - - if is_ssl_enabled_service sahara; then - sahara_ssl="SSLEngine On" - sahara_certfile="SSLCertificateFile $SAHARA_SSL_CERT" - sahara_keyfile="SSLCertificateKeyFile $SAHARA_SSL_KEY" - fi - - sudo cp $SAHARA_DIR/devstack/files/apache-sahara-api.template $sahara_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$SAHARA_SERVICE_PORT|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%SAHARA_BIN_DIR%|$SAHARA_BIN_DIR|g; - s|%SSLENGINE%|$sahara_ssl|g; - s|%SSLCERTFILE%|$sahara_certfile|g; - s|%SSLKEYFILE%|$sahara_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - " -i $sahara_apache_conf - -} - -# configure_sahara() - Set config files, create data dirs, etc -function configure_sahara { - sudo install -d -o $STACK_USER $SAHARA_CONF_DIR - - cp -p $SAHARA_DIR/etc/sahara/api-paste.ini $SAHARA_CONF_DIR - - configure_keystone_authtoken_middleware $SAHARA_CONF_FILE sahara - - # Set admin user parameters needed for trusts creation - iniset $SAHARA_CONF_FILE \ - trustee project_name $SERVICE_TENANT_NAME - iniset $SAHARA_CONF_FILE trustee username sahara - iniset $SAHARA_CONF_FILE \ - trustee password $SERVICE_PASSWORD - iniset $SAHARA_CONF_FILE \ - trustee user_domain_name "$SERVICE_DOMAIN_NAME" - iniset $SAHARA_CONF_FILE \ - trustee project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $SAHARA_CONF_FILE \ - trustee auth_url "$KEYSTONE_SERVICE_URI/v3" - - iniset_rpc_backend sahara $SAHARA_CONF_FILE DEFAULT - - # Set configuration to send notifications - - if is_service_enabled ceilometer; then - iniset $SAHARA_CONF_FILE oslo_messaging_notifications driver "messaging" - fi - - iniset $SAHARA_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - - iniset $SAHARA_CONF_FILE DEFAULT plugins $SAHARA_ENABLED_PLUGINS - - iniset $SAHARA_CONF_FILE \ - database connection `database_connection_url sahara` - - if is_service_enabled neutron; then - iniset $SAHARA_CONF_FILE neutron endpoint_type $SAHARA_ENDPOINT_TYPE - if is_ssl_enabled_service "neutron" \ - || is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE neutron ca_file $SSL_BUNDLE_FILE - fi - fi - - if is_ssl_enabled_service "heat" || is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE heat ca_file $SSL_BUNDLE_FILE - fi - iniset $SAHARA_CONF_FILE heat endpoint_type $SAHARA_ENDPOINT_TYPE - - if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE cinder ca_file $SSL_BUNDLE_FILE - fi - iniset $SAHARA_CONF_FILE cinder endpoint_type $SAHARA_ENDPOINT_TYPE - - if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE nova ca_file $SSL_BUNDLE_FILE - fi - iniset $SAHARA_CONF_FILE nova endpoint_type $SAHARA_ENDPOINT_TYPE - - if is_ssl_enabled_service "swift" || is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE swift ca_file $SSL_BUNDLE_FILE - fi - iniset $SAHARA_CONF_FILE swift endpoint_type $SAHARA_ENDPOINT_TYPE - - if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE keystone ca_file $SSL_BUNDLE_FILE - fi - iniset $SAHARA_CONF_FILE keystone endpoint_type $SAHARA_ENDPOINT_TYPE - - if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then 
- iniset $SAHARA_CONF_FILE glance ca_file $SSL_BUNDLE_FILE - fi - iniset $SAHARA_CONF_FILE glance endpoint_type $SAHARA_ENDPOINT_TYPE - - # Register SSL certificates if provided - if is_ssl_enabled_service sahara; then - ensure_certificates SAHARA - - iniset $SAHARA_CONF_FILE ssl cert_file "$SAHARA_SSL_CERT" - iniset $SAHARA_CONF_FILE ssl key_file "$SAHARA_SSL_KEY" - fi - - iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG - - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - if [ "$SAHARA_USE_MOD_WSGI" == "False" ]; then - setup_colorized_logging $SAHARA_CONF_FILE DEFAULT - fi - fi - - if is_service_enabled tls-proxy; then - # Set the service port for a proxy to take the original - iniset $SAHARA_CONF_FILE DEFAULT port $SAHARA_SERVICE_PORT_INT - fi - - if [ "$SAHARA_ENABLE_DISTRIBUTED_PERIODICS" == "True" ]; then - # Enable distributed periodic tasks - iniset $SAHARA_CONF_FILE DEFAULT periodic_coordinator_backend_url\ - $SAHARA_PERIODIC_COORDINATOR_URL - pip_install tooz[memcached] - - restart_service memcached - fi - - recreate_database sahara - $SAHARA_BIN_DIR/sahara-db-manage \ - --config-file $SAHARA_CONF_FILE upgrade head - - if [ "$SAHARA_USE_MOD_WSGI" == "True" ]; then - configure_sahara_apache_wsgi - fi -} - -# install_sahara() - Collect source and prepare -function install_sahara { - setup_develop $SAHARA_DIR - if [ "$SAHARA_USE_MOD_WSGI" == "True" ]; then - install_apache_wsgi - fi -} - -# install_ambari() - Collect source and prepare -function install_ambari { - git_clone $AMBARI_PLUGIN_REPO $AMBARI_PLUGIN_DIR $AMBARI_PLUGIN_BRANCH - setup_develop $AMBARI_PLUGIN_DIR -} - -# install_cdh() - Collect source and prepare -function install_cdh { - git_clone $CDH_PLUGIN_REPO $CDH_PLUGIN_DIR $CDH_PLUGIN_BRANCH - setup_develop $CDH_PLUGIN_DIR -} - -# install_mapr() - Collect source and prepare -function install_mapr { - git_clone $MAPR_PLUGIN_REPO $MAPR_PLUGIN_DIR $MAPR_PLUGIN_BRANCH - setup_develop $MAPR_PLUGIN_DIR -} - -# install_spark() - Collect source and prepare -function install_spark { - git_clone $SPARK_PLUGIN_REPO $SPARK_PLUGIN_DIR $SPARK_PLUGIN_BRANCH - setup_develop $SPARK_PLUGIN_DIR -} - -# install_storm() - Collect source and prepare -function install_storm { - git_clone $STORM_PLUGIN_REPO $STORM_PLUGIN_DIR $STORM_PLUGIN_BRANCH - setup_develop $STORM_PLUGIN_DIR -} - -# install_vanilla() - Collect source and prepare -function install_vanilla { - git_clone $VANILLA_PLUGIN_REPO $VANILLA_PLUGIN_DIR $VANILLA_PLUGIN_BRANCH - setup_develop $VANILLA_PLUGIN_DIR -} - -# install_python_saharaclient() - Collect source and prepare -function install_python_saharaclient { - if use_library_from_git "python-saharaclient"; then - git_clone $SAHARACLIENT_REPO $SAHARACLIENT_DIR $SAHARACLIENT_BRANCH - setup_develop $SAHARACLIENT_DIR - fi -} - -# start_sahara() - Start running processes, including screen -function start_sahara { - local service_port=$SAHARA_SERVICE_PORT - local service_protocol=$SAHARA_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - service_port=$SAHARA_SERVICE_PORT_INT - service_protocol="http" - fi - - if [ "$SAHARA_USE_MOD_WSGI" == "True" ] ; then - enable_apache_site sahara-api - restart_apache_server - else - run_process sahara-api "$SAHARA_BIN_DIR/sahara-api \ - --config-file $SAHARA_CONF_FILE" - fi - - run_process sahara-eng "$SAHARA_BIN_DIR/sahara-engine \ - --config-file $SAHARA_CONF_FILE" - - echo "Waiting for Sahara to start..." - if ! 
wait_for_service $SERVICE_TIMEOUT \ - $service_protocol://$SAHARA_SERVICE_HOST:$service_port; then - die $LINENO "Sahara did not start" - fi - - # Start proxies if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy sahara '*' $SAHARA_SERVICE_PORT \ - $SAHARA_SERVICE_HOST \ - $SAHARA_SERVICE_PORT_INT & - fi -} - -# configure_tempest_for_sahara() - Tune Tempest configuration for Sahara -function configure_tempest_for_sahara { - if is_service_enabled tempest; then - iniset $TEMPEST_CONFIG service_available sahara True - iniset $TEMPEST_CONFIG data-processing-feature-enabled plugins $SAHARA_INSTALLED_PLUGINS - fi -} - -# stop_sahara() - Stop running processes -function stop_sahara { - # Kill the Sahara screen windows - if [ "$SAHARA_USE_MOD_WSGI" == "True" ]; then - disable_apache_site sahara-api - restart_apache_server - else - stop_process sahara-all - stop_process sahara-api - stop_process sahara-eng - fi -} - -# is_sahara_enabled. This allows is_service_enabled sahara work -# correctly throughout devstack. -function is_sahara_enabled { - if is_service_enabled sahara-api || \ - is_service_enabled sahara-eng; then - return 0 - else - return 1 - fi -} - -function is_plugin_required { - if [ "${SAHARA_INSTALLED_PLUGINS/$1}" = "$SAHARA_INSTALLED_PLUGINS" ] ; then - return 1 - else - return 0 - fi -} - -# Dispatcher for Sahara plugin -if is_service_enabled sahara; then - if [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing sahara" - install_sahara - if is_plugin_required ambari; then - install_ambari - fi - if is_plugin_required cdh; then - install_cdh - fi - if is_plugin_required mapr; then - install_mapr - fi - if is_plugin_required spark; then - install_spark - fi - if is_plugin_required storm; then - install_storm - fi - if is_plugin_required vanilla; then - install_vanilla - fi - install_python_saharaclient - cleanup_sahara - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring sahara" - configure_sahara - create_sahara_accounts - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing sahara" - start_sahara - elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then - echo_summary "Configuring tempest" - configure_tempest_for_sahara - fi - - if [[ "$1" == "unstack" ]]; then - stop_sahara - fi - - if [[ "$1" == "clean" ]]; then - cleanup_sahara - fi -fi - - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index 22278a2f5c..0000000000 --- a/devstack/settings +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -# Settings needed for the Sahara plugin -# ------------------------------------- - -# Set up default directories -SAHARACLIENT_DIR=$DEST/python-saharaclient -SAHARA_DIR=$DEST/sahara -AMBARI_PLUGIN_DIR=$DEST/sahara-plugin-ambari -CDH_PLUGIN_DIR=$DEST/sahara-plugin-cdh -MAPR_PLUGIN_DIR=$DEST/sahara-plugin-mapr -SPARK_PLUGIN_DIR=$DEST/sahara-plugin-spark -STORM_PLUGIN_DIR=$DEST/sahara-plugin-storm -VANILLA_PLUGIN_DIR=$DEST/sahara-plugin-vanilla - -SAHARACLIENT_REPO=${SAHARACLIENT_REPO:-\ -${GIT_BASE}/openstack/python-saharaclient.git} -SAHARACLIENT_BRANCH=${SAHARACLIENT_BRANCH:-master} -AMBARI_PLUGIN_REPO=${AMBARI_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-ambari/} -AMBARI_PLUGIN_BRANCH=${AMBARI_PLUGIN_BRANCH:-master} -CDH_PLUGIN_REPO=${CDH_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-cdh/} -CDH_PLUGIN_BRANCH=${CDH_PLUGIN_BRANCH:-master} 
-MAPR_PLUGIN_REPO=${MAPR_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-mapr/} -MAPR_PLUGIN_BRANCH=${MAPR_PLUGIN_BRANCH:-master} -SPARK_PLUGIN_REPO=${SPARK_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-spark/} -SPARK_PLUGIN_BRANCH=${SPARK_PLUGIN_BRANCH:-master} -STORM_PLUGIN_REPO=${STORM_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-storm/} -STORM_PLUGIN_BRANCH=${STORM_PLUGIN_BRANCH:-master} -VANILLA_PLUGIN_REPO=${VANILLA_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-vanilla/} -VANILLA_PLUGIN_BRANCH=${VANILLA_PLUGIN_BRANCH:-master} - - -SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara} -SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf - -# TODO(slukjanov): Should we append sahara to SSL_ENABLED_SERVICES? - -if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then - SAHARA_SERVICE_PROTOCOL="https" -fi -SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST} -SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386} -SAHARA_SERVICE_PORT_INT=${SAHARA_SERVICE_PORT_INT:-18386} -SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -SAHARA_ENDPOINT_TYPE=${SAHARA_ENDPOINT_TYPE:-publicURL} - -SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-\ -vanilla,ambari,cdh,mapr,spark,storm,fake} -SAHARA_INSTALLED_PLUGINS=${SAHARA_INSTALLED_PLUGINS:-\ -vanilla,ambari,cdh,mapr,spark,storm,fake} -SAHARA_BIN_DIR=$(get_python_exec_prefix) - -SAHARA_ENABLE_DISTRIBUTED_PERIODICS=${SAHARA_ENABLE_DISTRIBUTED_PERIODICS:-\ -True} -SAHARA_PERIODIC_COORDINATOR_URL=${SAHARA_PERIODIC_COORDINATOR_URL:-\ -memcached://127.0.0.1:11211} - -#Toggle for deploying Sahara API with Apache + mod_wsgi -SAHARA_USE_MOD_WSGI=${SAHARA_USE_MOD_WSGI:-True} - -enable_service sahara-api sahara-eng -enable_service heat h-eng h-api h-api-cfn h-api-cw diff --git a/devstack/upgrade/from-liberty/upgrade-sahara b/devstack/upgrade/from-liberty/upgrade-sahara deleted file mode 100644 index 4ddcf50314..0000000000 --- a/devstack/upgrade/from-liberty/upgrade-sahara +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -# ``upgrade-sahara`` - -function configure_sahara_upgrade { - XTRACE=$(set +o | grep xtrace) - set -o xtrace - - # Copy api-paste.ini to configuration directory - cp -p $SAHARA_DIR/etc/sahara/api-paste.ini $SAHARA_CONF_DIR - - # reset to previous state - $XTRACE -} diff --git a/devstack/upgrade/from-mitaka/upgrade-sahara b/devstack/upgrade/from-mitaka/upgrade-sahara deleted file mode 100755 index d916b0df05..0000000000 --- a/devstack/upgrade/from-mitaka/upgrade-sahara +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -# ``upgrade-sahara`` - -function configure_sahara_upgrade { - XTRACE=$(set +o | grep xtrace) - set -o xtrace - local old_plugins - - old_plugins=$(cat $SAHARA_CONF_DIR/sahara.conf | grep ^plugins) - sed -i.bak "s/$old_plugins/plugins=fake,vanilla,cdh/g" $SAHARA_CONF_DIR/sahara.conf - - # reset to previous state - $XTRACE -} diff --git a/devstack/upgrade/from-rocky/upgrade-sahara b/devstack/upgrade/from-rocky/upgrade-sahara deleted file mode 100755 index 940e7ca7dd..0000000000 --- a/devstack/upgrade/from-rocky/upgrade-sahara +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -# ``upgrade-sahara`` - -function configure_sahara_upgrade { - XTRACE=$(set +o | grep xtrace) - set -o xtrace - - install_ambari - install_cdh - install_mapr - install_spark - install_storm - install_vanilla - - # reset to previous state - $XTRACE -} diff --git a/devstack/upgrade/resources.sh b/devstack/upgrade/resources.sh deleted file mode 100755 index 3b0c886eaa..0000000000 --- 
a/devstack/upgrade/resources.sh +++ /dev/null @@ -1,243 +0,0 @@ -#!/bin/bash - -set -o errexit - -. $GRENADE_DIR/grenaderc -. $GRENADE_DIR/functions - -. $TOP_DIR/openrc admin admin - -set -o xtrace - -SAHARA_USER=sahara_grenade -SAHARA_PROJECT=sahara_grenade -SAHARA_PASS=pass -SAHARA_KEY=sahara_key -SAHARA_KEY_FILE=$SAVE_DIR/sahara_key.pem - -PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-public} - -# cirros image is not appropriate for cluster creation -SAHARA_IMAGE_NAME=${SAHARA_IMAGE_NAME:-fedora-heat-test-image} -SAHARA_IMAGE_USER=${SAHARA_IMAGE_USER:-fedora} - -# custom flavor parameters -SAHARA_FLAVOR_NAME=${SAHARA_FLAVOR_NAME:-sahara_flavor} -SAHARA_FLAVOR_RAM=${SAHARA_FLAVOR_RAM:-1024} -SAHARA_FLAVOR_DISK=${SAHARA_FLAVOR_DISK:-10} - -NG_TEMPLATE_NAME=ng-template-grenade -CLUSTER_TEMPLATE_NAME=cluster-template-grenade -CLUSTER_NAME=cluster-grenade - -function sahara_set_user { - # set ourselves to the created sahara user - OS_TENANT_NAME=$SAHARA_PROJECT - OS_PROJECT_NAME=$SAHARA_PROJECT - OS_USERNAME=$SAHARA_USER - OS_PASSWORD=$SAHARA_PASS -} - -function create_tenant { - # create a tenant for the server - eval $(openstack project create -f shell -c id $SAHARA_PROJECT) - if [[ -z "$id" ]]; then - die $LINENO "Didn't create $SAHARA_PROJECT project" - fi - resource_save sahara project_id $id -} - -function create_user { - local project_id=$id - eval $(openstack user create $SAHARA_USER \ - --project $project_id \ - --password $SAHARA_PASS \ - -f shell -c id) - if [[ -z "$id" ]]; then - die $LINENO "Didn't create $SAHARA_USER user" - fi - resource_save sahara user_id $id - - # Workaround for bug: https://bugs.launchpad.net/keystone/+bug/1662911 - openstack role add member --user $id --project $project_id -} - -function create_keypair { - # create key pair for access - openstack keypair create $SAHARA_KEY > $SAHARA_KEY_FILE - chmod 600 $SAHARA_KEY_FILE -} - -function create_flavor { - eval $(openstack flavor create -f shell -c id \ - --ram $SAHARA_FLAVOR_RAM \ - --disk $SAHARA_FLAVOR_DISK \ - $SAHARA_FLAVOR_NAME) - resource_save sahara flavor_id $id -} - -function register_image { - eval $(openstack image show \ - -f shell -c id $SAHARA_IMAGE_NAME) - resource_save sahara image_id $id - openstack dataprocessing image register $id --username $SAHARA_IMAGE_USER - openstack dataprocessing image tags set $id --tags fake 0.1 -} - -function create_node_group_template { - eval $(openstack network show -f shell -c id $PUBLIC_NETWORK_NAME) - local public_net_id=$id - local flavor_id=$(resource_get sahara flavor_id) - openstack dataprocessing node group template create \ - --name $NG_TEMPLATE_NAME \ - --flavor $flavor_id \ - --plugin fake \ - --plugin-version 0.1 \ - --processes jobtracker namenode tasktracker datanode \ - --floating-ip-pool $public_net_id \ - --auto-security-group -} - -function create_cluster_template { - openstack dataprocessing cluster template create \ - --name $CLUSTER_TEMPLATE_NAME \ - --node-groups $NG_TEMPLATE_NAME:1 -} - -function create_cluster { - local net_id=$(resource_get network net_id) - local image_id=$(resource_get sahara image_id) - if [[ -n "$net_id" ]]; then - eval $(openstack dataprocessing cluster create \ - --name $CLUSTER_NAME \ - --cluster-template $CLUSTER_TEMPLATE_NAME \ - --image $image_id \ - --user-keypair $SAHARA_KEY \ - --neutron-network $net_id \ - -f shell -c id) - else - eval $(openstack dataprocessing cluster create \ - --name $CLUSTER_NAME \ - --cluster-template $CLUSTER_TEMPLATE_NAME \ - --image $image_id \ - --user-keypair $SAHARA_KEY \ - -f 
shell -c id) - fi - resource_save sahara cluster_id $id -} - -function wait_active_state { - # wait until cluster moves to active state - local timeleft=1000 - while [[ $timeleft -gt 0 ]]; do - eval $(openstack dataprocessing cluster show -f shell \ - -c Status $CLUSTER_NAME) - if [[ "$status" != "Active" ]]; then - if [[ "$status" == "Error" ]]; then - die $LINENO "Cluster is in Error state" - fi - echo "Cluster is still not in Active state" - sleep 10 - timeleft=$((timeleft - 10)) - if [[ $timeleft == 0 ]]; then - die $LINENO "Cluster hasn't moved to Active state \ - during 1000 seconds" - fi - else - break - fi - done -} - -function check_active { - # check that cluster is in Active state - eval $(openstack dataprocessing cluster show -f shell \ - -c Status $CLUSTER_NAME) - if [[ "$status" != "Active" ]]; then - die $LINENO "Cluster is not in Active state anymore" - fi - echo "Sahara verification: SUCCESS" -} - -function create { - create_tenant - - create_user - - create_flavor - - register_image - - sahara_set_user - - create_keypair - - create_node_group_template - - create_cluster_template - - create_cluster - - wait_active_state -} - -function verify { - : -} - -function verify_noapi { - : -} - -function destroy { - sahara_set_user - set +o errexit - - # delete cluster - check_active - - openstack dataprocessing cluster delete $CLUSTER_NAME --wait - - set -o errexit - - # delete cluster template - openstack dataprocessing cluster template delete $CLUSTER_TEMPLATE_NAME - - # delete node group template - openstack dataprocessing node group template delete $NG_TEMPLATE_NAME - - source_quiet $TOP_DIR/openrc admin admin - - # unregister image - local image_id=$(resource_get sahara image_id) - openstack dataprocessing image unregister $image_id - - # delete flavor - openstack flavor delete $SAHARA_FLAVOR_NAME - - # delete user and project - local user_id=$(resource_get sahara user_id) - local project_id=$(resource_get sahara project_id) - openstack user delete $user_id - openstack project delete $project_id -} - -# Dispatcher -case $1 in - "create") - create - ;; - "verify_noapi") - verify_noapi - ;; - "verify") - verify - ;; - "destroy") - destroy - ;; - "force_destroy") - set +o errexit - destroy - ;; -esac diff --git a/devstack/upgrade/settings b/devstack/upgrade/settings deleted file mode 100644 index d50dcfc0b2..0000000000 --- a/devstack/upgrade/settings +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -register_project_for_upgrade sahara -register_db_to_save sahara -devstack_localrc base IMAGE_URLS=\ -"http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-uec.tar.gz,\ -http://tarballs.openstack.org/heat-test-image/fedora-heat-test-image.qcow2" -devstack_localrc base enable_plugin sahara \ - https://opendev.org/openstack/sahara \ - stable/train -devstack_localrc base enable_plugin heat \ - https://opendev.org/openstack/heat \ - stable/train -devstack_localrc base DEFAULT_IMAGE_NAME="cirros-0.3.5-x86_64-uec" - -devstack_localrc target IMAGE_URLS=\ -"http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-uec.tar.gz,\ -http://tarballs.openstack.org/heat-test-image/fedora-heat-test-image.qcow2" -devstack_localrc target enable_plugin sahara \ - https://opendev.org/openstack/sahara -devstack_localrc target enable_plugin heat \ - https://opendev.org/openstack/heat -devstack_localrc target LIBS_FROM_GIT=python-saharaclient -devstack_localrc target DEFAULT_IMAGE_NAME="cirros-0.3.5-x86_64-uec" diff --git a/devstack/upgrade/shutdown.sh b/devstack/upgrade/shutdown.sh deleted file mode 
100755 index 22ac599c5c..0000000000 --- a/devstack/upgrade/shutdown.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -# ``shutdown-sahara`` - -set -o errexit - -. $GRENADE_DIR/grenaderc -. $GRENADE_DIR/functions - -# We need base DevStack functions for this -. $BASE_DEVSTACK_DIR/functions -. $BASE_DEVSTACK_DIR/stackrc # needed for status directory - -. $BASE_DEVSTACK_DIR/lib/tls -. $BASE_DEVSTACK_DIR/lib/apache -. ${GITDIR[sahara]}/devstack/plugin.sh - -set -o xtrace - -export ENABLED_SERVICES+=,sahara-api,sahara-eng, -stop_sahara - -# sanity check that service is actually down -ensure_services_stopped sahara-eng diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh deleted file mode 100755 index f754529b7e..0000000000 --- a/devstack/upgrade/upgrade.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env bash - -# ``upgrade-sahara`` - -echo "*********************************************************************" -echo "Begin $0" -echo "*********************************************************************" - -# Clean up any resources that may be in use -cleanup() { - set +o errexit - - echo "********************************************************************" - echo "ERROR: Abort $0" - echo "********************************************************************" - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -trap cleanup SIGHUP SIGINT SIGTERM - -# Keep track of the grenade directory -RUN_DIR=$(cd $(dirname "$0") && pwd) - -# Source params -. $GRENADE_DIR/grenaderc - -# Import common functions -. $GRENADE_DIR/functions - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Upgrade Sahara -# ============ - -# Get functions from current DevStack -. $TARGET_DEVSTACK_DIR/stackrc -. $TARGET_DEVSTACK_DIR/lib/apache -. $TARGET_DEVSTACK_DIR/lib/tls -. $(dirname $(dirname $BASH_SOURCE))/plugin.sh -. $(dirname $(dirname $BASH_SOURCE))/settings - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - -# Save current config files for posterity -[[ -d $SAVE_DIR/etc.sahara ]] || cp -pr $SAHARA_CONF_DIR $SAVE_DIR/etc.sahara - -# install_sahara() -stack_install_service sahara -install_python_saharaclient - -# calls upgrade-sahara for specific release -upgrade_project sahara $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH - -# Migrate the database -$SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE \ - upgrade head || die $LINENO "DB sync error" - -# Start Sahara -start_sahara - -# Don't succeed unless the service come up -ensure_services_started sahara-eng - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End $0" -echo "*********************************************************************" diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 9b1ab70dad..0000000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
-openstackdocstheme>=1.31.2 # Apache-2.0 -os-api-ref>=1.6.0 # Apache-2.0 -reno>=2.5.0 # Apache-2.0 -sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2 # BSD -sphinxcontrib-httpdomain>=1.3.0 # BSD -whereto>=0.3.0 # Apache-2.0 diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess deleted file mode 100644 index 0858c21f22..0000000000 --- a/doc/source/_extra/.htaccess +++ /dev/null @@ -1,9 +0,0 @@ -# renamed after the switch to Storyboard -redirectmatch 301 ^/sahara/([^/]+)/contributor/launchpad.html$ /sahara/$1/contributor/project.html -# renamed after some documentation reshuffling -redirectmatch 301 ^/sahara/(?!ocata|pike|queens)([^/]+)/user/vanilla-imagebuilder.html$ /sahara/$1/user/vanilla-plugin.html -redirectmatch 301 ^/sahara/(?!ocata|pike|queens)([^/]+)/user/cdh-imagebuilder.html$ /sahara/$1/user/cdh-plugin.html -redirectmatch 301 ^/sahara/(?!ocata|pike|queens)([^/]+)/user/guest-requirements.html$ /sahara/$1/user/building-guest-images.html -redirectmatch 301 ^/sahara/([^/]+)/user/([^-]+)-plugin.html$ /sahara-plugin-$2/$1/ -redirectmatch 301 ^/sahara/([^/]+)/contributor/how-to-participate.html$ /sahara/$1/contributor/contributing.html -redirectmatch 301 ^/sahara/([^/]+)/contributor/project.html$ /sahara/$1/contributor/contributing.html diff --git a/doc/source/_templates/sidebarlinks.html b/doc/source/_templates/sidebarlinks.html deleted file mode 100644 index 09ad156cb3..0000000000 --- a/doc/source/_templates/sidebarlinks.html +++ /dev/null @@ -1,11 +0,0 @@ -
-<h3>Useful Links</h3>
- - -{% if READTHEDOCS %} - -{% endif %} diff --git a/doc/source/_theme_rtd/layout.html b/doc/source/_theme_rtd/layout.html deleted file mode 100644 index cd7ade1d70..0000000000 --- a/doc/source/_theme_rtd/layout.html +++ /dev/null @@ -1,4 +0,0 @@ -{% extends "basic/layout.html" %} -{% set css_files = css_files + ['_static/tweaks.css'] %} - -{% block relbar1 %}{% endblock relbar1 %} \ No newline at end of file diff --git a/doc/source/_theme_rtd/theme.conf b/doc/source/_theme_rtd/theme.conf deleted file mode 100644 index 8c44b0ce46..0000000000 --- a/doc/source/_theme_rtd/theme.conf +++ /dev/null @@ -1,4 +0,0 @@ -[theme] -inherit = nature -stylesheet = nature.css -pygments_style = tango \ No newline at end of file diff --git a/doc/source/admin/advanced-configuration-guide.rst b/doc/source/admin/advanced-configuration-guide.rst deleted file mode 100644 index 94445e0424..0000000000 --- a/doc/source/admin/advanced-configuration-guide.rst +++ /dev/null @@ -1,653 +0,0 @@ -Sahara Advanced Configuration Guide -=================================== - -This guide addresses specific aspects of Sahara configuration that pertain to -advanced usage. It is divided into sections about various features that can be -utilized, and their related configurations. - -.. _custom_network_topologies: - -Custom network topologies -------------------------- - -Sahara accesses instances at several stages of cluster spawning through -SSH and HTTP. Floating IPs and network namespaces will be automatically -used for access when present. When floating IPs are not assigned to -instances and namespaces are not being used, sahara will need an -alternative method to reach them. - -The ``proxy_command`` parameter of the configuration file can be used to -give sahara a command to access instances. This command is run on the -sahara host and must open a netcat socket to the instance destination -port. The ``{host}`` and ``{port}`` keywords should be used to describe the -destination, they will be substituted at runtime. Other keywords that -can be used are: ``{tenant_id}``, ``{network_id}`` and ``{router_id}``. - -Additionally, if ``proxy_command_use_internal_ip`` is set to ``True``, -then the internal IP will be substituted for ``{host}`` in the command. -Otherwise (if ``False``, by default) the management IP will be used: this -corresponds to floating IP if present in the relevant node group, else the -internal IP. The option is ignored if ``proxy_command`` is not also set. - -For example, the following parameter in the sahara configuration file -would be used if instances are accessed through a relay machine: - -.. code-block:: - - [DEFAULT] - proxy_command='ssh relay-machine-{tenant_id} nc {host} {port}' - -Whereas the following shows an example of accessing instances though -a custom network namespace: - -.. code-block:: - - [DEFAULT] - proxy_command='ip netns exec ns_for_{network_id} nc {host} {port}' - -.. _dns_hostname_resolution: - -DNS Hostname Resolution ------------------------ - -Sahara can resolve hostnames of cluster instances by using DNS. For this Sahara -uses Designate. With this feature, for each instance of the cluster Sahara will -create two ``A`` records (for internal and external ips) under one hostname -and one ``PTR`` record. Also all links in the Sahara dashboard will be -displayed as hostnames instead of just ip addresses. - -You should configure DNS server with Designate. Designate service should be -properly installed and registered in Keystone catalog. 
The detailed -instructions about Designate configuration can be found here: -:designate-doc:`Designate manual installation ` -and here: :neutron-doc:`Configuring OpenStack Networking with Designate -`. -Also if you use devstack you can just enable the -:designate-doc:`Designate devstack plugin `. - -When Designate is configured you should create domain(s) for hostname -resolution. This can be done by using the Designate dashboard or by CLI. Also -you have to create ``in-addr.arpa.`` domain for reverse hostname resolution -because some plugins (e.g. ``HDP``) determine hostname by ip. - -Sahara also should be properly configured. In ``sahara.conf`` you must specify -two config properties: - -.. code-block:: - - [DEFAULT] - # Use Designate for internal and external hostnames resolution: - use_designate=true - # IP addresses of Designate nameservers: - nameservers=1.1.1.1,2.2.2.2 - -An OpenStack operator should properly configure the network. It must enable -DHCP and specify DNS server ip addresses (e.g. 1.1.1.1 and 2.2.2.2) in -``DNS Name Servers`` field in the ``Subnet Details``. If the subnet already -exists and changing it or creating new one is impossible then Sahara will -manually change ``/etc/resolv.conf`` file on every instance of the cluster (if -``nameservers`` list has been specified in ``sahara.conf``). In this case, -though, Sahara cannot guarantee that these changes will not be overwritten by -DHCP or other services of the existing network. Sahara has a health check for -track this situation (and if it occurs the health status will be red). - -In order to resolve hostnames from your local machine you should properly -change your ``/etc/resolv.conf`` file by adding appropriate ip addresses of -DNS servers (e.g. 1.1.1.1 and 2.2.2.2). Also the VMs with DNS servers should -be available from your local machine. - -.. _data_locality_configuration: - -Data-locality configuration ---------------------------- - -Hadoop provides the data-locality feature to enable task tracker and -data nodes the capability of spawning on the same rack, Compute node, -or virtual machine. Sahara exposes this functionality to the user -through a few configuration parameters and user defined topology files. - -To enable data-locality, set the ``enable_data_locality`` parameter to -``true`` in the sahara configuration file - -.. code-block:: - - [DEFAULT] - enable_data_locality=true - -With data locality enabled, you must now specify the topology files -for the Compute and Object Storage services. These files are -specified in the sahara configuration file as follows: - -.. code-block:: - - [DEFAULT] - compute_topology_file=/etc/sahara/compute.topology - swift_topology_file=/etc/sahara/swift.topology - -The ``compute_topology_file`` should contain mappings between Compute -nodes and racks in the following format: - -.. code-block:: - - compute1 /rack1 - compute2 /rack2 - compute3 /rack2 - -Note that the Compute node names must be exactly the same as configured in -OpenStack (``host`` column in admin list for instances). - -The ``swift_topology_file`` should contain mappings between Object Storage -nodes and racks in the following format: - -.. code-block:: - - node1 /rack1 - node2 /rack2 - node3 /rack2 - -Note that the Object Storage node names must be exactly the same as -configured in the object ring. Also, you should ensure that instances -with the task tracker process have direct access to the Object Storage -nodes. 
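For illustration only, a topology file in the format above can be bootstrapped from the hypervisor list; the sketch below assumes a single placeholder rack (``/rack1``) and that admin credentials are loaded in the shell, and you would substitute your real host-to-rack mapping before using it:

.. code-block:: bash

   # List hypervisor hostnames and map each one to a placeholder rack.
   # Replace the awk expression with your actual rack layout before use.
   openstack hypervisor list -f value -c "Hypervisor Hostname" \
       | awk '{print $0 " /rack1"}' \
       | sudo tee /etc/sahara/compute.topology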
- -Hadoop versions after 1.2.0 support four-layer topology (for more detail -please see `HADOOP-8468 JIRA issue`_). To enable this feature set the -``enable_hypervisor_awareness`` parameter to ``true`` in the configuration -file. In this case sahara will add the Compute node ID as a second level of -topology for virtual machines. - -.. _HADOOP-8468 JIRA issue: https://issues.apache.org/jira/browse/HADOOP-8468 - -.. _distributed-mode-configuration: - -Distributed mode configuration ------------------------------- - -Sahara can be configured to run in a distributed mode that creates a -separation between the API and engine processes. This allows the API -process to remain relatively free to handle requests while offloading -intensive tasks to the engine processes. - -The ``sahara-api`` application works as a front-end and serves user -requests. It offloads 'heavy' tasks to the ``sahara-engine`` process -via RPC mechanisms. While the ``sahara-engine`` process could be loaded -with tasks, ``sahara-api`` stays free and hence may quickly respond to -user queries. - -If sahara runs on several hosts, the API requests could be -balanced between several ``sahara-api`` hosts using a load balancer. -It is not required to balance load between different ``sahara-engine`` -hosts as this will be automatically done via the message broker. - -If a single host becomes unavailable, other hosts will continue -serving user requests. Hence, a better scalability is achieved and some -fault tolerance as well. Note that distributed mode is not a true -high availability. While the failure of a single host does not -affect the work of the others, all of the operations running on -the failed host will stop. For example, if a cluster scaling is -interrupted, the cluster will be stuck in a half-scaled state. The -cluster might continue working, but it will be impossible to scale it -further or run jobs on it via EDP. - -To run sahara in distributed mode pick several hosts on which -you want to run sahara services and follow these steps: - -* On each host install and configure sahara using the - `installation guide <../install/installation-guide.html>`_ - except: - - * Do not run ``sahara-db-manage`` or launch sahara with ``sahara-all`` - * Ensure that each configuration file provides a database connection - string to a single database for all hosts. - -* Run ``sahara-db-manage`` as described in the installation guide, - but only on a single (arbitrarily picked) host. - -* The ``sahara-api`` and ``sahara-engine`` processes use oslo.messaging to - communicate with each other. You will need to configure it properly on - each host (see below). - -* Run ``sahara-api`` and ``sahara-engine`` on the desired hosts. You may - run both processes on the same or separate hosts as long as they are - configured to use the same message broker and database. - -To configure ``oslo.messaging``, first you need to choose a message -broker driver. The recommended driver is ``RabbitMQ``. For the ``RabbitMQ`` -drivers please see the :ref:`notification-configuration` documentation -for an explanation of common configuration options; the entire list of -configuration options is found in the -:oslo.messaging-doc:`oslo_messaging_rabbit documentation -`. - -These options will also be present in the generated sample configuration -file. For instructions on creating the configuration file please see the -:doc:`configuration-guide`. - -.. 
_distributed-periodic-tasks: - -Distributed periodic tasks configuration ----------------------------------------- - -If sahara is configured to run in distributed mode (see -:ref:`distributed-mode-configuration`), periodic tasks can also be launched in -distributed mode. In this case tasks will be split across all ``sahara-engine`` -processes. This will reduce overall load. - -Distributed periodic tasks are based on Hash Ring implementation and the Tooz -library that provides group membership support for a set of backends. In order -to use periodic tasks distribution, the following steps are required: - -* One of the :tooz-doc:`supported backends ` - should be configured and started. -* Backend URL should be set in the sahara configuration file with the - ``periodic_coordinator_backend_url`` parameter. For example, if the - ZooKeeper backend is being used: - - .. code-block:: - - [DEFAULT] - periodic_coordinator_backend_url=kazoo://IP:PORT - -* Tooz extras should be installed. When using Zookeeper as coordination - backend, ``kazoo`` library should be installed. It can be done with pip: - - .. code-block:: - - pip install tooz[zookeeper] - -* Periodic tasks can be performed in parallel. Number of threads to run - periodic tasks on a single engine can be set with - ``periodic_workers_number`` parameter (only 1 thread will be launched by - default). Example: - - .. code-block:: - - [DEFAULT] - periodic_workers_number=2 - -* ``coordinator_heartbeat_interval`` can be set to change the interval between - heartbeat execution (1 second by default). Heartbeats are needed to make - sure that connection to the coordination backend is active. Example: - - .. code-block:: - - [DEFAULT] - coordinator_heartbeat_interval=2 - -* ``hash_ring_replicas_count`` can be set to change the number of replicas for - each engine on a Hash Ring. Each replica is a point on a Hash Ring that - belongs to a particular engine. A larger number of replicas leads to better - task distribution across the set of engines. (40 by default). Example: - - .. code-block:: - - [DEFAULT] - hash_ring_replicas_count=100 - -.. _external_key_manager_usage: - -External key manager usage --------------------------- - -Sahara generates and stores several passwords during the course of operation. -To harden sahara's usage of passwords it can be instructed to use an -external key manager for storage and retrieval of these secrets. To enable -this feature there must first be an OpenStack Key Manager service deployed -within the stack. - -With a Key Manager service deployed on the stack, sahara must be configured -to enable the external storage of secrets. Sahara uses the -:castellan-doc:`castellan <>` library -to interface with the OpenStack Key Manager service. This library provides -configurable access to a key manager. To configure sahara to use barbican as -the key manager, edit the sahara configuration file as follows: - -.. code-block:: - - [DEFAULT] - use_barbican_key_manager=true - -Enabling the ``use_barbican_key_manager`` option will configure castellan -to use barbican as its key management implementation. By default it will -attempt to find barbican in the Identity service's service catalog. - -For added control of the barbican server location, optional configuration -values may be added to specify the URL for the barbican API server. - -.. 
code-block:: - - [castellan] - barbican_api_endpoint=http://{barbican controller IP:PORT}/ - barbican_api_version=v1 - -The specific values for the barbican endpoint will be dictated by the -IP address of the controller for your installation. - -With all of these values configured and the Key Manager service deployed, -sahara will begin storing its secrets in the external manager. - -Indirect instance access through proxy nodes --------------------------------------------- - -.. warning:: - The indirect VMs access feature is in alpha state. We do not - recommend using it in a production environment. - -Sahara needs to access instances through SSH during cluster setup. This -access can be obtained a number of different ways (see -:ref:`floating_ip_management`,:ref:`custom_network_topologies`).Sometimes -it is impossible to provide access to all nodes (because of limited -numbers of floating IPs or security policies). In these cases access can -be gained using other nodes of the cluster as proxy gateways. To enable -this set ``is_proxy_gateway=true`` for the node group you want to use as -proxy. Sahara will communicate with all other cluster instances through -the instances of this node group. - -Note, if ``use_floating_ips=true`` and the cluster contains a node group with -``is_proxy_gateway=true``, the requirement to have ``floating_ip_pool`` -specified is applied only to the proxy node group. Other instances will be -accessed through proxy instances using the standard private network. - -Note, the Cloudera Hadoop plugin doesn't support access to Cloudera manager -through a proxy node. This means that for CDH clusters only nodes with -the Cloudera manager can be designated as proxy gateway nodes. - -Multi region deployment ------------------------ - -Sahara supports multi region deployment. To enable this option each -instance of sahara should have the ``os_region_name=`` -parameter set in the configuration file. The following example demonstrates -configuring sahara to use the ``RegionOne`` region: - -.. code-block:: - - [DEFAULT] - os_region_name=RegionOne - -.. _non-root-users: - -Non-root users --------------- - -In cases where a proxy command is being used to access cluster instances -(for example, when using namespaces or when specifying a custom proxy -command), rootwrap functionality is provided to allow users other than -``root`` access to the needed operating system facilities. To use rootwrap -the following configuration parameter is required to be set: - -.. code-block:: - - [DEFAULT] - use_rootwrap=true - -Assuming you elect to leverage the default rootwrap command -(``sahara-rootwrap``), you will need to perform the following additional setup -steps: - -* Copy the provided sudoers configuration file from the local project file - ``etc/sudoers.d/sahara-rootwrap`` to the system specific location, usually - ``/etc/sudoers.d``. This file is setup to allow a user named ``sahara`` - access to the rootwrap script. It contains the following: - -.. code-block:: - - sahara ALL = (root) NOPASSWD: /usr/bin/sahara-rootwrap /etc/sahara/rootwrap.conf * - -When using devstack to deploy sahara, please pay attention that you need to -change user in script from ``sahara`` to ``stack``. - -* Copy the provided rootwrap configuration file from the local project file - ``etc/sahara/rootwrap.conf`` to the system specific location, usually - ``/etc/sahara``. This file contains the default configuration for rootwrap. 
- -* Copy the provided rootwrap filters file from the local project file - ``etc/sahara/rootwrap.d/sahara.filters`` to the location specified in the - rootwrap configuration file, usually ``/etc/sahara/rootwrap.d``. This file - contains the filters that will allow the ``sahara`` user to access the - ``ip netns exec``, ``nc``, and ``kill`` commands through the rootwrap - (depending on ``proxy_command`` you may need to set additional filters). - It should look similar to the followings: - -.. code-block:: - - [Filters] - ip: IpNetnsExecFilter, ip, root - nc: CommandFilter, nc, root - kill: CommandFilter, kill, root - -If you wish to use a rootwrap command other than ``sahara-rootwrap`` you can -set the following parameter in your sahara configuration file: - -.. code-block:: - - [DEFAULT] - rootwrap_command='sudo sahara-rootwrap /etc/sahara/rootwrap.conf' - -For more information on rootwrap please refer to the -`official Rootwrap documentation `_ - -Object Storage access using proxy users ---------------------------------------- - -To improve security for clusters accessing files in Object Storage, -sahara can be configured to use proxy users and delegated trusts for -access. This behavior has been implemented to reduce the need for -storing and distributing user credentials. - -The use of proxy users involves creating an Identity domain that will be -designated as the home for these users. Proxy users will be -created on demand by sahara and will only exist during a job execution -which requires Object Storage access. The domain created for the -proxy users must be backed by a driver that allows sahara's admin user to -create new user accounts. This new domain should contain no roles, to limit -the potential access of a proxy user. - -Once the domain has been created, sahara must be configured to use it by -adding the domain name and any potential delegated roles that must be used -for Object Storage access to the sahara configuration file. With the -domain enabled in sahara, users will no longer be required to enter -credentials for their data sources and job binaries referenced in -Object Storage. - -Detailed instructions -^^^^^^^^^^^^^^^^^^^^^ - -First a domain must be created in the Identity service to hold proxy -users created by sahara. This domain must have an identity backend driver -that allows for sahara to create new users. The default SQL engine is -sufficient but if your keystone identity is backed by LDAP or similar -then domain specific configurations should be used to ensure sahara's -access. Please see the :keystone-doc:`Keystone documentation -` for more information. - -With the domain created, sahara's configuration file should be updated to -include the new domain name and any potential roles that will be needed. For -this example let's assume that the name of the proxy domain is -``sahara_proxy`` and the roles needed by proxy users will be ``member`` and -``SwiftUser``. - -.. code-block:: - - [DEFAULT] - use_domain_for_proxy_users=true - proxy_user_domain_name=sahara_proxy - proxy_user_role_names=member,SwiftUser - -A note on the use of roles. In the context of the proxy user, any roles -specified here are roles intended to be delegated to the proxy user from the -user with access to Object Storage. More specifically, any roles that -are required for Object Storage access by the project owning the object -store must be delegated to the proxy user for authentication to be -successful. 
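As a minimal sketch (reusing the ``sahara_proxy`` domain name and ``SwiftUser`` role from the example above, and assuming admin credentials are loaded in the shell), the domain and role could be created with the OpenStack client:

.. code-block:: bash

   # Create the domain that will hold sahara's short-lived proxy users.
   # It must be backed by a driver that lets sahara's admin user create accounts.
   openstack domain create sahara_proxy --description "Sahara proxy users"

   # Create the role that will be delegated to proxy users for Object Storage access.
   openstack role create SwiftUser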
- -Finally, the stack administrator must ensure that images registered with -sahara have the latest version of the Hadoop swift filesystem plugin -installed. The sources for this plugin can be found in the -`sahara extra repository`_. For more information on images or swift -integration see the sahara documentation sections -:ref:`building-guest-images-label` and :ref:`swift-integration-label`. - -.. _Sahara extra repository: https://opendev.org/openstack/sahara-extra - -.. _volume_instance_locality_configuration: - -Volume instance locality configuration --------------------------------------- - -The Block Storage service provides the ability to define volume instance -locality to ensure that instance volumes are created on the same host -as the hypervisor. The ``InstanceLocalityFilter`` provides the mechanism -for the selection of a storage provider located on the same physical -host as an instance. - -To enable this functionality for instances of a specific node group, the -``volume_local_to_instance`` field in the node group template should be -set to ``true`` and some extra configurations are needed: - -* The cinder-volume service should be launched on every physical host and at - least one physical host should run both cinder-scheduler and - cinder-volume services. -* ``InstanceLocalityFilter`` should be added to the list of default filters - (``scheduler_default_filters`` in cinder) for the Block Storage - configuration. -* The Extended Server Attributes extension needs to be active in the Compute - service (this is true by default in nova), so that the - ``OS-EXT-SRV-ATTR:host`` property is returned when requesting instance - info. -* The user making the call needs to have sufficient rights for the property to - be returned by the Compute service. - This can be done by: - - * by changing nova's ``policy.yaml`` to allow the user access to the - ``extended_server_attributes`` option. - * by designating an account with privileged rights in the cinder - configuration: - - .. code-block:: - - os_privileged_user_name = - os_privileged_user_password = - os_privileged_user_tenant = - -It should be noted that in a situation when the host has no space for volume -creation, the created volume will have an ``Error`` state and can not be used. - -Autoconfiguration for templates -------------------------------- - -:doc:`configs-recommendations` - - -NTP service configuration -------------------------- - -By default sahara will enable the NTP service on all cluster instances if the -NTP package is included in the image (the sahara disk image builder will -include NTP in all images it generates). The default NTP server will be -``pool.ntp.org``; this can be overridden using the ``default_ntp_server`` -setting in the ``DEFAULT`` section of the sahara configuration file. - -If you are creating cluster templates using the sahara UI and would like to -specify a different NTP server for a particular cluster template, use the ``URL -of NTP server`` setting in the ``General Parameters`` section when you create -the template. If you would like to disable NTP for a particular cluster -template, deselect the ``Enable NTP service`` checkbox in the ``General -Parameters`` section when you create the template. - -If you are creating clusters using the sahara CLI, you can specify another NTP -server or disable NTP service using the examples below. - -If you want to enable configuring the NTP service, you should specify the -following configs for the cluster: - -.. 
code-block:: - - { - "cluster_configs": { - "general": { - "URL of NTP server": "your_server.net" - } - } - } - -If you want to disable configuring NTP service, you should specify following -configs for the cluster: - -.. code-block:: - - { - "cluster_configs": { - "general": { - "Enable NTP service": false - } - } - } - -CORS (Cross Origin Resource Sharing) Configuration --------------------------------------------------- - -Sahara provides direct API access to user-agents (browsers) via the HTTP -CORS protocol. Detailed documentation, as well as troubleshooting examples, -may be found in the :oslo.middleware-doc:`documentation of the oslo.db -cross-project features `. - -To get started quickly, use the example configuration block below, replacing -the :code:`allowed origin` field with the host(s) from which your API expects -access. - -.. code-block:: - - [cors] - allowed_origin=https://we.example.com:443 - max_age=3600 - allow_credentials=true - - [cors.additional_domain_1] - allowed_origin=https://additional_domain_1.example.com:443 - - [cors.additional_domain_2] - allowed_origin=https://additional_domain_2.example.com:443 - -For more information on Cross Origin Resource Sharing, please review the `W3C -CORS specification`_. - -.. _W3C CORS specification: http://www.w3.org/TR/cors/ - -Cleanup time for incomplete clusters ------------------------------------- - -Sahara provides maximal time (in hours) for clusters allowed to be in states -other than "Active", "Deleting" or "Error". If a cluster is not in "Active", -"Deleting" or "Error" state and last update of it was longer than -``cleanup_time_for_incomplete_clusters`` hours ago then it will be deleted -automatically. You can enable this feature by adding appropriate config -property in the ``DEFAULT`` section (by default it set up to ``0`` value which -means that automatic clean up is disabled). For example, if you want cluster to -be deleted after 3 hours if it didn't leave "Starting" state then you should -specify: - -.. code-block:: - - [DEFAULT] - cleanup_time_for_incomplete_clusters = 3 - -Security Group Rules Configuration ----------------------------------- - -When auto_security_group is used, the amount of created security group rules -may be bigger than the default values configured in ``neutron.conf``. Then the -default limit should be raised up to some bigger value which is proportional to -the number of cluster node groups. You can change it in ``neutron.conf`` file: - -.. code-block:: - - [quotas] - quota_security_group = 1000 - quota_security_group_rule = 10000 - -Or you can execute openstack CLI command: - -.. code-block:: - - openstack quota set --secgroups 1000 --secgroup-rules 10000 $PROJECT_ID diff --git a/doc/source/admin/configs-recommendations.rst b/doc/source/admin/configs-recommendations.rst deleted file mode 100644 index fb647610a1..0000000000 --- a/doc/source/admin/configs-recommendations.rst +++ /dev/null @@ -1,44 +0,0 @@ -:orphan: - -Autoconfiguring templates -========================= - -During the Liberty development cycle sahara implemented a tool that recommends -and applies configuration values for cluster templates and node group -templates. These recommendations are based on the number of specific instances -and on flavors of the cluster node groups. Currently the following plugins -support this feature: - - * CDH; - * Ambari; - * Spark; - * the Vanilla Apache Hadoop plugin. - -By default this feature is enabled for all cluster templates and node group -templates. 
If you want to disable this feature for a particular cluster or -node group template you should set the ``use_autoconfig`` field to ``false``. - -.. NOTE - Also, if you manually set configs from the list below, the recommended - configs will not be applied. - -The following describes the settings for which sahara can recommend -autoconfiguration: - -The Cloudera, Spark and Vanilla Apache Hadoop plugin support configuring -``dfs.replication`` (``dfs_replication`` for Cloudera plugin) which is -calculated as a minimum from the amount of ``datanode`` (``HDFS_DATANODE`` for -Cloudera plugin) instances in the cluster and the default value for -``dfs.replication``. - -The Vanilla Apache Hadoop plugin and Cloudera plugin support autoconfiguration -of basic YARN and MapReduce configs. These autoconfigurations are based on the -following documentation: -http://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.0.9.1/bk_installing_manually_book/content/rpm-chap1-11.html - -The Ambari plugin has its own strategies on configuration recommendations. You -can choose one of ``ALWAYS_APPLY``, ``NEVER_APPLY``, and -``ONLY_STACK_DEFAULTS_APPLY``. By default the Ambari plugin follows the -``NEVER_APPLY`` strategy. You can get more information about strategies in -Ambari's official documentation: -https://cwiki.apache.org/confluence/display/AMBARI/Blueprints#Blueprints-ClusterCreationTemplateStructure diff --git a/doc/source/admin/configuration-guide.rst b/doc/source/admin/configuration-guide.rst deleted file mode 100644 index a0400f58c3..0000000000 --- a/doc/source/admin/configuration-guide.rst +++ /dev/null @@ -1,211 +0,0 @@ -Sahara Configuration Guide -========================== - -This guide covers the steps for a basic configuration of sahara. -It will help you to configure the service in the most simple -manner. - -Basic configuration -------------------- - -A full configuration file showing all possible configuration options and their -defaults can be generated with the following command: - -.. sourcecode:: cfg - - $ tox -e genconfig - -Running this command will create a file named ``sahara.conf.sample`` -in the ``etc/sahara`` directory of the project. - -After creating a configuration file by either generating one or starting with -an empty file, edit the ``connection`` parameter in the -``[database]`` section. The URL provided here should point to an empty -database. For example, the connection string for a MySQL database will be: - -.. sourcecode:: cfg - - connection=mysql+pymsql://username:password@host:port/database - -Next you will configure the Identity service parameters in the -``[keystone_authtoken]`` section. The ``www_authenticate_uri`` parameter -should point to the public Identity API endpoint. The ``auth_url`` -should point to the internal Identity API endpoint. For example: - -.. sourcecode:: cfg - - www_authenticate_uri=http://127.0.0.1:5000/v3/ - auth_url=http://127.0.0.1:5000/v3/ - -Specify the ``username``, ``user_domain_name``, ``password``, ``project_name``. -and ``project_domain_name``. These parameters must specify an Identity user who -has the ``admin`` role in the given project. These credentials allow sahara to -authenticate and authorize its users. - -Next you will configure the default Networking service. If using -neutron for networking the following parameter should be set -in the ``[DEFAULT]`` section: - -With these parameters set, sahara is ready to run. - -By default the sahara's log level is set to INFO. 
If you wish to increase -the logging levels for troubleshooting, set ``debug`` to ``true`` in the -``[DEFAULT]`` section of the configuration file. - -Networking configuration ------------------------- - -By default sahara is configured to use the neutron. Additionally, if the -cluster supports network namespaces the ``use_namespaces`` property can -be used to enable their usage. - -.. sourcecode:: cfg - - [DEFAULT] - use_namespaces=True - -.. note:: - If a user other than ``root`` will be running the Sahara server - instance and namespaces are used, some additional configuration is - required, please see :ref:`non-root-users` for more information. - -.. _floating_ip_management: - -Floating IP management -++++++++++++++++++++++ - -During cluster setup sahara must access instances through a secure -shell (SSH). To establish this connection it may use either the fixed -or floating IP address of an instance. By default sahara is configured -to use floating IP addresses for access. This is controlled by the -``use_floating_ips`` configuration parameter. With this setup the user -has two options for ensuring that the instances in the node groups -templates that requires floating IPs gain a floating IP address: - -* The user may specify a floating IP address pool for each node - group that requires floating IPs directly. - -From Newton changes were made to allow the coexistence of clusters using -floating IPs and clusters using fixed IPs. If ``use_floating_ips`` is -True it means that the floating IPs can be used by Sahara to spawn clusters. -But, differently from previous versions, this does not mean that all -instances in the cluster must have floating IPs and that all clusters -must use floating IPs. It is possible in a single Sahara deploy to have -clusters setup using fixed IPs, clusters using floating IPs and cluster that -use both. - -If not using floating IP addresses (``use_floating_ips=False``) sahara -will use fixed IP addresses for instance management. When using neutron -for the Networking service the user will be able to choose the -fixed IP network for all instances in a cluster. - -.. _notification-configuration: - -Notifications configuration ---------------------------- - -Sahara can be configured to send notifications to the OpenStack -Telemetry module. To enable this functionality the following parameter -``enable`` should be set in the ``[oslo_messaging_notifications]`` section -of the configuration file: - -.. sourcecode:: cfg - - [oslo_messaging_notifications] - enable = true - -And the following parameter ``driver`` should be set in the -``[oslo_messaging_notifications]`` section of the configuration file: - -.. sourcecode:: cfg - - [oslo_messaging_notifications] - driver = messaging - -By default sahara is configured to use RabbitMQ as its message broker. - -If you are using RabbitMQ as the message broker, then you should set the -following parameter in the ``[DEFAULT]`` section: - -.. sourcecode:: cfg - - rpc_backend = rabbit - -You may also need to specify the connection parameters for your -RabbitMQ installation. The following example shows the default -values in the ``[oslo_messaging_rabbit]`` section which may need -adjustment: - -.. sourcecode:: cfg - - rabbit_host=localhost - rabbit_port=5672 - rabbit_hosts=$rabbit_host:$rabbit_port - rabbit_userid=guest - rabbit_password=guest - rabbit_virtual_host=/ -.. - -.. 
_orchestration-configuration: - -Orchestration configuration ---------------------------- - -By default sahara is configured to use the heat engine for instance -creation. The heat engine uses the OpenStack Orchestration service to -provision instances. This engine makes calls directly to the services required -for instance provisioning. - -.. _policy-configuration-label: - -Policy configuration --------------------- - -.. warning:: - - JSON formatted policy file is deprecated since Sahara 15.0.0 (Xena). - This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing - JSON-formatted policy file to YAML in a backward-compatible way. - -.. __: https://docs.openstack.org/oslo.policy/victoria/cli/oslopolicy-convert-json-to-yaml.html - -Sahara's public API calls may be restricted to certain sets of users by -using a policy configuration file. The location of the policy file(s) -is controlled by the ``policy_file`` and ``policy_dirs`` parameters -in the ``[oslo_policy]`` section. By default sahara will search for -a ``policy.yaml`` file in the same directory as the ``sahara.conf`` -configuration file. - -Examples -++++++++ - -Example 1. Allow all method to all users (default policy). - -.. sourcecode:: json - - { - "default": "" - } - - -Example 2. Disallow image registry manipulations to non-admin users. - -.. sourcecode:: json - - { - "default": "", - - "data-processing:images:register": "role:admin", - "data-processing:images:unregister": "role:admin", - "data-processing:images:add_tags": "role:admin", - "data-processing:images:remove_tags": "role:admin" - } - -API configuration ------------------ - -Sahara uses the ``api-paste.ini`` file to configure the data processing API -service. For middleware injection sahara uses pastedeploy library. The location -of the api-paste file is controlled by the ``api_paste_config`` parameter in -the ``[default]`` section. By default sahara will search for a -``api-paste.ini`` file in the same directory as the configuration file. diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst deleted file mode 100644 index 150ffe8972..0000000000 --- a/doc/source/admin/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -====================== -Operator Documentation -====================== - -.. toctree:: - :maxdepth: 2 - - configuration-guide - advanced-configuration-guide - upgrade-guide diff --git a/doc/source/admin/upgrade-guide.rst b/doc/source/admin/upgrade-guide.rst deleted file mode 100644 index 65fcd0f207..0000000000 --- a/doc/source/admin/upgrade-guide.rst +++ /dev/null @@ -1,155 +0,0 @@ -Sahara Upgrade Guide -==================== - -This page contains details about upgrading sahara between releases such as -configuration file updates, database migrations, and architectural changes. - -Icehouse -> Juno ----------------- - -Main binary renamed to sahara-all -+++++++++++++++++++++++++++++++++ - -The All-In-One sahara binary has been renamed from ``sahara-api`` -to ``sahara-all``. The new name should be used in all cases where the -All-In-One sahara is desired. - -Authentication middleware changes -+++++++++++++++++++++++++++++++++ - -The custom auth_token middleware has been deprecated in favor of the keystone -middleware. This change requires an update to the sahara configuration file. 
To -update your configuration file you should replace the following parameters from -the ``[DEFAULT]`` section with the new parameters in the -``[keystone_authtoken]`` section: - -+-----------------------+--------------------+ -| Old parameter name | New parameter name | -+=======================+====================+ -| os_admin_username | admin_user | -+-----------------------+--------------------+ -| os_admin_password | admin_password | -+-----------------------+--------------------+ -| os_admin_tenant_name | admin_tenant_name | -+-----------------------+--------------------+ - -Additionally, the parameters ``os_auth_protocol``, ``os_auth_host``, -and ``os_auth_port`` have been combined to create the ``auth_uri`` -and ``identity_uri`` parameters. These new parameters should be -full URIs to the keystone public and admin endpoints, respectively. - -For more information about these configuration parameters please see -the :doc:`../admin/configuration-guide`. - -Database package changes -++++++++++++++++++++++++ - -The oslo based code from sahara.openstack.common.db has been replaced by -the usage of the oslo.db package. This change does not require any -update to sahara's configuration file. - -Additionally, the usage of SQLite databases has been deprecated. Please use -MySQL or PostgreSQL databases for sahara. SQLite has been deprecated because it -does not, and is not going to, support the ``ALTER COLUMN`` and ``DROP COLUMN`` -commands required for migrations between versions. For more information please -see http://www.sqlite.org/omitted.html - -Sahara integration into OpenStack Dashboard -+++++++++++++++++++++++++++++++++++++++++++ - -The sahara dashboard package has been deprecated in the Juno release. The -functionality of the dashboard has been fully incorporated into the -OpenStack Dashboard. The sahara interface is available under the -"Project" -> "Data Processing" tab. - -The Data processing service endpoints must be registered in the Identity -service catalog for the Dashboard to properly recognize and display -those user interface components. For more details on this process please see -:ref:`registering Sahara in installation guide `. - -The -`sahara-dashboard `_ -project is now used solely to host sahara user interface integration tests. - -Virtual machine user name changes -+++++++++++++++++++++++++++++++++ - -The HEAT infrastructure engine has been updated to use the same rules for -instance user names as the direct engine. In previous releases the user -name for instances created by sahara using HEAT was always 'ec2-user'. As -of Juno, the user name is taken from the image registry as described in -the :doc:`../user/registering-image` document. - -This change breaks backward compatibility for clusters created using the HEAT -infrastructure engine prior to the Juno release. Clusters will continue to -operate, but we do not recommended using the scaling operations with them. - -Anti affinity implementation changed -++++++++++++++++++++++++++++++++++++ - -Starting with the Juno release the anti affinity feature is implemented -using server groups. From the user perspective there will be no -noticeable changes with this feature. Internally this change has -introduced the following behavior: - -1) Server group objects will be created for any clusters with anti affinity - enabled. -2) Affected instances on the same host will not be allowed even if they - do not have common processes. Prior to Juno, instances with differing - processes were allowed on the same host. 
The new implementation - guarantees that all affected instances will be on different hosts - regardless of their processes. - -The new anti affinity implementation will only be applied for new clusters. -Clusters created with previous versions will continue to operate under -the older implementation, this applies to scaling operations on these -clusters as well. - -Juno -> Kilo ------------- - -Sahara requires policy configuration -++++++++++++++++++++++++++++++++++++ - -Sahara now requires a policy configuration file. The ``policy.json`` file -should be placed in the same directory as the sahara configuration file or -specified using the ``policy_file`` parameter. For more details about the -policy file please see the -:ref:`policy section in the configuration guide `. - -Kilo -> Liberty ---------------- - -Direct engine deprecation -+++++++++++++++++++++++++ - -In the Liberty release the direct infrastructure engine has been deprecated and -the heat infrastructure engine is now default. This means, that it is -preferable to use heat engine instead now. In the Liberty release you can -continue to operate clusters with the direct engine (create, delete, scale). -Using heat engine only the delete operation is available on clusters that were -created by the direct engine. After the Liberty release the direct engine will -be removed, this means that you will only be able to delete clusters created -with the direct engine. - -Policy namespace changed (policy.json) -++++++++++++++++++++++++++++++++++++++ - -The "data-processing:" namespace has been added to the beginning of the all -Sahara's policy based actions, so, you need to update the policy.json file by -prepending all actions with "data-processing:". - -Liberty -> Mitaka ------------------ - -Direct engine is removed. - -Mitaka -> Newton ----------------- - -Sahara CLI command is deprecated, please use OpenStack Client. - -.. note:: - - Since Mitaka release sahara actively uses release notes so you can see all - required upgrade actions here: https://docs.openstack.org/releasenotes/sahara/ diff --git a/doc/source/cli/index.rst b/doc/source/cli/index.rst deleted file mode 100644 index 6c8fcf7072..0000000000 --- a/doc/source/cli/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -======================== -Sahara CLI Documentation -======================== - -In this section you will find information on Sahara’s command line -interface. - -.. toctree:: - :maxdepth: 1 - - sahara-status diff --git a/doc/source/cli/sahara-status.rst b/doc/source/cli/sahara-status.rst deleted file mode 100644 index a1ba218665..0000000000 --- a/doc/source/cli/sahara-status.rst +++ /dev/null @@ -1,83 +0,0 @@ -============= -sahara-status -============= - ----------------------------------------- -CLI interface for Sahara status commands ----------------------------------------- - -Synopsis -======== - -:: - - sahara-status [] - -Description -=========== - -:program:`sahara-status` is a tool that provides routines for checking the -status of a Sahara deployment. - -Options -======= - -The standard pattern for executing a :program:`sahara-status` command is:: - - sahara-status [] - -Run without arguments to see a list of available command categories:: - - sahara-status - -Categories are: - -* ``upgrade`` - -Detailed descriptions are below: - -You can also run with a category argument such as ``upgrade`` to see a list of -all commands in that category:: - - sahara-status upgrade - -These sections describe the available categories and arguments for -:program:`sahara-status`. 
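
For quick orientation, a typical session simply chains these invocations; this is only a sketch built from the commands already listed above, and the ``upgrade check`` command it ends with is described in detail in the next section::

    $ sahara-status
    $ sahara-status upgrade
    $ sahara-status upgrade check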
- -Upgrade -~~~~~~~ - -.. _sahara-status-checks: - -``sahara-status upgrade check`` - Performs a release-specific readiness check before restarting services with - new code. For example, missing or changed configuration options, - incompatible object states, or other conditions that could lead to - failures while upgrading. - - **Return Codes** - - .. list-table:: - :widths: 20 80 - :header-rows: 1 - - * - Return code - - Description - * - 0 - - All upgrade readiness checks passed successfully and there is nothing - to do. - * - 1 - - At least one check encountered an issue and requires further - investigation. This is considered a warning but the upgrade may be OK. - * - 2 - - There was an upgrade status check failure that needs to be - investigated. This should be considered something that stops an - upgrade. - * - 255 - - An unexpected error occurred. - - **History of Checks** - - **10.0.0 (Stein)** - - * Sample check to be filled in with checks as they are added in Stein. diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 86ded5ac84..0000000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,282 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2013 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('../../sahara')) -sys.path.append(os.path.abspath('..')) -sys.path.append(os.path.abspath('../bin')) - -# -- General configuration ----------------------------------------------------- - -on_rtd = os.environ.get('READTHEDOCS', None) == 'True' - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', - 'sphinx.ext.viewcode', 'sphinxcontrib.httpdomain', 'oslo_config.sphinxconfiggen', - 'oslo_config.sphinxext', 'openstackdocstheme'] - -# openstackdocstheme options -repository_name = 'openstack/sahara' -use_storyboard = True - -config_generator_config_file = 'config-generator.conf' -config_sample_basename = 'sahara' - -openstack_projects = [ - 'barbican', - 'castellan', - 'designate', - 'devstack', - 'ironic', - 'keystone', - 'keystoneauth', - 'kolla-ansible', - 'neutron', - 'nova', - 'oslo.messaging', - 'oslo.middleware', - 'sahara-plugin-ambari', - 'sahara-plugin-cdh', - 'sahara-plugin-mapr', - 'sahara-plugin-spark', - 'sahara-plugin-storm', - 'sahara-plugin-vanilla', - 'tooz' -] - -# Add any paths that contain templates here, relative to this directory. 
-templates_path = ['_templates'] - -# Add any paths that contain "extra" files, such as .htaccess or -# robots.txt. -html_extra_path = ['_extra'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -copyright = '2014, OpenStack Foundation' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. - -if on_rtd: - html_theme_path = ['.'] - html_theme = '_theme_rtd' - -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = {"show_other_versions": "True",} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -html_title = 'Sahara' - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -html_sidebars = { - 'index': ['sidebarlinks.html', 'localtoc.html', 'searchbox.html', 'sourcelink.html'], - '**': ['localtoc.html', 'relations.html', - 'searchbox.html', 'sourcelink.html'] -} - -# Additional templates that should be rendered to pages, maps page names to -# template names. 
-#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'SaharaDoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - #'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'saharadoc.tex', 'Sahara', - 'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'sahara', 'Sahara', - ['OpenStack Foundation'], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------------ - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'Sahara', 'Sahara', - 'OpenStack Foundation', 'Sahara', 'Sahara', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. 
-#texinfo_show_urls = 'footnote' diff --git a/doc/source/config-generator.conf b/doc/source/config-generator.conf deleted file mode 120000 index 1a611114f8..0000000000 --- a/doc/source/config-generator.conf +++ /dev/null @@ -1 +0,0 @@ -../../tools/config/config-generator.sahara.conf \ No newline at end of file diff --git a/doc/source/configuration/descriptionconfig.rst b/doc/source/configuration/descriptionconfig.rst deleted file mode 100644 index 065ab0321b..0000000000 --- a/doc/source/configuration/descriptionconfig.rst +++ /dev/null @@ -1,8 +0,0 @@ -Configuration options -===================== - -This section provides a list of the configuration options that can -be set in the sahara configuration file. - -.. show-options:: - :config-file: tools/config/config-generator.sahara.conf diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst deleted file mode 100644 index fad26d5e4a..0000000000 --- a/doc/source/configuration/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -======================= -Configuration Reference -======================= - - -.. toctree:: - :maxdepth: 1 - - descriptionconfig - sampleconfig diff --git a/doc/source/configuration/sampleconfig.rst b/doc/source/configuration/sampleconfig.rst deleted file mode 100644 index e4fa2dfd15..0000000000 --- a/doc/source/configuration/sampleconfig.rst +++ /dev/null @@ -1,8 +0,0 @@ -Sample sahara.conf file -======================= - -This is an automatically generated sample of the sahara.conf file. - -.. literalinclude:: ../sample.config - :language: ini - :linenos: diff --git a/doc/source/contributor/adding-database-migrations.rst b/doc/source/contributor/adding-database-migrations.rst deleted file mode 100644 index 96f70f1d3a..0000000000 --- a/doc/source/contributor/adding-database-migrations.rst +++ /dev/null @@ -1,113 +0,0 @@ -Adding Database Migrations -========================== - -The migrations in ``sahara/db/migration/alembic_migrations/versions`` contain -the changes needed to migrate between Sahara database revisions. A migration -occurs by executing a script that details the changes needed to upgrade or -downgrade the database. The migration scripts are ordered so that multiple -scripts can run sequentially. The scripts are executed by Sahara's migration -wrapper which uses the Alembic library to manage the migration. Sahara supports -migration from Icehouse or later. - -Any code modifications that change the structure of the database require a -migration script so that previously existing databases will continue to -function when the new code is released. This page gives a brief overview of how -to add the migration. - -Generate a New Migration Script -+++++++++++++++++++++++++++++++ - -New migration scripts can be generated using the ``sahara-db-manage`` command. - -To generate a migration stub to be filled in by the developer:: - -$ sahara-db-manage --config-file /path/to/sahara.conf revision -m "description of revision" - -To autogenerate a migration script that reflects the current structure of the -database:: - -$ sahara-db-manage --config-file /path/to/sahara.conf revision -m "description of revision" --autogenerate - -Each of these commands will create a file of the form ``revision_description`` -where ``revision`` is a string generated by Alembic and ``description`` is -based on the text passed with the ``-m`` option. 
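
For illustration, a stub generated this way ends up under ``sahara/db/migration/alembic_migrations/versions``; the Alembic-generated prefix shown below is only an example and will differ on every run::

    $ sahara-db-manage --config-file /path/to/sahara.conf revision -m "my new revision"
    $ ls sahara/db/migration/alembic_migrations/versions/
    507eb70202af_my_new_revision.py

The next section describes how to rename such a file to follow the Sahara naming convention.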
- -Follow the Sahara Naming Convention -+++++++++++++++++++++++++++++++++++ - -By convention Sahara uses 3-digit revision numbers, and this scheme differs -from the strings generated by Alembic. Consequently, it's necessary to rename -the generated script and modify the revision identifiers in the script. - -Open the new script and look for the variable ``down_revision``. The value -should be a 3-digit numeric string, and it identifies the current revision -number of the database. Set the ``revision`` value to the ``down_revision`` -value + 1. For example, the lines:: - - # revision identifiers, used by Alembic. - revision = '507eb70202af' - down_revision = '006' - -will become:: - - # revision identifiers, used by Alembic. - revision = '007' - down_revision = '006' - -Modify any comments in the file to match the changes and rename the file to -match the new revision number:: - -$ mv 507eb70202af_my_new_revision.py 007_my_new_revision.py - - -Add Alembic Operations to the Script -++++++++++++++++++++++++++++++++++++ - -The migration script contains method ``upgrade()``. Sahara has not supported -downgrades since the Kilo release. Fill in this method with the appropriate -Alembic operations to perform upgrades. In the above example, an upgrade will -move from revision '006' to revision '007'. - -Command Summary for sahara-db-manage -++++++++++++++++++++++++++++++++++++ - -You can upgrade to the latest database version via:: - -$ sahara-db-manage --config-file /path/to/sahara.conf upgrade head - -To check the current database version:: - -$ sahara-db-manage --config-file /path/to/sahara.conf current - -To create a script to run the migration offline:: - -$ sahara-db-manage --config-file /path/to/sahara.conf upgrade head --sql - -To run the offline migration between specific migration versions:: - -$ sahara-db-manage --config-file /path/to/sahara.conf upgrade : --sql - -To upgrade the database incrementally:: - -$ sahara-db-manage --config-file /path/to/sahara.conf upgrade --delta <# of revs> - -To create a new revision:: - -$ sahara-db-manage --config-file /path/to/sahara.conf revision -m "description of revision" --autogenerate - -To create a blank file:: - -$ sahara-db-manage --config-file /path/to/sahara.conf revision -m "description of revision" - -This command does not perform any migrations, it only sets the revision. -Revision may be any existing revision. Use this command carefully:: - -$ sahara-db-manage --config-file /path/to/sahara.conf stamp - -To verify that the timeline does branch, you can run this command:: - -$ sahara-db-manage --config-file /path/to/sahara.conf check_migration - -If the migration path does branch, you can find the branch point via:: - -$ sahara-db-manage --config-file /path/to/sahara.conf history diff --git a/doc/source/contributor/apiv2.rst b/doc/source/contributor/apiv2.rst deleted file mode 100644 index 5ebe95eb31..0000000000 --- a/doc/source/contributor/apiv2.rst +++ /dev/null @@ -1,112 +0,0 @@ -API Version 2 Development -========================= - -The sahara project is currently in the process of creating a new -RESTful application programming interface (API). This interface is -by-default enabled, although it remains experimental. - -This document defines the steps necessary to enable and communicate -with the new API. This API has a few fundamental changes from the -previous APIs and they should be noted before proceeding with -development work. - -.. warning:: - This API is currently marked as experimental. 
It is not supported - by the sahara python client. These instructions are included purely - for developers who wish to help participate in the development - effort. - -Enabling the experimental API ------------------------------ - -There are a few changes to the WSGI pipeline that must be made to -enable the new v2 API. These changes will leave the 1.0 and 1.1 API -versions in place and will not adjust their communication parameters. - -To begin, uncomment, or add, the following sections in your -api-paste.ini file: - -.. sourcecode:: ini - - [app:sahara_apiv2] - paste.app_factory = sahara.api.middleware.sahara_middleware:RouterV2.factory - - [filter:auth_validator_v2] - paste.filter_factory = sahara.api.middleware.auth_valid:AuthValidatorV2.factory - -These lines define a new authentication filter for the v2 API, and -define the application that will handle the new calls. - -With these new entries in the paste configuration, we can now enable -them with the following changes to the api-paste.ini file: - -.. sourcecode:: ini - - [pipeline:sahara] - pipeline = cors request_id acl auth_validator_v2 sahara_api - - [composite:sahara_api] - use = egg:Paste#urlmap - /: sahara_apiv2 - -There are 2 significant changes occurring here; changing the -authentication validator in the pipeline, and changing the root "/" -application to the new v2 handler. - -At this point the sahara API server should be configured to accept -requests on the new v2 endpoints. - -Communicating with the v2 API ------------------------------ - -The v2 API makes at least one major change from the previous versions, -removing the OpenStack project identifier from the URL. Now users of -the API do not provide their project ID explictly; instead we fully -trust keystonemiddeware to provide it in the WSGI environment based -on the given user token. - -For example, in previous versions of the API, a call to get the list of -clusters for project "12345678-1234-1234-1234-123456789ABC" would have -been made as follows:: - - GET /v1.1/12345678-1234-1234-1234-123456789ABC/clusters - X-Auth-Token: {valid auth token} - -This call would now be made to the following URL:: - - GET /v2/clusters - X-Auth-Token: {valid auth token} - -Using a tool like `HTTPie `_, the -same request could be made like this:: - - $ httpie http://{sahara service ip:port}/v2/clusters \ - X-Auth-Token:{valid auth token} - -Following the implementation progress -------------------------------------- - -As the creation of this API will be under regular change until it moves -out of the experimental phase, a wiki page has been established to help -track the progress. - -https://wiki.openstack.org/wiki/Sahara/api-v2 - -This page will help to coordinate the various reviews, specs, and work -items that are a continuing facet of this work. - -The API service layer ---------------------- - -When contributing to the version 2 API, it will be necessary to add code -that modifies the data and behavior of HTTP calls as they are sent to -and from the processing engine and data abstraction layers. Most -frequently in the sahara codebase, these interactions are handled in the -modules of the ``sahara.service.api`` package. This package contains -code for all versions of the API and follows a namespace mapping that is -similar to the routing functions of ``sahara.api`` - -Although these modules are not the definitive end of all answers to API -related code questions, they are a solid starting point when examining -the extent of new work. 
Furthermore, they serve as a central point to -begin API debugging efforts when the need arises. diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst deleted file mode 100644 index 553a5d6754..0000000000 --- a/doc/source/contributor/contributing.rst +++ /dev/null @@ -1,70 +0,0 @@ -============================ -So You Want to Contribute... -============================ - -For general information on contributing to OpenStack, please check out the -`contributor guide `_ to get started. -It covers all the basics that are common to all OpenStack projects: the -accounts you need, the basics of interacting with our Gerrit review system, how -we communicate as a community, etc. - -Below will cover the more project specific information you need to get started -with Sahara. - -Communication -~~~~~~~~~~~~~ -* If you have something to discuss use - `OpenStack development mail-list `_. - Prefix the mail subject with ``[sahara]`` - -* Join ``#openstack-sahara`` IRC channel on `OFTC `_ - -* Attend Sahara team meetings - - * Weekly on Thursdays at 1400 UTC - - * IRC channel: ``#openstack-meeting-3`` - -Contacting the Core Team -~~~~~~~~~~~~~~~~~~~~~~~~ -* The core team has coverage in the timezones of Europe and the Americas. - -* Just pop over to IRC; we keep a close eye on it! - -* You can also find the email addresses of the core team `here - https://review.opendev.org/#/admin/groups/133,members>`. - -New Feature Planning -~~~~~~~~~~~~~~~~~~~~ -Sahara uses specs to track feature requests. They provide a high-level summary -of proposed changes and track associated commits. Sahara also uses specs for -in-depth descriptions and discussions of blueprints. Specs follow a defined -format and are submitted as change requests to the openstack/sahara-specs -repository. - -Task Tracking -~~~~~~~~~~~~~ -We track our tasks in Storyboard. - -The Sahara project group homepage on Storyboard is -https://storyboard.openstack.org/#!/project_group/sahara. - -If you're looking for some smaller, easier work item to pick up and get started -on, search for the 'low-hanging-fruit' or 'new-contributor' tag. - -Reporting a Bug -~~~~~~~~~~~~~~~ -You found an issue and want to make sure we are aware of it? You can do so on -https://storyboard.openstack.org/#!/project_group/sahara. - -Getting Your Patch Merged -~~~~~~~~~~~~~~~~~~~~~~~~~ -Typically two +2s are required before merging. - -Project Team Lead Duties -~~~~~~~~~~~~~~~~~~~~~~~~ -If you are the PTL of Sahara then you should follow the `PTL guide -`_. You should also -keep track of new versions of the various Hadoop distros/components coming out -(this can also be delegated to another contributor, but the PTL needs to track -it either way). diff --git a/doc/source/contributor/dashboard-dev-environment-guide.rst b/doc/source/contributor/dashboard-dev-environment-guide.rst deleted file mode 100644 index 05ba9f4dfd..0000000000 --- a/doc/source/contributor/dashboard-dev-environment-guide.rst +++ /dev/null @@ -1,153 +0,0 @@ -Sahara UI Dev Environment Setup -=============================== - -This page describes how to setup Horizon for developing Sahara by either -installing it as part of DevStack with Sahara or installing it in an -isolated environment and running from the command line. - -Install as a part of DevStack ------------------------------ - -See the `DevStack guide `_ for more information -on installing and configuring DevStack with Sahara. 
- -Sahara UI can be installed as a DevStack plugin by adding the following line -to your ``local.conf`` file - -.. sourcecode:: bash - - # Enable sahara-dashboard - enable_plugin sahara-dashboard https://opendev.org/openstack/sahara-dashboard - - -Isolated Dashboard for Sahara ------------------------------ - -These installation steps serve two purposes: - 1. Setup a dev environment - 2. Setup an isolated Dashboard for Sahara - -**Note** The host where you are going to perform installation has to be able -to connect to all OpenStack endpoints. You can list all available endpoints -using the following command: - -.. sourcecode:: console - - $ openstack endpoint list - -You can list the registered services with this command: - -.. sourcecode:: console - - $ openstack service list - -Sahara service should be present in keystone service list with service type -*data-processing* - -1. Install prerequisites - -.. sourcecode:: console - - $ sudo apt-get update - $ sudo apt-get install git-core python-dev gcc python-setuptools \ - python-virtualenv node-less libssl-dev libffi-dev libxslt-dev -.. - -On Ubuntu 12.10 and higher you have to install the following lib as well: - -.. sourcecode:: console - - $ sudo apt-get install nodejs-legacy -.. - -2. Checkout Horizon from git and switch to your version of OpenStack - -Here is an example: - -.. sourcecode:: console - - $ git clone https://opendev.org/openstack/horizon/ {HORIZON_DIR} -.. - -Then install the virtual environment: - -.. sourcecode:: console - - $ python {HORIZON_DIR}/tools/install_venv.py -.. - -3. Create a ``local_settings.py`` file - -.. sourcecode:: console - - $ cp {HORIZON_DIR}/openstack_dashboard/local/local_settings.py.example \ - {HORIZON_DIR}/openstack_dashboard/local/local_settings.py -.. - -4. Modify ``{HORIZON_DIR}/openstack_dashboard/local/local_settings.py`` - -Set the proper values for host and url variables: - -.. sourcecode:: python - - OPENSTACK_HOST = "ip of your controller" -.. - -If you wish to disable floating IP options during node group template -creation, add the following parameter: - -.. sourcecode:: python - - SAHARA_FLOATING_IP_DISABLED = True -.. - -5. Clone sahara-dashboard repository and checkout the desired branch - -.. sourcecode:: console - - $ git clone https://opendev.org/openstack/sahara-dashboard/ \ - {SAHARA_DASHBOARD_DIR} -.. - -6. Copy plugin-enabling files from sahara-dashboard repository to horizon - -.. sourcecode:: console - - $ cp -a {SAHARA_DASHBOARD_DIR}/sahara_dashboard/enabled/* {HORIZON_DIR}/openstack_dashboard/local/enabled/ -.. - -7. Install sahara-dashboard project into your horizon virtualenv - in editable mode - -.. sourcecode:: console - - $ . {HORIZON_DIR}/.venv/bin/activate - $ pip install -e {SAHARA_DASHBOARD_DIR} -.. - -8. Start Horizon - -.. sourcecode:: console - - $ . {HORIZON_DIR}/.venv/bin/activate - $ python {HORIZON_DIR}/manage.py runserver 0.0.0.0:8080 -.. - -This will start Horizon in debug mode. That means the logs will be written to -console and if any exceptions happen, you will see the stack-trace rendered -as a web-page. - -Debug mode can be disabled by changing ``DEBUG=True`` to ``False`` in -``local_settings.py``. In that case Horizon should be started slightly -differently, otherwise it will not serve static files: - -.. sourcecode:: console - - $ . {HORIZON_DIR}/.venv/bin/activate - $ python {HORIZON_DIR}/manage.py runserver --insecure 0.0.0.0:8080 -.. - -.. note:: - - It is not recommended to use Horizon in this mode for production. 
- diff --git a/doc/source/contributor/development-environment.rst b/doc/source/contributor/development-environment.rst deleted file mode 100644 index 77972e7c69..0000000000 --- a/doc/source/contributor/development-environment.rst +++ /dev/null @@ -1,131 +0,0 @@ -Setting Up a Development Environment -==================================== - -This page describes how to setup a Sahara development environment by either -installing it as a part of DevStack or pointing a local running instance at an -external OpenStack. You should be able to debug and test your changes without -having to deploy Sahara. - -Setup a Local Environment with Sahara inside DevStack ------------------------------------------------------ - -See :doc:`the main article `. - -Setup a Local Environment with an external OpenStack ----------------------------------------------------- - -1. Install prerequisites - -On OS X Systems: - -.. sourcecode:: console - - # we actually need pip, which is part of python package - $ brew install python mysql postgresql rabbitmq - $ pip install virtualenv tox - -On Ubuntu: - -.. sourcecode:: console - - $ sudo apt-get update - $ sudo apt-get install git-core python-dev python-virtualenv gcc libpq-dev libmysqlclient-dev python-pip rabbitmq-server - $ sudo pip install tox - -On Red Hat and related distributions (CentOS/Fedora/RHEL/Scientific Linux): - -.. sourcecode:: console - - $ sudo yum install git-core python-devel python-virtualenv gcc python-pip mariadb-devel postgresql-devel erlang - $ sudo pip install tox - $ sudo wget http://www.rabbitmq.com/releases/rabbitmq-server/v3.2.2/rabbitmq-server-3.2.2-1.noarch.rpm - $ sudo rpm --import http://www.rabbitmq.com/rabbitmq-signing-key-public.asc - $ sudo yum install rabbitmq-server-3.2.2-1.noarch.rpm - -On openSUSE-based distributions (SLES 12, openSUSE, Factory or Tumbleweed): - -.. sourcecode:: console - - $ sudo zypper in gcc git libmysqlclient-devel postgresql-devel python-devel python-pip python-tox python-virtualenv - -2. Grab the code - -.. sourcecode:: console - - $ git clone https://opendev.org/openstack/sahara.git - $ cd sahara - -3. Generate Sahara sample using tox - -.. sourcecode:: console - - tox -e genconfig - -4. Create config file from the sample - -.. sourcecode:: console - - $ cp ./etc/sahara/sahara.conf.sample ./etc/sahara/sahara.conf - -5. Look through the sahara.conf and modify parameter values as needed - For details see - :doc:`Sahara Configuration Guide <../admin/configuration-guide>` - -6. Create database schema - -.. sourcecode:: console - - $ tox -e venv -- sahara-db-manage --config-file etc/sahara/sahara.conf upgrade head - -7. To start Sahara API and Engine processes call - -.. sourcecode:: console - - $ tox -e venv -- sahara-api --config-file etc/sahara/sahara.conf --debug - $ tox -e venv -- sahara-engine --config-file etc/sahara/sahara.conf --debug - - -Setup local OpenStack dashboard with Sahara plugin --------------------------------------------------- - -.. toctree:: - :maxdepth: 1 - - - dashboard-dev-environment-guide - -Tips and tricks for dev environment ------------------------------------ - -1. Pip speedup - -Add the following lines to ~/.pip/pip.conf - -.. sourcecode:: cfg - - [global] - download-cache = /home//.pip/cache - index-url = - -Note that the ``~/.pip/cache`` folder should be created manually. - -2. Git hook for fast checks - -Just add the following lines to .git/hooks/pre-commit and do chmod +x for it. - -.. 
sourcecode:: - - #!/bin/sh - # Run fast checks (PEP8 style check and PyFlakes fast static analysis) - tox -epep8 - -You can add also other checks for pre-push, for example pylint (see below) -and tests (tox -epy27). - -3. Running static analysis (PyLint) - -Just run the following command - -.. sourcecode:: - - tox -e pylint diff --git a/doc/source/contributor/development-guidelines.rst b/doc/source/contributor/development-guidelines.rst deleted file mode 100644 index 3b6136440b..0000000000 --- a/doc/source/contributor/development-guidelines.rst +++ /dev/null @@ -1,238 +0,0 @@ -Development Guidelines -====================== - -Coding Guidelines ------------------ - -For all the Python code in Sahara we have a rule - it should pass `PEP 8`_. -All Bash code should pass `bashate`_. - -To check your code against PEP 8 and bashate run: - -.. sourcecode:: console - - $ tox -e pep8 - -.. note:: - For more details on coding guidelines see file ``HACKING.rst`` in the root - of Sahara repo. - -Static analysis ---------------- - -The static analysis checks are optional in Sahara, but they are still very -useful. The gate job will inform you if the number of static analysis warnings -has increased after your change. We recommend to always check the static -warnings. - -To run check first commit your change, then execute the following command: - -.. sourcecode:: console - - $ tox -e pylint - -Modification of Upstream Files ------------------------------- - -We never modify upstream files in Sahara. Any changes in upstream files should -be made in the upstream project and then merged back in to Sahara. This -includes whitespace changes, comments, and typos. Any change requests -containing upstream file modifications are almost certain to receive lots of -negative reviews. Be warned. - -Examples of upstream files are default xml configuration files used to -configure Hadoop, or code imported from the OpenStack Oslo project. The xml -files will usually be found in ``resource`` directories with an accompanying -``README`` file that identifies where the files came from. For example: - -.. sourcecode:: console - - $ pwd - /home/me/sahara/sahara/plugins/vanilla/v2_7_1/resources - - $ ls - core-default.xml hdfs-default.xml oozie-default.xml README.rst - create_oozie_db.sql mapred-default.xml post_conf.template yarn-default.xml -.. - -Testing Guidelines ------------------- - -Sahara has a suite of tests that are run on all submitted code, -and it is recommended that developers execute the tests themselves to -catch regressions early. Developers are also expected to keep the -test suite up-to-date with any submitted code changes. - -Unit tests are located at ``sahara/tests/unit``. - -Sahara's suite of unit tests can be executed in an isolated environment -with `Tox`_. To execute the unit tests run the following from the root of -Sahara repo: - -.. sourcecode:: console - - $ tox -e py27 - - -Documentation Guidelines ------------------------- - -All Sahara docs are written using Sphinx / RST and located in the main repo -in the ``doc`` directory. You can add or edit pages here to update the -https://docs.openstack.org/sahara/latest/ site. - -The documentation in docstrings should follow the `PEP 257`_ conventions -(as mentioned in the `PEP 8`_ guidelines). - -More specifically: - -1. Triple quotes should be used for all docstrings. -2. If the docstring is simple and fits on one line, then just use - one line. -3. 
For docstrings that take multiple lines, there should be a newline - after the opening quotes, and before the closing quotes. -4. `Sphinx`_ is used to build documentation, so use the restructured text - markup to designate parameters, return values, etc. - -Run the following command to build docs locally. - -.. sourcecode:: console - - $ tox -e docs - -After it you can access generated docs in ``doc/build/`` directory, for -example, main page - ``doc/build/html/index.html``. - -To make the doc generation process faster you can use: - -.. sourcecode:: console - - $ SPHINX_DEBUG=1 tox -e docs - -To avoid sahara reinstallation to virtual env each time you want to rebuild -docs you can use the following command (it can be executed only after -running ``tox -e docs`` first time): - -.. sourcecode:: console - - $ SPHINX_DEBUG=1 .tox/docs/bin/python setup.py build_sphinx - - - -.. note:: - For more details on documentation guidelines see HACKING.rst in the root of - the Sahara repo. - - -.. _PEP 8: http://www.python.org/dev/peps/pep-0008/ -.. _bashate: https://opendev.org/openstack/bashate -.. _PEP 257: http://www.python.org/dev/peps/pep-0257/ -.. _Tox: http://tox.testrun.org/ -.. _Sphinx: http://sphinx.pocoo.org/markup/index.html - -Event log Guidelines --------------------- - -Currently Sahara keeps useful information about provisioning for each cluster. -Cluster provisioning can be represented as a linear series of provisioning -steps, which are executed one after another. Each step may consist of several -events. The number of events depends on the step and the number of instances -in the cluster. Also each event can contain information about its cluster, -instance, and node group. In case of errors, events contain useful information -for identifying the error. Additionally, each exception in sahara contains a -unique identifier that allows the user to find extra information about that -error in the sahara logs. You can see an example of provisioning progress -information here: -https://docs.openstack.org/api-ref/data-processing/#event-log - -This means that if you add some important phase for cluster provisioning to -the sahara code, it's recommended to add a new provisioning step for this -phase. This will allow users to use event log for handling errors during this -phase. - -Sahara already has special utils for operating provisioning steps and events -in the module ``sahara/utils/cluster_progress_ops.py``. - -.. note:: - It's strictly recommended not to use ``conductor`` event log ops directly - to assign events and operate provisioning steps. - -.. note:: - You should not start a new provisioning step until the previous step has - successfully completed. - -.. note:: - It's strictly recommended to use ``event_wrapper`` for event handling. - -OpenStack client usage guidelines ---------------------------------- - -The sahara project uses several OpenStack clients internally. These clients -are all wrapped by utility functions which make using them more convenient. -When developing sahara, if you need to use an OpenStack client you should -check the ``sahara.utils.openstack`` package for the appropriate one. - -When developing new OpenStack client interactions in sahara, it is important -to understand the ``sahara.service.sessions`` package and the usage of the -keystone ``Session`` and auth plugin objects (for example, ``Token`` and -``Password``). Sahara is migrating all clients to use this authentication -methodology, where available. 
For more information on using sessions with -keystone, please see -:keystoneauth-doc:`the keystoneauth documentation ` - -Storing sensitive information ------------------------------ - -During the course of development, there is often cause to store sensitive -information (for example, login credentials) in the records for a cluster, -job, or some other record. Storing secret information this way is **not** -safe. To mitigate the risk of storing this information, sahara provides -access to the OpenStack Key Manager service (implemented by the -:barbican-doc:`barbican project <>`) through -the :castellan-doc:`castellan library <>`. - -To utilize the external key manager, the functions in -``sahara.service.castellan.utils`` are provided as wrappers around the -castellan library. These functions allow a developer to store, retrieve, and -delete secrets from the manager. Secrets that are managed through the key -manager have an identifier associated with them. These identifiers are -considered safe to store in the database. - -The following are some examples of working with secrets in the sahara -codebase. These examples are considered basic, any developer wishing to -learn more about the advanced features of storing secrets should look to -the code and docstrings contained in the ``sahara.service.castellan`` module. - -**Storing a secret** - -.. sourcecode:: python - - from sahara.service.castellan import utils as key_manager - - password = 'SooperSecretPassword' - identifier = key_manager.store_secret(password) - -**Retrieving a secret** - -.. sourcecode:: python - - from sahara.service.castellan import utils as key_manager - - password = key_manager.get_secret(identifier) - -**Deleting a secret** - -.. sourcecode:: python - - from sahara.service.castellan import utils as key_manager - - key_manager.delete_secret(identifier) - -When storing secrets through this interface it is important to remember that -if an external key manager is being used, each stored secret creates an -entry in an external service. When you are finished using the secret it is -good practice to delete it, as not doing so may leave artifacts in those -external services. - -For more information on configuring sahara to use the OpenStack Key -Manager service, see :ref:`external_key_manager_usage`. diff --git a/doc/source/contributor/devstack.rst b/doc/source/contributor/devstack.rst deleted file mode 100644 index fe065aa4ae..0000000000 --- a/doc/source/contributor/devstack.rst +++ /dev/null @@ -1,181 +0,0 @@ -Setup DevStack -============== - -DevStack can be installed on Fedora, Ubuntu, and CentOS. For supported -versions see `DevStack documentation `_ - -We recommend that you install DevStack in a VM, rather than on your main -system. That way you may avoid contamination of your system. You may find -hypervisor and VM requirements in the next section. If you still want to -install DevStack on your baremetal system, just skip the next section and read -further. - - -Start VM and set up OS ----------------------- - -In order to run DevStack in a local VM, you need to start by installing -a guest with Ubuntu 14.04 server. Download an image file from -`Ubuntu's web site `_ and create -a new guest from it. Virtualization solution must support -nested virtualization. Without nested virtualization VMs running inside -the DevStack will be extremely slow lacking hardware acceleration, i.e. -you will run QEMU VMs without KVM. - -On Linux QEMU/KVM supports nested virtualization, on Mac OS - VMware Fusion. 
-VMware Fusion requires adjustments to run VM with fixed IP. You may find -instructions which can help :ref:`below `. - -Start a new VM with Ubuntu Server 14.04. Recommended settings: - -- Processor - at least 2 cores -- Memory - at least 8GB -- Hard Drive - at least 60GB - -When allocating CPUs and RAM to the DevStack, assess how big clusters you -want to run. A single Hadoop VM needs at least 1 cpu and 1G of RAM to run. -While it is possible for several VMs to share a single cpu core, remember -that they can't share the RAM. - -After you installed the VM, connect to it via SSH and proceed with the -instructions below. - - -Install DevStack ----------------- - -The instructions assume that you've decided to install DevStack into -Ubuntu 14.04 system. - -**Note:** Make sure to use bash, as other shells are not fully compatible -and may cause hard to debug problems. - -1. Clone DevStack: - -.. sourcecode:: console - - $ sudo apt-get install git-core - $ git clone https://opendev.org/openstack/devstack.git - -2. Create the file ``local.conf`` in devstack directory with the following - content: - -.. sourcecode:: bash - - [[local|localrc]] - ADMIN_PASSWORD=nova - MYSQL_PASSWORD=nova - RABBIT_PASSWORD=nova - SERVICE_PASSWORD=$ADMIN_PASSWORD - SERVICE_TOKEN=nova - - # Enable Swift - enable_service s-proxy s-object s-container s-account - - SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 - SWIFT_REPLICAS=1 - SWIFT_DATA_DIR=$DEST/data - - # Force checkout prerequisites - # FORCE_PREREQ=1 - - # keystone is now configured by default to use PKI as the token format - # which produces huge tokens. - # set UUID as keystone token format which is much shorter and easier to - # work with. - KEYSTONE_TOKEN_FORMAT=UUID - - # Change the FLOATING_RANGE to whatever IPs VM is working in. - # In NAT mode it is the subnet VMware Fusion provides, in bridged mode - # it is your local network. But only use the top end of the network by - # using a /27 and starting at the 224 octet. - FLOATING_RANGE=192.168.55.224/27 - - # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly - # without Internet access. ``stack.sh`` must have been previously run - # with Internet access to install prerequisites and fetch repositories. - # OFFLINE=True - - # Enable sahara - enable_plugin sahara https://opendev.org/openstack/sahara - - # Enable heat - enable_plugin heat https://opendev.org/openstack/heat - -In cases where you need to specify a git refspec (branch, tag, or commit hash) -for the sahara in-tree devstack plugin (or sahara repo), it should be -appended to the git repo URL as follows: - -.. sourcecode:: bash - - enable_plugin sahara https://opendev.org/openstack/sahara - -3. Sahara can send notifications to Ceilometer, if Ceilometer is enabled. - If you want to enable Ceilometer add the following lines to the - ``local.conf`` file: - -.. sourcecode:: bash - - enable_plugin ceilometer https://opendev.org/openstack/ceilometer - -4. Start DevStack: - -.. sourcecode:: console - - $ ./stack.sh - -5. Once the previous step is finished Devstack will print a Horizon URL. - Navigate to this URL and login with login "admin" and password from - ``local.conf``. - -6. Congratulations! You have OpenStack running in your VM and you're ready to - launch VMs inside that VM. :) - - -Managing sahara in DevStack ---------------------------- - -If you install DevStack with sahara included you can rejoin screen with the -``screen -c stack-screenrc`` command and switch to the ``sahara`` tab. 
-Here you can manage the sahara service as other OpenStack services. -Sahara source code is located at ``$DEST/sahara`` which is usually -``/opt/stack/sahara``. - - -.. _fusion-fixed-ip: - -Setting fixed IP address for VMware Fusion VM ---------------------------------------------- - -1. Open file ``/Library/Preferences/VMware Fusion/vmnet8/dhcpd.conf`` - -2. There is a block named "subnet". It might look like this: - -.. sourcecode:: text - - subnet 192.168.55.0 netmask 255.255.255.0 { - range 192.168.55.128 192.168.55.254; - -3. You need to pick an IP address outside of that range. For example - - ``192.168.55.20`` - -4. Copy VM MAC address from VM settings->Network->Advanced - -5. Append the following block to file ``dhcpd.conf`` (don't forget to replace - ``VM_HOSTNAME`` and ``VM_MAC_ADDRESS`` with actual values): - -.. sourcecode:: text - - host VM_HOSTNAME { - hardware ethernet VM_MAC_ADDRESS; - fixed-address 192.168.55.20; - } - -6. Now quit all the VMware Fusion applications and restart vmnet: - -.. sourcecode:: console - - $ sudo /Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --stop - $ sudo /Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --start - -7. Now start your VM; it should have new fixed IP address. diff --git a/doc/source/contributor/gerrit.rst b/doc/source/contributor/gerrit.rst deleted file mode 100644 index a27cc3c65a..0000000000 --- a/doc/source/contributor/gerrit.rst +++ /dev/null @@ -1,14 +0,0 @@ -Code Reviews with Gerrit -======================== - -Sahara uses the `Gerrit`_ tool to review proposed code changes. The review -site is https://review.opendev.org. - -Gerrit is a complete replacement for Github pull requests. `All Github pull -requests to the Sahara repository will be ignored`. - -See `Development Workflow`_ for information about how to get -started using Gerrit. - -.. _Gerrit: http://code.google.com/p/gerrit -.. _Development Workflow: https://docs.openstack.org/infra/manual/developers.html#development-workflow diff --git a/doc/source/contributor/how-to-build-oozie.rst b/doc/source/contributor/how-to-build-oozie.rst deleted file mode 100644 index 9465824755..0000000000 --- a/doc/source/contributor/how-to-build-oozie.rst +++ /dev/null @@ -1,74 +0,0 @@ -How to build Oozie -================== - -.. note:: - - Apache does not make Oozie builds, so it has to be built manually. - -Download --------- - -* Download tarball from `Apache mirror `_ -* Unpack it with - - .. sourcecode:: console - - $ tar -xzvf oozie-4.3.1.tar.gz - -Hadoop Versions ---------------- - -To build Oozie the following command can be used: - -.. sourcecode:: console - - $ {oozie_dir}/bin/mkdistro.sh -DskipTests - -By default it builds against Hadoop 1.1.1. To built it with Hadoop version -2.x: - -* The hadoop-2 version should be changed in pom.xml. - This can be done manually or with the following command (you should - replace 2.x.x with your hadoop version): - - .. sourcecode:: console - - $ find . -name pom.xml | xargs sed -ri 's/2.3.0/2.x.x/' - -* The build command should be launched with the ``-P hadoop-2`` flag - -JDK Versions ------------- - -By default, the build configuration enforces that JDK 1.6.* is being used. - -There are 2 build properties that can be used to change the JDK version -requirements: - -* ``javaVersion`` specifies the version of the JDK used to compile (default - 1.6). - -* ``targetJavaVersion`` specifies the version of the generated bytecode - (default 1.6). 
- -For example, to specify JDK version 1.7, the build command should contain the -``-D javaVersion=1.7 -D tagetJavaVersion=1.7`` flags. - - - -Build ------ - -To build Oozie with Hadoop 2.6.0 and JDK version 1.7, the following command -can be used: - -.. sourcecode:: console - - $ {oozie_dir}/bin/mkdistro.sh assembly:single -P hadoop-2 -D javaVersion=1.7 -D targetJavaVersion=1.7 -D skipTests - -Also, the pig version can be passed as a maven property with the flag -``-D pig.version=x.x.x``. - -You can find similar instructions to build oozie.tar.gz here: -http://oozie.apache.org/docs/4.3.1/DG_QuickStart.html#Building_Oozie - diff --git a/doc/source/contributor/image-gen.rst b/doc/source/contributor/image-gen.rst deleted file mode 100644 index 7e25ccfe39..0000000000 --- a/doc/source/contributor/image-gen.rst +++ /dev/null @@ -1,344 +0,0 @@ -Image Generation -================ - -As of Newton, Sahara supports the creation of image generation and image -validation tooling as part of the plugin. If implemented properly, this -feature will enable your plugin to: - -* Validate that images passed to it for use in cluster provisioning meet its - specifications. -* Provision images from "clean" (OS-only) images. -* Pack pre-populated images for registration in Glance and use by Sahara. - -All of these features can use the same image declaration, meaning that logic -for these three use cases can be maintained in one place. - -This guide will explain how to enable this feature for your plugin, as well as -how to write or modify the image generation manifests that this feature uses. - - -Image Generation CLI --------------------- - -The key user-facing interface to this feature is the CLI script -``sahara-image-pack``. This script will be installed with all other Sahara -binaries. - -The usage of the CLI script ``sahara-image-pack`` is documented in -the :ref:`sahara-image-pack-label` section of the user guide. - - -The Image Manifest ------------------- - -As you'll read in the next section, Sahara's image packing tools allow plugin -authors to use any toolchain they choose. However, Sahara does provide a -built-in image packing framework which is uniquely suited to OpenStack use -cases, as it is designed to run the same logic while pre-packing an image or -while preparing an instance to launch a cluster after it is spawned in -OpenStack. - -By convention, the image specification, and all the scripts that it calls, -should be located in the plugin's resources directory under a subdirectory -named "images". - -A sample specification is below; the example is reasonably silly in practice, -and is only designed to highlight the use of the currently available -validator types. We'll go through each piece of this specification, but the -full sample is presented for context. - -:: - - arguments: - java-distro: - description: The java distribution. 
- default: openjdk - required: false - choices: - - oracle-java - - openjdk - - validators: - - os_case: - - redhat: - - package: nfs-utils - - debian: - - package: nfs-common - - argument_case: - argument_name: java-distro - cases: - openjdk: - - any: - - all: - - package: java-1.8.0-openjdk-devel - - argument_set: - argument_name: java-version - value: 1.8.0 - - all: - - package: java-1.7.0-openjdk-devel - - argument_set: - argument_name: java-version - value: 1.7.0 - oracle-java: - - script: install_oracle_java.sh - - script: setup_java.sh - - package: - - hadoop - - hadoop-libhdfs - - hadoop-native - - hadoop-pipes - - hadoop-sbin - - hadoop-lzo - - lzo - - lzo-devel - - hadoop-lzo-native - - -The Arguments Section ---------------------- - -First, the image specification should describe any arguments that may be used -to adjust properties of the image: - -:: - - arguments: # The section header - - java-distro: # The friendly name of the argument, and the name of the variable passed to scripts - description: The java distribution. # A friendly description to be used in help text - default: openjdk # A default value for the argument - required: false # Whether or not the argument is required - choices: # The argument value must match an element of this list - - oracle-java - - openjdk - -Specifications may contain any number of arguments, as declared above, by -adding more members to the list under the ``arguments`` key. - -The Validators Section ----------------------- - -This is where the logical flow of the image packing and validation process -is declared. A tiny example validator list is specified below. - -:: - - validators: - - package: nfs-utils - - script: setup_java.sh - -This is fairly straightforward: this specification will install the nfs-utils -package (or check that it's present) and then run the ``setup_java.sh`` script. - -All validators may be run in two modes: reconcile mode and test-only mode -(reconcile == false). If validators are run in reconcile mode, any image or -instance state which is not already true will be updated, if possible. If -validators are run in test-only mode, they will only test the image or -instance, and will raise an error if this fails. - -We'll now go over the types of validators that are currently available in -Sahara. This framework is made to easily allow new validators to be created -and old ones to be extended: if there's something you need, please do file a -wishlist bug or write and propose your own! - -Action validators ------------------ - -These validators take specific, concrete actions to assess or modify your -image or instance. - -The Package Validator -~~~~~~~~~~~~~~~~~~~~~ - -This validator type will install a package on the image, or validate that a -package is installed on the image. It can take several formats, as below: - -:: - - validators: - - package: hadoop - - package: - - hadoop-libhdfs - - nfs-utils: - version: 1.3.3-8 - -As you can see, a package declaration can consist of: - -* The package name as a string -* A list of packages, any of which may be: - * The package name as a string - * A dict with the package name as a key and a version property - -The Script Validator -~~~~~~~~~~~~~~~~~~~~ - -This validator will run a script on the image. 
It can take several formats -as well: - -:: - - validators: - - script: simple_script.sh # Runs this file - - script: - set_java_home: # The name of a script file - arguments: # Only the named environment arguments are passed, for clarity - - jdk-home - - jre-home - output: OUTPUT_VAR - - script: - store_nfs_version: # Because inline is set, this is just a friendly name - inline: rpm -q nfs-utils # Runs this text directly, rather than reading a file - output: nfs-version # Places the stdout of this script into an argument - # for future scripts to consume; if none exists, the - # argument is created - -Two variables are always available to scripts run under this framework: - -* ``distro``: The distro of the image, in case you want to switch on distro - within your script (rather than by using the os_case validator). -* ``test_only``: If this value equates to boolean false, then the script should - attempt to change the image or instance if it does not already meet the - specification. If this equates to boolean true, the script should exit with - a failure code if the image or instance does not already meet the - specification. - - -Flow Control Validators ------------------------ - -These validators are used to build more complex logic into your -specifications explicitly in the yaml layer, rather than by deferring -too much logic to scripts. - -The OS Case Validator -~~~~~~~~~~~~~~~~~~~~~ - -This validator runs different logic depending on which distribution of Linux -is being used in the guest. - -:: - - validators: - - os_case: # The contents are expressed as a list, not a dict, to preserve order - - fedora: # Only the first match runs, so put distros before families - - package: nfs_utils # The content of each case is a list of validators - - redhat: # Red Hat distros include fedora, centos, and rhel - - package: nfs-utils - - debian: # The major supported Debian distro in Sahara is ubuntu - - package: nfs-common - - -The Argument Case Validator -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This validator runs different logic depending on the value of an argument. - -:: - - validators: - - argument_case: - argument_name: java-distro # The name of the argument - cases: # The cases are expressed as a dict, as only one can equal the argument's value - openjdk: - - script: setup-openjdk # The content of each case is a list of validators - oracle-java: - - script: setup-oracle-java - -The All Validator -~~~~~~~~~~~~~~~~~ - -This validator runs all the validators within it, as one logical block. If any -validators within it fail to validate or modify the image or instance, it will -fail. - -:: - - validators: - - all: - - package: nfs-utils - - script: setup-nfs.sh - -The Any Validator -~~~~~~~~~~~~~~~~~ - -This validator attempts to run each validator within it, until one succeeds, -and will report success if any do. If this is run in reconcile mode, it will -first try each validator in test-only mode, and will succeed without -making changes if any succeed (in the case below, if openjdk 1.7.0 were -already installed, the validator would succeed and would not install 1.8.0.) - -:: - - validators: - - any: # This validator will try to install openjdk-1.8.0, but it will settle for 1.7.0 if that fails - - package: java-1.8.0-openjdk-devel - - package: java-1.7.0-openjdk-devel - -The Argument Set Validator -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You may find that you wish to store state in one place in the specification -for use in another. In this case, you can use this validator to set an -argument for future use. 
- -:: - - validators: - - argument_set: - argument_name: java-version - value: 1.7.0 - -SPI Methods ------------ - -In order to make this feature available for your plugin, you must -implement the following optional plugin SPI methods. - -When implementing these, you may choose to use your own framework of choice -(Packer for image packing, etc.) By doing so, you can ignore the entire -framework and specification language described above. However, you may -wish to instead use the abstraction we've provided (its ability to keep -logic in one place for both image packing and cluster validation is useful -in the OpenStack context.) We will, of course, focus on that framework here. - -:: - - def get_image_arguments(self, hadoop_version): - """Gets the argument set taken by the plugin's image generator""" - - def pack_image(self, hadoop_version, remote, - test_only=False, image_arguments=None): - """Packs an image for registration in Glance and use by Sahara""" - - def validate_images(self, cluster, test_only=False, image_arguments=None): - """Validates the image to be used by a cluster""" - -The validate_images method is called after Heat provisioning of your cluster, -but before cluster configuration. If the test_only keyword of this method is -set to True, the method should only test the instances without modification. -If it is set to False, the method should make any necessary changes (this can -be used to allow clusters to be spun up from clean, OS-only images.) This -method is expected to use an ssh remote to communicate with instances, as -per normal in Sahara. - -The pack_image method can be used to modify an image file (it is called by the -CLI above). This method expects an ImageRemote, which is essentially a -libguestfs handle to the disk image file, allowing commands to be run on the -image directly (though it could be any concretion that allows commands to be -run against the image.) - -By this means, the validators described above can execute the same logic in -the image packing, instance validation, and instance preparation cases with -the same degree of interactivity and logical control. - -In order to future-proof this document against possible changes, the doctext -of these methods will not be reproduced here, but they are documented very -fully in the sahara.plugins.provisioning abstraction. - -These abstractions can be found in the module sahara.plugins.images. -You will find that the framework has been built with extensibility and -abstraction in mind: you can overwrite validator types, add your own -without modifying any core sahara modules, declare hierarchies of resource -locations for shared resources, and more. These features are documented in -the sahara.plugins.images module itself (which has copious doctext,) and we -encourage you to explore and ask questions of the community if you are -curious or wish to build your own image generation tooling. diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst deleted file mode 100644 index 3c7d7e6420..0000000000 --- a/doc/source/contributor/index.rst +++ /dev/null @@ -1,30 +0,0 @@ -===================== -Developer Information -===================== - -Programming HowTos and Tutorials -================================ - -.. toctree:: - :maxdepth: 2 - - development-guidelines - development-environment - devstack - dashboard-dev-environment-guide - how-to-build-oozie - adding-database-migrations - testing - log-guidelines - apiv2 - image-gen - -Other Resources -=============== - -.. 
toctree:: - :maxdepth: 2 - - contributing - gerrit - jenkins diff --git a/doc/source/contributor/jenkins.rst b/doc/source/contributor/jenkins.rst deleted file mode 100644 index e8ccbcc54a..0000000000 --- a/doc/source/contributor/jenkins.rst +++ /dev/null @@ -1,41 +0,0 @@ -Continuous Integration with Jenkins -=================================== - -Each change made to Sahara core code is tested with unit and integration tests -and style checks using flake8. - -Unit tests and style checks are performed on the public `OpenStack Zuul -`_ instance. - -Unit tests are run using Python 2.7. - -The results of those checks and unit tests are represented as a vote of +1 or --1 in the *Verify* column in code reviews from the *Jenkins* user. - -Integration tests check CRUD operations for the Image Registry, Templates, and -Clusters. Also, a test job is launched on a created Cluster to verify that -Hadoop works. - -All integration tests are launched by `Jenkins -`_ on the internal Mirantis OpenStack -Lab. - -Jenkins keeps a pool of VMs to run tests in parallel. Even with the pool of VMs, -integration testing may take a while. - -Jenkins is controlled for the most part by Zuul, which determines which jobs are -run and when. - -Zuul status is available at this address: `Zuul Status -`_. - -For more information see: `Sahara Hadoop Cluster CI -`_. - -The integration test results are represented as a vote of +1 or -1 in the -*Verify* column in a code review from the *Sahara Hadoop Cluster CI* user. - -You can put *sahara-ci-recheck* in a comment if you want to recheck sahara-ci -jobs. Also, you can put *recheck* in a comment if you want to recheck both -Jenkins and sahara-ci jobs. Finally, you can put *reverify* in a comment if -you only want to recheck Jenkins jobs. diff --git a/doc/source/contributor/log-guidelines.rst b/doc/source/contributor/log-guidelines.rst deleted file mode 100644 index 4086f73349..0000000000 --- a/doc/source/contributor/log-guidelines.rst +++ /dev/null @@ -1,34 +0,0 @@ - -Log Guidelines -============== - -Levels Guidelines ----------------- - -During the Kilo release cycle the sahara community defined the following -log levels: - -* Debug: Shows everything and is likely not suitable for normal production - operation due to the sheer size of logs generated (e.g. script executions, - process execution, etc.). -* Info: Usually indicates successful service start/stop, versions and such - non-error related data. This should include largely positive units of work - that are accomplished (e.g. service setup and configuration, cluster start, - job execution information). -* Warning: Indicates that there might be a systemic issue; - potential predictive failure notice (e.g. job execution failed). -* Error: An error has occurred and the administrator should research the error - information (e.g. cluster failed to start, plugin violations of operation). -* Critical: An error has occurred and the system might be unstable, anything - that eliminates part of sahara's intended functionality; immediately get - administrator assistance (e.g. failed to access keystone/database, failed to - load plugin). - - -Formatting Guidelines ---------------------- - -Sahara uses string formatting defined in `PEP 3101`_ for logs. - - -..
_PEP 3101: https://www.python.org/dev/peps/pep-3101/ diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst deleted file mode 100644 index 92700a7f11..0000000000 --- a/doc/source/contributor/testing.rst +++ /dev/null @@ -1,36 +0,0 @@ -Sahara Testing -============== - -We have a bunch of different tests for Sahara. - -Unit Tests -++++++++++ - -In most Sahara sub-repositories we have a directory that contains Python unit -tests, located at `_package_/tests/unit` or `_package_/tests`. - -Scenario integration tests -++++++++++++++++++++++++++ - -New scenario integration tests were implemented for Sahara. They are available -in the sahara-tests repository -(https://opendev.org/openstack/sahara-tests). - -Tempest tests -+++++++++++++ - -Sahara has a Tempest plugin in the sahara-tests repository covering all major -API features. - -Additional tests -++++++++++++++++ - -Additional tests reside in the sahara-tests repository (as above): - -* REST API tests checking to ensure that the Sahara REST API works. - The only parts that are not tested are cluster creation and EDP. - -* CLI tests check read-only operations using the Sahara CLI. - -For more information about these tests, please read -`Tempest Integration of Sahara `_. diff --git a/doc/source/images/hadoop-cluster-example.jpg b/doc/source/images/hadoop-cluster-example.jpg deleted file mode 100644 index 70f9f675da..0000000000 Binary files a/doc/source/images/hadoop-cluster-example.jpg and /dev/null differ diff --git a/doc/source/images/openstack-interop.png b/doc/source/images/openstack-interop.png deleted file mode 100644 index c2fdaf706c..0000000000 Binary files a/doc/source/images/openstack-interop.png and /dev/null differ diff --git a/doc/source/images/sahara-architecture.svg b/doc/source/images/sahara-architecture.svg deleted file mode 100644 index 81147b4f19..0000000000 --- a/doc/source/images/sahara-architecture.svg +++ /dev/null @@ -1,1529 +0,0 @@ - (1,529 lines of image/svg+xml markup elided) - diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index a9b03d6f4b..0000000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,78 +0,0 @@ -Welcome to Sahara! -================== - -The sahara project aims to provide users with a simple means to provision data -processing frameworks (such as Apache Hadoop, Apache Spark and Apache Storm) -on OpenStack. This is accomplished by specifying configuration parameters such -as the framework version, cluster topology, node hardware details and more. - -Overview -------- - -.. toctree:: - :maxdepth: 2 - - intro/index - - -Installation ------------- - -.. toctree:: - :maxdepth: 2 - - install/index - - -Configuration ------------- - -.. toctree:: - :maxdepth: 2 - - configuration/index - - -User Guide ---------- - -.. toctree:: - :maxdepth: 2 - - user/index - - -CLI Guide --------- - -.. toctree:: - :maxdepth: 2 - - cli/index - - -Operator Documentation ---------------------- - -.. toctree:: - :maxdepth: 2 - - admin/index - - -Contributor Documentation ------------------------- - -..
toctree:: - :maxdepth: 2 - - contributor/index - - -Programming Reference ---------------------- - -.. toctree:: - :maxdepth: 2 - - reference/index diff --git a/doc/source/install/dashboard-guide.rst b/doc/source/install/dashboard-guide.rst deleted file mode 100644 index fefce0725c..0000000000 --- a/doc/source/install/dashboard-guide.rst +++ /dev/null @@ -1,83 +0,0 @@ -Sahara Dashboard Configuration Guide -==================================== - -After installing the Sahara dashboard, there are a few extra configurations -that can be made. - -Dashboard configurations are applied through Horizon's local_settings.py file. -The sample configuration file is available `from the Horizon repository. `_ - -1. Networking -------------- - -Depending on the Networking backend (Neutron) used in the -cloud, Sahara panels will determine automatically which input fields should be -displayed. - -If you wish to disable floating IP options during node group template -creation, add the following parameter: - -Example: - -.. sourcecode:: python - - SAHARA_FLOATING_IP_DISABLED = True -.. - -2. Different endpoint ---------------------- - -Sahara UI panels normally use ``data-processing`` endpoint from Keystone to -talk to Sahara service. In some cases it may be useful to switch to another -endpoint, for example use locally installed Sahara instead of the one on the -OpenStack controller. - -To switch the UI to another endpoint the endpoint should be registered in the -first place. - -Local endpoint example: - -.. code-block:: - - $ openstack service create --name sahara_local --description \ - "Sahara Data Processing (local installation)" \ - data_processing_local - - $ openstack endpoint create --region RegionOne \ - data_processing_local public http://127.0.0.1:8386/v1.1/%\(project_id\)s - - $ openstack endpoint create --region RegionOne \ - data_processing_local internal http://127.0.0.1:8386/v1.1/%\(project_id\)s - - $ openstack endpoint create --region RegionOne \ - data_processing_local admin http://127.0.0.1:8386/v1.1/%\(project_id\)s -.. - -Then the endpoint name should be changed in ``sahara.py`` under the module of -`sahara-dashboard/sahara_dashboard/api/sahara.py -`__. - -.. sourcecode:: python - - # "type" of Sahara service registered in keystone - SAHARA_SERVICE = 'data_processing_local' - - -3. Hiding health check info ---------------------------- - -Sahara UI panels normally contain some information about cluster health. If -the relevant functionality has been disabled in the Sahara service, then -operators may prefer to not have any references to health at all in the UI, -since there would not be any usable health information in that case. - -The visibility of health check info can be toggled via the -``SAHARA_VERIFICATION_DISABLED`` parameter, whose default value is False, -meaning that the health check info will be visible. - -Example: - -.. sourcecode:: python - - SAHARA_VERIFICATION_DISABLED = True -.. diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst deleted file mode 100644 index 4a82d3d570..0000000000 --- a/doc/source/install/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -================== -Installation Guide -================== - -.. 
toctree:: - :maxdepth: 2 - - installation-guide - dashboard-guide diff --git a/doc/source/install/installation-guide.rst b/doc/source/install/installation-guide.rst deleted file mode 100644 index ae16a438b5..0000000000 --- a/doc/source/install/installation-guide.rst +++ /dev/null @@ -1,300 +0,0 @@ -Sahara Installation Guide -========================= - -We recommend installing sahara in a way that will keep your system in a -consistent state. We suggest the following options: - -* Install via `Fuel `_ - -* Install via :kolla-ansible-doc:`Kolla <>` - -* Install via `RDO `_ - -* Install into a virtual environment - - - -To install with Fuel --------------------- - -1. Start by following the `MOS Quickstart - `_ to install and setup - OpenStack. - -2. Enable the sahara service during installation. - -To install with Kolla ---------------------- - -1. Start by following the :kolla-ansible-doc:`Kolla Quickstart - ` - to install and setup OpenStack. - -2. Enable the sahara service during installation. - - - -To install with RDO -------------------- - -1. Start by following the `RDO Quickstart - `_ to install and setup - OpenStack. - -2. Install sahara: - -.. sourcecode:: console - - # yum install openstack-sahara -.. - -3. Configure sahara as needed. The configuration file is located in - ``/etc/sahara/sahara.conf``. For details see - :doc:`Sahara Configuration Guide <../admin/configuration-guide>` - -4. Create the database schema: - -.. sourcecode:: console - - # sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head -.. - -5. Go through :ref:`common_installation_steps` and make any - necessary changes. - -6. Start the sahara-api and sahara-engine services: - -.. sourcecode:: console - - # systemctl start openstack-sahara-api - # systemctl start openstack-sahara-engine -.. - -7. *(Optional)* Enable sahara services to start on boot - -.. sourcecode:: console - - # systemctl enable openstack-sahara-api - # systemctl enable openstack-sahara-engine -.. - - -To install into a virtual environment -------------------------------------- - -1. First you need to install a number of packages with your - OS package manager. The list of packages depends on the OS you use. - For Ubuntu run: - -.. sourcecode:: console - - $ sudo apt-get install python-setuptools python-virtualenv python-dev -.. - -For Fedora: - -.. sourcecode:: console - - $ sudo yum install gcc python-setuptools python-virtualenv python-devel -.. - -For CentOS: - -.. sourcecode:: console - - $ sudo yum install gcc python-setuptools python-devel - $ sudo easy_install pip - $ sudo pip install virtualenv - -2. Setup a virtual environment for sahara: - -.. sourcecode:: console - - $ virtualenv sahara-venv -.. - -This will install a python virtual environment into ``sahara-venv`` -directory in your current working directory. This command does not -require super user privileges and can be executed in any directory where -the current user has write permissions. - -3. You can get a sahara archive from - ``_ and install it using pip: - -.. sourcecode:: console - - $ sahara-venv/bin/pip install 'https://tarballs.opendev.org/openstack/sahara/sahara-master.tar.gz' -.. - -Note that ``sahara-master.tar.gz`` contains the latest changes and -might not be stable at the moment. We recommend browsing -``_ and selecting the latest -stable release. For installation just execute (where replace the 'release' -word with release name, e.g. 'mitaka'): - -.. 
sourcecode:: console - - $ sahara-venv/bin/pip install 'https://tarballs.opendev.org/openstack/sahara/sahara-stable-release.tar.gz' -.. - -For example, you can get Sahara Mitaka release by executing: - -.. sourcecode:: console - - $ sahara-venv/bin/pip install 'https://tarballs.opendev.org/openstack/sahara/sahara-stable-mitaka.tar.gz' -.. - -4. After installation you should create a configuration file; as seen below it - is possible to generate a sample one: - -.. sourcecode:: console - - $ SAHARA_SOURCE_DIR="/path/to/sahara/source" - $ pushd $SAHARA_SOURCE_DIR - $ tox -e genconfig - $ popd - $ cp $SAHARA_SOURCE_DIR/etc/sahara/sahara.conf.sample sahara-venv/etc/sahara.conf -.. - -Make any necessary changes to ``sahara-venv/etc/sahara.conf``. -For details see -:doc:`Sahara Configuration Guide <../admin/configuration-guide>` - -.. _common_installation_steps: - -Common installation steps -------------------------- - -The steps below are common to both the RDO and virtual environment -installations of sahara. - -1. If you use sahara with a MySQL database, then for storing big job binaries - in the sahara internal database you must configure the size of the maximum - allowed packet. Edit the ``my.cnf`` file and change the - ``max_allowed_packet`` parameter as follows: - -.. sourcecode:: ini - - ... - [mysqld] - ... - max_allowed_packet = 256M -.. - -Then restart the mysql server to ensure these changes are active. - -2. Create the database schema: - -.. sourcecode:: console - - $ sahara-venv/bin/sahara-db-manage --config-file sahara-venv/etc/sahara.conf upgrade head -.. - -3. Start sahara services from different terminals: - -.. sourcecode:: console - - # first terminal - $ sahara-venv/bin/sahara-api --config-file sahara-venv/etc/sahara.conf - - # second terminal - $ sahara-venv/bin/sahara-engine --config-file sahara-venv/etc/sahara.conf -.. - -.. _register-sahara-label: - -4. For sahara to be accessible in the OpenStack Dashboard and for - python-saharaclient to work properly you must register sahara in - the Identity service catalog. For example: - -.. code-block:: - - $ openstack service create --name sahara --description \ - "Sahara Data Processing" data-processing - - $ openstack endpoint create --region RegionOne \ - data-processing public http://10.0.0.2:8386/v1.1/%\(project_id\)s - - $ openstack endpoint create --region RegionOne \ - data-processing internal http://10.0.0.2:8386/v1.1/%\(project_id\)s - - $ openstack endpoint create --region RegionOne \ - data-processing admin http://10.0.0.2:8386/v1.1/%\(project_id\)s - -.. note:: - - You have to install the openstack-client package in order to execute - ``openstack`` command. -.. - -5. For more information on configuring sahara with the OpenStack Dashboard - please see :doc:`dashboard-guide`. - -Optional installation of default templates ------------------------------------------- - -Sahara bundles default templates that define simple clusters for the -supported plugins. These templates may optionally be added to the -sahara database using a simple CLI included with sahara. - -The default template CLI is described in detail in a *README* file -included with the sahara sources at ``/db/templates/README.rst`` -but it is summarized here. - -Flavor id values must be specified for the default templates included -with sahara. The recommended configuration values below correspond to the -*m1.medium* and *m1.large* flavors in a default OpenStack installation (if -these flavors have been edited, their corresponding values will be different). 
-Values for flavor_id should be added to ``/etc/sahara/sahara.conf`` or another -configuration file in the sections shown here: - -.. sourcecode:: ini - - [DEFAULT] - # Use m1.medium for {flavor_id} unless specified in another section - flavor_id = 2 - - [cdh-5-default-namenode] - # Use m1.large for {flavor_id} in the cdh-5-default-namenode template - flavor_id = 4 - - [cdh-530-default-namenode] - # Use m1.large for {flavor_id} in the cdh-530-default-namenode template - flavor_id = 4 - -The above configuration values are included in a sample configuration -file at ``/plugins/default_templates/template.conf`` - -The command to install all of the default templates is as follows, where -``$PROJECT_ID`` should be a valid project id and the above configuration values -have been set in ``myconfig``: - -.. sourcecode:: console - - $ sahara-templates --config-file /etc/sahara/sahara.conf --config-file myconfig update -t $PROJECT_ID - -Help is available from the ``sahara-templates`` command: - -.. sourcecode:: console - - $ sahara-templates --help - $ sahara-templates update --help - -Notes: ------- - -Ensure that your operating system is not blocking the sahara port -(default: 8386). You may need to configure iptables in CentOS and -other Linux distributions to allow this access. - -To get the list of all possible options run: - -.. sourcecode:: console - - $ sahara-venv/bin/python sahara-venv/bin/sahara-api --help - $ sahara-venv/bin/python sahara-venv/bin/sahara-engine --help -.. - -Further, consider reading :doc:`../intro/overview` for general sahara -concepts and :doc:`../user/plugins` for specific plugin -features/requirements. diff --git a/doc/source/intro/architecture.rst b/doc/source/intro/architecture.rst deleted file mode 100644 index 6ebd2c443f..0000000000 --- a/doc/source/intro/architecture.rst +++ /dev/null @@ -1,39 +0,0 @@ -Architecture -============ - -.. image:: ../images/sahara-architecture.svg - :width: 960 - :height: 635 - :scale: 83 % - :align: left - - -The Sahara architecture consists of several components: - -* Auth component - responsible for client authentication & authorization, - communicates with the OpenStack Identity service (keystone). - -* DAL - Data Access Layer, persists internal models in DB. - -* Secure Storage Access Layer - persists the authentication data like passwords - and private keys in a secure storage. - -* Provisioning Engine - component responsible for communication with - the OpenStack Compute (nova), Orchestration (heat), Block Storage (cinder), - Image (glance), and DNS (designate) services. - -* Vendor Plugins - pluggable mechanism responsible for configuring and - launching data processing frameworks on provisioned VMs. Existing - management solutions like Apache Ambari and Cloudera Management Console - could be utilized for that purpose as well. - -* EDP - :doc:`../user/edp` responsible for scheduling and managing - data processing jobs on clusters provisioned by sahara. - -* REST API - exposes sahara functionality via REST HTTP interface. - -* Python Sahara Client - like other OpenStack components, sahara has - its own python client. - -* Sahara pages - a GUI for the sahara is located in the OpenStack Dashboard - (horizon). diff --git a/doc/source/intro/index.rst b/doc/source/intro/index.rst deleted file mode 100644 index 86afe42ef8..0000000000 --- a/doc/source/intro/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -=============== -Sahara Overview -=============== - -General overview of Sahara. - -.. 
toctree:: - :maxdepth: 2 - - overview - architecture - Roadmap diff --git a/doc/source/intro/overview.rst b/doc/source/intro/overview.rst deleted file mode 100644 index 6f12154176..0000000000 --- a/doc/source/intro/overview.rst +++ /dev/null @@ -1,192 +0,0 @@ -Rationale -========= - -Introduction ------------- - -Apache Hadoop is an industry standard and widely adopted MapReduce -implementation, it is one among a growing number of data processing -frameworks. The aim of this project is to enable users to easily provision -and manage clusters with Hadoop and other data processing frameworks on -OpenStack. It is worth mentioning that Amazon has provided Hadoop for -several years as Amazon Elastic MapReduce (EMR) service. - -Sahara aims to provide users with a simple means to provision Hadoop, Spark, -and Storm clusters by specifying several parameters such as the framework -version, cluster topology, hardware node details and more. After a user fills -in all the parameters, sahara deploys the cluster in a few minutes. Also sahara -provides means to scale an already provisioned cluster by adding or removing -worker nodes on demand. - -The solution will address the following use cases: - -* fast provisioning of data processing clusters on OpenStack for development - and quality assurance(QA). -* utilization of unused compute power from a general purpose OpenStack IaaS - cloud. -* "Analytics as a Service" for ad-hoc or bursty analytic workloads (similar - to AWS EMR). - -Key features are: - -* designed as an OpenStack component. -* managed through a REST API with a user interface(UI) available as part of - OpenStack Dashboard. -* support for a variety of data processing frameworks: - - * multiple Hadoop vendor distributions. - * Apache Spark and Storm. - * pluggable system of Hadoop installation engines. - * integration with vendor specific management tools, such as Apache - Ambari and Cloudera Management Console. - -* predefined configuration templates with the ability to modify parameters. - -Details -------- - -The sahara product communicates with the following OpenStack services: - -* Dashboard (horizon) - provides a GUI with ability to use all of sahara's - features. -* Identity (keystone) - authenticates users and provides security tokens that - are used to work with OpenStack, limiting a user's abilities in sahara to - their OpenStack privileges. -* Compute (nova) - used to provision VMs for data processing clusters. -* Bare metal (ironic) - used to provision Baremetal nodes for data processing - clusters. -* Orchestration (heat) - used to provision and orchestrate the deployment of - data processing clusters. -* Image (glance) - stores VM images, each image containing an operating system - and a pre-installed data processing distribution or framework. -* Object Storage (swift) - can be used as storage for job binaries and data - that will be processed or created by framework jobs. -* Block Storage (cinder) - can be used to provision block storage for VM - instances. -* Networking (neutron) - provides networking services to data processing - clusters. -* DNS service (designate) - provides ability to communicate with cluster - instances and Hadoop services by their hostnames. -* Telemetry (ceilometer) - used to collect measures of cluster usage for - metering and monitoring purposes. -* Shared file systems (manila) - can be used for storage of framework job - binaries and data that will be processed or created by jobs. 
-* Key manager (barbican & castellan) - persists the authentication data - like passwords and private keys in a secure storage. - -.. image:: ../images/openstack-interop.png - :width: 960 - :height: 720 - :scale: 83 % - :align: left - -General Workflow ----------------- - -Sahara will provide two levels of abstraction for the API and UI based on the -addressed use cases: cluster provisioning and analytics as a service. - -For fast cluster provisioning a generic workflow will be as following: - -* select a Hadoop (or framework) version. -* select a base image with or without pre-installed data processing framework: - - * for base images without a pre-installed framework, sahara will support - pluggable deployment engines that integrate with vendor tooling. - -* define cluster configuration, including cluster size, topology, and - framework parameters (for example, heap size): - - * to ease the configuration of such parameters, configurable templates - are provided. - -* provision the cluster; sahara will provision nodes (VMs or baremetal), - install and configure the data processing framework. -* perform operations on the cluster; add or remove nodes. -* terminate the cluster when it is no longer needed. - -For analytics as a service, a generic workflow will be as following: - -* select one of the predefined data processing framework versions. -* configure a job: - - * choose the type of job: pig, hive, jar-file, etc. - * provide the job script source or jar location. - * select input and output data location. - -* set the limit for the cluster size. -* execute the job: - - * all cluster provisioning and job execution will happen transparently - to the user. - * if using a transient cluster, it will be removed automatically after job - completion. - -* get the results of computations (for example, from swift). - -User's Perspective ------------------- - -While provisioning clusters through sahara, the user operates on three types -of entities: Node Group Templates, Cluster Templates and Clusters. - -A Node Group Template describes a group of nodes within cluster. It contains -a list of processes that will be launched on each instance in a group. -Also a Node Group Template may provide node scoped configurations for those -processes. This kind of template encapsulates hardware parameters (flavor) -for the node instance and configuration for data processing framework processes -running on the node. - -A Cluster Template is designed to bring Node Group Templates together to -form a Cluster. A Cluster Template defines what Node Groups will be included -and how many instances will be created for each. Some data processing framework -configurations can not be applied to a single node, but to a whole Cluster. -A user can specify these kinds of configurations in a Cluster Template. Sahara -enables users to specify which processes should be added to an anti-affinity -group within a Cluster Template. If a process is included into an -anti-affinity group, it means that instances where this process is going to be -launched should be scheduled to different hardware hosts. - -The Cluster entity represents a collection of instances that all have the -same data processing framework installed. It is mainly characterized by an -image with a pre-installed framework which will be used for cluster -deployment. Users may choose one of the pre-configured Cluster Templates to -start a Cluster. To get access to instances after a Cluster has started, the -user should specify a keypair. 
- -Sahara provides several constraints on cluster framework topology. You can see -all constraints in the documentation for the appropriate plugin. - -Each Cluster belongs to an Identity service project determined by the user. -Users have access only to objects located in projects they have access to. -Users can edit and delete only objects they have created or exist in their -projects. Naturally, admin users have full access to every object. In this -manner, sahara complies with general OpenStack access policy. - -Integration with Object Storage -------------------------------- - -The swift project provides the standard Object Storage service for OpenStack -environments; it is an analog of the Amazon S3 service. As a rule it is -deployed on bare metal machines. It is natural to expect data processing on -OpenStack to access data stored there. Sahara provides this option with a -file system implementation for swift -`HADOOP-8545 `_ and -`Change I6b1ba25b `_ which -implements the ability to list endpoints for an object, account or container. -This makes it possible to integrate swift with software that relies on data -locality information to avoid network overhead. - -To get more information on how to enable swift support see -:doc:`../user/hadoop-swift`. - -Pluggable Deployment and Monitoring ------------------------------------ - -In addition to the monitoring capabilities provided by vendor-specific -Hadoop management tooling, sahara provides pluggable integration with -external monitoring systems such as Nagios or Zabbix. - -Both deployment and monitoring tools can be installed on standalone VMs, -thus allowing a single instance to manage and monitor several clusters at -once. diff --git a/doc/source/reference/edp-spi.rst b/doc/source/reference/edp-spi.rst deleted file mode 100644 index 79d2fb67a2..0000000000 --- a/doc/source/reference/edp-spi.rst +++ /dev/null @@ -1,224 +0,0 @@ -Elastic Data Processing (EDP) SPI -================================= - -The EDP job engine objects provide methods for creating, monitoring, and -terminating jobs on Sahara clusters. Provisioning plugins that support EDP -must return an EDP job engine object from the :ref:`get_edp_engine` method -described in :doc:`plugin-spi`. - -Sahara provides subclasses of the base job engine interface that support EDP -on clusters running Oozie, Spark, and/or Storm. These are described below. - -.. _edp_spi_job_types: - -Job Types ---------- - -Some of the methods below test job type. Sahara supports the following string -values for job types: - -* Hive -* Java -* Pig -* MapReduce -* MapReduce.Streaming -* Spark -* Shell -* Storm - -.. note:: - Constants for job types are defined in *sahara.utils.edp*. - -Job Status Values ------------------ - -Several of the methods below return a job status value. A job status value is -a dictionary of the form: - -{'status': *job_status_value*} - -where *job_status_value* is one of the following string values: - -* DONEWITHERROR -* FAILED -* TOBEKILLED -* KILLED -* PENDING -* RUNNING -* SUCCEEDED - -Note, constants for job status are defined in *sahara.utils.edp* - -EDP Job Engine Interface ------------------------- - -The sahara.service.edp.base_engine.JobEngine class is an -abstract class with the following interface: - - -cancel_job(job_execution) -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Stops the running job whose id is stored in the job_execution object. - -*Returns*: None if the operation was unsuccessful or an updated job status -value. 
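To make the job status value format concrete, here is a small illustrative sketch (not taken from the Sahara source; the helper name ``is_terminal`` and the module-level constant are assumptions) showing how a caller of ``cancel_job`` or ``get_job_status`` might interpret the returned dictionary, using the status strings listed in the Job Status Values section above:

.. sourcecode:: python

    # Statuses from the list above that indicate the job has finished.
    TERMINAL_STATUSES = {'DONEWITHERROR', 'FAILED', 'KILLED', 'SUCCEEDED'}


    def is_terminal(job_status):
        """Return True if a job status value describes a finished job.

        ``job_status`` is either None (for example, an unsuccessful
        cancel_job call) or a dict of the form {'status': 'RUNNING'}.
        """
        if job_status is None:
            return False
        return job_status.get('status') in TERMINAL_STATUSES


    # Example usage:
    #   status = engine.get_job_status(job_execution)
    #   if is_terminal(status):
    #       print('job finished with status', status['status'])

Real code would normally compare against the constants defined in *sahara.utils.edp* rather than string literals.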
- -get_job_status(job_execution) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Returns the current status of the job whose id is stored in the job_execution -object. - -*Returns*: a job status value. - - -run_job(job_execution) -~~~~~~~~~~~~~~~~~~~~~~ - -Starts the job described by the job_execution object - -*Returns*: a tuple of the form (job_id, job_status_value, job_extra_info). - -* *job_id* is required and must be a string that allows the EDP engine to - uniquely identify the job. -* *job_status_value* may be None or a job status value -* *job_extra_info* may be None or optionally a dictionary that the EDP engine - uses to store extra information on the job_execution_object. - - -validate_job_execution(cluster, job, data) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Checks whether or not the job can run on the cluster with the specified data. -Data contains values passed to the */jobs//execute* REST API method -during job launch. If the job cannot run for any reason, including job -configuration, cluster configuration, or invalid data, this method should -raise an exception. - -*Returns*: None - -get_possible_job_config(job_type) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Returns hints used by the Sahara UI to prompt users for values when -configuring and launching a job. Note that no hints are required. - -See :doc:`../user/edp` for more information on how configuration values, -parameters, and arguments are used by different job types. - -*Returns*: a dictionary of the following form, containing hints for configs, -parameters, and arguments for the job type: - -{'job_config': {'configs': [], 'params': {}, 'args': []}} - -* *args* is a list of strings -* *params* contains simple key/value pairs -* each item in *configs* is a dictionary with entries - for 'name' (required), 'value', and 'description' - - -get_supported_job_types() -~~~~~~~~~~~~~~~~~~~~~~~~~ - -This method returns the job types that the engine supports. Not all engines -will support all job types. - -*Returns*: a list of job types supported by the engine. - -Oozie Job Engine Interface --------------------------- - -The sahara.service.edp.oozie.engine.OozieJobEngine class is derived from -JobEngine. It provides implementations for all of the methods in the base -interface but adds a few more abstract methods. - -Note that the *validate_job_execution(cluster, job, data)* method does basic -checks on the job configuration but probably should be overloaded to include -additional checks on the cluster configuration. For example, the job engines -for plugins that support Oozie add checks to make sure that the Oozie service -is up and running. - - -get_hdfs_user() -~~~~~~~~~~~~~~~ - -Oozie uses HDFS to distribute job files. This method gives the name of the -account that is used on the data nodes to access HDFS (such as 'hadoop' or -'hdfs'). The Oozie job engine expects that HDFS contains a directory for this -user under */user/*. - -*Returns*: a string giving the username for the account used to access HDFS on -the cluster. - - -create_hdfs_dir(remote, dir_name) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The remote object *remote* references a node in the cluster. This method -creates the HDFS directory *dir_name* under the user specified by -*get_hdfs_user()* in the HDFS accessible from the specified node. For example, -if the HDFS user is 'hadoop' and the dir_name is 'test' this method would -create '/user/hadoop/test'. 
- -The reason that this method is broken out in the interface as an abstract -method is that different versions of Hadoop treat path creation differently. - -*Returns*: None - - -get_oozie_server_uri(cluster) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Returns the full URI for the Oozie server, for example -*http://my_oozie_host:11000/oozie*. This URI is used by an Oozie client to -send commands and queries to the Oozie server. - -*Returns*: a string giving the Oozie server URI. - - -get_oozie_server(self, cluster) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Returns the node instance for the host in the cluster running the Oozie -server. - -*Returns*: a node instance. - - -get_name_node_uri(self, cluster) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Returns the full URI for the Hadoop NameNode, for example -*http://master_node:8020*. - -*Returns*: a string giving the NameNode URI. - -get_resource_manager_uri(self, cluster) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Returns the full URI for the Hadoop JobTracker for Hadoop version 1 or the -Hadoop ResourceManager for Hadoop version 2. - -*Returns*: a string giving the JobTracker or ResourceManager URI. - -Spark Job Engine ----------------- - -The sahara.service.edp.spark.engine.SparkJobEngine class provides a full EDP -implementation for Spark standalone clusters. - -.. note:: - The *validate_job_execution(cluster, job, data)* method does basic - checks on the job configuration but probably should be overloaded to - include additional checks on the cluster configuration. For example, the - job engine returned by the Spark plugin checks that the Spark version is - >= 1.0.0 to ensure that *spark-submit* is available. - -get_driver_classpath(self) -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Returns driver class path. - -*Returns*: a string of the following format ' --driver-class-path -*class_path_value*'. diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst deleted file mode 100644 index 4a3710ef74..0000000000 --- a/doc/source/reference/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -===================== -Programming Reference -===================== - -Plugins and EDP -=============== - -.. toctree:: - :maxdepth: 2 - - plugins - plugin-spi - edp-spi - - -REST API -======== - -.. toctree:: - :maxdepth: 2 - - restapi diff --git a/doc/source/reference/plugin-spi.rst b/doc/source/reference/plugin-spi.rst deleted file mode 100644 index 7e72605b8a..0000000000 --- a/doc/source/reference/plugin-spi.rst +++ /dev/null @@ -1,393 +0,0 @@ -Plugin SPI -========== - -Plugin interface ----------------- - -get_versions() -~~~~~~~~~~~~~~ - -Returns all available versions of the plugin. Depending on the plugin, this -version may map directly to the HDFS version, or it may not; check your -plugin's documentation. It is responsibility of the plugin to make sure that -all required images for each hadoop version are available, as well as configs -and whatever else that plugin needs to create the Hadoop cluster. - -*Returns*: list of strings representing plugin versions - -*Example return value*: ["1.2.1", "2.3.0", "2.4.1"] - -get_configs( hadoop_version ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Lists all configs supported by the plugin with descriptions, defaults, and -targets for which this config is applicable. 
- -*Returns*: list of configs - -*Example return value*: (("JobTracker heap size", "JobTracker heap size, in -MB", "int", "512", `"mapreduce"`, "node", True, 1)) - -get_node_processes( hadoop_version ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Returns all supported services and node processes for a given Hadoop version. -Each node process belongs to a single service and that relationship is -reflected in the returned dict object. See example for details. - -*Returns*: dictionary having entries (service -> list of processes) - -*Example return value*: {"mapreduce": ["tasktracker", "jobtracker"], "hdfs": -["datanode", "namenode"]} - -get_required_image_tags( hadoop_version ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Lists tags that should be added to OpenStack Image via Image Registry. Tags -are used to filter Images by plugin and hadoop version. - -*Returns*: list of tags - -*Example return value*: ["tag1", "some_other_tag", ...] - -validate( cluster ) -~~~~~~~~~~~~~~~~~~~ - -Validates a given cluster object. Raises a *SaharaException* with a meaningful -message in the case of validation failure. - -*Returns*: None - -*Example exception*: - -validate_scaling( cluster, existing, additional ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To be improved. - -Validates a given cluster before scaling operation. - -*Returns*: list of validation_errors - -update_infra( cluster ) -~~~~~~~~~~~~~~~~~~~~~~~ - -This method is no longer used now that Sahara utilizes Heat for OpenStack -resource provisioning, and is not currently utilized by any plugin. - -*Returns*: None - -configure_cluster( cluster ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Configures cluster on the VMs provisioned by sahara. In this function the -plugin should perform all actions like adjusting OS, installing required -packages (including Hadoop, if needed), configuring Hadoop, etc. - -*Returns*: None - -start_cluster( cluster ) -~~~~~~~~~~~~~~~~~~~~~~~~ - -Start already configured cluster. This method is guaranteed to be called only -on a cluster which was already prepared with configure_cluster(...) call. - -*Returns*: None - -scale_cluster( cluster, instances ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Scale an existing cluster with additional instances. The instances argument is -a list of ready-to-configure instances. Plugin should do all configuration -operations in this method and start all services on those instances. - -*Returns*: None - -.. _get_edp_engine: - -get_edp_engine( cluster, job_type ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Returns an EDP job engine object that supports the specified job_type on the -given cluster, or None if there is no support. The EDP job engine object -returned must implement the interface described in :doc:`edp-spi`. The -job_type is a String matching one of the job types listed in -:ref:`edp_spi_job_types`. - -*Returns*: an EDP job engine object or None - -decommission_nodes( cluster, instances ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Scale cluster down by removing a list of instances. The plugin should stop -services on the provided list of instances. The plugin also may need to update -some configurations on other instances when nodes are removed; if so, this -method must perform that reconfiguration. - -*Returns*: None - -on_terminate_cluster( cluster ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When user terminates cluster, sahara simply shuts down all the cluster VMs. -This method is guaranteed to be invoked before that, allowing the plugin to do -some clean-up. 
- -*Returns*: None - -get_open_ports( node_group ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When user requests sahara to automatically create a security group for the -node group (``auto_security_group`` property set to True), sahara will call -this plugin method to get a list of ports that need to be opened. - -*Returns*: list of ports to be open in auto security group for the given node -group - -get_edp_job_types( versions ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Optional method, which provides the ability to see all supported job types for -specified plugin versions. - -*Returns*: dict with supported job types for specified versions of plugin - -recommend_configs( self, cluster, scaling=False ) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Optional method, which provides recommendations for cluster configuration -before creating/scaling operation. - -get_image_arguments( self, hadoop_version ): -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Optional method, which gets the argument set taken by the plugin's image -generator, or NotImplemented if the plugin does not provide image generation -support. See :doc:`../contributor/image-gen`. - -*Returns*: A sequence with items of type sahara.plugins.images.ImageArgument. - -pack_image( self, hadoop_version, remote, test_only=False, ... ): -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Optional method which packs an image for registration in Glance and use by -Sahara. This method is called from the image generation CLI rather than from -the Sahara api or engine service. See :doc:`../contributor/image-gen`. - -*Returns*: None (modifies the image pointed to by the remote in-place.) - -validate_images( self, cluster, test_only=False, image_arguments=None ): -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Validates the image to be used to create a cluster, to ensure that it meets -the specifications of the plugin. See :doc:`../contributor/image-gen`. - -*Returns*: None; may raise a sahara.plugins.exceptions.ImageValidationError - - -Object Model -============ - -Here is a description of all the objects involved in the API. - -Notes: - - - clusters and node_groups have 'extra' fields allowing the plugin to - persist any supplementary info about the cluster. - - node_process is just a process that runs on some node in cluster. - -Example list of node processes: - -1. jobtracker -2. namenode -3. tasktracker -4. datanode - -- Each plugin may have different names for the same processes. - -Config ------- - -An object, describing one configuration entry - -+-------------------+--------+------------------------------------------------+ -| Property | Type | Description | -+===================+========+================================================+ -| name | string | Config name. | -+-------------------+--------+------------------------------------------------+ -| description | string | A hint for user, what this config is used for. | -+-------------------+--------+------------------------------------------------+ -| config_type | enum | possible values are: 'string', 'integer', | -| | | 'boolean', 'enum'. | -+-------------------+--------+------------------------------------------------+ -| config_values | list | List of possible values, if config_type is | -| | | enum. | -+-------------------+--------+------------------------------------------------+ -| default_value | string | Default value for config. 
| -+-------------------+--------+------------------------------------------------+ -| applicable_target | string | The target could be either a service returned | -| | | by get_node_processes(...) call | -| | | in form of 'service:', or | -| | | 'general'. | -+-------------------+--------+------------------------------------------------+ -| scope | enum | Could be either 'node' or 'cluster'. | -+-------------------+--------+------------------------------------------------+ -| is_optional | bool | If is_optional is False and no default_value | -| | | is specified, user must provide a value. | -+-------------------+--------+------------------------------------------------+ -| priority | int | 1 or 2. A Hint for UI. Configs with priority | -| | | *1* are always displayed. | -| | | Priority *2* means user should click a button | -| | | to see the config. | -+-------------------+--------+------------------------------------------------+ - - -User Input ----------- - -Value provided by user for a specific config. - -+----------+--------+--------------------------------------------------------+ -| Property | Type | Description | -+==========+========+========================================================+ -| config | config | A config object for which this user_input is provided. | -+----------+--------+--------------------------------------------------------+ -| value | ... | Value for the config. Type depends on Config type. | -+----------+--------+--------------------------------------------------------+ - - -Instance --------- - -An instance created for cluster. - -+---------------+---------+---------------------------------------------------+ -| Property | Type | Description | -+===============+=========+===================================================+ -| instance_id | string | Unique instance identifier. | -+---------------+---------+---------------------------------------------------+ -| instance_name | string | OpenStack instance name. | -+---------------+---------+---------------------------------------------------+ -| internal_ip | string | IP to communicate with other instances. | -+---------------+---------+---------------------------------------------------+ -| management_ip | string | IP of instance, accessible outside of internal | -| | | network. | -+---------------+---------+---------------------------------------------------+ -| volumes | list | List of volumes attached to instance. Empty if | -| | | ephemeral drive is used. | -+---------------+---------+---------------------------------------------------+ -| nova_info | object | Nova instance object. | -+---------------+---------+---------------------------------------------------+ -| username | string | Username, that sahara uses for establishing | -| | | remote connections to instance. | -+---------------+---------+---------------------------------------------------+ -| hostname | string | Same as instance_name. | -+---------------+---------+---------------------------------------------------+ -| fqdn | string | Fully qualified domain name for this instance. | -+---------------+---------+---------------------------------------------------+ -| remote | helpers | Object with helpers for performing remote | -| | | operations. | -+---------------+---------+---------------------------------------------------+ - - -Node Group ----------- - -Group of instances. 
- -+----------------------+--------+---------------------------------------------+ -| Property | Type | Description | -+======================+========+=============================================+ -| name | string | Name of this Node Group in Cluster. | -+----------------------+--------+---------------------------------------------+ -| flavor_id | string | OpenStack Flavor used to boot instances. | -+----------------------+--------+---------------------------------------------+ -| image_id | string | Image id used to boot instances. | -+----------------------+--------+---------------------------------------------+ -| node_processes | list | List of processes running on each instance. | -+----------------------+--------+---------------------------------------------+ -| node_configs | dict | Configs dictionary, applied to instances. | -+----------------------+--------+---------------------------------------------+ -| volumes_per_node | int | Number of volumes mounted to each instance. | -| | | 0 means use ephemeral drive. | -+----------------------+--------+---------------------------------------------+ -| volumes_size | int | Size of each volume (GB). | -+----------------------+--------+---------------------------------------------+ -| volumes_mount_prefix | string | Prefix added to mount path of each volume. | -+----------------------+--------+---------------------------------------------+ -| floating_ip_pool | string | Floating IP Pool name. All instances in the | -| | | Node Group will have Floating IPs assigned | -| | | from this pool. | -+----------------------+--------+---------------------------------------------+ -| count | int | Number of instances in this Node Group. | -+----------------------+--------+---------------------------------------------+ -| username | string | Username used by sahara to establish remote | -| | | connections to instances. | -+----------------------+--------+---------------------------------------------+ -| configuration | dict | Merged dictionary of node configurations | -| | | and cluster configurations. | -+----------------------+--------+---------------------------------------------+ -| storage_paths | list | List of directories where storage should be | -| | | placed. | -+----------------------+--------+---------------------------------------------+ - -Cluster -------- - -Contains all relevant info about cluster. This object is provided to the -plugin for both cluster creation and scaling. The "Cluster Lifecycle" section -below further specifies which fields are filled at which moment. - -+----------------------------+--------+---------------------------------------+ -| Property | Type | Description | -+============================+========+=======================================+ -| name | string | Cluster name. | -+----------------------------+--------+---------------------------------------+ -| project_id | string | OpenStack Project id where this | -| | | Cluster is available. | -+----------------------------+--------+---------------------------------------+ -| plugin_name | string | Plugin name. | -+----------------------------+--------+---------------------------------------+ -| hadoop_version | string | Hadoop version running on instances. | -+----------------------------+--------+---------------------------------------+ -| default_image_id | string | OpenStack image used to boot | -| | | instances. | -+----------------------------+--------+---------------------------------------+ -| node_groups | list | List of Node Groups. 
| -+----------------------------+--------+---------------------------------------+ -| cluster_configs | dict | Dictionary of Cluster scoped | -| | | configurations. | -+----------------------------+--------+---------------------------------------+ -| cluster_template_id | string | Cluster Template used for Node Groups | -| | | and Configurations. | -+----------------------------+--------+---------------------------------------+ -| user_keypair_id | string | OpenStack keypair added to instances | -| | | to make them accessible for user. | -+----------------------------+--------+---------------------------------------+ -| neutron_management_network | string | Neutron network ID. Instances will | -| | | get fixed IPs in this network. | -+----------------------------+--------+---------------------------------------+ -| anti_affinity | list | List of processes that will be run on | -| | | different hosts. | -+----------------------------+--------+---------------------------------------+ -| description | string | Cluster Description. | -+----------------------------+--------+---------------------------------------+ -| info | dict | Dictionary for additional information.| -+----------------------------+--------+---------------------------------------+ - - -Validation Error ----------------- - -Describes what is wrong with one of the values provided by user. - -+---------------+--------+-----------------------------------------------+ -| Property | Type | Description | -+===============+========+===============================================+ -| config | config | A config object that is not valid. | -+---------------+--------+-----------------------------------------------+ -| error_message | string | Message that describes what exactly is wrong. | -+---------------+--------+-----------------------------------------------+ diff --git a/doc/source/reference/plugins.rst b/doc/source/reference/plugins.rst deleted file mode 100644 index a1e9813360..0000000000 --- a/doc/source/reference/plugins.rst +++ /dev/null @@ -1,23 +0,0 @@ -Pluggable Provisioning Mechanism -================================ - -Sahara can be integrated with 3rd party management tools like Apache Ambari -and Cloudera Management Console. The integration is achieved using the plugin -mechanism. - -In short, responsibilities are divided between the Sahara core and a plugin as -follows. Sahara interacts with the user and uses Heat to provision OpenStack -resources (VMs, baremetal servers, security groups, etc.) The plugin installs -and configures a Hadoop cluster on the provisioned instances. Optionally, -a plugin can deploy management and monitoring tools for the cluster. Sahara -provides plugins with utility methods to work with provisioned instances. - -A plugin must extend the `sahara.plugins.provisioning:ProvisioningPluginBase` -class and implement all the required methods. Read :doc:`plugin-spi` for -details. - -The `instance` objects provided by Sahara have a `remote` property which -can be used to interact with instances. The `remote` is a context manager so -you can use it in `with instance.remote:` statements. The list of available -commands can be found in `sahara.utils.remote.InstanceInteropHelper`. -See the source code of the Vanilla plugin for usage examples. 
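-
-As a minimal, illustrative sketch (not the authoritative plugin API), the
-snippet below shows how a plugin method might drive provisioned instances
-through the `remote` helper. It assumes node groups expose their instances as
-``instances`` and that the helper provides ``execute_command`` and
-``write_file_to``, as found in `sahara.utils.remote.InstanceInteropHelper`;
-check the Vanilla plugin for real usage.
-
-.. sourcecode:: python
-
-    from sahara.plugins import provisioning
-
-
-    class ExamplePlugin(provisioning.ProvisioningPluginBase):
-        # Only the instance-interaction part is sketched here; the other
-        # required SPI methods (get_versions, get_configs, ...) are omitted.
-
-        def start_cluster(self, cluster):
-            for node_group in cluster.node_groups:
-                for instance in node_group.instances:
-                    # 'remote' wraps ssh access to the instance; depending
-                    # on the Sahara version it is a callable or is used
-                    # directly as a context manager.
-                    with instance.remote() as r:
-                        # Push a configuration file to the instance.
-                        r.write_file_to('/tmp/example-service.conf',
-                                        'key=value\n')
-                        # Run a command on the instance.
-                        r.execute_command(
-                            'sudo mv /tmp/example-service.conf '
-                            '/etc/example-service.conf')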
diff --git a/doc/source/reference/restapi.rst b/doc/source/reference/restapi.rst deleted file mode 100644 index 862c43acaf..0000000000 --- a/doc/source/reference/restapi.rst +++ /dev/null @@ -1,119 +0,0 @@ -Sahara REST API v1.1 -******************** - -1 General API information -========================= - -This section contains base info about the sahara REST API design. - -1.1 Authentication and Authorization ------------------------------------- - -The sahara API uses the OpenStack Identity service as the default -authentication service. When the Identity service is enabled, users who -submit requests to the sahara service must provide an authentication token in -the ``X-Auth-Token`` request header. A user can obtain the token by -authenticating to the Identity service endpoint. For more information about -the Identity service, please see the :keystone-doc:`keystone project developer -documentation <>`. - -With each request, a user must specify the keystone project -in the url path, for example: '/v1.1/{project_id}/clusters'. Sahara -will perform the requested operation in the specified project using the -provided credentials. Therefore, clusters may be created and managed only -within projects to which the user has access. - -1.2 Request / Response Types ----------------------------- - -The sahara API supports the JSON data serialization format. This means that -for requests that contain a body, the ``Content-Type`` header must be set to -the MIME type value ``application/json``. Also, clients should accept JSON -serialized responses by specifying the ``Accept`` header with the MIME type -value ``application/json`` or adding the ``.json`` extension to the resource -name. The default response format is ``application/json`` if the client does -not specify an ``Accept`` header or append the ``.json`` extension in the URL -path. - -Example: - -.. sourcecode:: text - - GET /v1.1/{project_id}/clusters.json - -or - -.. sourcecode:: text - - GET /v1.1/{project_id}/clusters - Accept: application/json - -1.3 Navigation by response --------------------------- -Sahara API supports delivering response data by pages. User can pass -two parameters in API GET requests which return an array of objects. -The parameters are: - -``limit`` - maximum number of objects in response data. -This parameter must be a positive integer number. - -``marker`` - ID of the last element on the list which won't be in response. - -Example: -Get 15 clusters after cluster with id=d62ad147-5c10-418c-a21a-3a6597044f29: - -.. sourcecode:: text - - GET /v1.1/{project_id}/clusters?limit=15&marker=d62ad147-5c10-418c-a21a-3a6597044f29 - -For convenience, response contains markers of previous and following pages -which are named 'prev' and 'next' fields. Also there is ``sort_by`` parameter -for sorting objects. Sahara API supports ascending and descending sorting. - -Examples: -Sort clusters by name: - -.. sourcecode:: text - - GET /v1.1/{project_id}/clusters?sort_by=name - -Sort clusters by date of creation in descending order: - -.. sourcecode:: text - - GET /v1.1/{project_id}/clusters?sort_by=-created_at - - -1.4 Faults ----------- -The sahara API returns an error response if a failure occurs while -processing a request. Sahara uses only standard HTTP error codes. 4xx errors -indicate problems in the particular request being sent from the client and -5xx errors indicate server-side problems. - -The response body will contain richer information about the cause of the -error. 
An error response follows the format illustrated by the following
-example:
-
-.. sourcecode:: http
-
-    HTTP/1.1 400 BAD REQUEST
-    Content-type: application/json
-    Content-length: 126
-
-    {
-        "error_name": "CLUSTER_NAME_ALREADY_EXISTS",
-        "error_message": "Cluster with name 'test-cluster' already exists",
-        "error_code": 400
-    }
-
-
-The ``error_code`` attribute is an HTTP response code. The ``error_name``
-attribute indicates the generic error type without any concrete ids or
-names, etc. The last attribute, ``error_message``, contains a human readable
-error description.
-
-2 API
-=====
-
-- `Sahara REST API Reference (OpenStack API Complete Reference -
-  DataProcessing) <https://docs.openstack.org/api-ref/data-processing/>`_
diff --git a/doc/source/user/building-guest-images.rst b/doc/source/user/building-guest-images.rst
deleted file mode 100644
index da42ce5a65..0000000000
--- a/doc/source/user/building-guest-images.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-.. _building-guest-images-label:
-
-Building guest images
-=====================
-
-Sahara plugins represent different Hadoop or other Big Data platforms
-and require specific guest images.
-
-While it is possible to use cloud images which only contain the basic
-software requirements (also called *plain images*), their usage slows down
-the cluster provisioning process and has not been thoroughly tested recently.
-
-It is strongly advised to build images which contain
-the software required to create the clusters for the various plugins
-and use them instead of *plain images*.
-
-Sahara currently provides two different tools for building
-guest images:
-
-- ``sahara-image-pack`` is the newer tool and supports more recent images;
-- ``sahara-image-create`` is the older tool.
-
-Both tools are described in detail in the next sections.
-
-The documentation of each plugin describes which method is supported
-for the various versions. If both are supported, ``sahara-image-pack``
-is recommended.
-
-General requirements for guest images
--------------------------------------
-
-There are a few common requirements for all guest images,
-which must be based on GNU/Linux distributions.
-
-* cloud-init must be installed
-* the ssh server must be installed
-* the firewall, if enabled, must allow connections on port 22 (ssh)
-
-The cloud images provided by the GNU/Linux distributions respect
-those requirements.
-
-Each plugin specifies additional requirements.
-The image building tools provided by Sahara take care of preparing the images
-with those additional requirements.
-
-.. toctree::
-
-   building-guest-images/sahara-image-pack
-   building-guest-images/sahara-image-create
-   building-guest-images/baremetal
diff --git a/doc/source/user/building-guest-images/baremetal.rst b/doc/source/user/building-guest-images/baremetal.rst
deleted file mode 100644
index bceb260ff3..0000000000
--- a/doc/source/user/building-guest-images/baremetal.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-.. _building-baremetal-images-label:
-
-Bare metal images
------------------
-
-Images that can be used for bare metal deployment through Ironic
-can be generated using both image building tools:
-
-sahara-image-create:
-  pass the -b parameter to the command
-
-sahara-image-pack:
-  use `virt-get-kernel` on the generated image to extract the kernel and
-  the initramfs file
diff --git a/doc/source/user/building-guest-images/sahara-image-create.rst b/doc/source/user/building-guest-images/sahara-image-create.rst
deleted file mode 100644
index b7cc45855f..0000000000
--- a/doc/source/user/building-guest-images/sahara-image-create.rst
+++ /dev/null
@@ -1,80 +0,0 @@
-sahara-image-create
--------------------
-
-The historical tool for building images, ``sahara-image-create``, is based on
-`Disk Image Builder <https://docs.openstack.org/diskimage-builder/latest/>`_.
-
-`Disk Image Builder` builds disk images using elements. An element is a
-particular set of code that alters how the image is built, or runs within the
-chroot to prepare the image.
-
-The additional elements required by Sahara images and the ``sahara-image-create``
-command itself are stored in the
-`Sahara image elements repository <https://opendev.org/openstack/sahara-image-elements>`_.
-
-To create images for a specific plugin follow these steps:
-
-1. Clone the repository "https://opendev.org/openstack/sahara-image-elements"
-   locally.
-
-2. Use tox to build images.
-
-   You can run the command below in the sahara-image-elements
-   directory to build images. By default this script will attempt to create
-   cloud images for all versions of supported plugins and all operating systems
-   (a subset of Ubuntu, Fedora, and CentOS, depending on the plugin).
-
-   .. sourcecode:: console
-
-      tox -e venv -- sahara-image-create -u
-
-   If you want to build an image for ``<plugin>`` with ``<version>`` on a
-   specific ``<distribution>``, just execute:
-
-   .. sourcecode:: console
-
-      tox -e venv -- sahara-image-create -p <plugin> -v <version> -i <distribution>
-
-   Tox will create a virtualenv and install required python packages in it,
-   clone the repositories "https://opendev.org/openstack/diskimage-builder" and
-   "https://opendev.org/openstack/sahara-image-elements" and export necessary
-   parameters.
-
-   The valid values for the ``<distribution>`` argument are:
-
-   - Ubuntu (all versions): ``ubuntu``
-   - CentOS 7: ``centos7``
-   - Fedora: ``fedora``
-
-   ``sahara-image-create`` will then create the required cloud images
-   using image elements that install all the necessary packages
-   and configure them.
-   You will find the created images in the parent directory.
-
-Variables
-~~~~~~~~~
-
-The following environment variables can be used to change the behavior of the
-image building:
-
-* ``JAVA_DOWNLOAD_URL`` - download link for the JDK (tarball or bin)
-* ``DIB_IMAGE_SIZE`` - parameter that specifies the size of the instance's
-  hard disk volume. You only need to specify it for Fedora, because Fedora
-  does not use all of the available volume
-
-The following variables can be used to change the name of the output
-image:
-
-* ``centos7_image_name``
-* ``ubuntu_image_name``
-* ``fedora_image_name``
-
-.. note::
-
-    Disk Image Builder will generate QCOW2 images, used with the default
-    OpenStack Qemu/KVM hypervisors. If your OpenStack uses a different
-    hypervisor, the generated image should be converted to an appropriate
-    format.
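-
-If the build needs to be scripted, the variables above can also be set from a
-small wrapper instead of being exported in the shell. The sketch below is
-only illustrative: the variable values and the checkout directory are
-placeholders, and it simply shells out to the same ``tox`` command shown
-earlier.
-
-.. sourcecode:: python
-
-    import os
-    import subprocess
-
-    env = dict(os.environ)
-    # Variables documented above; the values are placeholders.
-    env['DIB_IMAGE_SIZE'] = '10'                   # disk size, e.g. for Fedora
-    env['ubuntu_image_name'] = 'my-sahara-ubuntu'  # output image name
-
-    # Equivalent to running the command manually inside a
-    # sahara-image-elements checkout.
-    subprocess.check_call(
-        ['tox', '-e', 'venv', '--', 'sahara-image-create', '-u'],
-        cwd='sahara-image-elements', env=env)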
- -For finer control of ``sahara-image-create`` see the `official documentation -`_ diff --git a/doc/source/user/building-guest-images/sahara-image-pack.rst b/doc/source/user/building-guest-images/sahara-image-pack.rst deleted file mode 100644 index a77d38df1f..0000000000 --- a/doc/source/user/building-guest-images/sahara-image-pack.rst +++ /dev/null @@ -1,90 +0,0 @@ -.. _sahara-image-pack-label: - -sahara-image-pack ------------------ - -The CLI command ``sahara-image-pack`` operates in-place on an existing image -and installs and configures the software required for the plugin. - -The script ``sahara-image-pack`` takes the following primary arguments: - -:: - - --config-file PATH Path to a config file to use. Multiple config files - can be specified, with values in later files taking - precedence. Defaults to None. - --image IMAGE The path to an image to modify. This image will be - modified in-place: be sure to target a copy if you - wish to maintain a clean master image. - --root-filesystem ROOT_FS - The filesystem to mount as the root volume on the - image. Novalue is required if only one filesystem is - detected. - --test-only If this flag is set, no changes will be made to the - image; instead, the script will fail if discrepancies - are found between the image and the intended state. - -After these arguments, the script takes ``PLUGIN`` and ``VERSION`` arguments. -These arguments will allow any plugin and version combination which supports -the image packing feature. Plugins may require their own arguments at specific -versions; use the ``--help`` feature with ``PLUGIN`` and ``VERSION`` to see -the appropriate argument structure. - - -a plausible command-line invocation would be: - -:: - - sahara-image-pack --image CentOS.qcow2 \ - --config-file etc/sahara/sahara.conf \ - cdh 5.7.0 [cdh 5.7.0 specific arguments, if any] - -This script will modify the target image in-place. Please copy your image -if you want a backup or if you wish to create multiple images from a single -base image. - -This CLI will automatically populate the set of available plugins and -versions from the plugin set loaded in Sahara, and will show any plugin for -which the image packing feature is available. The next sections of this guide -will first describe how to modify an image packing specification for one -of the plugins, and second, how to enable the image packing feature for new -or existing plugins. - -Note: In case of a RHEL 7 images, it is necessary to register the image before -starting to pack it, also enable some required repos. - -:: - - virt-customize -v -a $SAHARA_RHEL_IMAGE --sm-register \ - --sm-credentials ${REG_USER}:password:${REG_PASSWORD} --sm-attach \ - pool:${REG_POOL_ID} --run-command 'subscription-manager repos \ - --disable=* --enable=$REPO_A \ --enable=$REPO_B \ --enable=$REPO_C' - -Installation and developer notes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The script is part of the Sahara repository, but it does not depend -on the Sahara services. In order to use its development version, -clone the `Sahara repository `_, -check out the branch which matches the Sahara version used, and -install the repository in a virtualenv. - -The script is also provided by binary distributions of OpenStack. -For example, RDO ships it in the ``openstack-sahara-image-pack`` package. - -The script depends on a python library which is not packaged -in pip, but is available through yum, dnf, and apt. 
If you have installed -Sahara through yum, dnf, or apt, you should have appropriate dependencies, -but if you wish to use the script but are working with Sahara from source, -run whichever of the following is appropriate to your OS: - -:: - - sudo yum install libguestfs python3-libguestfs libguestfs-tools - sudo dnf install libguestfs python3-libguestfs libguestfs-tools - sudo apt-get install libguestfs python3-guestfs libguestfs-tools - -If you are using tox to create virtual environments for your Sahara work, -please use the ``images`` environment to run sahara-image-pack. This -environment is configured to use system site packages, and will thus -be able to find its dependency on python-libguestfs. diff --git a/doc/source/user/dashboard-user-guide.rst b/doc/source/user/dashboard-user-guide.rst deleted file mode 100644 index 602c8e7fe5..0000000000 --- a/doc/source/user/dashboard-user-guide.rst +++ /dev/null @@ -1,477 +0,0 @@ -Sahara (Data Processing) UI User Guide -====================================== - -This guide assumes that you already have the sahara service and Horizon -dashboard up and running. Don't forget to make sure that sahara is -registered in Keystone. If you require assistance with that, please see the -`installation guide <../install/installation-guide.html>`_. - -The sections below give a panel by panel overview of setting up clusters -and running jobs. For a description of using the guided cluster and job tools, -look at `Launching a cluster via the Cluster Creation Guide`_ and -`Running a job via the Job Execution Guide`_. - -Launching a cluster via the sahara UI -------------------------------------- -Registering an Image --------------------- - -1) Navigate to the "Project" dashboard, then to the "Data Processing" tab, then - click on the "Clusters" panel and finally the "Image Registry" tab. - -2) From that page, click on the "Register Image" button at the top right - -3) Choose the image that you'd like to register with sahara - -4) Enter the username of the cloud-init user on the image - -5) Choose plugin and version to make the image available only for the intended - clusters - -6) Click the "Done" button to finish the registration - -Create Node Group Templates ---------------------------- - -1) Navigate to the "Project" dashboard, then to the "Data Processing" tab, then - click on the "Clusters" panel and then the "Node Group Templates" tab. - -2) From that page, click on the "Create Template" button at the top right - -3) Choose your desired Plugin name and Version from the dropdowns and click - "Next" - -4) Give your Node Group Template a name (description is optional) - -5) Choose a flavor for this template (based on your CPU/memory/disk needs) - -6) Choose the storage location for your instance, this can be either "Ephemeral - Drive" or "Cinder Volume". If you choose "Cinder Volume", you will need to - add additional configuration - -7) Switch to the Node processes tab and choose which processes should be run - for all instances that are spawned from this Node Group Template - -8) Click on the "Create" button to finish creating your Node Group Template - -Create a Cluster Template -------------------------- - -1) Navigate to the "Project" dashboard, then to the "Data Processing" tab, then - click on the "Clusters" panel and finally the "Cluster Templates" tab. 
- -2) From that page, click on the "Create Template" button at the top right - -3) Choose your desired Plugin name and Version from the dropdowns and click - "Next" - -4) Under the "Details" tab, you must give your template a name - -5) Under the "Node Groups" tab, you should add one or more nodes that can be - based on one or more templates - -- To do this, start by choosing a Node Group Template from the dropdown and - click the "+" button -- You can adjust the number of nodes to be spawned for this node group via - the text box or the "-" and "+" buttons -- Repeat these steps if you need nodes from additional node group templates - -6) Optionally, you can adjust your configuration further by using the "General - Parameters", "HDFS Parameters" and "MapReduce Parameters" tabs - -7) If you have Designate DNS service you can choose the domain name in "DNS" - tab for internal and external hostname resolution - -8) Click on the "Create" button to finish creating your Cluster Template - -Launching a Cluster -------------------- - -1) Navigate to the "Project" dashboard, then to the "Data Processing" tab, then - click on the "Clusters" panel and lastly, click on the "Clusters" tab. - -2) Click on the "Launch Cluster" button at the top right - -3) Choose your desired Plugin name and Version from the dropdowns and click - "Next" - -4) Give your cluster a name (required) - -5) Choose which cluster template should be used for your cluster - -6) Choose the image that should be used for your cluster (if you do not see any - options here, see `Registering an Image`_ above) - -7) Optionally choose a keypair that can be used to authenticate to your cluster - instances - -8) Click on the "Create" button to start your cluster - -- Your cluster's status will display on the Clusters table -- It will likely take several minutes to reach the "Active" state - -Scaling a Cluster ------------------ -1) From the Data Processing/Clusters page (Clusters tab), click on the - "Scale Cluster" button of the row that contains the cluster that you want to - scale - -2) You can adjust the numbers of instances for existing Node Group Templates - -3) You can also add a new Node Group Template and choose a number of instances - to launch - -- This can be done by selecting your desired Node Group Template from the - dropdown and clicking the "+" button -- Your new Node Group will appear below and you can adjust the number of - instances via the text box or the "+" and "-" buttons - -4) To confirm the scaling settings and trigger the spawning/deletion of - instances, click on "Scale" - -Elastic Data Processing (EDP) ------------------------------ -Data Sources ------------- -Data Sources are where the input and output from your jobs are housed. - -1) From the Data Processing/Jobs page (Data Sources tab), click on the - "Create Data Source" button at the top right - -2) Give your Data Source a name - -3) Enter the URL of the Data Source - -- For a swift object, enter / (ie: *mycontainer/inputfile*). 
- sahara will prepend *swift://* for you -- For an HDFS object, enter an absolute path, a relative path or a full URL: - - + */my/absolute/path* indicates an absolute path in the cluster HDFS - + *my/path* indicates the path */user/hadoop/my/path* in the cluster HDFS - assuming the defined HDFS user is *hadoop* - + *hdfs://host:port/path* can be used to indicate any HDFS location - -4) Enter the username and password for the Data Source (also see - `Additional Notes`_) - -5) Enter an optional description - -6) Click on "Create" - -7) Repeat for additional Data Sources - -Job Binaries ------------- -Job Binaries are where you define/upload the source code (mains and libraries) -for your job. - -1) From the Data Processing/Jobs (Job Binaries tab), click on the - "Create Job Binary" button at the top right - -2) Give your Job Binary a name (this can be different than the actual filename) - -3) Choose the type of storage for your Job Binary - -- For "swift", enter the URL of your binary (/) as well as - the username and password (also see `Additional Notes`_) -- For "manila", choose the share and enter the path for the binary in this - share. This assumes that you have already stored that file in the - appropriate path on the share. The share will be automatically mounted to - any cluster nodes which require access to the file, if it is not mounted - already. -- For "Internal database", you can choose from "Create a script" or "Upload - a new file" (**only API v1.1**) - -4) Enter an optional description - -5) Click on "Create" - -6) Repeat for additional Job Binaries - -Job Templates (Known as "Jobs" in the API) ------------------------------------------- -Job templates are where you define the type of job you'd like to run as well -as which "Job Binaries" are required. - -1) From the Data Processing/Jobs page (Job Templates tab), - click on the "Create Job Template" button at the top right - -2) Give your Job Template a name - -3) Choose the type of job you'd like to run - -4) Choose the main binary from the dropdown - - - This is required for Hive, Pig, and Spark jobs - - Other job types do not use a main binary - -5) Enter an optional description for your Job Template - -6) Click on the "Libs" tab and choose any libraries needed by your job template - - - MapReduce and Java jobs require at least one library - - Other job types may optionally use libraries - -7) Click on "Create" - -Jobs (Known as "Job Executions" in the API) -------------------------------------------- -Jobs are what you get by "Launching" a job template. You can monitor the -status of your job to see when it has completed its run - -1) From the Data Processing/Jobs page (Job Templates tab), find the row - that contains the job template you want to launch and click either - "Launch on New Cluster" or "Launch on Existing Cluster" the right side - of that row - -2) Choose the cluster (already running--see `Launching a Cluster`_ above) on - which you would like the job to run - -3) Choose the Input and Output Data Sources (Data Sources defined above) - -4) If additional configuration is required, click on the "Configure" tab - -- Additional configuration properties can be defined by clicking on the "Add" - button -- An example configuration entry might be mapred.mapper.class for the Name - and org.apache.oozie.example.SampleMapper for the Value - -5) Click on "Launch". To monitor the status of your job, you can navigate to - the Data Processing/Jobs panel and click on the Jobs tab. 
- -6) You can relaunch a Job from the Jobs page by using the - "Relaunch on New Cluster" or "Relaunch on Existing Cluster" links - -- Relaunch on New Cluster will take you through the forms to start a new - cluster before letting you specify input/output Data Sources and job - configuration -- Relaunch on Existing Cluster will prompt you for input/output Data Sources - as well as allow you to change job configuration before launching the job - -Example Jobs ------------- -There are sample jobs located in the sahara repository. In this section, we -will give a walkthrough on how to run those jobs via the Horizon UI. These -steps assume that you already have a cluster up and running (in the "Active" -state). You may want to clone into https://opendev.org/openstack/sahara-tests/ -so that you will have all of the source code and inputs stored locally. - -1) Sample Pig job - - https://opendev.org/openstack/sahara-tests/src/branch/master/sahara_tests/scenario/defaults/edp-examples/edp-pig/cleanup-string/example.pig - -- Load the input data file from - https://opendev.org/openstack/sahara-tests/src/branch/master/sahara_tests/scenario/defaults/edp-examples/edp-pig/cleanup-string/data/input - into swift - - - Click on Project/Object Store/Containers and create a container with any - name ("samplecontainer" for our purposes here) - - - Click on Upload Object and give the object a name - ("piginput" in this case) - -- Navigate to Data Processing/Jobs/Data Sources, Click on Create Data Source - - - Name your Data Source ("pig-input-ds" in this sample) - - - Type = Swift, URL samplecontainer/piginput, fill-in the Source - username/password fields with your username/password and click "Create" - -- Create another Data Source to use as output for the job - - - Name = pig-output-ds, Type = Swift, URL = samplecontainer/pigoutput, - Source username/password, "Create" - -- Store your Job Binaries in Swift (you can choose another type of storage - if you want) - - - Navigate to Project/Object Store/Containers, choose "samplecontainer" - - - Click on Upload Object and find example.pig at - /sahara-tests/scenario/defaults/edp-examples/ - edp-pig/cleanup-string/, name it "example.pig" (or other name). - The Swift path will be swift://samplecontainer/example.pig - - - Click on Upload Object and find edp-pig-udf-stringcleaner.jar at - /sahara-tests/scenario/defaults/edp-examples/ - edp-pig/cleanup-string/, name it "edp-pig-udf-stringcleaner.jar" - (or other name). The Swift path will be - swift://samplecontainer/edp-pig-udf-stringcleaner.jar - - - Navigate to Data Processing/Jobs/Job Binaries, Click on Create Job Binary - - - Name = example.pig, Storage type = Swift, - URL = samplecontainer/example.pig, Username = , - Password = - - - Create another Job Binary: Name = edp-pig-udf-stringcleaner.jar, - Storage type = Swift, - URL = samplecontainer/edp-pig-udf-stringcleaner.jar, - Username = , Password = - -- Create a Job Template - - - Navigate to Data Processing/Jobs/Job Templates, Click on - Create Job Template - - - Name = pigsample, Job Type = Pig, Choose "example.pig" as the main binary - - - Click on the "Libs" tab and choose "edp-pig-udf-stringcleaner.jar", - then hit the "Choose" button beneath the dropdown, then click - on "Create" - -- Launch your job - - - To launch your job from the Job Templates page, click on the down - arrow at the far right of the screen and choose - "Launch on Existing Cluster" - - - For the input, choose "pig-input-ds", for output choose "pig-output-ds". 
- Also choose whichever cluster you'd like to run the job on - - - For this job, no additional configuration is necessary, so you can just - click on "Launch" - - - You will be taken to the "Jobs" page where you can see your job - progress through "PENDING, RUNNING, SUCCEEDED" phases - - - When your job finishes with "SUCCEEDED", you can navigate back to Object - Store/Containers and browse to the samplecontainer to see your output. - It should be in the "pigoutput" folder - -2) Sample Spark job - - https://opendev.org/openstack/sahara-tests/src/branch/master/sahara_tests/scenario/defaults/edp-examples/edp-spark - You can clone into https://opendev.org/openstack/sahara-tests/ for quicker - access to the files for this sample job. - -- Store the Job Binary in Swift (you can choose another type of storage if - you want) - - - Click on Project/Object Store/Containers and create a container with any - name ("samplecontainer" for our purposes here) - - - Click on Upload Object and find spark-wordcount.jar at - /sahara-tests/scenario/defaults/edp-examples/ - edp-spark/, name it "spark-wordcount.jar" (or other name). - The Swift path will be swift://samplecontainer/spark-wordcount.jar - - - Navigate to Data Processing/Jobs/Job Binaries, Click on Create Job Binary - - - Name = sparkexample.jar, Storage type = Swift, - URL = samplecontainer/spark-wordcount.jar, Username = , - Password = - -- Create a Job Template - - - Name = sparkexamplejob, Job Type = Spark, - Main binary = Choose sparkexample.jar, Click "Create" - -- Launch your job - - - To launch your job from the Job Templates page, click on the - down arrow at the far right of the screen and choose - "Launch on Existing Cluster" - - - Choose whichever cluster you'd like to run the job on - - - Click on the "Configure" tab - - - Set the main class to be: sahara.edp.spark.SparkWordCount - - - Under Arguments, click Add and fill URL for the input file, - once more click Add and fill URL for the output file. - - - Click on Launch - - - You will be taken to the "Jobs" page where you can see your job - progress through "PENDING, RUNNING, SUCCEEDED" phases - - - When your job finishes with "SUCCEEDED", you can see your results in - your output file. - - - The stdout and stderr files of the command used for executing your job - are located at /tmp/spark-edp// - on Spark master node in case of Spark clusters, or on Spark JobHistory - node in other cases like Vanilla, CDH and so on. - - -Additional Notes ----------------- -1) Throughout the sahara UI, you will find that if you try to delete an object - that you will not be able to delete it if another object depends on it. - An example of this would be trying to delete a Job Template that has an - existing Job. In order to be able to delete that job, you would - first need to delete any Job Templates that relate to that job. - -2) In the examples above, we mention adding your username/password for the - swift Data Sources. It should be noted that it is possible to configure - sahara such that the username/password credentials are *not* required. For - more information on that, please refer to: :doc:`Sahara Advanced - Configuration Guide <../admin/advanced-configuration-guide>` - -Launching a cluster via the Cluster Creation Guide --------------------------------------------------- -1) Under the Data Processing group, choose "Clusters" and then click on the - "Clusters" tab. The "Cluster Creation Guide" button is above that table. - Click on it. 
-
-2) Click on the "Choose Plugin" button then select the cluster type from the
-   Plugin Name dropdown and choose your target version. When done, click
-   on "Select" to proceed.
-
-3) Click on "Create a Master Node Group Template". Give your template a name,
-   choose a flavor and choose which processes should run on nodes launched
-   for this node group. The processes chosen here should be things that are
-   more server-like in nature (namenode, oozieserver, spark master, etc).
-   Optionally, you can set other options here such as availability zone,
-   storage, security and process specific parameters. Click on "Create"
-   to proceed.
-
-4) Click on "Create a Worker Node Group Template". Give your template a name,
-   choose a flavor and choose which processes should run on nodes launched
-   for this node group. Processes chosen here should be more worker-like in
-   nature (datanode, spark slave, task tracker, etc). Optionally, you can set
-   other options here such as availability zone, storage, security and process
-   specific parameters. Click on "Create" to proceed.
-
-5) Click on "Create a Cluster Template". Give your template a name. Next,
-   click on the "Node Groups" tab and enter the count for each of the node
-   groups (these are pre-populated from steps 3 and 4). It would be common
-   to have 1 for the "master" node group type and some larger number of
-   "worker" instances depending on your desired cluster size. Optionally,
-   you can also set additional parameters for cluster-wide settings via
-   the other tabs on this page. Click on "Create" to proceed.
-
-6) Click on "Launch a Cluster". Give your cluster a name and choose the image
-   that you want to use for all instances in your cluster. The cluster
-   template that you created in step 5 is already pre-populated. If you want
-   ssh access to the instances of your cluster, select a keypair from the
-   dropdown. Click on "Launch" to proceed. You will be taken to the Clusters
-   panel where you can see your cluster progress toward the Active state.
-
-Running a job via the Job Execution Guide
------------------------------------------
-1) Under the Data Processing group, choose "Jobs" and then click on the
-   "Jobs" tab. The "Job Execution Guide" button is above that table. Click
-   on it.
-
-2) Click on "Select type" and choose the type of job that you want to run.
-
-3) If your job requires input/output data sources, you will have the option
-   to create them via the "Create a Data Source" button (Note: This button
-   will not be shown for job types that do not require data sources). Give
-   your data source a name and choose the type. If you have chosen swift, you
-   may also enter the username and password. Enter the URL for your data
-   source. For more details on what the URL should look like, see
-   `Data Sources`_.
-
-4) Click on "Create a job template". Give your job template a name.
-   Depending on the type of job that you've chosen, you may need to select
-   your main binary and/or additional libraries (available from the "Libs"
-   tab). If you have not yet uploaded the files to run your program, you
-   can add them via the "+" icon next to the "Choose a main binary" select box.
-
-5) Click on "Launch job". Choose the active cluster where you want to run your
-   job. Optionally, you can click on the "Configure" tab and provide any
-   required configuration, arguments or parameters for your job. Click on
-   "Launch" to execute your job. You will be taken to the Jobs tab where
-   you can monitor the state of your job as it progresses.
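-
-Everything that the guided flows above do through the dashboard can also be
-automated against the Sahara REST API described elsewhere in this
-documentation. The sketch below is only illustrative: the endpoint, project
-id and token are placeholders, and the response fields should be checked
-against the API reference.
-
-.. sourcecode:: python
-
-    import requests
-
-    # Placeholders: substitute your Sahara endpoint, project id and a valid
-    # Keystone token.
-    SAHARA_URL = 'http://controller:8386/v1.1'
-    PROJECT_ID = '<project-id>'
-    TOKEN = '<keystone-token>'
-
-    # List existing clusters, e.g. to watch a cluster reach the Active state.
-    resp = requests.get(
-        '{}/{}/clusters'.format(SAHARA_URL, PROJECT_ID),
-        headers={'X-Auth-Token': TOKEN, 'Accept': 'application/json'})
-    resp.raise_for_status()
-
-    for cluster in resp.json().get('clusters', []):
-        print(cluster.get('name'), cluster.get('status'))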
diff --git a/doc/source/user/edp-s3.rst b/doc/source/user/edp-s3.rst
deleted file mode 100644
index 20507b6979..0000000000
--- a/doc/source/user/edp-s3.rst
+++ /dev/null
@@ -1,87 +0,0 @@
-==============================
-EDP with S3-like Object Stores
-==============================
-
-Overview and rationale of S3 integration
-========================================
-Since the Rocky release, Sahara clusters have full support for interaction with
-S3-like object stores, for example Ceph Rados Gateway. Through the abstractions
-offered by EDP, a Sahara job execution may consume input data and job binaries
-stored in S3, as well as write back its output data to S3.
-
-The copying of job binaries from S3 to a cluster is performed by the botocore
-library. A job's input and output to and from S3 are handled by the Hadoop-S3A
-driver.
-
-It's also worth noting that the Hadoop-S3A driver may be more mature and
-performant than the Hadoop-SwiftFS driver (either as hosted by Apache or in
-the sahara-extra repository).
-
-Sahara clusters are also provisioned such that data in S3-like stora